diff --git a/.gitignore b/.gitignore
new file mode 100644
index 000000000..4e622170b
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,42 @@
+.DS_Store
+__pycache__
+/TEMP
+/DATASETS
+/RUNTIME
+*.pyd
+hubert_base.pt
+.venv
+alexforkINSTALL.bat
+Changelog_CN.md
+Changelog_EN.md
+Changelog_KO.md
+difdep.py
+EasierGUI.py
+envfilescheck.bat
+export_onnx.py
+export_onnx_old.py
+ffmpeg.exe
+ffprobe.exe
+Fixes/Launch_Tensorboard.bat
+Fixes/LOCAL_CREPE_FIX.bat
+Fixes/local_fixes.py
+Fixes/tensor-launch.py
+gui.py
+infer-web — backup.py
+infer-webbackup.py
+install_easy_dependencies.py
+install_easyGUI.bat
+installstft.bat
+Launch_Tensorboard.bat
+listdepend.bat
+LOCAL_CREPE_FIX.bat
+local_fixes.py
+oldinfer.py
+onnx_inference_demo.py
+Praat.exe
+requirementsNEW.txt
+rmvpe.pt
+run_easiergui.bat
+tensor-launch.py
+values1.json
+使用需遵守的协议-LICENSE.txt
\ No newline at end of file
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 000000000..49f62d5f9
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,13 @@
+# syntax=docker/dockerfile:1
+
+FROM python:3.10-bullseye
+
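+# The Gradio web UI listens on this port (config.py's --port default is 7865).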
+EXPOSE 7865
+
+WORKDIR /app
+
+COPY . .
+
+RUN pip3 install -r requirements.txt
+
+CMD ["python3", "infer-web.py"]
\ No newline at end of file
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 000000000..2b8d8ce41
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,22 @@
+MIT License
+
+Copyright (c) 2023 liujing04
+Copyright (c) 2023 源文雨
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/LazyImport.py b/LazyImport.py
new file mode 100644
index 000000000..5bdb05ddd
--- /dev/null
+++ b/LazyImport.py
@@ -0,0 +1,13 @@
+from importlib.util import find_spec, LazyLoader, module_from_spec
+from sys import modules
+
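+# Defer a module's import cost until first attribute access, e.g.:
+#   np = lazyload("numpy")  # returns immediately without importing
+#   np.zeros(3)             # the real import happens here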
+def lazyload(name):
+ if name in modules:
+ return modules[name]
+ else:
+ spec = find_spec(name)
+        loader = LazyLoader(spec.loader)
+        spec.loader = loader
+        module = module_from_spec(spec)
+ modules[name] = module
+ loader.exec_module(module)
+ return module
\ No newline at end of file
diff --git a/MDXNet.py b/MDXNet.py
new file mode 100644
index 000000000..9b7eb4384
--- /dev/null
+++ b/MDXNet.py
@@ -0,0 +1,272 @@
+import soundfile as sf
+import torch, pdb, os, warnings, librosa
+import numpy as np
+import onnxruntime as ort
+from tqdm import tqdm
+
+dim_c = 4
+
+
+class Conv_TDF_net_trim:
+ def __init__(
+ self, device, model_name, target_name, L, dim_f, dim_t, n_fft, hop=1024
+ ):
+ super(Conv_TDF_net_trim, self).__init__()
+
+ self.dim_f = dim_f
+ self.dim_t = 2**dim_t
+ self.n_fft = n_fft
+ self.hop = hop
+ self.n_bins = self.n_fft // 2 + 1
+ self.chunk_size = hop * (self.dim_t - 1)
+ self.window = torch.hann_window(window_length=self.n_fft, periodic=True).to(
+ device
+ )
+ self.target_name = target_name
+ self.blender = "blender" in model_name
+
+ out_c = dim_c * 4 if target_name == "*" else dim_c
+ self.freq_pad = torch.zeros(
+ [1, out_c, self.n_bins - self.dim_f, self.dim_t]
+ ).to(device)
+
+ self.n = L // 2
+
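+    # Waveform (batch, 2, chunk_size) -> spectrogram (batch, dim_c=4, dim_f, dim_t):
+    # the four channels pack real/imag parts of both stereo channels; bins above
+    # dim_f are cropped here and restored by freq_pad in istft.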
+ def stft(self, x):
+ x = x.reshape([-1, self.chunk_size])
+ x = torch.stft(
+ x,
+ n_fft=self.n_fft,
+ hop_length=self.hop,
+ window=self.window,
+ center=True,
+ return_complex=True,
+ )
+ x = torch.view_as_real(x)
+ x = x.permute([0, 3, 1, 2])
+ x = x.reshape([-1, 2, 2, self.n_bins, self.dim_t]).reshape(
+ [-1, dim_c, self.n_bins, self.dim_t]
+ )
+ return x[:, :, : self.dim_f]
+
+ def istft(self, x, freq_pad=None):
+ freq_pad = (
+ self.freq_pad.repeat([x.shape[0], 1, 1, 1])
+ if freq_pad is None
+ else freq_pad
+ )
+ x = torch.cat([x, freq_pad], -2)
+ c = 4 * 2 if self.target_name == "*" else 2
+ x = x.reshape([-1, c, 2, self.n_bins, self.dim_t]).reshape(
+ [-1, 2, self.n_bins, self.dim_t]
+ )
+ x = x.permute([0, 2, 3, 1])
+ x = x.contiguous()
+ x = torch.view_as_complex(x)
+ x = torch.istft(
+ x, n_fft=self.n_fft, hop_length=self.hop, window=self.window, center=True
+ )
+ return x.reshape([-1, c, self.chunk_size])
+
+
+def get_models(device, dim_f, dim_t, n_fft):
+ return Conv_TDF_net_trim(
+ device=device,
+ model_name="Conv-TDF",
+ target_name="vocals",
+ L=11,
+ dim_f=dim_f,
+ dim_t=dim_t,
+ n_fft=n_fft,
+ )
+
+
+warnings.filterwarnings("ignore")
+cpu = torch.device("cpu")
+if torch.cuda.is_available():
+ device = torch.device("cuda:0")
+elif torch.backends.mps.is_available():
+ device = torch.device("mps")
+else:
+ device = torch.device("cpu")
+
+
+class Predictor:
+ def __init__(self, args):
+ self.args = args
+ self.model_ = get_models(
+ device=cpu, dim_f=args.dim_f, dim_t=args.dim_t, n_fft=args.n_fft
+ )
+ self.model = ort.InferenceSession(
+ os.path.join(args.onnx, self.model_.target_name + ".onnx"),
+ providers=["CUDAExecutionProvider", "CPUExecutionProvider"],
+ )
+ print("onnx load done")
+
+ def demix(self, mix):
+ samples = mix.shape[-1]
+ margin = self.args.margin
+ chunk_size = self.args.chunks * 44100
+        assert margin != 0, "margin cannot be zero!"
+ if margin > chunk_size:
+ margin = chunk_size
+
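+        # Slice the mix into chunks of args.chunks seconds (at 44.1 kHz); each
+        # chunk keeps `margin` extra samples on both sides, which demix_base
+        # trims again so the seams between chunks stay clean.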
+ segmented_mix = {}
+
+ if self.args.chunks == 0 or samples < chunk_size:
+ chunk_size = samples
+
+ counter = -1
+ for skip in range(0, samples, chunk_size):
+ counter += 1
+
+ s_margin = 0 if counter == 0 else margin
+ end = min(skip + chunk_size + margin, samples)
+
+ start = skip - s_margin
+
+ segmented_mix[skip] = mix[:, start:end].copy()
+ if end == samples:
+ break
+
+ sources = self.demix_base(segmented_mix, margin_size=margin)
+ """
+ mix:(2,big_sample)
+ segmented_mix:offset->(2,small_sample)
+ sources:(1,2,big_sample)
+ """
+ return sources
+
+ def demix_base(self, mixes, margin_size):
+ chunked_sources = []
+ progress_bar = tqdm(total=len(mixes))
+ progress_bar.set_description("Processing")
+ for mix in mixes:
+ cmix = mixes[mix]
+ sources = []
+ n_sample = cmix.shape[1]
+ model = self.model_
+ trim = model.n_fft // 2
+ gen_size = model.chunk_size - 2 * trim
+ pad = gen_size - n_sample % gen_size
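+            # Pad trim (= n_fft // 2) samples of silence at both ends, plus
+            # `pad` zeros, so windows of model.chunk_size stepped by gen_size
+            # tile the signal exactly; istft later keeps only each window's
+            # central gen_size samples.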
+ mix_p = np.concatenate(
+ (np.zeros((2, trim)), cmix, np.zeros((2, pad)), np.zeros((2, trim))), 1
+ )
+ mix_waves = []
+ i = 0
+ while i < n_sample + pad:
+ waves = np.array(mix_p[:, i : i + model.chunk_size])
+ mix_waves.append(waves)
+ i += gen_size
+ mix_waves = torch.tensor(mix_waves, dtype=torch.float32).to(cpu)
+ with torch.no_grad():
+ _ort = self.model
+ spek = model.stft(mix_waves)
+ if self.args.denoise:
+ spec_pred = (
+ -_ort.run(None, {"input": -spek.cpu().numpy()})[0] * 0.5
+ + _ort.run(None, {"input": spek.cpu().numpy()})[0] * 0.5
+ )
+ tar_waves = model.istft(torch.tensor(spec_pred))
+ else:
+ tar_waves = model.istft(
+ torch.tensor(_ort.run(None, {"input": spek.cpu().numpy()})[0])
+ )
+ tar_signal = (
+ tar_waves[:, :, trim:-trim]
+ .transpose(0, 1)
+ .reshape(2, -1)
+ .numpy()[:, :-pad]
+ )
+
+ start = 0 if mix == 0 else margin_size
+ end = None if mix == list(mixes.keys())[::-1][0] else -margin_size
+ if margin_size == 0:
+ end = None
+ sources.append(tar_signal[:, start:end])
+
+ progress_bar.update(1)
+
+ chunked_sources.append(sources)
+ _sources = np.concatenate(chunked_sources, axis=-1)
+ # del self.model
+ progress_bar.close()
+ return _sources
+
+ def prediction(self, m, vocal_root, others_root, format):
+ os.makedirs(vocal_root, exist_ok=True)
+ os.makedirs(others_root, exist_ok=True)
+ basename = os.path.basename(m)
+ mix, rate = librosa.load(m, mono=False, sr=44100)
+ if mix.ndim == 1:
+ mix = np.asfortranarray([mix, mix])
+ mix = mix.T
+ sources = self.demix(mix.T)
+ opt = sources[0].T
+ if format in ["wav", "flac"]:
+ sf.write(
+ "%s/%s_main_vocal.%s" % (vocal_root, basename, format), mix - opt, rate
+ )
+ sf.write("%s/%s_others.%s" % (others_root, basename, format), opt, rate)
+ else:
+ path_vocal = "%s/%s_main_vocal.wav" % (vocal_root, basename)
+ path_other = "%s/%s_others.wav" % (others_root, basename)
+ sf.write(path_vocal, mix - opt, rate)
+ sf.write(path_other, opt, rate)
+ if os.path.exists(path_vocal):
+                os.system(
+                    'ffmpeg -i "%s" -vn -q:a 2 -y "%s"'
+                    % (path_vocal, path_vocal[:-4] + ".%s" % format)
+                )
+            if os.path.exists(path_other):
+                os.system(
+                    'ffmpeg -i "%s" -vn -q:a 2 -y "%s"'
+                    % (path_other, path_other[:-4] + ".%s" % format)
+                )
+
+
+class MDXNetDereverb:
+ def __init__(self, chunks):
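+        # STFT geometry expected by FoxJoy's dereverb ONNX model:
+        # 6144-point FFT, 3072 retained frequency bins, 2**9 = 512 time frames.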
+ self.onnx = "uvr5_weights/onnx_dereverb_By_FoxJoy"
+        self.shifts = 10  # predict with randomised equivariant stabilisation
+        self.mixing = "min_mag"  # one of ['default', 'min_mag', 'max_mag']
+ self.chunks = chunks
+ self.margin = 44100
+ self.dim_t = 9
+ self.dim_f = 3072
+ self.n_fft = 6144
+ self.denoise = True
+ self.pred = Predictor(self)
+
+ def _path_audio_(self, input, vocal_root, others_root, format):
+ self.pred.prediction(input, vocal_root, others_root, format)
+
+
+if __name__ == "__main__":
+ dereverb = MDXNetDereverb(15)
+ from time import time as ttime
+
+ t0 = ttime()
+    dereverb._path_audio_(
+        "雪雪伴奏对消HP5.wav",
+        "vocal",
+        "others",
+        "wav",
+    )
+ t1 = ttime()
+ print(t1 - t0)
+
+
+"""
+
+runtime\python.exe MDXNet.py
+
+6G:
+15/9:0.8G->6.8G
+14:0.8G->6.5G
+25: crashes (out of memory)
+
+half15:0.7G->6.6G,22.69s
+fp32-15:0.7G->6.6G,20.85s
+
+"""
diff --git a/MIT b/MIT
new file mode 100644
index 000000000..b3ea37212
--- /dev/null
+++ b/MIT
@@ -0,0 +1,44 @@
+This software and its related code are open-sourced under the MIT License. The author disclaims any control over the software. Users of the software and those who distribute sound produced by the software bear full responsibility. If you do not agree with these terms, you cannot use or reference any code or files within the software package.
+
+Hereby, anyone obtaining a copy of this software and associated documentation files (hereinafter referred to as "the software") is granted the right to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the software, and to permit persons to whom the software is provided to do the same, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the software.
+
+The software is provided "as is," without warranty of any kind, express or implied, including but not limited to the warranties of merchantability, fitness for a particular purpose, and noninfringement. In no event shall the authors or copyright holders be liable for any claim, damages, or other liability, whether in an action of contract, tort, or otherwise, arising from, out of, or in connection with the software or the use or other dealings in the software.
+
+The licenses for related libraries are as follows:
+
+ContentVec
+https://github.com/auspicious3000/contentvec/blob/main/LICENSE
+MIT License
+
+VITS
+https://github.com/jaywalnut310/vits/blob/main/LICENSE
+MIT License
+
+HIFIGAN
+https://github.com/jik876/hifi-gan/blob/master/LICENSE
+MIT License
+
+gradio
+https://github.com/gradio-app/gradio/blob/main/LICENSE
+Apache License 2.0
+
+ffmpeg
+https://github.com/FFmpeg/FFmpeg/blob/master/COPYING.LGPLv3
+https://github.com/BtbN/FFmpeg-Builds/releases/download/autobuild-2021-02-28-12-32/ffmpeg-n4.3.2-160-gfbb9368226-win64-lgpl-4.3.zip
+LGPLv3 License
+MIT License
+
+ultimatevocalremovergui
+https://github.com/Anjok07/ultimatevocalremovergui/blob/master/LICENSE
+https://github.com/yang123qwe/vocal_separation_by_uvr5
+MIT License
+
+audio-slicer
+https://github.com/openvpi/audio-slicer/blob/main/LICENSE
+MIT License
+
+PySimpleGUI
+https://github.com/PySimpleGUI/PySimpleGUI/blob/master/license.txt
+LGPLv3 License
\ No newline at end of file
diff --git a/Makefile b/Makefile
new file mode 100644
index 000000000..44de020e6
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,63 @@
+.PHONY: help install basev1 basev2 run-ui run-cli tensorboard
+.ONESHELL:
+
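+# `make help` lists every target annotated with a trailing `## description`.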
+help: ## Show this help and exit
+ @grep -hE '^[A-Za-z0-9_ \-]*?:.*##.*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
+
+install: ## Install dependencies (do every time you start up a Paperspace machine)
+ apt-get -y install build-essential python3-dev ffmpeg
+ pip install --upgrade setuptools wheel
+ pip install --upgrade pip
+ pip install faiss-gpu fairseq gradio ffmpeg ffmpeg-python praat-parselmouth pyworld numpy==1.23.5 numba==0.56.4 librosa==0.9.1
+ pip install -r requirements.txt
+ pip install --upgrade lxml
+ apt-get update
+ apt -y install -qq aria2
+
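+# aria2c flags: -c resumes partial downloads, -x 16 / -s 16 fetch each file in
+# 16 parallel segments, and -k 1M sets the minimum split size.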
+basev1: ## Download version 1 pre-trained models (Do only once after cloning the fork)
+ mkdir -p pretrained uvr5_weights
+ git pull
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D32k.pth -d pretrained -o D32k.pth
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D40k.pth -d pretrained -o D40k.pth
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D48k.pth -d pretrained -o D48k.pth
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G32k.pth -d pretrained -o G32k.pth
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G40k.pth -d pretrained -o G40k.pth
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G48k.pth -d pretrained -o G48k.pth
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D32k.pth -d pretrained -o f0D32k.pth
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D40k.pth -d pretrained -o f0D40k.pth
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D48k.pth -d pretrained -o f0D48k.pth
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G32k.pth -d pretrained -o f0G32k.pth
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G40k.pth -d pretrained -o f0G40k.pth
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G48k.pth -d pretrained -o f0G48k.pth
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP2-人声vocals+非人声instrumentals.pth -d uvr5_weights -o HP2-人声vocals+非人声instrumentals.pth
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP5-主旋律人声vocals+其他instrumentals.pth -d uvr5_weights -o HP5-主旋律人声vocals+其他instrumentals.pth
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/hubert_base.pt -d ./ -o hubert_base.pt
+
+basev2: ## Download version 2 pre-trained models (Do only once after cloning the fork)
+ mkdir -p pretrained_v2 uvr5_weights
+ git pull
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/D32k.pth -d pretrained_v2 -o D32k.pth
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/D40k.pth -d pretrained_v2 -o D40k.pth
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/D48k.pth -d pretrained_v2 -o D48k.pth
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/G32k.pth -d pretrained_v2 -o G32k.pth
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/G40k.pth -d pretrained_v2 -o G40k.pth
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/G48k.pth -d pretrained_v2 -o G48k.pth
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0D32k.pth -d pretrained_v2 -o f0D32k.pth
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0D40k.pth -d pretrained_v2 -o f0D40k.pth
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0D48k.pth -d pretrained_v2 -o f0D48k.pth
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0G32k.pth -d pretrained_v2 -o f0G32k.pth
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0G40k.pth -d pretrained_v2 -o f0G40k.pth
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0G48k.pth -d pretrained_v2 -o f0G48k.pth
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP2-人声vocals+非人声instrumentals.pth -d uvr5_weights -o HP2-人声vocals+非人声instrumentals.pth
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP5-主旋律人声vocals+其他instrumentals.pth -d uvr5_weights -o HP5-主旋律人声vocals+其他instrumentals.pth
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/hubert_base.pt -d ./ -o hubert_base.pt
+
+run-ui: ## Run the Python web UI
+ python infer-web.py --paperspace --pycmd python
+
+run-cli: ## Run the Python CLI
+ python infer-web.py --pycmd python --is_cli
+
+tensorboard: ## Start TensorBoard (run in a separate terminal)
+ echo https://tensorboard-$$(hostname).clg07azjl.paperspacegradient.com
+ tensorboard --logdir logs --bind_all
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 000000000..6c16d3c96
--- /dev/null
+++ b/README.md
@@ -0,0 +1,53 @@
+# 🍏 Applio (Mangio-RVC-Fork)
+
+Applio is a user-friendly fork of Mangio, designed to provide an intuitive interface, especially for newcomers.
+
+## Links
+
+## Credits
+
+## Contributors
diff --git a/app.py b/app.py
new file mode 100644
index 000000000..22afd4ac8
--- /dev/null
+++ b/app.py
@@ -0,0 +1,310 @@
+import os
+import torch
+
+import gradio as gr
+import librosa
+import numpy as np
+import logging
+from fairseq import checkpoint_utils
+from vc_infer_pipeline import VC
+import traceback
+from config import Config
+from lib.infer_pack.models import (
+ SynthesizerTrnMs256NSFsid,
+ SynthesizerTrnMs256NSFsid_nono,
+ SynthesizerTrnMs768NSFsid,
+ SynthesizerTrnMs768NSFsid_nono,
+)
+from i18n import I18nAuto
+
+logging.getLogger("numba").setLevel(logging.WARNING)
+logging.getLogger("markdown_it").setLevel(logging.WARNING)
+logging.getLogger("urllib3").setLevel(logging.WARNING)
+logging.getLogger("matplotlib").setLevel(logging.WARNING)
+
+i18n = I18nAuto()
+i18n.print()
+
+config = Config()
+
+weight_root = "weights"
+weight_uvr5_root = "uvr5_weights"
+index_root = "logs"
+names = []
+hubert_model = None
+for name in os.listdir(weight_root):
+ if name.endswith(".pth"):
+ names.append(name)
+index_paths = []
+for root, dirs, files in os.walk(index_root, topdown=False):
+ for name in files:
+ if name.endswith(".index") and "trained" not in name:
+ index_paths.append("%s/%s" % (root, name))
+
+
+def get_vc(sid):
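+    # Load the selected .pth voice model and build the matching synthesizer:
+    # v1 checkpoints use 256-dim HuBERT features, v2 use 768-dim, and the
+    # *_nono variants are for models trained without f0. An empty sid instead
+    # unloads everything to free (GPU) memory.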
+ global n_spk, tgt_sr, net_g, vc, cpt, version
+ if sid == "" or sid == []:
+ global hubert_model
+        if hubert_model is not None:
+            del net_g, n_spk, vc, hubert_model, tgt_sr
+            hubert_model = net_g = n_spk = vc = tgt_sr = None
+ if torch.cuda.is_available():
+ torch.cuda.empty_cache()
+ if_f0 = cpt.get("f0", 1)
+ version = cpt.get("version", "v1")
+ if version == "v1":
+ if if_f0 == 1:
+ net_g = SynthesizerTrnMs256NSFsid(
+ *cpt["config"], is_half=config.is_half
+ )
+ else:
+ net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
+ elif version == "v2":
+ if if_f0 == 1:
+ net_g = SynthesizerTrnMs768NSFsid(
+ *cpt["config"], is_half=config.is_half
+ )
+ else:
+ net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
+ del net_g, cpt
+ if torch.cuda.is_available():
+ torch.cuda.empty_cache()
+ cpt = None
+ return {"visible": False, "__type__": "update"}
+ person = "%s/%s" % (weight_root, sid)
+ print("loading %s" % person)
+ cpt = torch.load(person, map_location="cpu")
+ tgt_sr = cpt["config"][-1]
+ cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]
+ if_f0 = cpt.get("f0", 1)
+ version = cpt.get("version", "v1")
+ if version == "v1":
+ if if_f0 == 1:
+ net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half)
+ else:
+ net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
+ elif version == "v2":
+ if if_f0 == 1:
+ net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=config.is_half)
+ else:
+ net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
+ del net_g.enc_q
+ print(net_g.load_state_dict(cpt["weight"], strict=False))
+ net_g.eval().to(config.device)
+ if config.is_half:
+ net_g = net_g.half()
+ else:
+ net_g = net_g.float()
+ vc = VC(tgt_sr, config)
+ n_spk = cpt["config"][-3]
+ return {"visible": True, "maximum": n_spk, "__type__": "update"}
+
+
+def load_hubert():
+ global hubert_model
+ models, _, _ = checkpoint_utils.load_model_ensemble_and_task(
+ ["hubert_base.pt"],
+ suffix="",
+ )
+ hubert_model = models[0]
+ hubert_model = hubert_model.to(config.device)
+ if config.is_half:
+ hubert_model = hubert_model.half()
+ else:
+ hubert_model = hubert_model.float()
+ hubert_model.eval()
+
+
+def vc_single(
+ sid,
+ input_audio_path,
+ f0_up_key,
+ f0_file,
+ f0_method,
+ file_index,
+ file_index2,
+ index_rate,
+ filter_radius,
+ resample_sr,
+ rms_mix_rate,
+ protect,
+):
+ global tgt_sr, net_g, vc, hubert_model, version
+ if input_audio_path is None:
+ return "You need to upload an audio", None
+ f0_up_key = int(f0_up_key)
+ try:
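+        # Gradio delivers audio as a (sample_rate, int16 ndarray) tuple: scale
+        # to [-1, 1] floats, downmix stereo to mono, and resample to the 16 kHz
+        # the HuBERT feature extractor expects.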
+ audio = input_audio_path[1] / 32768.0
+ if len(audio.shape) == 2:
+ audio = np.mean(audio, -1)
+ audio = librosa.resample(audio, orig_sr=input_audio_path[0], target_sr=16000)
+ audio_max = np.abs(audio).max() / 0.95
+ if audio_max > 1:
+ audio /= audio_max
+ times = [0, 0, 0]
+        if hubert_model is None:
+ load_hubert()
+ if_f0 = cpt.get("f0", 1)
+ file_index = (
+ (
+ file_index.strip(" ")
+ .strip('"')
+ .strip("\n")
+ .strip('"')
+ .strip(" ")
+ .replace("trained", "added")
+ )
+ if file_index != ""
+ else file_index2
+ )
+ audio_opt = vc.pipeline(
+ hubert_model,
+ net_g,
+ sid,
+ audio,
+ input_audio_path,
+ times,
+ f0_up_key,
+ f0_method,
+ file_index,
+ index_rate,
+ if_f0,
+ filter_radius,
+ tgt_sr,
+ resample_sr,
+ rms_mix_rate,
+ version,
+ protect,
+ f0_file=f0_file,
+ )
+ if resample_sr >= 16000 and tgt_sr != resample_sr:
+ tgt_sr = resample_sr
+ index_info = (
+ "Using index:%s." % file_index
+ if os.path.exists(file_index)
+ else "Index not used."
+ )
+ return "Success.\n %s\nTime:\n npy:%ss, f0:%ss, infer:%ss" % (
+ index_info,
+ times[0],
+ times[1],
+ times[2],
+ ), (tgt_sr, audio_opt)
+ except:
+ info = traceback.format_exc()
+ print(info)
+ return info, (None, None)
+
+
+app = gr.Blocks()
+with app:
+ with gr.Tabs():
+        with gr.TabItem("Online demo"):
+ gr.Markdown(
+ value="""
+            RVC online demo
+ """
+ )
+ sid = gr.Dropdown(label=i18n("Inferencing voice:"), choices=sorted(names))
+ with gr.Column():
+ spk_item = gr.Slider(
+ minimum=0,
+ maximum=2333,
+ step=1,
+ label=i18n("Select Speaker/Singer ID:"),
+ value=0,
+ visible=False,
+ interactive=True,
+ )
+ sid.change(
+ fn=get_vc,
+ inputs=[sid],
+ outputs=[spk_item],
+ )
+ gr.Markdown(
+ value=i18n("Recommended +12 key for male to female conversion, and -12 key for female to male conversion. If the sound range goes too far and the voice is distorted, you can also adjust it to the appropriate range by yourself.")
+ )
+            vc_input3 = gr.Audio(label="Upload audio (shorter than 90 seconds)")
+ vc_transform0 = gr.Number(label=i18n("Transpose (integer, number of semitones, raise by an octave: 12, lower by an octave: -12):"), value=0)
+ f0method0 = gr.Radio(
+ label=i18n("Select the pitch extraction algorithm:"),
+ choices=["pm", "harvest", "crepe"],
+ value="pm",
+ interactive=True,
+ )
+ filter_radius0 = gr.Slider(
+ minimum=0,
+ maximum=7,
+ label=i18n("If >=3: apply median filtering to the harvested pitch results. The value represents the filter radius and can reduce breathiness."),
+ value=3,
+ step=1,
+ interactive=True,
+ )
+ with gr.Column():
+ file_index1 = gr.Textbox(
+ label=i18n("Feature search dataset file path"),
+ value="",
+ interactive=False,
+ visible=False,
+ )
+ file_index2 = gr.Dropdown(
+ label=i18n("Auto-detect index path and select from the dropdown"),
+ choices=sorted(index_paths),
+ interactive=True,
+ )
+ index_rate1 = gr.Slider(
+ minimum=0,
+ maximum=1,
+ label=i18n("Search feature ratio:"),
+ value=0.88,
+ interactive=True,
+ )
+ resample_sr0 = gr.Slider(
+ minimum=0,
+ maximum=48000,
+ label=i18n("Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling:"),
+ value=0,
+ step=1,
+ interactive=True,
+ )
+ rms_mix_rate0 = gr.Slider(
+ minimum=0,
+ maximum=1,
+ label=i18n("Use the volume envelope of the input to replace or mix with the volume envelope of the output. The closer the ratio is to 1, the more the output envelope is used:"),
+ value=1,
+ interactive=True,
+ )
+ protect0 = gr.Slider(
+ minimum=0,
+ maximum=0.5,
+ label=i18n("Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy:"),
+ value=0.33,
+ step=0.01,
+ interactive=True,
+ )
+ f0_file = gr.File(label=i18n("F0 curve file (optional). One pitch per line. Replaces the default F0 and pitch modulation:"))
+ but0 = gr.Button(i18n("Convert"), variant="primary")
+ vc_output1 = gr.Textbox(label=i18n("Output information"))
+ vc_output2 = gr.Audio(label=i18n("Export audio (click on the three dots in the lower right corner to download)"))
+ but0.click(
+ vc_single,
+ [
+ spk_item,
+ vc_input3,
+ vc_transform0,
+ f0_file,
+ f0method0,
+ file_index1,
+ file_index2,
+ index_rate1,
+ filter_radius0,
+ resample_sr0,
+ rms_mix_rate0,
+ protect0,
+ ],
+ [vc_output1, vc_output2],
+ )
+
+
+app.launch()
diff --git a/audioEffects.py b/audioEffects.py
new file mode 100644
index 000000000..9dfe12f49
--- /dev/null
+++ b/audioEffects.py
@@ -0,0 +1,36 @@
+from pedalboard import Pedalboard, Compressor, Reverb, NoiseGate
+from pedalboard.io import AudioFile
+import sys
+import os
+now_dir = os.getcwd()
+sys.path.append(now_dir)
+from i18n import I18nAuto
+
+i18n = I18nAuto()
+
+def process_audio(input_path, output_path, reverb_enabled, compressor_enabled, noise_gate_enabled):
+ print(reverb_enabled)
+ print(compressor_enabled)
+ print(noise_gate_enabled)
+ effects = []
+ if reverb_enabled:
+ effects.append(Reverb(room_size=0.01))
+ if compressor_enabled:
+ effects.append(Compressor(threshold_db=-10, ratio=25))
+ if noise_gate_enabled:
+ effects.append(NoiseGate(threshold_db=-16, ratio=1.5, release_ms=250))
+
+ board = Pedalboard(effects)
+
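+    # Stream the file through the effect chain one second at a time;
+    # reset=False keeps reverb tails and compressor state continuous
+    # across chunk boundaries.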
+ with AudioFile(input_path) as f:
+ with AudioFile(output_path, 'w', f.samplerate, f.num_channels) as o:
+ while f.tell() < f.frames:
+ chunk = f.read(f.samplerate)
+ effected = board(chunk, f.samplerate, reset=False)
+ o.write(effected)
+
+
+ print(i18n("Processed audio saved at: "), output_path)
+ return output_path
\ No newline at end of file
diff --git a/audios/.gitignore b/audios/.gitignore
new file mode 100644
index 000000000..e69de29bb
diff --git a/config.py b/config.py
new file mode 100644
index 000000000..d92d78d78
--- /dev/null
+++ b/config.py
@@ -0,0 +1,208 @@
+import argparse
+import sys
+import torch
+import json
+from multiprocessing import cpu_count
+
+usefp16 = False
+
+def decide_fp_config():
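+    # GPUs with CUDA compute capability >= 7 (Volta and newer) have tensor
+    # cores, so fp16 training is worthwhile: flip fp16_run in configs/*.json
+    # and swap "3.0" for "3.7" in the preprocessing script (presumably the
+    # per-slice duration in seconds).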
+ global usefp16
+ usefp16 = False
+ device_capability = 0
+ if torch.cuda.is_available():
+ device = torch.device("cuda:0")
+ device_capability = torch.cuda.get_device_capability(device)[0]
+ if device_capability >= 7:
+ usefp16 = True
+ for config_file in ["32k.json", "40k.json", "48k.json"]:
+ with open(f"configs/{config_file}", "r") as d:
+ data = json.load(d)
+
+ if "train" in data and "fp16_run" in data["train"]:
+ data["train"]["fp16_run"] = True
+
+ with open(f"configs/{config_file}", "w") as d:
+ json.dump(data, d, indent=4)
+
+
+
+ with open(
+ "trainset_preprocess_pipeline_print.py", "r", encoding="utf-8"
+ ) as f:
+ strr = f.read()
+
+ strr = strr.replace("3.0", "3.7")
+
+ with open(
+ "trainset_preprocess_pipeline_print.py", "w", encoding="utf-8"
+ ) as f:
+ f.write(strr)
+ else:
+ for config_file in ["32k.json", "40k.json", "48k.json"]:
+ with open(f"configs/{config_file}", "r") as f:
+ data = json.load(f)
+
+ if "train" in data and "fp16_run" in data["train"]:
+ data["train"]["fp16_run"] = False
+
+ with open(f"configs/{config_file}", "w") as d:
+ json.dump(data, d, indent=4)
+
+ print(f"Set fp16_run to false in {config_file}")
+
+ with open(
+ "trainset_preprocess_pipeline_print.py", "r", encoding="utf-8"
+ ) as f:
+ strr = f.read()
+
+ strr = strr.replace("3.7", "3.0")
+
+ with open(
+ "trainset_preprocess_pipeline_print.py", "w", encoding="utf-8"
+ ) as f:
+ f.write(strr)
+ else:
+ print(
+ "CUDA is not available. Make sure you have an NVIDIA GPU and CUDA installed."
+ )
+ return (usefp16, device_capability)
+
+class Config:
+ def __init__(self):
+ self.device = "cuda:0"
+ self.is_half = True
+ self.n_cpu = 0
+ self.gpu_name = None
+ self.gpu_mem = None
+ (
+ self.python_cmd,
+ self.listen_port,
+ self.iscolab,
+ self.noparallel,
+ self.noautoopen,
+ self.paperspace,
+ self.is_cli,
+ self.grtheme,
+ ) = self.arg_parse()
+
+ self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()
+
+ @staticmethod
+ def arg_parse() -> tuple:
+ exe = sys.executable or "python"
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--port", type=int, default=7865, help="Listen port")
+ parser.add_argument("--pycmd", type=str, default=exe, help="Python command")
+ parser.add_argument("--colab", action="store_true", help="Launch in colab")
+ parser.add_argument(
+ "--noparallel", action="store_true", help="Disable parallel processing"
+ )
+ parser.add_argument(
+ "--noautoopen",
+ action="store_true",
+ help="Do not open in browser automatically",
+ )
+ parser.add_argument(
+ "--paperspace",
+ action="store_true",
+ help="Note that this argument just shares a gradio link for the web UI. Thus can be used on other non-local CLI systems.",
+ )
+ parser.add_argument(
+ "--is_cli",
+ action="store_true",
+ help="Use the CLI instead of setting up a gradio UI. This flag will launch an RVC text interface where you can execute functions from infer-web.py!",
+ )
+
+        parser.add_argument(
+            "-t",
+            "--theme",
+            help="Theme for Gradio. Format - `JohnSmith9982/small_and_pretty` (no backticks)",
+            default="JohnSmith9982/small_and_pretty",
+            type=str,
+        )
+
+ cmd_opts = parser.parse_args()
+
+ cmd_opts.port = cmd_opts.port if 0 <= cmd_opts.port <= 65535 else 7865
+
+ return (
+ cmd_opts.pycmd,
+ cmd_opts.port,
+ cmd_opts.colab,
+ cmd_opts.noparallel,
+ cmd_opts.noautoopen,
+ cmd_opts.paperspace,
+ cmd_opts.is_cli,
+ cmd_opts.theme,
+ )
+
+ @staticmethod
+ def has_mps() -> bool:
+ if not torch.backends.mps.is_available():
+ return False
+ try:
+ torch.zeros(1).to(torch.device("mps"))
+ return True
+ except Exception:
+ return False
+
+ def device_config(self) -> tuple:
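+        # x_pad/x_query/x_center/x_max appear to be second-based padding and
+        # windowing sizes for the inference pipeline: fp16 can afford larger
+        # windows, while cards with <= 4 GB of VRAM get the smallest profile.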
+ if torch.cuda.is_available():
+ i_device = int(self.device.split(":")[-1])
+ self.gpu_name = torch.cuda.get_device_name(i_device)
+ if (
+ ("16" in self.gpu_name and "V100" not in self.gpu_name.upper())
+ or "P40" in self.gpu_name.upper()
+ or "1060" in self.gpu_name
+ or "1070" in self.gpu_name
+ ):
+ print("Found GPU", self.gpu_name, ", force to fp32")
+ self.is_half = False
+ else:
+ decide_fp_config()
+ self.gpu_mem = int(
+ torch.cuda.get_device_properties(i_device).total_memory
+ / 1024
+ / 1024
+ / 1024
+ + 0.4
+ )
+ if self.gpu_mem <= 4:
+ with open("trainset_preprocess_pipeline_print.py", "r") as f:
+ strr = f.read().replace("3.7", "3.0")
+ with open("trainset_preprocess_pipeline_print.py", "w") as f:
+ f.write(strr)
+ elif self.has_mps():
+ print("No supported Nvidia GPU found, using MPS instead")
+ self.device = "mps"
+ self.is_half = False
+ decide_fp_config()
+ else:
+ print("No supported Nvidia GPU found, using CPU instead")
+ self.device = "cpu"
+ self.is_half = False
+ decide_fp_config()
+
+ if self.n_cpu == 0:
+ self.n_cpu = cpu_count()
+
+ if self.is_half:
+ x_pad = 3
+ x_query = 10
+ x_center = 60
+ x_max = 65
+ else:
+ x_pad = 1
+ x_query = 6
+ x_center = 38
+ x_max = 41
+
+        if self.gpu_mem is not None and self.gpu_mem <= 4:
+ x_pad = 1
+ x_query = 5
+ x_center = 30
+ x_max = 32
+
+ return x_pad, x_query, x_center, x_max
diff --git a/configs/32k.json b/configs/32k.json
new file mode 100644
index 000000000..400b6be80
--- /dev/null
+++ b/configs/32k.json
@@ -0,0 +1,46 @@
+{
+ "train": {
+ "log_interval": 200,
+ "seed": 1234,
+ "epochs": 20000,
+ "learning_rate": 1e-4,
+ "betas": [0.8, 0.99],
+ "eps": 1e-9,
+ "batch_size": 4,
+ "fp16_run": false,
+ "lr_decay": 0.999875,
+ "segment_size": 12800,
+ "init_lr_ratio": 1,
+ "warmup_epochs": 0,
+ "c_mel": 45,
+ "c_kl": 1.0
+ },
+ "data": {
+ "max_wav_value": 32768.0,
+ "sampling_rate": 32000,
+ "filter_length": 1024,
+ "hop_length": 320,
+ "win_length": 1024,
+ "n_mel_channels": 80,
+ "mel_fmin": 0.0,
+ "mel_fmax": null
+ },
+ "model": {
+ "inter_channels": 192,
+ "hidden_channels": 192,
+ "filter_channels": 768,
+ "n_heads": 2,
+ "n_layers": 6,
+ "kernel_size": 3,
+ "p_dropout": 0,
+ "resblock": "1",
+ "resblock_kernel_sizes": [3,7,11],
+ "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
+ "upsample_rates": [10,4,2,2,2],
+ "upsample_initial_channel": 512,
+ "upsample_kernel_sizes": [16,16,4,4,4],
+ "use_spectral_norm": false,
+ "gin_channels": 256,
+ "spk_embed_dim": 109
+ }
+}
diff --git a/configs/32k_v2.json b/configs/32k_v2.json
new file mode 100644
index 000000000..70e534f4c
--- /dev/null
+++ b/configs/32k_v2.json
@@ -0,0 +1,46 @@
+{
+ "train": {
+ "log_interval": 200,
+ "seed": 1234,
+ "epochs": 20000,
+ "learning_rate": 1e-4,
+ "betas": [0.8, 0.99],
+ "eps": 1e-9,
+ "batch_size": 4,
+ "fp16_run": true,
+ "lr_decay": 0.999875,
+ "segment_size": 12800,
+ "init_lr_ratio": 1,
+ "warmup_epochs": 0,
+ "c_mel": 45,
+ "c_kl": 1.0
+ },
+ "data": {
+ "max_wav_value": 32768.0,
+ "sampling_rate": 32000,
+ "filter_length": 1024,
+ "hop_length": 320,
+ "win_length": 1024,
+ "n_mel_channels": 80,
+ "mel_fmin": 0.0,
+ "mel_fmax": null
+ },
+ "model": {
+ "inter_channels": 192,
+ "hidden_channels": 192,
+ "filter_channels": 768,
+ "n_heads": 2,
+ "n_layers": 6,
+ "kernel_size": 3,
+ "p_dropout": 0,
+ "resblock": "1",
+ "resblock_kernel_sizes": [3,7,11],
+ "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
+ "upsample_rates": [10,8,2,2],
+ "upsample_initial_channel": 512,
+ "upsample_kernel_sizes": [20,16,4,4],
+ "use_spectral_norm": false,
+ "gin_channels": 256,
+ "spk_embed_dim": 109
+ }
+}
diff --git a/configs/40k.json b/configs/40k.json
new file mode 100644
index 000000000..cb30b8be4
--- /dev/null
+++ b/configs/40k.json
@@ -0,0 +1,46 @@
+{
+ "train": {
+ "log_interval": 200,
+ "seed": 1234,
+ "epochs": 20000,
+ "learning_rate": 1e-4,
+ "betas": [0.8, 0.99],
+ "eps": 1e-9,
+ "batch_size": 4,
+ "fp16_run": false,
+ "lr_decay": 0.999875,
+ "segment_size": 12800,
+ "init_lr_ratio": 1,
+ "warmup_epochs": 0,
+ "c_mel": 45,
+ "c_kl": 1.0
+ },
+ "data": {
+ "max_wav_value": 32768.0,
+ "sampling_rate": 40000,
+ "filter_length": 2048,
+ "hop_length": 400,
+ "win_length": 2048,
+ "n_mel_channels": 125,
+ "mel_fmin": 0.0,
+ "mel_fmax": null
+ },
+ "model": {
+ "inter_channels": 192,
+ "hidden_channels": 192,
+ "filter_channels": 768,
+ "n_heads": 2,
+ "n_layers": 6,
+ "kernel_size": 3,
+ "p_dropout": 0,
+ "resblock": "1",
+ "resblock_kernel_sizes": [3,7,11],
+ "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
+ "upsample_rates": [10,10,2,2],
+ "upsample_initial_channel": 512,
+ "upsample_kernel_sizes": [16,16,4,4],
+ "use_spectral_norm": false,
+ "gin_channels": 256,
+ "spk_embed_dim": 109
+ }
+}
diff --git a/configs/48k.json b/configs/48k.json
new file mode 100644
index 000000000..687599100
--- /dev/null
+++ b/configs/48k.json
@@ -0,0 +1,46 @@
+{
+ "train": {
+ "log_interval": 200,
+ "seed": 1234,
+ "epochs": 20000,
+ "learning_rate": 1e-4,
+ "betas": [0.8, 0.99],
+ "eps": 1e-9,
+ "batch_size": 4,
+ "fp16_run": false,
+ "lr_decay": 0.999875,
+ "segment_size": 11520,
+ "init_lr_ratio": 1,
+ "warmup_epochs": 0,
+ "c_mel": 45,
+ "c_kl": 1.0
+ },
+ "data": {
+ "max_wav_value": 32768.0,
+ "sampling_rate": 48000,
+ "filter_length": 2048,
+ "hop_length": 480,
+ "win_length": 2048,
+ "n_mel_channels": 128,
+ "mel_fmin": 0.0,
+ "mel_fmax": null
+ },
+ "model": {
+ "inter_channels": 192,
+ "hidden_channels": 192,
+ "filter_channels": 768,
+ "n_heads": 2,
+ "n_layers": 6,
+ "kernel_size": 3,
+ "p_dropout": 0,
+ "resblock": "1",
+ "resblock_kernel_sizes": [3,7,11],
+ "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
+ "upsample_rates": [10,6,2,2,2],
+ "upsample_initial_channel": 512,
+ "upsample_kernel_sizes": [16,16,4,4,4],
+ "use_spectral_norm": false,
+ "gin_channels": 256,
+ "spk_embed_dim": 109
+ }
+}
diff --git a/configs/48k_v2.json b/configs/48k_v2.json
new file mode 100644
index 000000000..75f770cda
--- /dev/null
+++ b/configs/48k_v2.json
@@ -0,0 +1,46 @@
+{
+ "train": {
+ "log_interval": 200,
+ "seed": 1234,
+ "epochs": 20000,
+ "learning_rate": 1e-4,
+ "betas": [0.8, 0.99],
+ "eps": 1e-9,
+ "batch_size": 4,
+ "fp16_run": true,
+ "lr_decay": 0.999875,
+ "segment_size": 17280,
+ "init_lr_ratio": 1,
+ "warmup_epochs": 0,
+ "c_mel": 45,
+ "c_kl": 1.0
+ },
+ "data": {
+ "max_wav_value": 32768.0,
+ "sampling_rate": 48000,
+ "filter_length": 2048,
+ "hop_length": 480,
+ "win_length": 2048,
+ "n_mel_channels": 128,
+ "mel_fmin": 0.0,
+ "mel_fmax": null
+ },
+ "model": {
+ "inter_channels": 192,
+ "hidden_channels": 192,
+ "filter_channels": 768,
+ "n_heads": 2,
+ "n_layers": 6,
+ "kernel_size": 3,
+ "p_dropout": 0,
+ "resblock": "1",
+ "resblock_kernel_sizes": [3,7,11],
+ "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
+ "upsample_rates": [12,10,2,2],
+ "upsample_initial_channel": 512,
+ "upsample_kernel_sizes": [24,20,4,4],
+ "use_spectral_norm": false,
+ "gin_channels": 256,
+ "spk_embed_dim": 109
+ }
+}
diff --git a/csvdb/stop.csv b/csvdb/stop.csv
new file mode 100644
index 000000000..bc59c12aa
--- /dev/null
+++ b/csvdb/stop.csv
@@ -0,0 +1 @@
+False
diff --git a/easy_infer.py b/easy_infer.py
new file mode 100644
index 000000000..b200608c5
--- /dev/null
+++ b/easy_infer.py
@@ -0,0 +1,1092 @@
+import subprocess
+import os
+import sys
+import errno
+import shutil
+import yt_dlp
+from mega import Mega
+import datetime
+import unicodedata
+import torch
+import glob
+import gradio as gr
+import gdown
+import zipfile
+import traceback
+import json
+import requests
+import wget
+import ffmpeg
+import hashlib
+now_dir = os.getcwd()
+sys.path.append(now_dir)
+from unidecode import unidecode
+import re
+import time
+from lib.infer_pack.models_onnx import SynthesizerTrnMsNSFsidM
+from vc_infer_pipeline import VC
+from lib.infer_pack.models import (
+ SynthesizerTrnMs256NSFsid,
+ SynthesizerTrnMs256NSFsid_nono,
+ SynthesizerTrnMs768NSFsid,
+ SynthesizerTrnMs768NSFsid_nono,
+)
+from MDXNet import MDXNetDereverb
+from config import Config
+from infer_uvr5 import _audio_pre_, _audio_pre_new
+from huggingface_hub import HfApi, list_models
+from huggingface_hub import login
+from i18n import I18nAuto
+i18n = I18nAuto()
+from bs4 import BeautifulSoup
+from sklearn.cluster import MiniBatchKMeans
+
+config = Config()
+tmp = os.path.join(now_dir, "TEMP")
+shutil.rmtree(tmp, ignore_errors=True)
+os.environ["TEMP"] = tmp
+weight_root = "weights"
+weight_uvr5_root = "uvr5_weights"
+index_root = "./logs/"
+audio_root = "audios"
+names = []
+for name in os.listdir(weight_root):
+ if name.endswith(".pth"):
+ names.append(name)
+index_paths = []
+
+global indexes_list
+indexes_list = []
+
+audio_paths = []
+for root, dirs, files in os.walk(index_root, topdown=False):
+ for name in files:
+ if name.endswith(".index") and "trained" not in name:
+            index_paths.append("%s/%s" % (root, name))
+
+for root, dirs, files in os.walk(audio_root, topdown=False):
+ for name in files:
+ audio_paths.append("%s/%s" % (root, name))
+
+uvr5_names = []
+for name in os.listdir(weight_uvr5_root):
+ if name.endswith(".pth") or "onnx" in name:
+ uvr5_names.append(name.replace(".pth", ""))
+
+def calculate_md5(file_path):
+ hash_md5 = hashlib.md5()
+ with open(file_path, "rb") as f:
+ for chunk in iter(lambda: f.read(4096), b""):
+ hash_md5.update(chunk)
+ return hash_md5.hexdigest()
+
+def silentremove(filename):
+ try:
+ os.remove(filename)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+
+def get_md5(temp_folder):
+    for root, subfolders, files in os.walk(temp_folder):
+        for file in files:
+            if (
+                file.endswith(".pth")
+                and not file.startswith(("G_", "D_"))
+                and "_G_" not in file
+                and "_D_" not in file
+            ):
+                return calculate_md5(os.path.join(root, file))
+
+    return None
+
+def find_parent(search_dir, file_name):
+ for dirpath, dirnames, filenames in os.walk(search_dir):
+ if file_name in filenames:
+ return os.path.abspath(dirpath)
+ return None
+
+def find_folder_parent(search_dir, folder_name):
+ for dirpath, dirnames, filenames in os.walk(search_dir):
+ if folder_name in dirnames:
+ return os.path.abspath(dirpath)
+ return None
+
+
+
+def download_from_url(url):
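+    # Download a model archive into ./zips, dispatching on the host:
+    # Google Drive (gdown), Hugging Face (blob -> resolve direct links),
+    # Mega, Discord CDN, pixeldrain's API, or any direct URL via wget.
+    # Returns "downloaded" on success, an error token, or None.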
+ parent_path = find_folder_parent(".", "pretrained_v2")
+ zips_path = os.path.join(parent_path, 'zips')
+
+ if url != '':
+ print(i18n("Downloading the file: ") + f"{url}")
+ if "drive.google.com" in url:
+ if "file/d/" in url:
+ file_id = url.split("file/d/")[1].split("/")[0]
+ elif "id=" in url:
+ file_id = url.split("id=")[1].split("&")[0]
+ else:
+ return None
+
+ if file_id:
+ os.chdir('./zips')
+ result = subprocess.run(["gdown", f"https://drive.google.com/uc?id={file_id}", "--fuzzy"], capture_output=True, text=True, encoding='utf-8')
+ if "Too many users have viewed or downloaded this file recently" in str(result.stderr):
+ return "too much use"
+ if "Cannot retrieve the public link of the file." in str(result.stderr):
+ return "private link"
+ print(result.stderr)
+
+ elif "/blob/" in url:
+ os.chdir('./zips')
+ url = url.replace("blob", "resolve")
+ wget.download(url)
+ elif "mega.nz" in url:
+ if "#!" in url:
+ file_id = url.split("#!")[1].split("!")[0]
+ elif "file/" in url:
+ file_id = url.split("file/")[1].split("/")[0]
+ else:
+ return None
+ if file_id:
+ m = Mega()
+ m.download_url(url, zips_path)
+ elif "/tree/main" in url:
+ response = requests.get(url)
+ soup = BeautifulSoup(response.content, 'html.parser')
+ temp_url = ''
+ for link in soup.find_all('a', href=True):
+ if link['href'].endswith('.zip'):
+ temp_url = link['href']
+ break
+ if temp_url:
+ url = temp_url
+ url = url.replace("blob", "resolve")
+ if "huggingface.co" not in url:
+ url = "https://huggingface.co" + url
+
+ wget.download(url)
+ else:
+ print("No .zip file found on the page.")
+ elif "cdn.discordapp.com" in url:
+ file = requests.get(url)
+ if file.status_code == 200:
+ name = url.split('/')
+ with open(os.path.join(zips_path, name[len(name)-1]), "wb") as newfile:
+ newfile.write(file.content)
+ else:
+ return None
+ elif "pixeldrain.com" in url:
+ try:
+ file_id = url.split("pixeldrain.com/")[1]
+ os.chdir('./zips')
+ response = requests.get(f"https://pixeldrain.com/api/file/{file_id}")
+ if response.status_code == 200:
+ file_data = response.json()
+ download_url = file_data.get("item").get("file")
+ file_name = file_data.get("name")
+ with open(file_name, "wb") as newfile:
+ file_response = requests.get(download_url)
+ if file_response.status_code == 200:
+ newfile.write(file_response.content)
+ os.chdir(parent_path)
+ return "downloaded"
+ else:
+ os.chdir(parent_path)
+ return None
+ else:
+ os.chdir(parent_path)
+ return None
+ except Exception as e:
+ os.chdir(parent_path)
+ return None
+ else:
+ os.chdir('./zips')
+ wget.download(url)
+
+ os.chdir(parent_path)
+ print(i18n("Full download"))
+ return "downloaded"
+ else:
+ return None
+
+class error_message(Exception):
+ def __init__(self, mensaje):
+ self.mensaje = mensaje
+ super().__init__(mensaje)
+
+def get_vc(sid, to_return_protect0, to_return_protect1):
+ global n_spk, tgt_sr, net_g, vc, cpt, version
+ if sid == "" or sid == []:
+ global hubert_model
+ if hubert_model is not None:
+ print("clean_empty_cache")
+ del net_g, n_spk, vc, hubert_model, tgt_sr
+            hubert_model = net_g = n_spk = vc = tgt_sr = None
+ if torch.cuda.is_available():
+ torch.cuda.empty_cache()
+ if_f0 = cpt.get("f0", 1)
+ version = cpt.get("version", "v1")
+ if version == "v1":
+ if if_f0 == 1:
+ net_g = SynthesizerTrnMs256NSFsid(
+ *cpt["config"], is_half=config.is_half
+ )
+ else:
+ net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
+ elif version == "v2":
+ if if_f0 == 1:
+ net_g = SynthesizerTrnMs768NSFsid(
+ *cpt["config"], is_half=config.is_half
+ )
+ else:
+ net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
+ del net_g, cpt
+ if torch.cuda.is_available():
+ torch.cuda.empty_cache()
+ cpt = None
+ return (
+ {"visible": False, "__type__": "update"},
+ {"visible": False, "__type__": "update"},
+ {"visible": False, "__type__": "update"},
+ )
+ person = "%s/%s" % (weight_root, sid)
+ print("loading %s" % person)
+ cpt = torch.load(person, map_location="cpu")
+ tgt_sr = cpt["config"][-1]
+ cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]
+ if_f0 = cpt.get("f0", 1)
+ if if_f0 == 0:
+ to_return_protect0 = to_return_protect1 = {
+ "visible": False,
+ "value": 0.5,
+ "__type__": "update",
+ }
+ else:
+ to_return_protect0 = {
+ "visible": True,
+ "value": to_return_protect0,
+ "__type__": "update",
+ }
+ to_return_protect1 = {
+ "visible": True,
+ "value": to_return_protect1,
+ "__type__": "update",
+ }
+ version = cpt.get("version", "v1")
+ if version == "v1":
+ if if_f0 == 1:
+ net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half)
+ else:
+ net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
+ elif version == "v2":
+ if if_f0 == 1:
+ net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=config.is_half)
+ else:
+ net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
+ del net_g.enc_q
+ print(net_g.load_state_dict(cpt["weight"], strict=False))
+ net_g.eval().to(config.device)
+ if config.is_half:
+ net_g = net_g.half()
+ else:
+ net_g = net_g.float()
+ vc = VC(tgt_sr, config)
+ n_spk = cpt["config"][-3]
+ return (
+ {"visible": True, "maximum": n_spk, "__type__": "update"},
+ to_return_protect0,
+ to_return_protect1,
+ )
+
+def load_downloaded_model(url):
+ parent_path = find_folder_parent(".", "pretrained_v2")
+ try:
+ infos = []
+ logs_folders = ['0_gt_wavs','1_16k_wavs','2a_f0','2b-f0nsf','3_feature256','3_feature768']
+ zips_path = os.path.join(parent_path, 'zips')
+ unzips_path = os.path.join(parent_path, 'unzips')
+ weights_path = os.path.join(parent_path, 'weights')
+ logs_dir = ""
+
+ if os.path.exists(zips_path):
+ shutil.rmtree(zips_path)
+ if os.path.exists(unzips_path):
+ shutil.rmtree(unzips_path)
+
+ os.mkdir(zips_path)
+ os.mkdir(unzips_path)
+
+ download_file = download_from_url(url)
+ if not download_file:
+ print(i18n("The file could not be downloaded."))
+ infos.append(i18n("The file could not be downloaded."))
+ yield "\n".join(infos)
+ elif download_file == "downloaded":
+ print(i18n("It has been downloaded successfully."))
+ infos.append(i18n("It has been downloaded successfully."))
+ yield "\n".join(infos)
+ elif download_file == "too much use":
+ raise Exception(i18n("Too many users have recently viewed or downloaded this file"))
+ elif download_file == "private link":
+ raise Exception(i18n("Cannot get file from this private link"))
+
+ for filename in os.listdir(zips_path):
+ if filename.endswith(".zip"):
+ zipfile_path = os.path.join(zips_path,filename)
+ print(i18n("Proceeding with the extraction..."))
+ infos.append(i18n("Proceeding with the extraction..."))
+ shutil.unpack_archive(zipfile_path, unzips_path, 'zip')
+ model_name = os.path.basename(zipfile_path)
+ logs_dir = os.path.join(parent_path,'logs', os.path.normpath(str(model_name).replace(".zip","")))
+ yield "\n".join(infos)
+ else:
+ print(i18n("Unzip error."))
+ infos.append(i18n("Unzip error."))
+ yield "\n".join(infos)
+
+ index_file = False
+ model_file = False
+ D_file = False
+ G_file = False
+
+ for path, subdirs, files in os.walk(unzips_path):
+ for item in files:
+ item_path = os.path.join(path, item)
+                if "G_" not in item and "D_" not in item and item.endswith(".pth"):
+ model_file = True
+ model_name = item.replace(".pth","")
+ logs_dir = os.path.join(parent_path,'logs', model_name)
+ if os.path.exists(logs_dir):
+ shutil.rmtree(logs_dir)
+ os.mkdir(logs_dir)
+ if not os.path.exists(weights_path):
+ os.mkdir(weights_path)
+ if os.path.exists(os.path.join(weights_path, item)):
+ os.remove(os.path.join(weights_path, item))
+ if os.path.exists(item_path):
+ shutil.move(item_path, weights_path)
+
+ if not model_file and not os.path.exists(logs_dir):
+ os.mkdir(logs_dir)
+ for path, subdirs, files in os.walk(unzips_path):
+ for item in files:
+ item_path = os.path.join(path, item)
+ if item.startswith('added_') and item.endswith('.index'):
+ index_file = True
+ if os.path.exists(item_path):
+ if os.path.exists(os.path.join(logs_dir, item)):
+ os.remove(os.path.join(logs_dir, item))
+ shutil.move(item_path, logs_dir)
+ if item.startswith('total_fea.npy') or item.startswith('events.'):
+ if os.path.exists(item_path):
+ if os.path.exists(os.path.join(logs_dir, item)):
+ os.remove(os.path.join(logs_dir, item))
+ shutil.move(item_path, logs_dir)
+
+
+ result = ""
+ if model_file:
+ if index_file:
+ print(i18n("The model works for inference, and has the .index file."))
+ infos.append("\n" + i18n("The model works for inference, and has the .index file."))
+ yield "\n".join(infos)
+ else:
+ print(i18n("The model works for inference, but it doesn't have the .index file."))
+ infos.append("\n" + i18n("The model works for inference, but it doesn't have the .index file."))
+ yield "\n".join(infos)
+
+ if not index_file and not model_file:
+ print(i18n("No relevant file was found to upload."))
+ infos.append(i18n("No relevant file was found to upload."))
+ yield "\n".join(infos)
+
+ if os.path.exists(zips_path):
+ shutil.rmtree(zips_path)
+ if os.path.exists(unzips_path):
+ shutil.rmtree(unzips_path)
+ os.chdir(parent_path)
+ return result
+ except Exception as e:
+ os.chdir(parent_path)
+ if "too much use" in str(e):
+ print(i18n("Too many users have recently viewed or downloaded this file"))
+ yield i18n("Too many users have recently viewed or downloaded this file")
+ elif "private link" in str(e):
+ print(i18n("Cannot get file from this private link"))
+ yield i18n("Cannot get file from this private link")
+ else:
+ print(e)
+ yield i18n("An error occurred downloading")
+ finally:
+ os.chdir(parent_path)
+
+def load_dowloaded_dataset(url):
+ parent_path = find_folder_parent(".", "pretrained_v2")
+ infos = []
+ try:
+ zips_path = os.path.join(parent_path, 'zips')
+ unzips_path = os.path.join(parent_path, 'unzips')
+ datasets_path = os.path.join(parent_path, 'datasets')
+        audio_extensions = ["flac", "wav"]
+
+ if os.path.exists(zips_path):
+ shutil.rmtree(zips_path)
+ if os.path.exists(unzips_path):
+ shutil.rmtree(unzips_path)
+
+ if not os.path.exists(datasets_path):
+ os.mkdir(datasets_path)
+
+ os.mkdir(zips_path)
+ os.mkdir(unzips_path)
+
+ download_file = download_from_url(url)
+
+ if not download_file:
+ print(i18n("An error occurred downloading"))
+ infos.append(i18n("An error occurred downloading"))
+ yield "\n".join(infos)
+ raise Exception(i18n("An error occurred downloading"))
+ elif download_file == "downloaded":
+ print(i18n("It has been downloaded successfully."))
+ infos.append(i18n("It has been downloaded successfully."))
+ yield "\n".join(infos)
+ elif download_file == "too much use":
+ raise Exception(i18n("Too many users have recently viewed or downloaded this file"))
+ elif download_file == "private link":
+ raise Exception(i18n("Cannot get file from this private link"))
+
+ zip_path = os.listdir(zips_path)
+ foldername = ""
+ for file in zip_path:
+ if file.endswith('.zip'):
+ file_path = os.path.join(zips_path, file)
+ print("....")
+ foldername = file.replace(".zip","").replace(" ","").replace("-","_")
+ dataset_path = os.path.join(datasets_path, foldername)
+ print(i18n("Proceeding with the extraction..."))
+ infos.append(i18n("Proceeding with the extraction..."))
+ yield "\n".join(infos)
+ shutil.unpack_archive(file_path, unzips_path, 'zip')
+ if os.path.exists(dataset_path):
+ shutil.rmtree(dataset_path)
+
+ os.mkdir(dataset_path)
+
+ for root, subfolders, songs in os.walk(unzips_path):
+ for song in songs:
+ song_path = os.path.join(root, song)
+                    if song.endswith(tuple(audio_extensions)):
+ shutil.move(song_path, dataset_path)
+ else:
+ print(i18n("Unzip error."))
+ infos.append(i18n("Unzip error."))
+ yield "\n".join(infos)
+
+
+
+ if os.path.exists(zips_path):
+ shutil.rmtree(zips_path)
+ if os.path.exists(unzips_path):
+ shutil.rmtree(unzips_path)
+
+ print(i18n("The Dataset has been loaded successfully"))
+ infos.append(i18n("The Dataset has been loaded successfully"))
+ yield "\n".join(infos)
+ except Exception as e:
+ os.chdir(parent_path)
+ if "too much use" in str(e):
+ print(i18n("Too many users have recently viewed or downloaded this file"))
+ yield i18n("Too many users have recently viewed or downloaded this file")
+ elif "private link" in str(e):
+ print(i18n("Cannot get file from this private link"))
+ yield i18n("Cannot get file from this private link")
+ else:
+ print(e)
+ yield i18n("An error occurred downloading")
+ finally:
+ os.chdir(parent_path)
+
+def save_model(modelname, save_action):
+
+ parent_path = find_folder_parent(".", "pretrained_v2")
+ zips_path = os.path.join(parent_path, 'zips')
+ dst = os.path.join(zips_path,modelname)
+ logs_path = os.path.join(parent_path, 'logs', modelname)
+ weights_path = os.path.join(parent_path, 'weights', f"{modelname}.pth")
+ save_folder = parent_path
+ infos = []
+
+ try:
+ if not os.path.exists(logs_path):
+ raise Exception("No model found.")
+
+        if "content" not in parent_path:
+ save_folder = os.path.join(parent_path, 'RVC_Backup')
+ else:
+ save_folder = '/content/drive/MyDrive/RVC_Backup'
+
+ infos.append(i18n("Save model"))
+ yield "\n".join(infos)
+
+ if not os.path.exists(save_folder):
+ os.mkdir(save_folder)
+ if not os.path.exists(os.path.join(save_folder, 'ManualTrainingBackup')):
+ os.mkdir(os.path.join(save_folder, 'ManualTrainingBackup'))
+ if not os.path.exists(os.path.join(save_folder, 'Finished')):
+ os.mkdir(os.path.join(save_folder, 'Finished'))
+
+ if os.path.exists(zips_path):
+ shutil.rmtree(zips_path)
+
+ os.mkdir(zips_path)
+ added_file = glob.glob(os.path.join(logs_path, "added_*.index"))
+ d_file = glob.glob(os.path.join(logs_path, "D_*.pth"))
+ g_file = glob.glob(os.path.join(logs_path, "G_*.pth"))
+
+ if save_action == i18n("Choose the method"):
+ raise Exception("No method choosen.")
+
+ if save_action == i18n("Save all"):
+ print(i18n("Save all"))
+ save_folder = os.path.join(save_folder, 'ManualTrainingBackup')
+ shutil.copytree(logs_path, dst)
+ else:
+ if not os.path.exists(dst):
+ os.mkdir(dst)
+
+ if save_action == i18n("Save D and G"):
+ print(i18n("Save D and G"))
+ save_folder = os.path.join(save_folder, 'ManualTrainingBackup')
+ if len(d_file) > 0:
+ shutil.copy(d_file[0], dst)
+ if len(g_file) > 0:
+ shutil.copy(g_file[0], dst)
+
+ if len(added_file) > 0:
+ shutil.copy(added_file[0], dst)
+ else:
+ infos.append(i18n("Saved without index..."))
+
+ if save_action == i18n("Save voice"):
+ print(i18n("Save voice"))
+ save_folder = os.path.join(save_folder, 'Finished')
+ if len(added_file) > 0:
+ shutil.copy(added_file[0], dst)
+ else:
+ infos.append(i18n("Saved without index..."))
+
+ yield "\n".join(infos)
+ if not os.path.exists(weights_path):
+ infos.append(i18n("Saved without inference model..."))
+ else:
+ shutil.copy(weights_path, dst)
+
+ yield "\n".join(infos)
+ infos.append("\n" + i18n("This may take a few minutes, please wait..."))
+ yield "\n".join(infos)
+
+ shutil.make_archive(os.path.join(zips_path,f"{modelname}"), 'zip', zips_path)
+ shutil.move(os.path.join(zips_path,f"{modelname}.zip"), os.path.join(save_folder, f'{modelname}.zip'))
+
+ shutil.rmtree(zips_path)
+ infos.append("\n" + i18n("Model saved successfully"))
+ yield "\n".join(infos)
+
+ except Exception as e:
+ print(e)
+ if "No model found." in str(e):
+ infos.append(i18n("The model you want to save does not exist, be sure to enter the correct name."))
+ else:
+ infos.append(i18n("An error occurred saving the model"))
+
+ yield "\n".join(infos)
+
+def load_downloaded_backup(url):
+ parent_path = find_folder_parent(".", "pretrained_v2")
+ try:
+ infos = []
+ logs_folders = ['0_gt_wavs','1_16k_wavs','2a_f0','2b-f0nsf','3_feature256','3_feature768']
+ zips_path = os.path.join(parent_path, 'zips')
+ unzips_path = os.path.join(parent_path, 'unzips')
+ weights_path = os.path.join(parent_path, 'weights')
+ logs_dir = os.path.join(parent_path, 'logs')
+
+ if os.path.exists(zips_path):
+ shutil.rmtree(zips_path)
+ if os.path.exists(unzips_path):
+ shutil.rmtree(unzips_path)
+
+ os.mkdir(zips_path)
+ os.mkdir(unzips_path)
+
+ download_file = download_from_url(url)
+ if not download_file:
+ print(i18n("The file could not be downloaded."))
+ infos.append(i18n("The file could not be downloaded."))
+ yield "\n".join(infos)
+ elif download_file == "downloaded":
+ print(i18n("It has been downloaded successfully."))
+ infos.append(i18n("It has been downloaded successfully."))
+ yield "\n".join(infos)
+ elif download_file == "too much use":
+ raise Exception(i18n("Too many users have recently viewed or downloaded this file"))
+ elif download_file == "private link":
+ raise Exception(i18n("Cannot get file from this private link"))
+
+ for filename in os.listdir(zips_path):
+ if filename.endswith(".zip"):
+ zipfile_path = os.path.join(zips_path,filename)
+ zip_dir_name = os.path.splitext(filename)[0]
+ unzip_dir = unzips_path
+ print(i18n("Proceeding with the extraction..."))
+ infos.append(i18n("Proceeding with the extraction..."))
+ shutil.unpack_archive(zipfile_path, unzip_dir, 'zip')
+
+ if os.path.exists(os.path.join(unzip_dir, zip_dir_name)):
+ shutil.move(os.path.join(unzip_dir, zip_dir_name), logs_dir)
+ else:
+ new_folder_path = os.path.join(logs_dir, zip_dir_name)
+ os.mkdir(new_folder_path)
+                    for item_name in os.listdir(unzip_dir):
+                        item_path = os.path.join(unzip_dir, item_name)
+                        if os.path.isfile(item_path) or os.path.isdir(item_path):
+                            shutil.move(item_path, new_folder_path)
+
+ yield "\n".join(infos)
+ else:
+ print(i18n("Unzip error."))
+ infos.append(i18n("Unzip error."))
+ yield "\n".join(infos)
+
+ result = ""
+
+        for filename in os.listdir(unzips_path):
+            if filename.endswith(".zip"):
+                silentremove(os.path.join(unzips_path, filename))
+
+ if os.path.exists(zips_path):
+ shutil.rmtree(zips_path)
+ if os.path.exists(os.path.join(parent_path, 'unzips')):
+ shutil.rmtree(os.path.join(parent_path, 'unzips'))
+ print(i18n("The Backup has been uploaded successfully."))
+ infos.append("\n" + i18n("The Backup has been uploaded successfully."))
+ yield "\n".join(infos)
+ os.chdir(parent_path)
+ return result
+ except Exception as e:
+ os.chdir(parent_path)
+ if "too much use" in str(e):
+ print(i18n("Too many users have recently viewed or downloaded this file"))
+ yield i18n("Too many users have recently viewed or downloaded this file")
+ elif "private link" in str(e):
+ print(i18n("Cannot get file from this private link"))
+ yield i18n("Cannot get file from this private link")
+ else:
+ print(e)
+ yield i18n("An error occurred downloading")
+ finally:
+ os.chdir(parent_path)
+
+def save_to_wav(record_button):
+    if record_button is None:
+        return
+    path_to_file = record_button
+    new_name = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") + '.wav'
+    new_path = './audios/' + new_name
+    shutil.move(path_to_file, new_path)
+    return new_name
+
+
+def change_choices2():
+ audio_paths=[]
+ for filename in os.listdir("./audios"):
+ if filename.endswith(('wav', 'mp3', 'flac', 'ogg', 'opus',
+ 'm4a', 'mp4', 'aac', 'alac', 'wma',
+ 'aiff', 'webm', 'ac3')):
+ audio_paths.append(os.path.join('./audios',filename).replace('\\', '/'))
+ return {"choices": sorted(audio_paths), "__type__": "update"}, {"__type__": "update"}
+
+
+# NOTE: the Gradio click handler below passes seven inputs, so agg and format0
+# rely on these defaults.
+def uvr(input_url, output_path, model_name, inp_root, save_root_vocal, paths, save_root_ins, agg=10, format0="wav"):
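+    # clear any leftovers from a previous run before downloading again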
+ carpeta_a_eliminar = "yt_downloads"
+ if os.path.exists(carpeta_a_eliminar) and os.path.isdir(carpeta_a_eliminar):
+ for archivo in os.listdir(carpeta_a_eliminar):
+ ruta_archivo = os.path.join(carpeta_a_eliminar, archivo)
+ if os.path.isfile(ruta_archivo):
+ os.remove(ruta_archivo)
+ elif os.path.isdir(ruta_archivo):
+ shutil.rmtree(ruta_archivo)
+
+ def format_title(title):
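+        # strip filesystem-unsafe characters and replace spaces with underscores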
+ formatted_title = re.sub(r'[^\w\s-]', '', title)
+ formatted_title = formatted_title.replace(" ", "_")
+ return formatted_title
+
+    # yt-dlp's Python API expects underscore-style option names, not CLI flags;
+    # the bestaudio stream is saved under a .wav name and re-encoded to real
+    # PCM by the ffmpeg step further down.
+    ydl_opts = {
+        'windowsfilenames': True,
+        'restrictfilenames': True,
+        'format': 'bestaudio',
+        'quiet': True,
+        'no_warnings': True,
+    }
+
+    formatted_title = "default_title"  # fallback in case the download fails
+    try:
+ print(i18n("Downloading audio from the video..."))
+ with yt_dlp.YoutubeDL(ydl_opts) as ydl:
+ info_dict = ydl.extract_info(input_url, download=False)
+ formatted_title = format_title(info_dict.get('title', 'default_title'))
+ formatted_outtmpl = output_path + '/' + formatted_title + '.wav'
+ ydl_opts['outtmpl'] = formatted_outtmpl
+ ydl = yt_dlp.YoutubeDL(ydl_opts)
+ ydl.download([input_url])
+ print(i18n("Audio downloaded!"))
+ except Exception as error:
+ print(i18n("An error occurred:"), error)
+
+ actual_directory = os.path.dirname(__file__)
+ instrumental_source_directory = os.path.join(actual_directory, "wav")
+ instrumental_directory = os.path.join(actual_directory, "audio-others")
+ instrumental_formatted = f"instrument_{formatted_title}.wav.reformatted.wav_10.wav"
+ instrumental_audio_path = os.path.join(instrumental_directory, instrumental_formatted)
+ old_instrumental_audio_path = os.path.join(instrumental_source_directory, instrumental_formatted)
+ format0 = "wav"
+
+ infos = []
+ pre_fun = None
+ try:
+ print(i18n("Separating audio..."))
+        inp_root, save_root_vocal, save_root_ins = [x.strip(' "\n') if isinstance(x, str) else x for x in [inp_root, save_root_vocal, save_root_ins]]
+ if model_name == "onnx_dereverb_By_FoxJoy":
+ pre_fun = MDXNetDereverb(15)
+ else:
+ func = _audio_pre_ if "DeEcho" not in model_name else _audio_pre_new
+ pre_fun = func(
+ agg=10,
+ model_path=os.path.join(weight_uvr5_root, model_name + ".pth"),
+ device=config.device,
+ is_half=config.is_half,
+ )
+ if inp_root != "":
+ paths = [os.path.join(inp_root, name) for name in os.listdir(inp_root)]
+ else:
+ paths = [path.name for path in paths]
+ for path in paths:
+ inp_path = os.path.join(inp_root, path)
+ need_reformat = 1
+ done = 0
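+            # ffprobe checks the input: 44.1 kHz stereo is separated directly, anything else is re-encoded first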
+ try:
+ info = ffmpeg.probe(inp_path, cmd="ffprobe")
+ if (
+ info["streams"][0]["channels"] == 2
+ and info["streams"][0]["sample_rate"] == "44100"
+ ):
+ need_reformat = 0
+ pre_fun._path_audio_(
+ inp_path, save_root_ins, save_root_vocal, format0
+ )
+ done = 1
+ except:
+ need_reformat = 1
+ traceback.print_exc()
+ if need_reformat == 1:
+ tmp_path = "%s/%s.reformatted.wav" % (tmp, os.path.basename(inp_path))
+                os.system(
+                    'ffmpeg -loglevel fatal -i "%s" -vn -acodec pcm_s16le -ac 2 -ar 44100 "%s" -y'
+                    % (inp_path, tmp_path)
+                )
+ inp_path = tmp_path
+ try:
+ if done == 0:
+ pre_fun._path_audio_(
+ inp_path, save_root_ins, save_root_vocal, format0
+ )
+ infos.append("%s->Success" % (os.path.basename(inp_path)))
+ yield "\n".join(infos)
+ except:
+ infos.append(
+ "%s->%s" % (os.path.basename(inp_path), traceback.format_exc())
+ )
+ yield "\n".join(infos)
+ except:
+ infos.append(traceback.format_exc())
+ yield "\n".join(infos)
+ finally:
+ try:
+ if pre_fun is not None:
+ if model_name == "onnx_dereverb_By_FoxJoy":
+ del pre_fun.pred.model
+ del pre_fun.pred.model_
+ else:
+ del pre_fun.model
+ del pre_fun
+ except:
+ traceback.print_exc()
+ if torch.cuda.is_available():
+ torch.cuda.empty_cache()
+ yield "\n".join(infos)
+
+ try:
+ if os.path.exists(old_instrumental_audio_path):
+ if not os.path.exists(instrumental_directory):
+ os.makedirs(instrumental_directory)
+
+ shutil.move(old_instrumental_audio_path, instrumental_audio_path)
+ print(i18n("File moved successfully."))
+ print(i18n("Finished!"))
+ else:
+ print(i18n("The source file does not exist."))
+ except Exception as e:
+        print(i18n("Error moving the file:"), e)
+
+sup_audioext = {'wav', 'mp3', 'flac', 'ogg', 'opus',
+ 'm4a', 'mp4', 'aac', 'alac', 'wma',
+ 'aiff', 'webm', 'ac3'}
+
+def load_downloaded_audio(url):
+ parent_path = find_folder_parent(".", "pretrained_v2")
+ try:
+ infos = []
+ audios_path = os.path.join(parent_path, 'audios')
+
+ if not os.path.exists(audios_path):
+ os.mkdir(audios_path)
+
+ download_file = download_from_url(url)
+ if not download_file:
+ print(i18n("The file could not be downloaded."))
+ infos.append(i18n("The file could not be downloaded."))
+ yield "\n".join(infos)
+ elif download_file == "downloaded":
+ print(i18n("It has been downloaded successfully."))
+ infos.append(i18n("It has been downloaded successfully."))
+ yield "\n".join(infos)
+ elif download_file == "too much use":
+ raise Exception(i18n("Too many users have recently viewed or downloaded this file"))
+ elif download_file == "private link":
+ raise Exception(i18n("Cannot get file from this private link"))
+
+        # sweep freshly downloaded audio files (assumed to land in parent_path)
+        # into the 'audios' folder
+        for filename in os.listdir(parent_path):
+            item_path = os.path.join(parent_path, filename)
+            if os.path.isfile(item_path) and filename.split('.')[-1].lower() in sup_audioext:
+                shutil.move(item_path, os.path.join(audios_path, filename))
+
+ result = ""
+ print(i18n("Audio files have been moved to the 'audios' folder."))
+ infos.append(i18n("Audio files have been moved to the 'audios' folder."))
+ yield "\n".join(infos)
+
+ os.chdir(parent_path)
+ return result
+ except Exception as e:
+ os.chdir(parent_path)
+ if "too much use" in str(e):
+ print(i18n("Too many users have recently viewed or downloaded this file"))
+ yield i18n("Too many users have recently viewed or downloaded this file")
+ elif "private link" in str(e):
+ print(i18n("Cannot get file from this private link"))
+ yield i18n("Cannot get file from this private link")
+ else:
+ print(e)
+ yield i18n("An error occurred downloading")
+ finally:
+ os.chdir(parent_path)
+
+
+class error_message(Exception):
+ def __init__(self, mensaje):
+ self.mensaje = mensaje
+ super().__init__(mensaje)
+
+def get_vc(sid, to_return_protect0, to_return_protect1):
+ global n_spk, tgt_sr, net_g, vc, cpt, version
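+    # an empty sid means "unload": free the cached model and hide the speaker controls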
+ if sid == "" or sid == []:
+ global hubert_model
+ if hubert_model is not None:
+ print("clean_empty_cache")
+            del net_g, n_spk, vc, hubert_model, tgt_sr
+            hubert_model = net_g = n_spk = vc = tgt_sr = None
+ if torch.cuda.is_available():
+ torch.cuda.empty_cache()
+            # rebuilding the architecture once more before deleting is what actually
+            # releases the cached graph; this stays inside the not-None guard so a
+            # first-time empty selection never dereferences an unset cpt
+            if_f0 = cpt.get("f0", 1)
+            version = cpt.get("version", "v1")
+            if version == "v1":
+                if if_f0 == 1:
+                    net_g = SynthesizerTrnMs256NSFsid(
+                        *cpt["config"], is_half=config.is_half
+                    )
+                else:
+                    net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
+            elif version == "v2":
+                if if_f0 == 1:
+                    net_g = SynthesizerTrnMs768NSFsid(
+                        *cpt["config"], is_half=config.is_half
+                    )
+                else:
+                    net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
+            del net_g, cpt
+            if torch.cuda.is_available():
+                torch.cuda.empty_cache()
+            cpt = None
+ return (
+ {"visible": False, "__type__": "update"},
+ {"visible": False, "__type__": "update"},
+ {"visible": False, "__type__": "update"},
+ )
+ person = "%s/%s" % (weight_root, sid)
+ print("loading %s" % person)
+ cpt = torch.load(person, map_location="cpu")
+ tgt_sr = cpt["config"][-1]
+ cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]
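+    # (the speaker count is recovered from the embedding weight rather than trusted from the config)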
+ if_f0 = cpt.get("f0", 1)
+ if if_f0 == 0:
+ to_return_protect0 = to_return_protect1 = {
+ "visible": False,
+ "value": 0.5,
+ "__type__": "update",
+ }
+ else:
+ to_return_protect0 = {
+ "visible": True,
+ "value": to_return_protect0,
+ "__type__": "update",
+ }
+ to_return_protect1 = {
+ "visible": True,
+ "value": to_return_protect1,
+ "__type__": "update",
+ }
+ version = cpt.get("version", "v1")
+ if version == "v1":
+ if if_f0 == 1:
+ net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half)
+ else:
+ net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
+ elif version == "v2":
+ if if_f0 == 1:
+ net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=config.is_half)
+ else:
+ net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
+ del net_g.enc_q
+ print(net_g.load_state_dict(cpt["weight"], strict=False))
+ net_g.eval().to(config.device)
+ if config.is_half:
+ net_g = net_g.half()
+ else:
+ net_g = net_g.float()
+ vc = VC(tgt_sr, config)
+ n_spk = cpt["config"][-3]
+ return (
+ {"visible": True, "maximum": n_spk, "__type__": "update"},
+ to_return_protect0,
+ to_return_protect1,
+ )
+
+
+def download_model():
+ gr.Markdown(value="# " + i18n("Download Model"))
+ gr.Markdown(value=i18n("It is used to download your inference models."))
+ with gr.Row():
+ model_url=gr.Textbox(label=i18n("Url:"))
+ with gr.Row():
+ download_model_status_bar=gr.Textbox(label=i18n("Status"))
+ with gr.Row():
+ download_button=gr.Button(i18n("Download"))
+ download_button.click(fn=load_downloaded_model, inputs=[model_url], outputs=[download_model_status_bar])
+
+def download_backup():
+ gr.Markdown(value="# " + i18n("Download Backup"))
+ gr.Markdown(value=i18n("It is used to download your training backups."))
+ with gr.Row():
+ model_url=gr.Textbox(label=i18n("Url:"))
+ with gr.Row():
+ download_model_status_bar=gr.Textbox(label=i18n("Status"))
+ with gr.Row():
+ download_button=gr.Button(i18n("Download"))
+ download_button.click(fn=load_downloaded_backup, inputs=[model_url], outputs=[download_model_status_bar])
+
+def update_dataset_list(name):
+ new_datasets = []
+ for foldername in os.listdir("./datasets"):
+ if "." not in foldername:
+ new_datasets.append(os.path.join(find_folder_parent(".","pretrained"),"datasets",foldername))
+ return gr.Dropdown.update(choices=new_datasets)
+
+def download_dataset(trainset_dir4):
+ gr.Markdown(value="# " + i18n("Download Dataset"))
+ gr.Markdown(value=i18n("Download the dataset with the audios in a compatible format (.wav/.flac) to train your model."))
+ with gr.Row():
+ dataset_url=gr.Textbox(label=i18n("Url:"))
+ with gr.Row():
+ load_dataset_status_bar=gr.Textbox(label=i18n("Status"))
+ with gr.Row():
+ load_dataset_button=gr.Button(i18n("Download"))
+ load_dataset_button.click(fn=load_dowloaded_dataset, inputs=[dataset_url], outputs=[load_dataset_status_bar])
+ load_dataset_status_bar.change(update_dataset_list, dataset_url, trainset_dir4)
+
+def download_audio():
+ gr.Markdown(value="# " + i18n("Download Audio"))
+ gr.Markdown(value=i18n("Download audios of any format for use in inference (recommended for mobile users)"))
+ with gr.Row():
+ audio_url=gr.Textbox(label=i18n("Url:"))
+ with gr.Row():
+ download_audio_status_bar=gr.Textbox(label=i18n("Status"))
+ with gr.Row():
+ download_button2=gr.Button(i18n("Download"))
+ download_button2.click(fn=load_downloaded_audio, inputs=[audio_url], outputs=[download_audio_status_bar])
+
+def youtube_separator():
+ gr.Markdown(value="# " + i18n("Separate YouTube tracks"))
+ gr.Markdown(value=i18n("Download audio from a YouTube video and automatically separate the vocal and instrumental tracks"))
+ with gr.Row():
+        input_url = gr.Textbox(label=i18n("Enter the youtube link"))
+ output_path = gr.Textbox(
+ label=i18n("Enter the path of the audio folder to be processed (copy it from the address bar of the file manager):"),
+ value=os.path.abspath(os.getcwd()).replace('\\', '/') + "/yt_downloads",
+ visible=False,
+ )
+ save_root_ins = gr.Textbox(
+ label=i18n("Enter the path of the audio folder to be processed:"),
+ value=((os.getcwd()).replace('\\', '/') + "/yt_downloads"),
+ visible=False,
+ )
+ model_choose = gr.Textbox(
+ value=os.path.abspath(os.getcwd()).replace('\\', '/') + "/uvr5_weights/HP5_only_main_vocal",
+ visible=False,
+ )
+ save_root_vocal = gr.Textbox(
+ label=i18n("Specify the output folder for vocals:"), value="audios",
+ visible=False,
+ )
+ opt_ins_root = gr.Textbox(
+ label=i18n("Specify the output folder for accompaniment:"), value="opt",
+ visible=False,
+ )
+ format0 = gr.Radio(
+ label=i18n("Export file format"),
+ choices=["wav", "flac", "mp3", "m4a"],
+ value="wav",
+ interactive=True,
+ visible=False,
+ )
+ with gr.Row():
+ vc_output4 = gr.Textbox(label=i18n("Status"))
+ with gr.Row():
+ but2 = gr.Button(i18n("Download and Separate"))
+ but2.click(
+ uvr,
+ [
+ input_url,
+ output_path,
+ model_choose,
+ save_root_ins,
+ save_root_vocal,
+ opt_ins_root,
+ format0,
+ ],
+ [vc_output4],
+ )
diff --git a/environment_dml.yaml b/environment_dml.yaml
new file mode 100644
index 000000000..0fb3f2225
--- /dev/null
+++ b/environment_dml.yaml
@@ -0,0 +1,186 @@
+name: pydml
+channels:
+ - pytorch
+ - https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main
+ - https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main/
+ - https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free/
+ - https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/conda-forge/
+ - defaults
+ - https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/fastai/
+ - https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/pytorch/
+ - https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/bioconda/
+dependencies:
+ - abseil-cpp=20211102.0=hd77b12b_0
+ - absl-py=1.3.0=py310haa95532_0
+ - aiohttp=3.8.3=py310h2bbff1b_0
+ - aiosignal=1.2.0=pyhd3eb1b0_0
+ - async-timeout=4.0.2=py310haa95532_0
+ - attrs=22.1.0=py310haa95532_0
+ - blas=1.0=mkl
+ - blinker=1.4=py310haa95532_0
+ - bottleneck=1.3.5=py310h9128911_0
+ - brotli=1.0.9=h2bbff1b_7
+ - brotli-bin=1.0.9=h2bbff1b_7
+ - brotlipy=0.7.0=py310h2bbff1b_1002
+ - bzip2=1.0.8=he774522_0
+ - c-ares=1.19.0=h2bbff1b_0
+ - ca-certificates=2023.05.30=haa95532_0
+ - cachetools=4.2.2=pyhd3eb1b0_0
+ - certifi=2023.5.7=py310haa95532_0
+ - cffi=1.15.1=py310h2bbff1b_3
+ - charset-normalizer=2.0.4=pyhd3eb1b0_0
+ - click=8.0.4=py310haa95532_0
+ - colorama=0.4.6=py310haa95532_0
+ - contourpy=1.0.5=py310h59b6b97_0
+ - cryptography=39.0.1=py310h21b164f_0
+ - cycler=0.11.0=pyhd3eb1b0_0
+ - fonttools=4.25.0=pyhd3eb1b0_0
+ - freetype=2.12.1=ha860e81_0
+ - frozenlist=1.3.3=py310h2bbff1b_0
+ - giflib=5.2.1=h8cc25b3_3
+ - glib=2.69.1=h5dc1a3c_2
+ - google-auth=2.6.0=pyhd3eb1b0_0
+ - google-auth-oauthlib=0.4.4=pyhd3eb1b0_0
+ - grpc-cpp=1.48.2=hf108199_0
+ - grpcio=1.48.2=py310hf108199_0
+ - gst-plugins-base=1.18.5=h9e645db_0
+ - gstreamer=1.18.5=hd78058f_0
+ - icu=58.2=ha925a31_3
+ - idna=3.4=py310haa95532_0
+ - intel-openmp=2023.1.0=h59b6b97_46319
+ - jpeg=9e=h2bbff1b_1
+ - kiwisolver=1.4.4=py310hd77b12b_0
+ - krb5=1.19.4=h5b6d351_0
+ - lerc=3.0=hd77b12b_0
+ - libbrotlicommon=1.0.9=h2bbff1b_7
+ - libbrotlidec=1.0.9=h2bbff1b_7
+ - libbrotlienc=1.0.9=h2bbff1b_7
+ - libclang=14.0.6=default_hb5a9fac_1
+ - libclang13=14.0.6=default_h8e68704_1
+ - libdeflate=1.17=h2bbff1b_0
+ - libffi=3.4.4=hd77b12b_0
+ - libiconv=1.16=h2bbff1b_2
+ - libogg=1.3.5=h2bbff1b_1
+ - libpng=1.6.39=h8cc25b3_0
+ - libprotobuf=3.20.3=h23ce68f_0
+ - libtiff=4.5.0=h6c2663c_2
+ - libuv=1.44.2=h2bbff1b_0
+ - libvorbis=1.3.7=he774522_0
+ - libwebp=1.2.4=hbc33d0d_1
+ - libwebp-base=1.2.4=h2bbff1b_1
+ - libxml2=2.10.3=h0ad7f3c_0
+ - libxslt=1.1.37=h2bbff1b_0
+ - lz4-c=1.9.4=h2bbff1b_0
+ - markdown=3.4.1=py310haa95532_0
+ - markupsafe=2.1.1=py310h2bbff1b_0
+ - matplotlib=3.7.1=py310haa95532_1
+ - matplotlib-base=3.7.1=py310h4ed8f06_1
+ - mkl=2023.1.0=h8bd8f75_46356
+ - mkl-service=2.4.0=py310h2bbff1b_1
+ - mkl_fft=1.3.6=py310h4ed8f06_1
+ - mkl_random=1.2.2=py310h4ed8f06_1
+ - multidict=6.0.2=py310h2bbff1b_0
+ - munkres=1.1.4=py_0
+ - numexpr=2.8.4=py310h2cd9be0_1
+ - numpy=1.24.3=py310h055cbcc_1
+ - numpy-base=1.24.3=py310h65a83cf_1
+ - oauthlib=3.2.2=py310haa95532_0
+ - openssl=1.1.1t=h2bbff1b_0
+ - packaging=23.0=py310haa95532_0
+ - pandas=1.5.3=py310h4ed8f06_0
+ - pcre=8.45=hd77b12b_0
+ - pillow=9.4.0=py310hd77b12b_0
+ - pip=23.0.1=py310haa95532_0
+ - ply=3.11=py310haa95532_0
+ - protobuf=3.20.3=py310hd77b12b_0
+ - pyasn1=0.4.8=pyhd3eb1b0_0
+ - pyasn1-modules=0.2.8=py_0
+ - pycparser=2.21=pyhd3eb1b0_0
+ - pyjwt=2.4.0=py310haa95532_0
+ - pyopenssl=23.0.0=py310haa95532_0
+ - pyparsing=3.0.9=py310haa95532_0
+ - pyqt=5.15.7=py310hd77b12b_0
+ - pyqt5-sip=12.11.0=py310hd77b12b_0
+ - pysocks=1.7.1=py310haa95532_0
+ - python=3.10.11=h966fe2a_2
+ - python-dateutil=2.8.2=pyhd3eb1b0_0
+ - pytorch-mutex=1.0=cpu
+ - pytz=2022.7=py310haa95532_0
+ - pyyaml=6.0=py310h2bbff1b_1
+ - qt-main=5.15.2=he8e5bd7_8
+ - qt-webengine=5.15.9=hb9a9bb5_5
+ - qtwebkit=5.212=h2bbfb41_5
+ - re2=2022.04.01=hd77b12b_0
+ - requests=2.29.0=py310haa95532_0
+ - requests-oauthlib=1.3.0=py_0
+ - rsa=4.7.2=pyhd3eb1b0_1
+ - setuptools=67.8.0=py310haa95532_0
+ - sip=6.6.2=py310hd77b12b_0
+ - six=1.16.0=pyhd3eb1b0_1
+ - sqlite=3.41.2=h2bbff1b_0
+ - tbb=2021.8.0=h59b6b97_0
+ - tensorboard=2.10.0=py310haa95532_0
+ - tensorboard-data-server=0.6.1=py310haa95532_0
+ - tensorboard-plugin-wit=1.8.1=py310haa95532_0
+ - tk=8.6.12=h2bbff1b_0
+ - toml=0.10.2=pyhd3eb1b0_0
+ - tornado=6.2=py310h2bbff1b_0
+ - tqdm=4.65.0=py310h9909e9c_0
+ - typing_extensions=4.5.0=py310haa95532_0
+ - tzdata=2023c=h04d1e81_0
+ - urllib3=1.26.16=py310haa95532_0
+ - vc=14.2=h21ff451_1
+ - vs2015_runtime=14.27.29016=h5e58377_2
+ - werkzeug=2.2.3=py310haa95532_0
+ - wheel=0.38.4=py310haa95532_0
+ - win_inet_pton=1.1.0=py310haa95532_0
+ - xz=5.4.2=h8cc25b3_0
+ - yaml=0.2.5=he774522_0
+ - yarl=1.8.1=py310h2bbff1b_0
+ - zlib=1.2.13=h8cc25b3_0
+ - zstd=1.5.5=hd43e919_0
+ - pip:
+ - antlr4-python3-runtime==4.8
+ - appdirs==1.4.4
+ - audioread==3.0.0
+ - bitarray==2.7.4
+ - cython==0.29.35
+ - decorator==5.1.1
+ - fairseq==0.12.2
+ - faiss-cpu==1.7.4
+ - filelock==3.12.0
+ - hydra-core==1.0.7
+ - jinja2==3.1.2
+ - joblib==1.2.0
+ - lazy-loader==0.2
+ - librosa==0.10.0.post2
+ - llvmlite==0.40.0
+ - lxml==4.9.2
+ - mpmath==1.3.0
+ - msgpack==1.0.5
+ - networkx==3.1
+ - noisereduce==2.0.1
+ - numba==0.57.0
+ - omegaconf==2.0.6
+ - opencv-python==4.7.0.72
+ - pooch==1.6.0
+ - portalocker==2.7.0
+ - pysimplegui==4.60.5
+ - pywin32==306
+ - pyworld==0.3.3
+ - regex==2023.5.5
+ - sacrebleu==2.3.1
+ - scikit-learn==1.2.2
+ - scipy==1.10.1
+ - sounddevice==0.4.6
+ - soundfile==0.12.1
+ - soxr==0.3.5
+ - sympy==1.12
+ - tabulate==0.9.0
+ - threadpoolctl==3.1.0
+ - torch==2.0.0
+ - torch-directml==0.2.0.dev230426
+ - torchaudio==2.0.1
+ - torchvision==0.15.1
+ - wget==3.2
+prefix: D:\ProgramData\anaconda3_\envs\pydml
diff --git a/extract_f0_print.py b/extract_f0_print.py
new file mode 100644
index 000000000..620cfe2ae
--- /dev/null
+++ b/extract_f0_print.py
@@ -0,0 +1,293 @@
+import os
+import traceback
+import sys
+import parselmouth
+
+now_dir = os.getcwd()
+sys.path.append(now_dir)
+from LazyImport import lazyload
+from my_utils import load_audio
+import pyworld
+import numpy as np, logging
+torchcrepe = lazyload("torchcrepe") # Fork Feature. Crepe algo for training and preprocess
+torch = lazyload("torch")
+#from torch import Tensor # Fork Feature. Used for pitch prediction for torch crepe.
+tqdm = lazyload("tqdm")
+
+logging.getLogger("numba").setLevel(logging.WARNING)
+
+import multiprocessing
+
+exp_dir = sys.argv[1]
+f = open(f"{exp_dir}/extract_f0_feature.log", "a+")
+
+DoFormant = False
+Quefrency = 1.0
+Timbre = 1.0
+
+def printt(strr):
+ print(strr)
+ f.write(f"{strr}\n")
+ f.flush()
+
+
+n_p = int(sys.argv[2])
+f0method = sys.argv[3]
+try:
+    extraction_crepe_hop_length = int(sys.argv[4])
+except (IndexError, ValueError):
+    print("crepe hop length was not passed as an argument; defaulting to 128")
+    extraction_crepe_hop_length = 128
+
+# print("EXTRACTION CREPE HOP LENGTH: " + str(extraction_crepe_hop_length))
+# print("EXTRACTION CREPE HOP LENGTH TYPE: " + str(type(extraction_crepe_hop_length)))
+
+
+class FeatureInput(object):
+ def __init__(self, samplerate=16000, hop_size=160):
+ self.fs = samplerate
+ self.hop = hop_size
+
+ self.f0_method_dict = self.get_f0_method_dict()
+
+ self.f0_bin = 256
+ self.f0_max = 1100.0
+ self.f0_min = 50.0
+ self.f0_mel_min = 1127 * np.log(1 + self.f0_min / 700)
+ self.f0_mel_max = 1127 * np.log(1 + self.f0_max / 700)
+
+ # EXPERIMENTAL. PROBABLY BUGGY
+ def mncrepe(self, method, x, p_len, crepe_hop_length):
+ f0 = None
+ torch_device_index = 0
+ torch_device = torch.device(
+ f"cuda:{torch_device_index % torch.cuda.device_count()}"
+ ) if torch.cuda.is_available() \
+ else torch.device("mps") if torch.backends.mps.is_available() \
+ else torch.device("cpu")
+
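+        # move the signal onto the chosen device and normalize by its 99.9th-percentile amplitude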
+ audio = torch.from_numpy(x.astype(np.float32)).to(torch_device, copy=True)
+ audio /= torch.quantile(torch.abs(audio), 0.999)
+ audio = torch.unsqueeze(audio, dim=0)
+ if audio.ndim == 2 and audio.shape[0] > 1:
+ audio = torch.mean(audio, dim=0, keepdim=True).detach()
+ audio = audio.detach()
+
+ if method == 'mangio-crepe':
+ pitch: torch.Tensor = torchcrepe.predict(
+ audio,
+ self.fs,
+ crepe_hop_length,
+ self.f0_min,
+ self.f0_max,
+ "full",
+ batch_size=crepe_hop_length * 2,
+ device=torch_device,
+ pad=True,
+ )
+ p_len = p_len or x.shape[0] // crepe_hop_length
+ # Resize the pitch
+            source = pitch.squeeze(0).cpu().float().numpy()
+ source[source < 0.001] = np.nan
+ target = np.interp(
+ np.arange(0, len(source) * p_len, len(source)) / p_len,
+ np.arange(0, len(source)),
+ source,
+ )
+ f0 = np.nan_to_num(target)
+
+ elif method == 'crepe':
+ batch_size = 512
+ audio = torch.tensor(np.copy(x))[None].float()
+ f0, pd = torchcrepe.predict(
+ audio,
+ self.fs,
+ 160,
+ self.f0_min,
+ self.f0_max,
+ "full",
+ batch_size=batch_size,
+ device=torch_device,
+ return_periodicity=True,
+ )
+ pd = torchcrepe.filter.median(pd, 3)
+ f0 = torchcrepe.filter.mean(f0, 3)
+ f0[pd < 0.1] = 0
+ f0 = f0[0].cpu().numpy()
+ f0 = f0[1:] # Get rid of extra first frame
+
+ return f0
+
+ def get_pm(self, x, p_len):
+ f0 = parselmouth.Sound(x, self.fs).to_pitch_ac(
+ time_step=160 / 16000,
+ voicing_threshold=0.6,
+ pitch_floor=self.f0_min,
+ pitch_ceiling=self.f0_max,
+ ).selected_array["frequency"]
+
+ return np.pad(
+ f0,
+ [[max(0, (p_len - len(f0) + 1) // 2), max(0, p_len - len(f0) - (p_len - len(f0) + 1) // 2)]],
+ mode="constant"
+ )
+
+ def get_harvest(self, x):
+ f0_spectral = pyworld.harvest(
+ x.astype(np.double),
+ fs=self.fs,
+ f0_ceil=self.f0_max,
+ f0_floor=self.f0_min,
+ frame_period=1000 * self.hop / self.fs,
+ )
+ return pyworld.stonemask(x.astype(np.double), *f0_spectral, self.fs)
+
+ def get_dio(self, x):
+ f0_spectral = pyworld.dio(
+ x.astype(np.double),
+ fs=self.fs,
+ f0_ceil=self.f0_max,
+ f0_floor=self.f0_min,
+ frame_period=1000 * self.hop / self.fs,
+ )
+ return pyworld.stonemask(x.astype(np.double), *f0_spectral, self.fs)
+
+ def get_rmvpe(self, x):
+ if not hasattr(self, "model_rmvpe"):
+ from rmvpe import RMVPE
+ self.model_rmvpe = RMVPE("rmvpe.pt", is_half=False, device="cuda:0")
+
+ return self.model_rmvpe.infer_from_audio(x, thred=0.03)
+
+ def get_f0_method_dict(self):
+ return {
+ "pm": self.get_pm,
+ "harvest": self.get_harvest,
+ "dio": self.get_dio,
+ "rmvpe": self.get_rmvpe
+ }
+
+ def get_f0_hybrid_computation(
+ self,
+ methods_str,
+ x,
+ p_len,
+ crepe_hop_length,
+ ):
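+        # e.g. methods_str == "hybrid[pm+harvest]" yields methods == ["pm", "harvest"]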
+ # Get various f0 methods from input to use in the computation stack
+ s = methods_str
+ s = s.split("hybrid")[1]
+ s = s.replace("[", "").replace("]", "")
+ methods = s.split("+")
+ f0_computation_stack = []
+
+ for method in methods:
+ if method in self.f0_method_dict:
+ f0 = self.f0_method_dict[method](x, p_len) if method == 'pm' else self.f0_method_dict[method](x)
+ f0_computation_stack.append(f0)
+            elif method in ('crepe', 'mangio-crepe'):
+                # crepe variants go through mncrepe; keep their f0 in the median stack
+                f0_computation_stack.append(self.mncrepe(method, x, p_len, crepe_hop_length))
+
+ if len(f0_computation_stack) != 0:
+ f0_median_hybrid = np.nanmedian(f0_computation_stack, axis=0) if len(f0_computation_stack)>1 else f0_computation_stack[0]
+ return f0_median_hybrid
+ else:
+ raise ValueError("No valid methods were provided")
+
+ def compute_f0(self, path, f0_method, crepe_hop_length):
+ x = load_audio(path, self.fs, DoFormant, Quefrency, Timbre)
+ p_len = x.shape[0] // self.hop
+
+ if f0_method in self.f0_method_dict:
+ f0 = self.f0_method_dict[f0_method](x, p_len) if f0_method == 'pm' else self.f0_method_dict[f0_method](x)
+ elif f0_method in ['crepe', 'mangio-crepe']:
+ f0 = self.mncrepe(f0_method, x, p_len, crepe_hop_length)
+ elif "hybrid" in f0_method: # EXPERIMENTAL
+ # Perform hybrid median pitch estimation
+ f0 = self.get_f0_hybrid_computation(
+ f0_method,
+ x,
+ p_len,
+ crepe_hop_length,
+            )
+        else:
+            raise ValueError(f"Unknown f0 method: {f0_method}")
+
+        return f0
+
+ def coarse_f0(self, f0):
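+        # quantize f0 in Hz onto 1..255 coarse mel-scale bins (values <= 1 mark unvoiced frames)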
+ f0_mel = 1127 * np.log(1 + f0 / 700)
+ f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - self.f0_mel_min) * (
+ self.f0_bin - 2
+ ) / (self.f0_mel_max - self.f0_mel_min) + 1
+
+ # use 0 or 1
+ f0_mel[f0_mel <= 1] = 1
+ f0_mel[f0_mel > self.f0_bin - 1] = self.f0_bin - 1
+ f0_coarse = np.rint(f0_mel).astype(int)
+ assert f0_coarse.max() <= 255 and f0_coarse.min() >= 1, (
+ f0_coarse.max(),
+ f0_coarse.min(),
+ )
+ return f0_coarse
+
+ def go(self, paths, f0_method, crepe_hop_length, thread_n):
+ if not paths:
+ printt("no-f0-todo")
+ return
+
+ with tqdm.tqdm(total=len(paths), leave=True, position=thread_n) as pbar:
+ description = f"thread:{thread_n}, f0ing, Hop-Length:{crepe_hop_length}"
+ pbar.set_description(description)
+
+ for idx, (inp_path, opt_path1, opt_path2) in enumerate(paths):
+ try:
+ if (
+ os.path.exists(opt_path1 + ".npy")
+ and os.path.exists(opt_path2 + ".npy")
+ ):
+ pbar.update(1)
+ continue
+
+ featur_pit = self.compute_f0(inp_path, f0_method, crepe_hop_length)
+ np.save(opt_path2, featur_pit, allow_pickle=False) # nsf
+
+ coarse_pit = self.coarse_f0(featur_pit)
+ np.save(opt_path1, coarse_pit, allow_pickle=False) # ori
+
+ pbar.update(1)
+ except Exception as e:
+ printt(f"f0fail-{idx}-{inp_path}-{traceback.format_exc()}")
+
+
+if __name__ == "__main__":
+ # exp_dir=r"E:\codes\py39\dataset\mi-test"
+ # n_p=16
+ # f = open("%s/log_extract_f0.log"%exp_dir, "w")
+ printt(sys.argv)
+ featureInput = FeatureInput()
+ paths = []
+ inp_root = "%s/1_16k_wavs" % (exp_dir)
+ opt_root1 = "%s/2a_f0" % (exp_dir)
+ opt_root2 = "%s/2b-f0nsf" % (exp_dir)
+
+ os.makedirs(opt_root1, exist_ok=True)
+ os.makedirs(opt_root2, exist_ok=True)
+ for name in sorted(list(os.listdir(inp_root))):
+ inp_path = "%s/%s" % (inp_root, name)
+ if "spec" in inp_path:
+ continue
+ opt_path1 = "%s/%s" % (opt_root1, name)
+ opt_path2 = "%s/%s" % (opt_root2, name)
+ paths.append([inp_path, opt_path1, opt_path2])
+
+ ps = []
+ print("Using f0 method: " + f0method)
+ for i in range(n_p):
+ p = multiprocessing.Process(
+ target=featureInput.go,
+ args=(paths[i::n_p], f0method, extraction_crepe_hop_length, i),
+ )
+ ps.append(p)
+ p.start()
+ for i in range(n_p):
+ ps[i].join()
diff --git a/extract_feature_print.py b/extract_feature_print.py
new file mode 100644
index 000000000..3aba191e8
--- /dev/null
+++ b/extract_feature_print.py
@@ -0,0 +1,123 @@
+import os, sys, traceback
+
+os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
+os.environ["PYTORCH_MPS_HIGH_WATERMARK_RATIO"] = "0.0"
+
+# device=sys.argv[1]
+n_part = int(sys.argv[2])
+i_part = int(sys.argv[3])
+if len(sys.argv) == 6:
+ exp_dir = sys.argv[4]
+ version = sys.argv[5]
+else:
+ i_gpu = sys.argv[4]
+ exp_dir = sys.argv[5]
+ os.environ["CUDA_VISIBLE_DEVICES"] = str(i_gpu)
+ version = sys.argv[6]
+import torch
+import torch.nn.functional as F
+import soundfile as sf
+import numpy as np
+from fairseq import checkpoint_utils
+
+device = "cpu"
+if torch.cuda.is_available():
+ device = "cuda"
+elif torch.backends.mps.is_available():
+ device = "mps"
+
+f = open("%s/extract_f0_feature.log" % exp_dir, "a+")
+
+
+def printt(strr):
+ print(strr)
+ f.write("%s\n" % strr)
+ f.flush()
+
+
+printt(sys.argv)
+model_path = "hubert_base.pt"
+
+printt(exp_dir)
+wavPath = "%s/1_16k_wavs" % exp_dir
+outPath = (
+ "%s/3_feature256" % exp_dir if version == "v1" else "%s/3_feature768" % exp_dir
+)
+os.makedirs(outPath, exist_ok=True)
+
+
+# wave must be 16k, hop_size=320
+def readwave(wav_path, normalize=False):
+ wav, sr = sf.read(wav_path)
+ assert sr == 16000
+ feats = torch.from_numpy(wav).float()
+ if feats.dim() == 2: # double channels
+ feats = feats.mean(-1)
+ assert feats.dim() == 1, feats.dim()
+ if normalize:
+ with torch.no_grad():
+ feats = F.layer_norm(feats, feats.shape)
+ feats = feats.view(1, -1)
+ return feats
+
+
+# HuBERT model
+printt("load model(s) from {}".format(model_path))
+# bail out early if the HuBERT checkpoint is missing
+if not os.path.exists(model_path):
+    printt(
+        "Error: extraction aborted because %s does not exist; download it from https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main"
+        % model_path
+    )
+    exit(0)
+models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
+ [model_path],
+ suffix="",
+)
+model = models[0]
+model = model.to(device)
+printt("move model to %s" % device)
+if device not in ["mps", "cpu"]:
+ model = model.half()
+model.eval()
+
+todo = sorted(list(os.listdir(wavPath)))[i_part::n_part]
+n = max(1, len(todo) // 10)  # print at most ten progress lines
+if len(todo) == 0:
+ printt("no-feature-todo")
+else:
+ printt("all-feature-%s" % len(todo))
+ for idx, file in enumerate(todo):
+ try:
+ if file.endswith(".wav"):
+ wav_path = "%s/%s" % (wavPath, file)
+ out_path = "%s/%s" % (outPath, file.replace("wav", "npy"))
+
+ if os.path.exists(out_path):
+ continue
+
+ feats = readwave(wav_path, normalize=saved_cfg.task.normalize)
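+                # nothing is padded here, so the mask HuBERT expects is all False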
+ padding_mask = torch.BoolTensor(feats.shape).fill_(False)
+ inputs = {
+ "source": feats.half().to(device)
+ if device not in ["mps", "cpu"]
+ else feats.to(device),
+ "padding_mask": padding_mask.to(device),
+                    "output_layer": 9 if version == "v1" else 12,  # layer 9 for v1, layer 12 for v2
+ }
+ with torch.no_grad():
+ logits = model.extract_features(**inputs)
+ feats = (
+ model.final_proj(logits[0]) if version == "v1" else logits[0]
+ )
+
+ feats = feats.squeeze(0).float().cpu().numpy()
+ if np.isnan(feats).sum() == 0:
+ np.save(out_path, feats, allow_pickle=False)
+ else:
+ printt("%s-contains nan" % file)
+ if idx % n == 0:
+ printt("now-%s,all-%s,%s,%s" % (idx, len(todo), file, feats.shape))
+ except:
+ printt(traceback.format_exc())
+ printt("all-feature-done")
diff --git a/extract_locale.py b/extract_locale.py
new file mode 100644
index 000000000..a4ff5ea3d
--- /dev/null
+++ b/extract_locale.py
@@ -0,0 +1,34 @@
+import json
+import re
+import ast
+
+# Define regular expression patterns
+pattern = r"""i18n\([\s\n\t]*(["'][^"']+["'])[\s\n\t]*\)"""
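+# matches i18n("...") and i18n('...') calls, capturing the quoted key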
+
+# Initialize the dictionary to store key-value pairs
+data = {}
+
+
+def process(fn: str):
+ global data
+ with open(fn, "r", encoding="utf-8") as f:
+ contents = f.read()
+ matches = re.findall(pattern, contents)
+ for key in matches:
+            key = ast.literal_eval(key)  # safely unquote the matched string literal
+ print("extract:", key)
+ data[key] = key
+
+
+print("processing infer-web.py")
+process("infer-web.py")
+
+print("processing gui_v0.py")
+process("gui_v0.py")
+
+print("processing gui_v1.py")
+process("gui_v1.py")
+
+# Save as a JSON file
+with open("./i18n/en_US.json", "w", encoding="utf-8") as f:
+ json.dump(data, f, ensure_ascii=False, indent=4)
+ f.write("\n")
diff --git a/formantshiftcfg/Put your formantshift presets here as a txt file b/formantshiftcfg/Put your formantshift presets here as a txt file
new file mode 100644
index 000000000..e69de29bb
diff --git a/formantshiftcfg/f2m.txt b/formantshiftcfg/f2m.txt
new file mode 100644
index 000000000..40356a80c
--- /dev/null
+++ b/formantshiftcfg/f2m.txt
@@ -0,0 +1,2 @@
+1.0
+0.8
\ No newline at end of file
diff --git a/formantshiftcfg/m2f.txt b/formantshiftcfg/m2f.txt
new file mode 100644
index 000000000..fa69b52dc
--- /dev/null
+++ b/formantshiftcfg/m2f.txt
@@ -0,0 +1,2 @@
+1.0
+1.2
\ No newline at end of file
diff --git a/formantshiftcfg/random.txt b/formantshiftcfg/random.txt
new file mode 100644
index 000000000..427be5c80
--- /dev/null
+++ b/formantshiftcfg/random.txt
@@ -0,0 +1,2 @@
+32.0
+9.8
\ No newline at end of file
diff --git a/go-realtime-gui-v0.bat b/go-realtime-gui-v0.bat
new file mode 100644
index 000000000..ed23f3f2f
--- /dev/null
+++ b/go-realtime-gui-v0.bat
@@ -0,0 +1,2 @@
+runtime\python.exe gui_v0.py
+pause
diff --git a/go-realtime-gui-v1.bat b/go-realtime-gui-v1.bat
new file mode 100644
index 000000000..21f0edc35
--- /dev/null
+++ b/go-realtime-gui-v1.bat
@@ -0,0 +1,2 @@
+runtime\python.exe gui_v1.py
+pause
diff --git a/go-web.bat b/go-web.bat
new file mode 100644
index 000000000..babef90fb
--- /dev/null
+++ b/go-web.bat
@@ -0,0 +1,25 @@
+@ECHO OFF
+SETLOCAL
+
+:: Set the Python command.
+SET PYCMD="runtime\python.exe"
+
+:: Set the port number.
+SET PORT="7897"
+
+:: Set the theme of Gradio.
+SET THEME="JohnSmith9982/small_and_pretty"
+
+:: Echo the current settings.
+ECHO Current Settings:
+ECHO.
+ECHO Python command: %PYCMD%
+ECHO Port number: %PORT%
+ECHO Theme: %THEME%
+ECHO.
+
+:: Execute the Python script with the current settings.
+%PYCMD% infer-web.py --pycmd %PYCMD% --port %PORT% --theme %THEME%
+
+:: Pause the script at the end.
+pause
\ No newline at end of file
diff --git a/go-web.ps1 b/go-web.ps1
new file mode 100644
index 000000000..9a407d5fa
--- /dev/null
+++ b/go-web.ps1
@@ -0,0 +1,29 @@
+# Set the theme of Gradio.
+# Browse themes at https://huggingface.co/spaces/gradio/theme-gallery
+# Each theme page (e.g. https://huggingface.co/spaces/bethecloud/storj_theme)
+# shows a line like: theme='bethecloud/storj_theme'
+# Copy the part between the apostrophes and paste it below in quotation marks,
+# ending up with e.g. THEME = "bethecloud/storj_theme"
+$props = @{
+ PYCMD = "runtime\python.exe"
+ PORT = "7897"
+    THEME = "gradio/soft" # Modify accordingly to change the Gradio theme
+}
+
+Write-Host "Current Settings:`n" -ForegroundColor Magenta
+# Display the current settings
+$props.GetEnumerator() | ForEach-Object {
+ Write-Host ("{0}:" -f $_.Name) -NoNewline -ForegroundColor Green
+ Write-Host (" {0}" -f $_.Value) -ForegroundColor Cyan
+}
+
+Write-Host ""
+
+# Run Python script using properties as arguments
+& $props.PYCMD infer-web.py --pycmd $props.PYCMD --port $props.PORT --theme $props.THEME
+
+# Pause the script at the end
+Write-Host "Press any key to continue ..."
+$null = $Host.UI.RawUI.ReadKey("NoEcho,IncludeKeyDown")
\ No newline at end of file
diff --git a/gui_v0.py b/gui_v0.py
new file mode 100644
index 000000000..3916f6db5
--- /dev/null
+++ b/gui_v0.py
@@ -0,0 +1,786 @@
+import os, sys, traceback, re
+
+import json
+
+now_dir = os.getcwd()
+sys.path.append(now_dir)
+from config import Config
+
+Config = Config()
+import PySimpleGUI as sg
+import sounddevice as sd
+import noisereduce as nr
+import numpy as np
+from fairseq import checkpoint_utils
+import librosa, torch, pyworld, faiss, time, threading
+import torch.nn.functional as F
+import torchaudio.transforms as tat
+import scipy.signal as signal
+import torchcrepe
+
+# import matplotlib.pyplot as plt
+from lib.infer_pack.models import (
+ SynthesizerTrnMs256NSFsid,
+ SynthesizerTrnMs256NSFsid_nono,
+ SynthesizerTrnMs768NSFsid,
+ SynthesizerTrnMs768NSFsid_nono,
+)
+from i18n import I18nAuto
+
+i18n = I18nAuto()
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+current_dir = os.getcwd()
+
+
+class RVC:
+ def __init__(
+ self, key, f0_method, hubert_path, pth_path, index_path, npy_path, index_rate
+ ) -> None:
+        """
+        Initialize the realtime conversion model.
+        """
+ try:
+ self.f0_up_key = key
+ self.time_step = 160 / 16000 * 1000
+ self.f0_min = 50
+ self.f0_max = 1100
+ self.f0_mel_min = 1127 * np.log(1 + self.f0_min / 700)
+ self.f0_mel_max = 1127 * np.log(1 + self.f0_max / 700)
+ self.f0_method = f0_method
+ self.sr = 16000
+ self.window = 160
+
+ # Get Torch Device
+ if torch.cuda.is_available():
+ self.torch_device = torch.device(
+ f"cuda:{0 % torch.cuda.device_count()}"
+ )
+ elif torch.backends.mps.is_available():
+ self.torch_device = torch.device("mps")
+ else:
+ self.torch_device = torch.device("cpu")
+
+ if index_rate != 0:
+ self.index = faiss.read_index(index_path)
+ # self.big_npy = np.load(npy_path)
+ self.big_npy = self.index.reconstruct_n(0, self.index.ntotal)
+ print("index search enabled")
+ self.index_rate = index_rate
+ model_path = hubert_path
+ print("load model(s) from {}".format(model_path))
+ models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
+ [model_path],
+ suffix="",
+ )
+ self.model = models[0]
+ self.model = self.model.to(device)
+ if Config.is_half:
+ self.model = self.model.half()
+ else:
+ self.model = self.model.float()
+ self.model.eval()
+ cpt = torch.load(pth_path, map_location="cpu")
+ self.tgt_sr = cpt["config"][-1]
+ cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] # n_spk
+ self.if_f0 = cpt.get("f0", 1)
+ self.version = cpt.get("version", "v1")
+ if self.version == "v1":
+ if self.if_f0 == 1:
+ self.net_g = SynthesizerTrnMs256NSFsid(
+ *cpt["config"], is_half=Config.is_half
+ )
+ else:
+ self.net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
+ elif self.version == "v2":
+ if self.if_f0 == 1:
+ self.net_g = SynthesizerTrnMs768NSFsid(
+ *cpt["config"], is_half=Config.is_half
+ )
+ else:
+ self.net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
+ del self.net_g.enc_q
+ print(self.net_g.load_state_dict(cpt["weight"], strict=False))
+ self.net_g.eval().to(device)
+ if Config.is_half:
+ self.net_g = self.net_g.half()
+ else:
+ self.net_g = self.net_g.float()
+ except:
+ print(traceback.format_exc())
+
+ def get_regular_crepe_computation(self, x, f0_min, f0_max, model="full"):
+ batch_size = 512
+ # Compute pitch using first gpu
+ audio = torch.tensor(np.copy(x))[None].float()
+ f0, pd = torchcrepe.predict(
+ audio,
+ self.sr,
+ self.window,
+ f0_min,
+ f0_max,
+ model,
+ batch_size=batch_size,
+ device=self.torch_device,
+ return_periodicity=True,
+ )
+ pd = torchcrepe.filter.median(pd, 3)
+ f0 = torchcrepe.filter.mean(f0, 3)
+ f0[pd < 0.1] = 0
+ f0 = f0[0].cpu().numpy()
+ return f0
+
+ def get_harvest_computation(self, x, f0_min, f0_max):
+ f0, t = pyworld.harvest(
+ x.astype(np.double),
+ fs=self.sr,
+ f0_ceil=f0_max,
+ f0_floor=f0_min,
+ frame_period=10,
+ )
+ f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr)
+ f0 = signal.medfilt(f0, 3)
+ return f0
+
+ def get_f0(self, x, f0_up_key, inp_f0=None):
+ # Calculate Padding and f0 details here
+        p_len = x.shape[0] // 512  # rough frame-count estimate; may be inaccurate
+ x_pad = 1
+ f0_min = 50
+ f0_max = 1100
+ f0_mel_min = 1127 * np.log(1 + f0_min / 700)
+ f0_mel_max = 1127 * np.log(1 + f0_max / 700)
+
+ f0 = 0
+ # Here, check f0_methods and get their computations
+ if self.f0_method == "harvest":
+ f0 = self.get_harvest_computation(x, f0_min, f0_max)
+ elif self.f0_method == "reg-crepe":
+ f0 = self.get_regular_crepe_computation(x, f0_min, f0_max)
+ elif self.f0_method == "reg-crepe-tiny":
+ f0 = self.get_regular_crepe_computation(x, f0_min, f0_max, "tiny")
+
+ # Calculate f0_course and f0_bak here
+ f0 *= pow(2, f0_up_key / 12)
+ # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
+        tf0 = self.sr // self.window  # number of f0 frames per second
+ if inp_f0 is not None:
+ delta_t = np.round(
+ (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1
+ ).astype("int16")
+ replace_f0 = np.interp(
+ list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1]
+ )
+ shape = f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)].shape[0]
+ f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)] = replace_f0[:shape]
+ # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
+ f0bak = f0.copy()
+ f0_mel = 1127 * np.log(1 + f0 / 700)
+ f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
+ f0_mel_max - f0_mel_min
+ ) + 1
+ f0_mel[f0_mel <= 1] = 1
+ f0_mel[f0_mel > 255] = 255
+        f0_coarse = np.rint(f0_mel).astype(int)  # np.int is removed in recent NumPy
+        return f0_coarse, f0bak  # coarse bins plus the raw f0 track
+
+ def infer(self, feats: torch.Tensor) -> np.ndarray:
+        """
+        Run one conversion inference pass over a feature block.
+        """
+ audio = feats.clone().cpu().numpy()
+ assert feats.dim() == 1, feats.dim()
+ feats = feats.view(1, -1)
+ padding_mask = torch.BoolTensor(feats.shape).fill_(False)
+ if Config.is_half:
+ feats = feats.half()
+ else:
+ feats = feats.float()
+ inputs = {
+ "source": feats.to(device),
+ "padding_mask": padding_mask.to(device),
+ "output_layer": 9 if self.version == "v1" else 12,
+ }
+ torch.cuda.synchronize()
+ with torch.no_grad():
+ logits = self.model.extract_features(**inputs)
+ feats = (
+ self.model.final_proj(logits[0]) if self.version == "v1" else logits[0]
+ )
+
+        #### index-based feature retrieval (optional)
+ try:
+ if (
+ hasattr(self, "index")
+ and hasattr(self, "big_npy")
+ and self.index_rate != 0
+ ):
+ npy = feats[0].cpu().numpy().astype("float32")
+ score, ix = self.index.search(npy, k=8)
+ weight = np.square(1 / score)
+ weight /= weight.sum(axis=1, keepdims=True)
+ npy = np.sum(self.big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)
+ if Config.is_half:
+ npy = npy.astype("float16")
+ feats = (
+ torch.from_numpy(npy).unsqueeze(0).to(device) * self.index_rate
+ + (1 - self.index_rate) * feats
+ )
+ else:
+ print("index search FAIL or disabled")
+ except:
+ traceback.print_exc()
+ print("index search FAIL")
+ feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
+ torch.cuda.synchronize()
+ print(feats.shape)
+        if self.if_f0 == 1:
+            pitch, pitchf = self.get_f0(audio, self.f0_up_key)
+            p_len = min(feats.shape[1], 13000, pitch.shape[0])  # cap length to avoid VRAM overflow
+        else:
+            pitch, pitchf = None, None
+            p_len = min(feats.shape[1], 13000)  # cap length to avoid VRAM overflow
+ torch.cuda.synchronize()
+ # print(feats.shape,pitch.shape)
+ feats = feats[:, :p_len, :]
+ if self.if_f0 == 1:
+ pitch = pitch[:p_len]
+ pitchf = pitchf[:p_len]
+ pitch = torch.LongTensor(pitch).unsqueeze(0).to(device)
+ pitchf = torch.FloatTensor(pitchf).unsqueeze(0).to(device)
+ p_len = torch.LongTensor([p_len]).to(device)
+ ii = 0 # sid
+ sid = torch.LongTensor([ii]).to(device)
+ with torch.no_grad():
+ if self.if_f0 == 1:
+ infered_audio = (
+ self.net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0]
+ .data.cpu()
+ .float()
+ )
+ else:
+ infered_audio = (
+ self.net_g.infer(feats, p_len, sid)[0][0, 0].data.cpu().float()
+ )
+ torch.cuda.synchronize()
+ return infered_audio
+
+
+class GUIConfig:
+ def __init__(self) -> None:
+ self.hubert_path: str = ""
+ self.pth_path: str = ""
+ self.index_path: str = ""
+ self.npy_path: str = ""
+ self.f0_method: str = ""
+ self.pitch: int = 12
+ self.samplerate: int = 44100
+ self.block_time: float = 1.0 # s
+ self.buffer_num: int = 1
+ self.threhold: int = -30
+ self.crossfade_time: float = 0.08
+ self.extra_time: float = 0.04
+ self.I_noise_reduce = False
+ self.O_noise_reduce = False
+ self.index_rate = 0.3
+
+
+class GUI:
+ def __init__(self) -> None:
+ self.config = GUIConfig()
+ self.flag_vc = False
+
+ self.launcher()
+
+ def load(self):
+ (
+ input_devices,
+ output_devices,
+ input_devices_indices,
+ output_devices_indices,
+ ) = self.get_devices()
+ try:
+ with open("values1.json", "r") as j:
+ data = json.load(j)
+ except:
+ # Injecting f0_method into the json data
+ with open("values1.json", "w") as j:
+ data = {
+ "pth_path": "",
+ "index_path": "",
+ "sg_input_device": input_devices[
+ input_devices_indices.index(sd.default.device[0])
+ ],
+ "sg_output_device": output_devices[
+ output_devices_indices.index(sd.default.device[1])
+ ],
+ "threhold": "-45",
+ "pitch": "0",
+ "index_rate": "0",
+ "block_time": "1",
+ "crossfade_length": "0.04",
+ "extra_time": "1",
+ }
+ return data
+
+ def launcher(self):
+ data = self.load()
+ sg.theme("DarkTeal12")
+ input_devices, output_devices, _, _ = self.get_devices()
+ layout = [
+ [
+ sg.Frame(
+ title="Proudly forked by Mangio621",
+ ),
+ sg.Frame(
+ title=i18n("Load model"),
+ layout=[
+ [
+ sg.Input(
+ default_text="hubert_base.pt",
+ key="hubert_path",
+ disabled=True,
+ ),
+ sg.FileBrowse(
+ i18n("Hubert Model"),
+ initial_folder=os.path.join(os.getcwd()),
+ file_types=(("pt files", "*.pt"),),
+ ),
+ ],
+ [
+ sg.Input(
+ default_text=data.get("pth_path", ""),
+ key="pth_path",
+ ),
+ sg.FileBrowse(
+ i18n("Select the .pth file"),
+ initial_folder=os.path.join(os.getcwd(), "weights"),
+ file_types=(("weight files", "*.pth"),),
+ ),
+ ],
+ [
+ sg.Input(
+ default_text=data.get("index_path", ""),
+ key="index_path",
+ ),
+ sg.FileBrowse(
+ i18n("Select the .index file"),
+ initial_folder=os.path.join(os.getcwd(), "logs"),
+ file_types=(("index files", "*.index"),),
+ ),
+ ],
+ [
+ sg.Input(
+                            default_text="You don't need to fill this in.",
+ key="npy_path",
+ disabled=True,
+ ),
+ sg.FileBrowse(
+ i18n("Select the .npy file"),
+ initial_folder=os.path.join(os.getcwd(), "logs"),
+ file_types=(("feature files", "*.npy"),),
+ ),
+ ],
+ ],
+ ),
+ ],
+ [
+ # Mangio f0 Selection frame Here
+ sg.Frame(
+ layout=[
+ [
+ sg.Radio(
+ "Harvest", "f0_method", key="harvest", default=True
+ ),
+ sg.Radio("Crepe", "f0_method", key="reg-crepe"),
+ sg.Radio("Crepe Tiny", "f0_method", key="reg-crepe-tiny"),
+ ]
+ ],
+ title="Select an f0 Method",
+ )
+ ],
+ [
+ sg.Frame(
+ layout=[
+ [
+ sg.Text(i18n("Input device")),
+ sg.Combo(
+ input_devices,
+ key="sg_input_device",
+ default_value=data.get("sg_input_device", ""),
+ ),
+ ],
+ [
+ sg.Text(i18n("Output device")),
+ sg.Combo(
+ output_devices,
+ key="sg_output_device",
+ default_value=data.get("sg_output_device", ""),
+ ),
+ ],
+ ],
+ title=i18n("Audio device (please use the same type of driver)"),
+ )
+ ],
+ [
+ sg.Frame(
+ layout=[
+ [
+ sg.Text(i18n("Response threshold")),
+ sg.Slider(
+ range=(-60, 0),
+ key="threhold",
+ resolution=1,
+ orientation="h",
+ default_value=data.get("threhold", ""),
+ ),
+ ],
+ [
+ sg.Text(i18n("Pitch settings")),
+ sg.Slider(
+ range=(-24, 24),
+ key="pitch",
+ resolution=1,
+ orientation="h",
+ default_value=data.get("pitch", ""),
+ ),
+ ],
+ [
+ sg.Text(i18n("Index Rate")),
+ sg.Slider(
+ range=(0.0, 1.0),
+ key="index_rate",
+ resolution=0.01,
+ orientation="h",
+ default_value=data.get("index_rate", ""),
+ ),
+ ],
+ ],
+ title=i18n("General settings"),
+ ),
+ sg.Frame(
+ layout=[
+ [
+ sg.Text(i18n("Sample length")),
+ sg.Slider(
+ range=(0.1, 3.0),
+ key="block_time",
+ resolution=0.1,
+ orientation="h",
+ default_value=data.get("block_time", ""),
+ ),
+ ],
+ [
+ sg.Text(i18n("Fade length")),
+ sg.Slider(
+ range=(0.01, 0.15),
+ key="crossfade_length",
+ resolution=0.01,
+ orientation="h",
+ default_value=data.get("crossfade_length", ""),
+ ),
+ ],
+ [
+                    sg.Text(i18n("Extra inference time")),
+ sg.Slider(
+ range=(0.05, 3.00),
+ key="extra_time",
+ resolution=0.01,
+ orientation="h",
+ default_value=data.get("extra_time", ""),
+ ),
+ ],
+ [
+ sg.Checkbox(i18n("Input noise reduction"), key="I_noise_reduce"),
+ sg.Checkbox(i18n("Output noise reduction"), key="O_noise_reduce"),
+ ],
+ ],
+ title=i18n("Performance settings"),
+ ),
+ ],
+ [
+            sg.Button(i18n("Start audio conversion"), key="start_vc"),
+            sg.Button(i18n("Stop audio conversion"), key="stop_vc"),
+ sg.Text(i18n("Inference time (ms):")),
+ sg.Text("0", key="infer_time"),
+ ],
+ ]
+ self.window = sg.Window("RVC - GUI", layout=layout)
+ self.event_handler()
+
+ def event_handler(self):
+ while True:
+ event, values = self.window.read()
+ if event == sg.WINDOW_CLOSED:
+ self.flag_vc = False
+ exit()
+            if event == "start_vc" and not self.flag_vc:
+                if self.set_values(values):
+ print("using_cuda:" + str(torch.cuda.is_available()))
+ self.start_vc()
+ settings = {
+ "pth_path": values["pth_path"],
+ "index_path": values["index_path"],
+ "f0_method": self.get_f0_method_from_radios(values),
+ "sg_input_device": values["sg_input_device"],
+ "sg_output_device": values["sg_output_device"],
+ "threhold": values["threhold"],
+ "pitch": values["pitch"],
+ "index_rate": values["index_rate"],
+ "block_time": values["block_time"],
+ "crossfade_length": values["crossfade_length"],
+ "extra_time": values["extra_time"],
+ }
+ with open("values1.json", "w") as j:
+ json.dump(settings, j)
+            if event == "stop_vc" and self.flag_vc:
+ self.flag_vc = False
+
+ # Function that returns the used f0 method in string format "harvest"
+ def get_f0_method_from_radios(self, values):
+ f0_array = [
+ {"name": "harvest", "val": values["harvest"]},
+ {"name": "reg-crepe", "val": values["reg-crepe"]},
+ {"name": "reg-crepe-tiny", "val": values["reg-crepe-tiny"]},
+ ]
+ # Filter through to find a true value
+ used_f0 = ""
+ for f0 in f0_array:
+            if f0["val"]:
+ used_f0 = f0["name"]
+ break
+ if used_f0 == "":
+ used_f0 = "harvest" # Default Harvest if used_f0 is empty somehow
+ return used_f0
+
+ def set_values(self, values):
+ if len(values["pth_path"].strip()) == 0:
+ sg.popup(i18n("Select the pth file"))
+ return False
+ if len(values["index_path"].strip()) == 0:
+ sg.popup(i18n("Select the index file"))
+ return False
+ pattern = re.compile("[^\x00-\x7F]+")
+ if pattern.findall(values["hubert_path"]):
+ sg.popup(i18n("The hubert model path must not contain Chinese characters"))
+ return False
+ if pattern.findall(values["pth_path"]):
+ sg.popup(i18n("The pth file path must not contain Chinese characters."))
+ return False
+ if pattern.findall(values["index_path"]):
+ sg.popup(i18n("The index file path must not contain Chinese characters."))
+ return False
+ self.set_devices(values["sg_input_device"], values["sg_output_device"])
+ self.config.hubert_path = os.path.join(current_dir, "hubert_base.pt")
+ self.config.pth_path = values["pth_path"]
+ self.config.index_path = values["index_path"]
+ self.config.npy_path = values["npy_path"]
+ self.config.f0_method = self.get_f0_method_from_radios(values)
+ self.config.threhold = values["threhold"]
+ self.config.pitch = values["pitch"]
+ self.config.block_time = values["block_time"]
+ self.config.crossfade_time = values["crossfade_length"]
+ self.config.extra_time = values["extra_time"]
+ self.config.I_noise_reduce = values["I_noise_reduce"]
+ self.config.O_noise_reduce = values["O_noise_reduce"]
+ self.config.index_rate = values["index_rate"]
+ return True
+
+ def start_vc(self):
+ torch.cuda.empty_cache()
+ self.flag_vc = True
+ self.block_frame = int(self.config.block_time * self.config.samplerate)
+ self.crossfade_frame = int(self.config.crossfade_time * self.config.samplerate)
+ self.sola_search_frame = int(0.012 * self.config.samplerate)
+        self.delay_frame = int(0.01 * self.config.samplerate)  # reserve 0.01 s of lead-in
+ self.extra_frame = int(self.config.extra_time * self.config.samplerate)
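+        # the rolling input window below is extra + crossfade + SOLA-search + one block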
+ self.rvc = None
+ self.rvc = RVC(
+ self.config.pitch,
+ self.config.f0_method,
+ self.config.hubert_path,
+ self.config.pth_path,
+ self.config.index_path,
+ self.config.npy_path,
+ self.config.index_rate,
+ )
+ self.input_wav: np.ndarray = np.zeros(
+ self.extra_frame
+ + self.crossfade_frame
+ + self.sola_search_frame
+ + self.block_frame,
+ dtype="float32",
+ )
+ self.output_wav: torch.Tensor = torch.zeros(
+ self.block_frame, device=device, dtype=torch.float32
+ )
+ self.sola_buffer: torch.Tensor = torch.zeros(
+ self.crossfade_frame, device=device, dtype=torch.float32
+ )
+ self.fade_in_window: torch.Tensor = torch.linspace(
+ 0.0, 1.0, steps=self.crossfade_frame, device=device, dtype=torch.float32
+ )
+ self.fade_out_window: torch.Tensor = 1 - self.fade_in_window
+ self.resampler1 = tat.Resample(
+ orig_freq=self.config.samplerate, new_freq=16000, dtype=torch.float32
+ )
+ self.resampler2 = tat.Resample(
+ orig_freq=self.rvc.tgt_sr,
+ new_freq=self.config.samplerate,
+ dtype=torch.float32,
+ )
+ thread_vc = threading.Thread(target=self.soundinput)
+ thread_vc.start()
+
+ def soundinput(self):
+        """
+        Stream audio from the input device while conversion is active.
+        """
+ with sd.Stream(
+ channels=2,
+ callback=self.audio_callback,
+ blocksize=self.block_frame,
+ samplerate=self.config.samplerate,
+ dtype="float32",
+ ):
+ while self.flag_vc:
+ time.sleep(self.config.block_time)
+ print("Audio block passed.")
+ print("ENDing VC")
+
+ def audio_callback(
+ self, indata: np.ndarray, outdata: np.ndarray, frames, times, status
+ ):
+ """
+ 音频处理
+ """
+ start_time = time.perf_counter()
+ indata = librosa.to_mono(indata.T)
+ if self.config.I_noise_reduce:
+ indata[:] = nr.reduce_noise(y=indata, sr=self.config.samplerate)
+
+ """noise gate"""
+ frame_length = 2048
+ hop_length = 1024
+ rms = librosa.feature.rms(
+ y=indata, frame_length=frame_length, hop_length=hop_length
+ )
+ db_threhold = librosa.amplitude_to_db(rms, ref=1.0)[0] < self.config.threhold
+ # print(rms.shape,db.shape,db)
+ for i in range(db_threhold.shape[0]):
+ if db_threhold[i]:
+ indata[i * hop_length : (i + 1) * hop_length] = 0
+ self.input_wav[:] = np.append(self.input_wav[self.block_frame :], indata)
+
+ # infer
+ print("input_wav:" + str(self.input_wav.shape))
+ # print('infered_wav:'+str(infer_wav.shape))
+ infer_wav: torch.Tensor = self.resampler2(
+ self.rvc.infer(self.resampler1(torch.from_numpy(self.input_wav)))
+ )[-self.crossfade_frame - self.sola_search_frame - self.block_frame :].to(
+ device
+ )
+ print("infer_wav:" + str(infer_wav.shape))
+
+ # SOLA algorithm from https://github.com/yxlllc/DDSP-SVC
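+        # SOLA picks the offset (within the search window) that maximizes the
+        # normalized cross-correlation between the newly inferred audio and the
+        # tail of the previous block (sola_buffer), so the crossfade below
+        # splices at a phase-aligned point instead of clicking.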
+ cor_nom = F.conv1d(
+ infer_wav[None, None, : self.crossfade_frame + self.sola_search_frame],
+ self.sola_buffer[None, None, :],
+ )
+ cor_den = torch.sqrt(
+ F.conv1d(
+ infer_wav[None, None, : self.crossfade_frame + self.sola_search_frame]
+ ** 2,
+ torch.ones(1, 1, self.crossfade_frame, device=device),
+ )
+ + 1e-8
+ )
+ sola_offset = torch.argmax(cor_nom[0, 0] / cor_den[0, 0])
+ print("sola offset: " + str(int(sola_offset)))
+
+ # crossfade
+ self.output_wav[:] = infer_wav[sola_offset : sola_offset + self.block_frame]
+ self.output_wav[: self.crossfade_frame] *= self.fade_in_window
+ self.output_wav[: self.crossfade_frame] += self.sola_buffer[:]
+ if sola_offset < self.sola_search_frame:
+ self.sola_buffer[:] = (
+ infer_wav[
+ -self.sola_search_frame
+ - self.crossfade_frame
+ + sola_offset : -self.sola_search_frame
+ + sola_offset
+ ]
+ * self.fade_out_window
+ )
+ else:
+ self.sola_buffer[:] = (
+ infer_wav[-self.crossfade_frame :] * self.fade_out_window
+ )
+
+ if self.config.O_noise_reduce:
+ outdata[:] = np.tile(
+ nr.reduce_noise(
+ y=self.output_wav[:].cpu().numpy(), sr=self.config.samplerate
+ ),
+ (2, 1),
+ ).T
+ else:
+ outdata[:] = self.output_wav[:].repeat(2, 1).t().cpu().numpy()
+ total_time = time.perf_counter() - start_time
+ self.window["infer_time"].update(int(total_time * 1000))
+ print("infer time:" + str(total_time))
+ print("f0_method: " + str(self.config.f0_method))
+
+ def get_devices(self, update: bool = True):
+ """获取设备列表"""
+ if update:
+ sd._terminate()
+ sd._initialize()
+ devices = sd.query_devices()
+ hostapis = sd.query_hostapis()
+ for hostapi in hostapis:
+ for device_idx in hostapi["devices"]:
+ devices[device_idx]["hostapi_name"] = hostapi["name"]
+ input_devices = [
+ f"{d['name']} ({d['hostapi_name']})"
+ for d in devices
+ if d["max_input_channels"] > 0
+ ]
+ output_devices = [
+ f"{d['name']} ({d['hostapi_name']})"
+ for d in devices
+ if d["max_output_channels"] > 0
+ ]
+ input_devices_indices = [
+ d["index"] if "index" in d else d["name"]
+ for d in devices
+ if d["max_input_channels"] > 0
+ ]
+ output_devices_indices = [
+ d["index"] if "index" in d else d["name"]
+ for d in devices
+ if d["max_output_channels"] > 0
+ ]
+ return (
+ input_devices,
+ output_devices,
+ input_devices_indices,
+ output_devices_indices,
+ )
+
+ def set_devices(self, input_device, output_device):
+ """设置输出设备"""
+ (
+ input_devices,
+ output_devices,
+ input_device_indices,
+ output_device_indices,
+ ) = self.get_devices()
+ sd.default.device[0] = input_device_indices[input_devices.index(input_device)]
+ sd.default.device[1] = output_device_indices[
+ output_devices.index(output_device)
+ ]
+ print("input device:" + str(sd.default.device[0]) + ":" + str(input_device))
+ print("output device:" + str(sd.default.device[1]) + ":" + str(output_device))
+
+
+gui = GUI()
diff --git a/gui_v1.py b/gui_v1.py
new file mode 100644
index 000000000..6198b6c5b
--- /dev/null
+++ b/gui_v1.py
@@ -0,0 +1,637 @@
+import os, sys
+
+if sys.platform == "darwin":
+ os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
+
+now_dir = os.getcwd()
+sys.path.append(now_dir)
+import multiprocessing
+
+
+class Harvest(multiprocessing.Process):
+ def __init__(self, inp_q, opt_q):
+ multiprocessing.Process.__init__(self)
+ self.inp_q = inp_q
+ self.opt_q = opt_q
+
+ def run(self):
+ import numpy as np, pyworld
+
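+        # Worker loop: each process pulls (chunk index, audio, shared result
+        # dict, worker count, timestamp) from the input queue, runs pyworld's
+        # harvest f0 extractor on its chunk, and stores the result by index.
+        # Whichever worker finishes the last chunk signals completion by
+        # putting the timestamp on the output queue.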
+        while True:
+ idx, x, res_f0, n_cpu, ts = self.inp_q.get()
+ f0, t = pyworld.harvest(
+ x.astype(np.double),
+ fs=16000,
+ f0_ceil=1100,
+ f0_floor=50,
+ frame_period=10,
+ )
+ res_f0[idx] = f0
+ if len(res_f0.keys()) >= n_cpu:
+ self.opt_q.put(ts)
+
+
+if __name__ == "__main__":
+ from multiprocessing import Queue
+ from queue import Empty
+ import numpy as np
+ import multiprocessing
+ import traceback, re
+ import json
+ import PySimpleGUI as sg
+ import sounddevice as sd
+ import noisereduce as nr
+ from multiprocessing import cpu_count
+ import librosa, torch, time, threading
+ import torch.nn.functional as F
+ import torchaudio.transforms as tat
+ from i18n import I18nAuto
+
+ i18n = I18nAuto()
+ device = torch.device(
+ "cuda"
+ if torch.cuda.is_available()
+ else ("mps" if torch.backends.mps.is_available() else "cpu")
+ )
+ current_dir = os.getcwd()
+ inp_q = Queue()
+ opt_q = Queue()
+ n_cpu = min(cpu_count(), 8)
+ for _ in range(n_cpu):
+ Harvest(inp_q, opt_q).start()
+ from rvc_for_realtime import RVC
+
+ class GUIConfig:
+ def __init__(self) -> None:
+ self.pth_path: str = ""
+ self.index_path: str = ""
+ self.pitch: int = 12
+ self.samplerate: int = 40000
+ self.block_time: float = 1.0 # s
+ self.buffer_num: int = 1
+ self.threhold: int = -30
+ self.crossfade_time: float = 0.08
+ self.extra_time: float = 0.04
+ self.I_noise_reduce = False
+ self.O_noise_reduce = False
+ self.index_rate = 0.3
+ self.n_cpu = min(n_cpu, 8)
+ self.f0method = "harvest"
+
+ class GUI:
+ def __init__(self) -> None:
+ self.config = GUIConfig()
+ self.flag_vc = False
+
+ self.launcher()
+
+ def load(self):
+            (
+                input_devices,
+                output_devices,
+                input_devices_indices,
+                output_devices_indices,
+            ) = self.get_devices()
+ try:
+ with open("values1.json", "r") as j:
+ data = json.load(j)
+ data["pm"] = data["f0method"] == "pm"
+ data["harvest"] = data["f0method"] == "harvest"
+ data["crepe"] = data["f0method"] == "crepe"
+ data["rmvpe"] = data["f0method"] == "rmvpe"
+ except:
+ with open("values1.json", "w") as j:
+ data = {
+ "pth_path": " ",
+ "index_path": " ",
+ "sg_input_device": input_devices[sd.default.device[0]],
+ "sg_output_device": output_devices[sd.default.device[1]],
+ "threhold": "-45",
+ "pitch": "0",
+ "index_rate": "0",
+ "block_time": "1",
+ "crossfade_length": "0.04",
+ "extra_time": "1",
+ "f0method": "rmvpe",
+ }
+ return data
+
+ def launcher(self):
+ data = self.load()
+ sg.theme("LightBlue3")
+ input_devices, output_devices, _, _ = self.get_devices()
+ layout = [
+ [
+ sg.Frame(
+ title=i18n("Load model"),
+ layout=[
+ [
+ sg.Input(
+ default_text=data.get("pth_path", ""),
+ key="pth_path",
+ ),
+ sg.FileBrowse(
+ i18n("Select the .pth file"),
+ initial_folder=os.path.join(os.getcwd(), "weights"),
+                                file_types=(("weight files", "*.pth"),),
+ ),
+ ],
+ [
+ sg.Input(
+ default_text=data.get("index_path", ""),
+ key="index_path",
+ ),
+ sg.FileBrowse(
+ i18n("Select the .index file"),
+ initial_folder=os.path.join(os.getcwd(), "logs"),
+                                file_types=(("index files", "*.index"),),
+ ),
+ ],
+ ],
+ )
+ ],
+ [
+ sg.Frame(
+ layout=[
+ [
+ sg.Text(i18n("Input device")),
+ sg.Combo(
+ input_devices,
+ key="sg_input_device",
+ default_value=data.get("sg_input_device", ""),
+ ),
+ ],
+ [
+ sg.Text(i18n("Output device")),
+ sg.Combo(
+ output_devices,
+ key="sg_output_device",
+ default_value=data.get("sg_output_device", ""),
+ ),
+ ],
+ ],
+ title=i18n("Audio device (please use the same type of driver)"),
+ )
+ ],
+ [
+ sg.Frame(
+ layout=[
+ [
+ sg.Text(i18n("Response threshold")),
+ sg.Slider(
+ range=(-60, 0),
+ key="threhold",
+ resolution=1,
+ orientation="h",
+ default_value=data.get("threhold", ""),
+ ),
+ ],
+ [
+ sg.Text(i18n("Pitch settings")),
+ sg.Slider(
+ range=(-24, 24),
+ key="pitch",
+ resolution=1,
+ orientation="h",
+ default_value=data.get("pitch", ""),
+ ),
+ ],
+ [
+ sg.Text(i18n("Index Rate")),
+ sg.Slider(
+ range=(0.0, 1.0),
+ key="index_rate",
+ resolution=0.01,
+ orientation="h",
+ default_value=data.get("index_rate", ""),
+ ),
+ ],
+ [
+ sg.Text(i18n("Step algorithm")),
+                            sg.Radio(
+                                "pm",
+                                "f0method",
+                                key="pm",
+                                default=data.get("pm", False),
+                            ),
+                            sg.Radio(
+                                "harvest",
+                                "f0method",
+                                key="harvest",
+                                default=data.get("harvest", False),
+                            ),
+                            sg.Radio(
+                                "crepe",
+                                "f0method",
+                                key="crepe",
+                                default=data.get("crepe", False),
+                            ),
+                            sg.Radio(
+                                "rmvpe",
+                                "f0method",
+                                key="rmvpe",
+                                default=data.get("rmvpe", False),
+                            ),
+ ],
+ ],
+ title=i18n("General settings"),
+ ),
+ sg.Frame(
+ layout=[
+ [
+ sg.Text(i18n("Sample length")),
+ sg.Slider(
+ range=(0.12, 2.4),
+ key="block_time",
+ resolution=0.03,
+ orientation="h",
+ default_value=data.get("block_time", ""),
+ ),
+ ],
+ [
+ sg.Text(i18n("Number of epoch processes")),
+ sg.Slider(
+ range=(1, n_cpu),
+ key="n_cpu",
+ resolution=1,
+ orientation="h",
+ default_value=data.get(
+ "n_cpu", min(self.config.n_cpu, n_cpu)
+ ),
+ ),
+ ],
+ [
+ sg.Text(i18n("Fade length")),
+ sg.Slider(
+ range=(0.01, 0.15),
+ key="crossfade_length",
+ resolution=0.01,
+ orientation="h",
+ default_value=data.get("crossfade_length", ""),
+ ),
+ ],
+ [
+ sg.Text(i18n("Extra推理时长")),
+ sg.Slider(
+ range=(0.05, 3.00),
+ key="extra_time",
+ resolution=0.01,
+ orientation="h",
+ default_value=data.get("extra_time", ""),
+ ),
+ ],
+ [
+ sg.Checkbox(i18n("Input noise reduction"), key="I_noise_reduce"),
+ sg.Checkbox(i18n("Output noise reduction"), key="O_noise_reduce"),
+ ],
+ ],
+ title=i18n("Performance settings"),
+ ),
+ ],
+ [
+                sg.Button(i18n("Start audio conversion"), key="start_vc"),
+                sg.Button(i18n("Stop audio conversion"), key="stop_vc"),
+ sg.Text(i18n("Inference time (ms):")),
+ sg.Text("0", key="infer_time"),
+ ],
+ ]
+ self.window = sg.Window("RVC - GUI", layout=layout)
+ self.event_handler()
+
+ def event_handler(self):
+ while True:
+ event, values = self.window.read()
+ if event == sg.WINDOW_CLOSED:
+ self.flag_vc = False
+ exit()
+ if event == "start_vc" and self.flag_vc == False:
+ if self.set_values(values) == True:
+ print("using_cuda:" + str(torch.cuda.is_available()))
+ self.start_vc()
+ settings = {
+ "pth_path": values["pth_path"],
+ "index_path": values["index_path"],
+ "sg_input_device": values["sg_input_device"],
+ "sg_output_device": values["sg_output_device"],
+ "threhold": values["threhold"],
+ "pitch": values["pitch"],
+ "index_rate": values["index_rate"],
+ "block_time": values["block_time"],
+ "crossfade_length": values["crossfade_length"],
+ "extra_time": values["extra_time"],
+ "n_cpu": values["n_cpu"],
+ "f0method": ["pm", "harvest", "crepe", "rmvpe"][
+ [
+ values["pm"],
+ values["harvest"],
+ values["crepe"],
+ values["rmvpe"],
+ ].index(True)
+ ],
+ }
+ with open("values1.json", "w") as j:
+ json.dump(settings, j)
+ if event == "stop_vc" and self.flag_vc == True:
+ self.flag_vc = False
+
+ def set_values(self, values):
+ if len(values["pth_path"].strip()) == 0:
+ sg.popup(i18n("Select the pth file"))
+ return False
+ if len(values["index_path"].strip()) == 0:
+ sg.popup(i18n("Select the index file"))
+ return False
+ pattern = re.compile("[^\x00-\x7F]+")
+ if pattern.findall(values["pth_path"]):
+ sg.popup(i18n("The pth file path must not contain Chinese characters."))
+ return False
+ if pattern.findall(values["index_path"]):
+ sg.popup(i18n("The index file path must not contain Chinese characters."))
+ return False
+ self.set_devices(values["sg_input_device"], values["sg_output_device"])
+ self.config.pth_path = values["pth_path"]
+ self.config.index_path = values["index_path"]
+ self.config.threhold = values["threhold"]
+ self.config.pitch = values["pitch"]
+ self.config.block_time = values["block_time"]
+ self.config.crossfade_time = values["crossfade_length"]
+ self.config.extra_time = values["extra_time"]
+ self.config.I_noise_reduce = values["I_noise_reduce"]
+ self.config.O_noise_reduce = values["O_noise_reduce"]
+ self.config.index_rate = values["index_rate"]
+ self.config.n_cpu = values["n_cpu"]
+ self.config.f0method = ["pm", "harvest", "crepe", "rmvpe"][
+ [
+ values["pm"],
+ values["harvest"],
+ values["crepe"],
+ values["rmvpe"],
+ ].index(True)
+ ]
+ return True
+
+ def start_vc(self):
+ torch.cuda.empty_cache()
+ self.flag_vc = True
+ self.rvc = RVC(
+ self.config.pitch,
+ self.config.pth_path,
+ self.config.index_path,
+ self.config.index_rate,
+ self.config.n_cpu,
+ inp_q,
+ opt_q,
+ device,
+ )
+ self.config.samplerate = self.rvc.tgt_sr
+ self.config.crossfade_time = min(
+ self.config.crossfade_time, self.config.block_time
+ )
+ self.block_frame = int(self.config.block_time * self.config.samplerate)
+ self.crossfade_frame = int(
+ self.config.crossfade_time * self.config.samplerate
+ )
+ self.sola_search_frame = int(0.01 * self.config.samplerate)
+ self.extra_frame = int(self.config.extra_time * self.config.samplerate)
+            self.zc = self.rvc.tgt_sr // 100  # samples per 10 ms frame at the target rate
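+            # Round the rolling buffers up to a whole number of 10 ms frames so
+            # the pitch arrays (one entry per zc samples) stay aligned with them.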
+ self.input_wav: np.ndarray = np.zeros(
+ int(
+ np.ceil(
+ (
+ self.extra_frame
+ + self.crossfade_frame
+ + self.sola_search_frame
+ + self.block_frame
+ )
+ / self.zc
+ )
+ * self.zc
+ ),
+ dtype="float32",
+ )
+ self.output_wav_cache: torch.Tensor = torch.zeros(
+ int(
+ np.ceil(
+ (
+ self.extra_frame
+ + self.crossfade_frame
+ + self.sola_search_frame
+ + self.block_frame
+ )
+ / self.zc
+ )
+ * self.zc
+ ),
+ device=device,
+ dtype=torch.float32,
+ )
+ self.pitch: np.ndarray = np.zeros(
+ self.input_wav.shape[0] // self.zc,
+ dtype="int32",
+ )
+ self.pitchf: np.ndarray = np.zeros(
+ self.input_wav.shape[0] // self.zc,
+ dtype="float64",
+ )
+ self.output_wav: torch.Tensor = torch.zeros(
+ self.block_frame, device=device, dtype=torch.float32
+ )
+ self.sola_buffer: torch.Tensor = torch.zeros(
+ self.crossfade_frame, device=device, dtype=torch.float32
+ )
+ self.fade_in_window: torch.Tensor = torch.linspace(
+ 0.0, 1.0, steps=self.crossfade_frame, device=device, dtype=torch.float32
+ )
+ self.fade_out_window: torch.Tensor = 1 - self.fade_in_window
+ self.resampler = tat.Resample(
+ orig_freq=self.config.samplerate, new_freq=16000, dtype=torch.float32
+ ).to(device)
+ thread_vc = threading.Thread(target=self.soundinput)
+ thread_vc.start()
+
+ def soundinput(self):
+ """
+ 接受音频输入
+ """
+ channels = 1 if sys.platform == "darwin" else 2
+ with sd.Stream(
+ channels=channels,
+ callback=self.audio_callback,
+ blocksize=self.block_frame,
+ samplerate=self.config.samplerate,
+ dtype="float32",
+ ):
+ while self.flag_vc:
+ time.sleep(self.config.block_time)
+ print("Audio block passed.")
+ print("ENDing VC")
+
+ def audio_callback(
+ self, indata: np.ndarray, outdata: np.ndarray, frames, times, status
+ ):
+ """
+ 音频处理
+ """
+ start_time = time.perf_counter()
+ indata = librosa.to_mono(indata.T)
+ if self.config.I_noise_reduce:
+ indata[:] = nr.reduce_noise(y=indata, sr=self.config.samplerate)
+ """noise gate"""
+ frame_length = 2048
+ hop_length = 1024
+ rms = librosa.feature.rms(
+ y=indata, frame_length=frame_length, hop_length=hop_length
+ )
+ if self.config.threhold > -60:
+ db_threhold = (
+ librosa.amplitude_to_db(rms, ref=1.0)[0] < self.config.threhold
+ )
+ for i in range(db_threhold.shape[0]):
+ if db_threhold[i]:
+ indata[i * hop_length : (i + 1) * hop_length] = 0
+ self.input_wav[:] = np.append(self.input_wav[self.block_frame :], indata)
+            # infer
+            inp = torch.from_numpy(self.input_wav).to(device)
+            res1 = self.resampler(inp)  # resample the rolling window to 16 kHz for the model
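+            # rate1/rate2 are the fractions of the buffered window taken up by
+            # the fresh block alone and by block + crossfade + search window;
+            # they are passed to RVC.infer, which presumably uses them to decide
+            # how much of the window to re-infer (rvc_for_realtime is not shown
+            # in this diff, so this is an assumption).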
+ rate1 = self.block_frame / (
+ self.extra_frame
+ + self.crossfade_frame
+ + self.sola_search_frame
+ + self.block_frame
+ )
+ rate2 = (
+ self.crossfade_frame + self.sola_search_frame + self.block_frame
+ ) / (
+ self.extra_frame
+ + self.crossfade_frame
+ + self.sola_search_frame
+ + self.block_frame
+ )
+ res2 = self.rvc.infer(
+ res1,
+ res1[-self.block_frame :].cpu().numpy(),
+ rate1,
+ rate2,
+ self.pitch,
+ self.pitchf,
+ self.config.f0method,
+ )
+ self.output_wav_cache[-res2.shape[0] :] = res2
+ infer_wav = self.output_wav_cache[
+ -self.crossfade_frame - self.sola_search_frame - self.block_frame :
+ ]
+ # SOLA algorithm from https://github.com/yxlllc/DDSP-SVC
+ cor_nom = F.conv1d(
+ infer_wav[None, None, : self.crossfade_frame + self.sola_search_frame],
+ self.sola_buffer[None, None, :],
+ )
+ cor_den = torch.sqrt(
+ F.conv1d(
+ infer_wav[
+ None, None, : self.crossfade_frame + self.sola_search_frame
+ ]
+ ** 2,
+ torch.ones(1, 1, self.crossfade_frame, device=device),
+ )
+ + 1e-8
+ )
+ if sys.platform == "darwin":
+ cor_nom = cor_nom.cpu()
+ cor_den = cor_den.cpu()
+ sola_offset = torch.argmax(cor_nom[0, 0] / cor_den[0, 0])
+ print("sola offset: " + str(int(sola_offset)))
+ self.output_wav[:] = infer_wav[sola_offset : sola_offset + self.block_frame]
+ self.output_wav[: self.crossfade_frame] *= self.fade_in_window
+ self.output_wav[: self.crossfade_frame] += self.sola_buffer[:]
+ # crossfade
+ if sola_offset < self.sola_search_frame:
+ self.sola_buffer[:] = (
+ infer_wav[
+ -self.sola_search_frame
+ - self.crossfade_frame
+ + sola_offset : -self.sola_search_frame
+ + sola_offset
+ ]
+ * self.fade_out_window
+ )
+ else:
+ self.sola_buffer[:] = (
+ infer_wav[-self.crossfade_frame :] * self.fade_out_window
+ )
+ if self.config.O_noise_reduce:
+ if sys.platform == "darwin":
+ noise_reduced_signal = nr.reduce_noise(
+ y=self.output_wav[:].cpu().numpy(), sr=self.config.samplerate
+ )
+ outdata[:] = noise_reduced_signal[:, np.newaxis]
+ else:
+ outdata[:] = np.tile(
+ nr.reduce_noise(
+ y=self.output_wav[:].cpu().numpy(),
+ sr=self.config.samplerate,
+ ),
+ (2, 1),
+ ).T
+ else:
+ if sys.platform == "darwin":
+ outdata[:] = self.output_wav[:].cpu().numpy()[:, np.newaxis]
+ else:
+ outdata[:] = self.output_wav[:].repeat(2, 1).t().cpu().numpy()
+ total_time = time.perf_counter() - start_time
+ self.window["infer_time"].update(int(total_time * 1000))
+ print("infer time:" + str(total_time))
+
+ def get_devices(self, update: bool = True):
+ """获取设备列表"""
+ if update:
+ sd._terminate()
+ sd._initialize()
+ devices = sd.query_devices()
+ hostapis = sd.query_hostapis()
+ for hostapi in hostapis:
+ for device_idx in hostapi["devices"]:
+ devices[device_idx]["hostapi_name"] = hostapi["name"]
+ input_devices = [
+ f"{d['name']} ({d['hostapi_name']})"
+ for d in devices
+ if d["max_input_channels"] > 0
+ ]
+ output_devices = [
+ f"{d['name']} ({d['hostapi_name']})"
+ for d in devices
+ if d["max_output_channels"] > 0
+ ]
+ input_devices_indices = [
+ d["index"] if "index" in d else d["name"]
+ for d in devices
+ if d["max_input_channels"] > 0
+ ]
+ output_devices_indices = [
+ d["index"] if "index" in d else d["name"]
+ for d in devices
+ if d["max_output_channels"] > 0
+ ]
+ return (
+ input_devices,
+ output_devices,
+ input_devices_indices,
+ output_devices_indices,
+ )
+
+ def set_devices(self, input_device, output_device):
+ """设置输出设备"""
+ (
+ input_devices,
+ output_devices,
+ input_device_indices,
+ output_device_indices,
+ ) = self.get_devices()
+ sd.default.device[0] = input_device_indices[
+ input_devices.index(input_device)
+ ]
+ sd.default.device[1] = output_device_indices[
+ output_devices.index(output_device)
+ ]
+ print("input device:" + str(sd.default.device[0]) + ":" + str(input_device))
+ print(
+ "output device:" + str(sd.default.device[1]) + ":" + str(output_device)
+ )
+
+ gui = GUI()
diff --git a/guidml.py b/guidml.py
new file mode 100644
index 000000000..cd6675bb1
--- /dev/null
+++ b/guidml.py
@@ -0,0 +1,710 @@
+"""
+0416后的更新:
+ 引入config中half
+ 重建npy而不用填写
+ v2支持
+ 无f0模型支持
+ 修复
+
+ int16:
+ 增加无索引支持
+ f0算法改harvest(怎么看就只有这个会影响CPU占用),但是不这么改效果不好
+"""
+import os, sys, traceback, re
+
+import json
+
+now_dir = os.getcwd()
+sys.path.append(now_dir)
+from config import Config
+
+Config = Config()
+
+import torch_directml
+import PySimpleGUI as sg
+import sounddevice as sd
+import noisereduce as nr
+import numpy as np
+from fairseq import checkpoint_utils
+import librosa, torch, pyworld, faiss, time, threading
+import torch.nn.functional as F
+import torchaudio.transforms as tat
+import scipy.signal as signal
+
+
+# import matplotlib.pyplot as plt
+from lib.infer_pack.models import (
+ SynthesizerTrnMs256NSFsid,
+ SynthesizerTrnMs256NSFsid_nono,
+ SynthesizerTrnMs768NSFsid,
+ SynthesizerTrnMs768NSFsid_nono,
+)
+from i18n import I18nAuto
+
+i18n = I18nAuto()
+device = torch_directml.device(torch_directml.default_device())
+current_dir = os.getcwd()
+
+
+class RVC:
+ def __init__(
+ self, key, hubert_path, pth_path, index_path, npy_path, index_rate
+ ) -> None:
+ """
+ 初始化
+ """
+ try:
+ self.f0_up_key = key
+ self.time_step = 160 / 16000 * 1000
+ self.f0_min = 50
+ self.f0_max = 1100
+ self.f0_mel_min = 1127 * np.log(1 + self.f0_min / 700)
+ self.f0_mel_max = 1127 * np.log(1 + self.f0_max / 700)
+ self.sr = 16000
+ self.window = 160
+ if index_rate != 0:
+ self.index = faiss.read_index(index_path)
+ # self.big_npy = np.load(npy_path)
+ self.big_npy = self.index.reconstruct_n(0, self.index.ntotal)
+ print("index search enabled")
+ self.index_rate = index_rate
+ model_path = hubert_path
+ print("load model(s) from {}".format(model_path))
+ models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
+ [model_path],
+ suffix="",
+ )
+ self.model = models[0]
+ self.model = self.model.to(device)
+ if Config.is_half:
+ self.model = self.model.half()
+ else:
+ self.model = self.model.float()
+ self.model.eval()
+ cpt = torch.load(pth_path, map_location="cpu")
+ self.tgt_sr = cpt["config"][-1]
+ cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] # n_spk
+ self.if_f0 = cpt.get("f0", 1)
+ self.version = cpt.get("version", "v1")
+ if self.version == "v1":
+ if self.if_f0 == 1:
+ self.net_g = SynthesizerTrnMs256NSFsid(
+ *cpt["config"], is_half=Config.is_half
+ )
+ else:
+ self.net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
+ elif self.version == "v2":
+ if self.if_f0 == 1:
+ self.net_g = SynthesizerTrnMs768NSFsid(
+ *cpt["config"], is_half=Config.is_half
+ )
+ else:
+ self.net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
+ del self.net_g.enc_q
+ print(self.net_g.load_state_dict(cpt["weight"], strict=False))
+ self.net_g.eval().to(device)
+ if Config.is_half:
+ self.net_g = self.net_g.half()
+ else:
+ self.net_g = self.net_g.float()
+ except:
+ print(traceback.format_exc())
+
+ def get_f0(self, x, f0_up_key, inp_f0=None):
+ x_pad = 1
+ f0_min = 50
+ f0_max = 1100
+ f0_mel_min = 1127 * np.log(1 + f0_min / 700)
+ f0_mel_max = 1127 * np.log(1 + f0_max / 700)
+ f0, t = pyworld.harvest(
+ x.astype(np.double),
+ fs=self.sr,
+ f0_ceil=f0_max,
+ f0_floor=f0_min,
+ frame_period=10,
+ )
+ f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr)
+ f0 = signal.medfilt(f0, 3)
+ f0 *= pow(2, f0_up_key / 12)
+ # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
+        tf0 = self.sr // self.window  # number of f0 frames per second
+ if inp_f0 is not None:
+ delta_t = np.round(
+ (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1
+ ).astype("int16")
+ replace_f0 = np.interp(
+ list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1]
+ )
+ shape = f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)].shape[0]
+ f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)] = replace_f0[:shape]
+ # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
+ f0bak = f0.copy()
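+        # Map f0 onto the mel scale and quantize it to 255 coarse bins
+        # (1 = unvoiced); the coarse values are the pitch indices consumed by
+        # the synthesizer, while f0bak keeps the raw Hz curve.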
+ f0_mel = 1127 * np.log(1 + f0 / 700)
+ f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
+ f0_mel_max - f0_mel_min
+ ) + 1
+ f0_mel[f0_mel <= 1] = 1
+ f0_mel[f0_mel > 255] = 255
+        f0_coarse = np.rint(f0_mel).astype(np.int64)  # np.int was removed from NumPy
+        return f0_coarse, f0bak  # coarse bins (1-255) and the raw f0 curve in Hz
+
+ def infer(self, feats: torch.Tensor) -> np.ndarray:
+ """
+ 推理函数
+ """
+ audio = feats.clone().cpu().numpy()
+ assert feats.dim() == 1, feats.dim()
+ feats = feats.view(1, -1)
+ padding_mask = torch.BoolTensor(feats.shape).fill_(False)
+ if Config.is_half:
+ feats = feats.half()
+ else:
+ feats = feats.float()
+ inputs = {
+ "source": feats.to(device),
+ "padding_mask": padding_mask.to(device),
+ "output_layer": 9 if self.version == "v1" else 12,
+ }
+ torch.cuda.synchronize()
+ with torch.no_grad():
+ logits = self.model.extract_features(**inputs)
+ feats = (
+ self.model.final_proj(logits[0]) if self.version == "v1" else logits[0]
+ )
+
+        # index retrieval blending
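+        # Blend the HuBERT features with their nearest neighbors from the FAISS
+        # index: search the k=8 closest vectors, weight them by inverse squared
+        # distance, and mix the weighted sum with the live features at
+        # index_rate.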
+ try:
+ if (
+ hasattr(self, "index")
+ and hasattr(self, "big_npy")
+ and self.index_rate != 0
+ ):
+ npy = feats[0].cpu().numpy().astype("float32")
+ score, ix = self.index.search(npy, k=8)
+ weight = np.square(1 / score)
+ weight /= weight.sum(axis=1, keepdims=True)
+ npy = np.sum(self.big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)
+ if Config.is_half:
+ npy = npy.astype("float16")
+ feats = (
+ torch.from_numpy(npy).unsqueeze(0).to(device) * self.index_rate
+ + (1 - self.index_rate) * feats
+ )
+ else:
+ print("index search FAIL or disabled")
+ except:
+ traceback.print_exc()
+ print("index search FAIL")
+ feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
+ torch.cuda.synchronize()
+ print(feats.shape)
+ if self.if_f0 == 1:
+ pitch, pitchf = self.get_f0(audio, self.f0_up_key)
+            p_len = min(feats.shape[1], 13000, pitch.shape[0])  # cap length to avoid GPU OOM
+ else:
+ pitch, pitchf = None, None
+            p_len = min(feats.shape[1], 13000)  # cap length to avoid GPU OOM
+ torch.cuda.synchronize()
+ # print(feats.shape,pitch.shape)
+ feats = feats[:, :p_len, :]
+ if self.if_f0 == 1:
+ pitch = pitch[:p_len]
+ pitchf = pitchf[:p_len]
+ pitch = torch.LongTensor(pitch).unsqueeze(0).to(device)
+ pitchf = torch.FloatTensor(pitchf).unsqueeze(0).to(device)
+ p_len = torch.LongTensor([p_len]).to(device)
+        ii = 0  # speaker id (sid)
+ sid = torch.LongTensor([ii]).to(device)
+ with torch.no_grad():
+ if self.if_f0 == 1:
+ infered_audio = (
+ self.net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0]
+ .data.cpu()
+ .float()
+ )
+ else:
+ infered_audio = (
+ self.net_g.infer(feats, p_len, sid)[0][0, 0].data.cpu().float()
+ )
+ torch.cuda.synchronize()
+ return infered_audio
+
+
+class GUIConfig:
+ def __init__(self) -> None:
+ self.hubert_path: str = ""
+ self.pth_path: str = ""
+ self.index_path: str = ""
+ self.npy_path: str = ""
+ self.pitch: int = 12
+ self.samplerate: int = 44100
+ self.block_time: float = 1.0 # s
+ self.buffer_num: int = 1
+ self.threhold: int = -30
+ self.crossfade_time: float = 0.08
+ self.extra_time: float = 0.04
+ self.I_noise_reduce = False
+ self.O_noise_reduce = False
+ self.index_rate = 0.3
+
+
+class GUI:
+ def __init__(self) -> None:
+ self.config = GUIConfig()
+ self.flag_vc = False
+
+ self.launcher()
+
+ def load(self):
+ (
+ input_devices,
+ output_devices,
+ input_devices_indices,
+ output_devices_indices,
+ ) = self.get_devices()
+ try:
+ with open("values1.json", "r") as j:
+ data = json.load(j)
+ except:
+ with open("values1.json", "w") as j:
+ data = {
+ "pth_path": "",
+ "index_path": "",
+ "sg_input_device": input_devices[
+ input_devices_indices.index(sd.default.device[0])
+ ],
+ "sg_output_device": output_devices[
+ output_devices_indices.index(sd.default.device[1])
+ ],
+ "threhold": "-45",
+ "pitch": "0",
+ "index_rate": "0",
+ "block_time": "1",
+ "crossfade_length": "0.04",
+ "extra_time": "1",
+                }
+                json.dump(data, j)  # persist the defaults that were just created
+ return data
+
+ def launcher(self):
+ data = self.load()
+ sg.theme("LightBlue3")
+ input_devices, output_devices, _, _ = self.get_devices()
+ layout = [
+ [
+ sg.Frame(
+ title=i18n("Load model"),
+ layout=[
+ [
+ sg.Input(
+ default_text="hubert_base.pt",
+ key="hubert_path",
+ disabled=True,
+ ),
+ sg.FileBrowse(
+ i18n("Hubert Model"),
+ initial_folder=os.path.join(os.getcwd()),
+ file_types=(("pt files", "*.pt"),),
+ ),
+ ],
+ [
+ sg.Input(
+ default_text=data.get("pth_path", ""),
+ key="pth_path",
+ ),
+ sg.FileBrowse(
+ i18n("Select the .pth file"),
+ initial_folder=os.path.join(os.getcwd(), "weights"),
+ file_types=(("weight files", "*.pth"),),
+ ),
+ ],
+ [
+ sg.Input(
+ default_text=data.get("index_path", ""),
+ key="index_path",
+ ),
+ sg.FileBrowse(
+ i18n("Select the .index file"),
+ initial_folder=os.path.join(os.getcwd(), "logs"),
+ file_types=(("index files", "*.index"),),
+ ),
+ ],
+ [
+ sg.Input(
+                            default_text="You don't need to fill this in.",
+ key="npy_path",
+ disabled=True,
+ ),
+ sg.FileBrowse(
+ i18n("Select the .npy file"),
+ initial_folder=os.path.join(os.getcwd(), "logs"),
+ file_types=(("feature files", "*.npy"),),
+ ),
+ ],
+ ],
+ )
+ ],
+ [
+ sg.Frame(
+ layout=[
+ [
+ sg.Text(i18n("Input device")),
+ sg.Combo(
+ input_devices,
+ key="sg_input_device",
+ default_value=data.get("sg_input_device", ""),
+ ),
+ ],
+ [
+ sg.Text(i18n("Output device")),
+ sg.Combo(
+ output_devices,
+ key="sg_output_device",
+ default_value=data.get("sg_output_device", ""),
+ ),
+ ],
+ ],
+ title=i18n("Audio device (please use the same type of driver)"),
+ )
+ ],
+ [
+ sg.Frame(
+ layout=[
+ [
+ sg.Text(i18n("Response threshold")),
+ sg.Slider(
+ range=(-60, 0),
+ key="threhold",
+ resolution=1,
+ orientation="h",
+ default_value=data.get("threhold", ""),
+ ),
+ ],
+ [
+ sg.Text(i18n("Pitch settings")),
+ sg.Slider(
+ range=(-24, 24),
+ key="pitch",
+ resolution=1,
+ orientation="h",
+ default_value=data.get("pitch", ""),
+ ),
+ ],
+ [
+ sg.Text(i18n("Index Rate")),
+ sg.Slider(
+ range=(0.0, 1.0),
+ key="index_rate",
+ resolution=0.01,
+ orientation="h",
+ default_value=data.get("index_rate", ""),
+ ),
+ ],
+ ],
+ title=i18n("General settings"),
+ ),
+ sg.Frame(
+ layout=[
+ [
+ sg.Text(i18n("Sample length")),
+ sg.Slider(
+ range=(0.1, 3.0),
+ key="block_time",
+ resolution=0.1,
+ orientation="h",
+ default_value=data.get("block_time", ""),
+ ),
+ ],
+ [
+ sg.Text(i18n("Fade length")),
+ sg.Slider(
+ range=(0.01, 0.15),
+ key="crossfade_length",
+ resolution=0.01,
+ orientation="h",
+ default_value=data.get("crossfade_length", ""),
+ ),
+ ],
+ [
+ sg.Text(i18n("Extra推理时长")),
+ sg.Slider(
+ range=(0.05, 3.00),
+ key="extra_time",
+ resolution=0.01,
+ orientation="h",
+ default_value=data.get("extra_time", ""),
+ ),
+ ],
+ [
+ sg.Checkbox(i18n("Input noise reduction"), key="I_noise_reduce"),
+ sg.Checkbox(i18n("Output noise reduction"), key="O_noise_reduce"),
+ ],
+ ],
+ title=i18n("Performance settings"),
+ ),
+ ],
+ [
+            sg.Button(i18n("Start audio conversion"), key="start_vc"),
+            sg.Button(i18n("Stop audio conversion"), key="stop_vc"),
+ sg.Text(i18n("Inference time (ms):")),
+ sg.Text("0", key="infer_time"),
+ ],
+ ]
+ self.window = sg.Window("RVC - GUI", layout=layout)
+ self.event_handler()
+
+ def event_handler(self):
+ while True:
+ event, values = self.window.read()
+ if event == sg.WINDOW_CLOSED:
+ self.flag_vc = False
+ exit()
+ if event == "start_vc" and self.flag_vc == False:
+ if self.set_values(values) == True:
+ print("using_cuda:" + str(torch.cuda.is_available()))
+ self.start_vc()
+ settings = {
+ "pth_path": values["pth_path"],
+ "index_path": values["index_path"],
+ "sg_input_device": values["sg_input_device"],
+ "sg_output_device": values["sg_output_device"],
+ "threhold": values["threhold"],
+ "pitch": values["pitch"],
+ "index_rate": values["index_rate"],
+ "block_time": values["block_time"],
+ "crossfade_length": values["crossfade_length"],
+ "extra_time": values["extra_time"],
+ }
+ with open("values1.json", "w") as j:
+ json.dump(settings, j)
+ if event == "stop_vc" and self.flag_vc == True:
+ self.flag_vc = False
+
+ def set_values(self, values):
+ if len(values["pth_path"].strip()) == 0:
+ sg.popup(i18n("Select the pth file"))
+ return False
+ if len(values["index_path"].strip()) == 0:
+ sg.popup(i18n("Select the index file"))
+ return False
+ pattern = re.compile("[^\x00-\x7F]+")
+ if pattern.findall(values["hubert_path"]):
+ sg.popup(i18n("The hubert model path must not contain Chinese characters"))
+ return False
+ if pattern.findall(values["pth_path"]):
+ sg.popup(i18n("The pth file path must not contain Chinese characters."))
+ return False
+ if pattern.findall(values["index_path"]):
+ sg.popup(i18n("The index file path must not contain Chinese characters."))
+ return False
+ self.set_devices(values["sg_input_device"], values["sg_output_device"])
+ self.config.hubert_path = os.path.join(current_dir, "hubert_base.pt")
+ self.config.pth_path = values["pth_path"]
+ self.config.index_path = values["index_path"]
+ self.config.npy_path = values["npy_path"]
+ self.config.threhold = values["threhold"]
+ self.config.pitch = values["pitch"]
+ self.config.block_time = values["block_time"]
+ self.config.crossfade_time = values["crossfade_length"]
+ self.config.extra_time = values["extra_time"]
+ self.config.I_noise_reduce = values["I_noise_reduce"]
+ self.config.O_noise_reduce = values["O_noise_reduce"]
+ self.config.index_rate = values["index_rate"]
+ return True
+
+ def start_vc(self):
+ torch.cuda.empty_cache()
+ self.flag_vc = True
+ self.block_frame = int(self.config.block_time * self.config.samplerate)
+ self.crossfade_frame = int(self.config.crossfade_time * self.config.samplerate)
+ self.sola_search_frame = int(0.012 * self.config.samplerate)
+        self.delay_frame = int(0.01 * self.config.samplerate)  # reserve 0.01 s of look-ahead
+ self.extra_frame = int(self.config.extra_time * self.config.samplerate)
+ self.rvc = None
+ self.rvc = RVC(
+ self.config.pitch,
+ self.config.hubert_path,
+ self.config.pth_path,
+ self.config.index_path,
+ self.config.npy_path,
+ self.config.index_rate,
+ )
+ self.input_wav: np.ndarray = np.zeros(
+ self.extra_frame
+ + self.crossfade_frame
+ + self.sola_search_frame
+ + self.block_frame,
+ dtype="float32",
+ )
+ self.output_wav: torch.Tensor = torch.zeros(
+ self.block_frame, device=device, dtype=torch.float32
+ )
+ self.sola_buffer: torch.Tensor = torch.zeros(
+ self.crossfade_frame, device=device, dtype=torch.float32
+ )
+ self.fade_in_window: torch.Tensor = torch.linspace(
+ 0.0, 1.0, steps=self.crossfade_frame, device=device, dtype=torch.float32
+ )
+ self.fade_out_window: torch.Tensor = 1 - self.fade_in_window
+ self.resampler1 = tat.Resample(
+ orig_freq=self.config.samplerate, new_freq=16000, dtype=torch.float32
+ )
+ self.resampler2 = tat.Resample(
+ orig_freq=self.rvc.tgt_sr,
+ new_freq=self.config.samplerate,
+ dtype=torch.float32,
+ )
+ thread_vc = threading.Thread(target=self.soundinput)
+ thread_vc.start()
+
+ def soundinput(self):
+ """
+ 接受音频输入
+ """
+ with sd.Stream(
+ channels=2,
+ callback=self.audio_callback,
+ blocksize=self.block_frame,
+ samplerate=self.config.samplerate,
+ dtype="float32",
+ ):
+ while self.flag_vc:
+ time.sleep(self.config.block_time)
+ print("Audio block passed.")
+ print("ENDing VC")
+
+ def audio_callback(
+ self, indata: np.ndarray, outdata: np.ndarray, frames, times, status
+ ):
+ """
+ 音频处理
+ """
+ start_time = time.perf_counter()
+ indata = librosa.to_mono(indata.T)
+ if self.config.I_noise_reduce:
+ indata[:] = nr.reduce_noise(y=indata, sr=self.config.samplerate)
+
+ """noise gate"""
+ frame_length = 2048
+ hop_length = 1024
+ rms = librosa.feature.rms(
+ y=indata, frame_length=frame_length, hop_length=hop_length
+ )
+ db_threhold = librosa.amplitude_to_db(rms, ref=1.0)[0] < self.config.threhold
+ # print(rms.shape,db.shape,db)
+ for i in range(db_threhold.shape[0]):
+ if db_threhold[i]:
+ indata[i * hop_length : (i + 1) * hop_length] = 0
+ self.input_wav[:] = np.append(self.input_wav[self.block_frame :], indata)
+
+ # infer
+ print("input_wav:" + str(self.input_wav.shape))
+ # print('infered_wav:'+str(infer_wav.shape))
+ infer_wav: torch.Tensor = self.resampler2(
+ self.rvc.infer(self.resampler1(torch.from_numpy(self.input_wav)))
+ )[-self.crossfade_frame - self.sola_search_frame - self.block_frame :].to(
+ device
+ )
+ print("infer_wav:" + str(infer_wav.shape))
+
+ # SOLA algorithm from https://github.com/yxlllc/DDSP-SVC
+ cor_nom = F.conv1d(
+ infer_wav[None, None, : self.crossfade_frame + self.sola_search_frame],
+ self.sola_buffer[None, None, :],
+ )
+ cor_den = torch.sqrt(
+ F.conv1d(
+ infer_wav[None, None, : self.crossfade_frame + self.sola_search_frame]
+ ** 2,
+ torch.ones(1, 1, self.crossfade_frame, device=device),
+ )
+ + 1e-8
+ )
+ sola_offset = torch.argmax(cor_nom[0, 0] / cor_den[0, 0])
+ print("sola offset: " + str(int(sola_offset)))
+
+ # crossfade
+ self.output_wav[:] = infer_wav[sola_offset : sola_offset + self.block_frame]
+ self.output_wav[: self.crossfade_frame] *= self.fade_in_window
+ self.output_wav[: self.crossfade_frame] += self.sola_buffer[:]
+ if sola_offset < self.sola_search_frame:
+ self.sola_buffer[:] = (
+ infer_wav[
+ -self.sola_search_frame
+ - self.crossfade_frame
+ + sola_offset : -self.sola_search_frame
+ + sola_offset
+ ]
+ * self.fade_out_window
+ )
+ else:
+ self.sola_buffer[:] = (
+ infer_wav[-self.crossfade_frame :] * self.fade_out_window
+ )
+
+ if self.config.O_noise_reduce:
+ outdata[:] = np.tile(
+ nr.reduce_noise(
+ y=self.output_wav[:].cpu().numpy(), sr=self.config.samplerate
+ ),
+ (2, 1),
+ ).T
+ else:
+ outdata[:] = self.output_wav[:].repeat(2, 1).t().cpu().numpy()
+ total_time = time.perf_counter() - start_time
+ self.window["infer_time"].update(int(total_time * 1000))
+ print("infer time:" + str(total_time))
+
+ def get_devices(self, update: bool = True):
+ """获取设备列表"""
+ if update:
+ sd._terminate()
+ sd._initialize()
+ devices = sd.query_devices()
+ hostapis = sd.query_hostapis()
+ for hostapi in hostapis:
+ for device_idx in hostapi["devices"]:
+ devices[device_idx]["hostapi_name"] = hostapi["name"]
+ input_devices = [
+ f"{d['name']} ({d['hostapi_name']})"
+ for d in devices
+ if d["max_input_channels"] > 0
+ ]
+ output_devices = [
+ f"{d['name']} ({d['hostapi_name']})"
+ for d in devices
+ if d["max_output_channels"] > 0
+ ]
+ input_devices_indices = [
+ d["index"] if "index" in d else d["name"]
+ for d in devices
+ if d["max_input_channels"] > 0
+ ]
+ output_devices_indices = [
+ d["index"] if "index" in d else d["name"]
+ for d in devices
+ if d["max_output_channels"] > 0
+ ]
+ return (
+ input_devices,
+ output_devices,
+ input_devices_indices,
+ output_devices_indices,
+ )
+
+ def set_devices(self, input_device, output_device):
+ """设置输出设备"""
+ (
+ input_devices,
+ output_devices,
+ input_device_indices,
+ output_device_indices,
+ ) = self.get_devices()
+ sd.default.device[0] = input_device_indices[input_devices.index(input_device)]
+ sd.default.device[1] = output_device_indices[
+ output_devices.index(output_device)
+ ]
+ print("input device:" + str(sd.default.device[0]) + ":" + str(input_device))
+ print("output device:" + str(sd.default.device[1]) + ":" + str(output_device))
+
+
+gui = GUI()
diff --git a/i18n.py b/i18n.py
new file mode 100644
index 000000000..4e3536bb5
--- /dev/null
+++ b/i18n.py
@@ -0,0 +1,43 @@
+import json
+
+def load_language_list(language):
+ try:
+ with open(f"./i18n/{language}.json", "r", encoding="utf-8") as f:
+ return json.load(f)
+ except FileNotFoundError:
+ raise FileNotFoundError(
+ f"Failed to load language file for {language}. Check if the correct .json file exists."
+ )
+
+
+class I18nAuto:
+ """
+ A class used for internationalization using JSON language files.
+
+ Examples
+ --------
+ >>> i18n = I18nAuto('en_US')
+ >>> i18n.print()
+ Using Language: en_US
+ """
+ def __init__(self, language=None):
+ from locale import getdefaultlocale
+ language = language or getdefaultlocale()[0]
+ if not self._language_exists(language):
+ language = "en_US"
+
+ self.language_map = load_language_list(language)
+ self.language = language
+
+ @staticmethod
+ def _language_exists(language):
+ from os.path import exists
+ return exists(f"./i18n/{language}.json")
+
+ def __call__(self, key):
+ """Returns the translation of the given key if it exists, else returns the key itself."""
+ return self.language_map.get(key, key)
+
+ def print(self):
+ """Prints the language currently in use."""
+ print(f"Using Language: {self.language}")
\ No newline at end of file
diff --git a/i18n/ar_AR.json b/i18n/ar_AR.json
new file mode 100644
index 000000000..b1b1c05dd
--- /dev/null
+++ b/i18n/ar_AR.json
@@ -0,0 +1,239 @@
+{
+ "Unfortunately, there is no compatible GPU available to support your training.": "لسوء الحظ، لا تتوفر وحدة معالجة رسومات متوافقة لدعم تدريبك.",
+ "Yes": "نعم",
+ "Select your dataset.": "حدد مجموعة البيانات الخاصة بك.",
+ "Update list.": "قائمة التحديث.",
+ "Download Model": "تحميل الموديل",
+ "Download Backup": "تحميل النسخ الاحتياطي",
+ "Download Dataset": "تحميل مجموعة البيانات",
+ "Download": "تحميل",
+ "Url:": "عنوان URL:",
+ "Build the index before saving.": "قم ببناء الفهرس قبل الحفظ.",
+ "Save your model once the training ends.": "احفظ النموذج الخاص بك بمجرد انتهاء التدريب.",
+ "Save type": "حفظ النوع",
+ "Save model": "حفظ النموذج",
+ "Choose the method": "اختر الطريقة",
+ "Save all": "احفظ الكل",
+ "Save D and G": "احفظ D وG",
+ "Save voice": "حفظ الصوت",
+ "Downloading the file: ": "تنزيل الملف:",
+ "Stop training": "توقف عن التدريب",
+ "Too many users have recently viewed or downloaded this file": "لقد قام عدد كبير جدًا من المستخدمين مؤخرًا بعرض هذا الملف أو تنزيله",
+ "Cannot get file from this private link": "لا يمكن الحصول على الملف من هذا الرابط الخاص",
+ "Full download": "تحميل كامل",
+ "An error occurred downloading": "حدث خطأ أثناء التنزيل",
+ "Model saved successfully": "تم حفظ النموذج بنجاح",
+ "Saving the model...": "جارٍ حفظ النموذج...",
+ "Saved without index...": "تم الحفظ بدون فهرس...",
+ "model_name": "اسم النموذج",
+ "Saved without inference model...": "تم الحفظ بدون نموذج الاستدلال...",
+ "An error occurred saving the model": "حدث خطأ أثناء حفظ النموذج",
+ "The model you want to save does not exist, be sure to enter the correct name.": "النموذج الذي تريد حفظه غير موجود، تأكد من إدخال الاسم الصحيح.",
+ "The file could not be downloaded.": "لا يمكن تحميل الملف.",
+ "Unzip error.": "خطأ في فك الضغط.",
+ "Path to your added.index file (if it didn't automatically find it)": "المسار إلى ملف add.index (إذا لم يتم العثور عليه تلقائيًا)",
+ "It has been downloaded successfully.": "لقد تم تحميله بنجاح.",
+ "Proceeding with the extraction...": "المضي قدما في عملية الاستخراج...",
+ "The Backup has been uploaded successfully.": "تم تحميل النسخة الاحتياطية بنجاح.",
+ "The Dataset has been loaded successfully.": "تم تحميل مجموعة البيانات بنجاح.",
+ "The Model has been loaded successfully.": "تم تحميل النموذج بنجاح.",
+ "It is used to download your inference models.": "يتم استخدامه لتنزيل نماذج الاستدلال الخاصة بك.",
+ "It is used to download your training backups.": "يتم استخدامه لتنزيل النسخ الاحتياطية للتدريب.",
+ "Download the dataset with the audios in a compatible format (.wav/.flac) to train your model.": "قم بتنزيل مجموعة البيانات مع التسجيلات الصوتية بتنسيق متوافق (.wav/.flac) لتدريب النموذج الخاص بك.",
+ "No relevant file was found to upload.": "لم يتم العثور على ملف ذي صلة للتحميل.",
+ "The model works for inference, and has the .index file.": "يعمل النموذج من أجل الاستدلال، ويحتوي على ملف .index.",
+ "The model works for inference, but it doesn't have the .index file.": "يعمل النموذج من أجل الاستدلال، لكنه لا يحتوي على ملف .index.",
+ "This may take a few minutes, please wait...": "قد يستغرق ذلك بضع دقائق، يرجى الانتظار...",
+ "Resources": "موارد",
+ "Step 1: Processing data": "الخطوة 1: معالجة البيانات",
+ "Step 2a: Skipping pitch extraction": "الخطوة 2 أ: تخطي استخراج الملعب",
+ "Step 2b: Extracting features": "الخطوة 2 ب: استخراج الميزات",
+ "Step 3a: Model training started": "الخطوة 3 أ: بدأ التدريب النموذجي",
+ "Step 4: Export lowest points on a graph of the model": "الخطوة 4: تصدير أدنى النقاط على الرسم البياني للنموذج",
+ "Training is done, check train.log": "تم الانتهاء من التدريب، قم بزيارة Train.log",
+ "All processes have been completed!": "تم الانتهاء من جميع العمليات!",
+ "Model Inference": "الاستدلال النموذجي",
+ "Inferencing voice:": "الاستدلال الصوتي:",
+ "Model_Name": "اسم النموذج",
+ "Dataset_Name": "اسم مجموعة البيانات",
+ "Whether the model has pitch guidance.": "ما إذا كان النموذج يحتوي على توجيهات في الملعب.",
+ "Whether to save only the latest .ckpt file to save hard drive space": "ما إذا كان سيتم حفظ أحدث ملف .ckpt فقط لتوفير مساحة على القرص الصلب",
+ "Cache all training sets to GPU memory. Caching small datasets (less than 10 minutes) can speed up training": "قم بتخزين جميع مجموعات التدريب مؤقتًا في ذاكرة GPU. يمكن أن يؤدي تخزين مجموعات البيانات الصغيرة مؤقتًا (أقل من 10 دقائق) إلى تسريع عملية التدريب",
+ "Save a small final model to the 'weights' folder at each save point": "احفظ نموذجًا نهائيًا صغيرًا في مجلد \"الأوزان\" عند كل نقطة حفظ",
+ "Refresh voice list, index path and audio files": "تحديث قائمة الصوت ومسار الفهرس والملفات الصوتية",
+ "Unload voice to save GPU memory:": "قم بإلغاء تحميل الصوت لحفظ ذاكرة GPU:",
+ "Select Speaker/Singer ID:": "حدد معرف المتحدث/المغني:",
+ "Recommended +12 key for male to female conversion, and -12 key for female to male conversion. If the sound range goes too far and the voice is distorted, you can also adjust it to the appropriate range by yourself.": "يوصى باستخدام مفتاح +12 للتحويل من ذكر إلى أنثى، ومفتاح -12 للتحويل من أنثى إلى ذكر. إذا تجاوز نطاق الصوت كثيرًا وكان الصوت مشوهًا، فيمكنك أيضًا ضبطه على النطاق المناسب بنفسك.",
+ "Transpose (integer, number Fof semitones, raise by an octave: 12, lower by an octave: -12):": "تبديل الموضع (عدد صحيح، عدد نصف النغمات، رفع بمقدار أوكتاف: 12، خفض بمقدار أوكتاف: -12):",
+ "Enter the path of the audio file to be processed (default is the correct format example):": "أدخل مسار الملف الصوتي المراد معالجته (الافتراضي هو مثال التنسيق الصحيح):",
+ "Select the pitch extraction algorithm:": "حدد خوارزمية استخراج الملعب:",
+ "Mangio-Crepe Hop Length (Only applies to mangio-crepe): Hop length refers to the time it takes for the speaker to jump to a dramatic pitch. Lower hop lengths take more time to infer but are more pitch accurate.": "طول قفزة المانجيو-كريب (ينطبق فقط على كريب المانجيو): يشير طول القفزة إلى الوقت الذي يستغرقه المتحدث للانتقال إلى طبقة الصوت الدرامية. يستغرق استنتاج أطوال القفزات المنخفضة وقتًا أطول ولكنها أكثر دقة في درجة الصوت.",
+ "Feature search dataset file path": "مسار ملف مجموعة بيانات البحث عن المعالم",
+ "If >=3: apply median filtering to the harvested pitch results. The value represents the filter radius and can reduce breathiness.": "إذا كان > = 3: قم بتطبيق التصفية المتوسطة على نتائج العرض التقديمي المحصودة. تمثل القيمة نصف قطر المرشح ويمكن أن تقلل من التنفس.",
+ "Path to the feature index file. Leave blank to use the selected result from the dropdown:": "المسار إلى ملف فهرس الميزات. اتركه فارغًا لاستخدام النتيجة المحددة من القائمة المنسدلة:",
+ "Auto-detect index path and select from the dropdown": "الكشف التلقائي عن مسار الفهرس والاختيار من القائمة المنسدلة",
+ "Path to feature file:": "المسار إلى ملف الميزة:",
+ "Search feature ratio:": "نسبة ميزة البحث:",
+ "Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling:": "قم بإعادة تشكيل الصوت الناتج في مرحلة ما بعد المعالجة إلى معدل العينة النهائي. اضبط على 0 لعدم إعادة التشكيل:",
+ "Use the volume envelope of the input to replace or mix with the volume envelope of the output. The closer the ratio is to 1, the more the output envelope is used:": "استخدم مظروف حجم الإدخال لاستبدال أو مزج مظروف حجم الإخراج. كلما اقتربت النسبة من 1، زاد استخدام مظروف الإخراج:",
+ "Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy:": "قم بحماية الحروف الساكنة وأصوات التنفس التي لا صوت لها لمنع المؤثرات مثل تمزيق الموسيقى الإلكترونية. اضبط على 0.5 للتعطيل. قم بتقليل القيمة لزيادة الحماية، ولكنه قد يقلل من دقة الفهرسة:",
+ "F0 curve file (optional). One pitch per line. Replaces the default F0 and pitch modulation:": "ملف منحنى F0 (اختياري). خطوة واحدة لكل سطر. يستبدل الإعداد الافتراضي F0 وتعديل درجة الصوت:",
+ "Convert": "يتحول",
+ "Output information": "معلومات الإخراج",
+ "Export audio (click on the three dots in the lower right corner to download)": "تصدير الصوت (انقر على النقاط الثلاث في الزاوية اليمنى السفلية للتنزيل)",
+ "Batch conversion. Enter the folder containing the audio files to be converted or upload multiple audio files. The converted audio will be output in the specified folder (default: 'opt').": "تحويل دفعة. أدخل المجلد الذي يحتوي على الملفات الصوتية المراد تحويلها أو قم بتحميل ملفات صوتية متعددة. سيتم إخراج الصوت المحول في المجلد المحدد (الافتراضي: \"اختياري\").",
+ "Specify output folder:": "تحديد مجلد الإخراج:",
+ "Enter the path of the audio folder to be processed (copy it from the address bar of the file manager):": "أدخل مسار مجلد الصوت المراد معالجته (انسخه من شريط العناوين الخاص بمدير الملفات):",
+ "You can also input audio files in batches. Choose one of the two options. Priority is given to reading from the folder.": "يمكنك أيضًا إدخال الملفات الصوتية على دفعات. اختر أحد الخيارين. تعطى الأولوية للقراءة من المجلد.",
+ "Export file format": "تصدير تنسيق الملف",
+ "UVR5": "الأشعة فوق البنفسجية5",
+ "Enter the path of the audio folder to be processed:": "أدخل مسار مجلد الصوت المراد معالجته:",
+ "Model": "نموذج",
+ "Vocal Extraction Aggressive": "استخراج الصوتية العدوانية",
+ "Specify the output folder for vocals:": "حدد مجلد الإخراج للغناء:",
+ "Specify the output folder for accompaniment:": "حدد مجلد الإخراج للمرافقة:",
+ "Train": "يدرب",
+ "Enter the model name:": "أدخل اسم النموذج:",
+ "Target sample rate:": "معدل العينة المستهدف:",
+ "Whether the model has pitch guidance (required for singing, optional for speech):": "ما إذا كان النموذج يحتوي على إرشادات لطبقة الصوت (مطلوبة للغناء، واختيارية للكلام):",
+ "Version": "إصدار",
+ "Number of CPU processes used for pitch extraction and data processing:": "عدد عمليات وحدة المعالجة المركزية المستخدمة لاستخراج الملعب ومعالجة البيانات:",
+ "Enter the path of the training folder:": "أدخل مسار مجلد التدريب:",
+ "Please specify the model ID:": "يرجى تحديد معرف النموذج:",
+ "Auto detect audio path and select from the dropdown:": "الكشف التلقائي عن مسار الصوت والاختيار من القائمة المنسدلة:",
+ "Add audio's name to the path to the audio file to be processed (default is the correct format example) Remove the path to use an audio from the dropdown list:": "أضف اسم الصوت إلى المسار إلى الملف الصوتي المراد معالجته (الافتراضي هو مثال التنسيق الصحيح) قم بإزالة المسار لاستخدام الصوت من القائمة المنسدلة:",
+ "Advanced Settings": "إعدادات متقدمة",
+ "Settings": "إعدادات",
+ "Status": "حالة",
+ "Process data": "معالجة البيانات",
+ "Drag your audio here and hit the refresh button": "اسحب الصوت الخاص بك هنا واضغط على زر التحديث",
+ "Or record an audio.": "أو تسجيل الصوت.",
+ "Formant shift inference audio": "تحويل صيغة الاستدلال الصوتي",
+ "Used for male to female and vice-versa conversions": "يستخدم للتحويل من ذكر إلى أنثى والعكس",
+ "Please provide the GPU index(es) separated by '-', like 0-1-2 for using GPUs 0, 1, and 2:": "يرجى تقديم فهرس (فهارس) وحدة معالجة الرسومات مفصولة بـ \"-\"، مثل 0-1-2 لاستخدام وحدات معالجة الرسومات 0 و1 و2:",
+ "GPU Information": "معلومات وحدة معالجة الرسومات",
+ "Feature extraction": "ميزة استخراج",
+ "Save frequency:": "حفظ التردد:",
+ "Training epochs:": "فترات التدريب:",
+ "Batch size per GPU:": "حجم الدفعة لكل GPU:",
+ "Save only the latest '.ckpt' file to save disk space:": "احفظ فقط أحدث ملف '.ckpt' لتوفير مساحة على القرص:",
+ "No": "لا",
+ "Save a small final model to the 'weights' folder at each save point:": "احفظ نموذجًا نهائيًا صغيرًا في مجلد \"الأوزان\" عند كل نقطة حفظ:",
+ "Load pre-trained base model G path:": "تحميل مسار G للنموذج الأساسي المُدرب مسبقًا:",
+ "Load pre-trained base model D path:": "تحميل المسار D للنموذج الأساسي المُدرب مسبقًا:",
+ "Train model": "نموذج القطار",
+ "Train feature index": "مؤشر ميزة القطار",
+ "One-click training": "التدريب بنقرة واحدة",
+ "Processing": "يعالج",
+ "Model fusion, can be used to test timbre fusion": "يمكن استخدام نموذج الاندماج لاختبار دمج الجرس",
+ "Path to Model A:": "المسار إلى النموذج أ:",
+ "Path to Model B:": "المسار إلى النموذج ب:",
+ "Weight for Model A:": "الوزن للنموذج أ:",
+ "Whether the model has pitch guidance:": "ما إذا كان النموذج يحتوي على توجيه الملعب:",
+ "Model information to be placed:": "معلومات النموذج المراد وضعها:",
+ "Saved model name (without extension):": "اسم النموذج المحفوظ (بدون الامتداد):",
+ "Model architecture version:": "نسخة البنية النموذجية:",
+ "Fusion": "انصهار",
+ "Modify model information": "تعديل معلومات النموذج",
+ "Path to Model:": "المسار إلى النموذج:",
+ "Model information to be modified:": "معلومات النموذج المراد تعديلها:",
+ "Save file name:": "حفظ اسم الملف:",
+ "Modify": "يُعدِّل",
+ "View model information": "عرض معلومات النموذج",
+ "View": "منظر",
+ "Model extraction (enter the path of the large file model under the 'logs' folder). This is useful if you want to stop training halfway and manually extract and save a small model file, or if you want to test an intermediate model:": "استخراج النموذج (أدخل مسار نموذج الملف الكبير ضمن مجلد \"السجلات\"). يعد هذا مفيدًا إذا كنت تريد إيقاف التدريب في منتصف الطريق واستخراج ملف نموذج صغير وحفظه يدويًا، أو إذا كنت تريد اختبار نموذج متوسط:",
+ "Save name:": "حفظ الاسم:",
+ "Whether the model has pitch guidance (1: yes, 0: no):": "ما إذا كان النموذج يحتوي على توجيه درجة الصوت (1: نعم، 0: لا):",
+ "Extract": "يستخرج",
+ "Export Onnx": "تصدير اونكس",
+ "RVC Model Path:": "مسار نموذج RVC:",
+ "Onnx Export Path:": "مسار تصدير Onnx:",
+ "MoeVS Model": "نموذج MoVS",
+ "Export Onnx Model": "تصدير نموذج Onnx",
+ "Load model": "نموذج التحميل",
+ "Hubert Model": "نموذج هيوبرت",
+ "Select the .pth file": "حدد ملف .pth",
+ "Select the .index file": "حدد ملف الفهرس",
+ "Select the .npy file": "حدد ملف .npy",
+ "Input device": "جهاز الإدخال",
+ "Output device": "جهاز إخراج",
+ "Audio device (please use the same type of driver)": "جهاز الصوت (يرجى استخدام نفس نوع برنامج التشغيل)",
+ "Response threshold": "عتبة الاستجابة",
+ "Pitch settings": "إعدادات الملعب",
+ "Whether to use note names instead of their hertz value. E.G. [C5, D6] instead of [523.25, 1174.66]Hz": "ما إذا كان سيتم استخدام أسماء الملاحظات بدلاً من قيمتها بالهيرتز. على سبيل المثال. [C5، D6] بدلاً من [523.25، 1174.66] هرتز",
+ "Index Rate": "معدل المؤشر",
+ "General settings": "الاعدادات العامة",
+ "Sample length": "طول العينة",
+ "Fade length": "طول التلاشي",
+ "Extra inference time": "وقت الاستدلال الإضافي",
+ "Input noise reduction": "تقليل ضوضاء الإدخال",
+ "Output noise reduction": "الحد من الضوضاء الناتج",
+ "Performance settings": "إعدادات الأداء",
+ "Start audio conversion": "ابدأ تحويل الصوت",
+ "Stop audio conversion": "إيقاف تحويل الصوت",
+ "Inference time (ms):": "وقت الاستدلال (مللي ثانية):",
+ "Select the pth file": "حدد ملف pth",
+ "Select the index file": "حدد ملف الفهرس",
+ "The hubert model path must not contain Chinese characters": "يجب ألا يحتوي مسار نموذج Hubert على أحرف صينية",
+ "The pth file path must not contain Chinese characters.": "يجب ألا يحتوي مسار الملف pth على أحرف صينية.",
+ "The index file path must not contain Chinese characters.": "يجب ألا يحتوي مسار ملف الفهرس على أحرف صينية.",
+ "Step algorithm": "خوارزمية الخطوة",
+ "Number of epoch processes": "عدد عمليات العصر",
+ "Lowest points export": "أدنى نقاط التصدير",
+ "How many lowest points to save": "كم عدد أدنى النقاط للحفظ",
+ "Export lowest points of a model": "تصدير أدنى نقاط النموذج",
+ "Output models": "نماذج الإخراج",
+ "Stats of selected models": "إحصائيات النماذج المختارة",
+ "Custom f0 [Root pitch] File": "ملف f0 مخصص [درجة الجذر]",
+ "Min pitch": "الملعب دقيقة",
+ "Specify minimal pitch for inference [HZ]": "تحديد الحد الأدنى من درجة الاستدلال [HZ]",
+ "Specify minimal pitch for inference [NOTE][OCTAVE]": "تحديد الحد الأدنى من درجة الصوت للاستدلال [ملاحظة] [أوكتاف]",
+ "Max pitch": "ماكس الملعب",
+ "Specify max pitch for inference [HZ]": "تحديد أقصى درجة للاستدلال [HZ]",
+ "Specify max pitch for inference [NOTE][OCTAVE]": "تحديد أقصى درجة للاستدلال [ملاحظة] [أوكتاف]",
+ "Browse presets for formanting": "تصفح الإعدادات المسبقة للتشكيل",
+ "Presets are located in formantshiftcfg/ folder": "توجد الإعدادات المسبقة في المجلدformantshiftcfg/",
+ "Default value is 1.0": "القيمة الافتراضية هي 1.0",
+ "Quefrency for formant shifting": "التردد لتحويل الصياغة",
+ "Timbre for formant shifting": "Timbre لتحويل الصياغة",
+ "Apply": "يتقدم",
+ "Single": "أعزب",
+ "Batch": "حزمة",
+ "Separate YouTube tracks": "مسارات يوتيوب منفصلة",
+ "Download audio from a YouTube video and automatically separate the vocal and instrumental tracks": "قم بتنزيل الصوت من مقطع فيديو على YouTube وفصل المسارات الصوتية والمسارات الآلية تلقائيًا",
+ "Extra": "إضافي",
+ "Merge": "دمج",
+ "Merge your generated audios with the instrumental": "دمج الصوتيات التي تم إنشاؤها مع الآلات الموسيقية",
+ "Choose your instrumental": "اختر آلتك الموسيقية",
+ "Choose the generated audio": "اختر الصوت الذي تم إنشاؤه",
+ "Combine": "يجمع",
+ "Download and Separate": "تحميل وفصل",
+ "Enter the youtube link": "أدخل رابط اليوتيوب",
+ "This section contains some extra utilities that often may be in experimental phases": "يحتوي هذا القسم على بعض الأدوات المساعدة الإضافية التي غالبًا ما تكون في مراحل تجريبية",
+ "Merge Audios": "دمج صوتيات",
+ "Audio files have been moved to the 'audios' folder.": "تم نقل الملفات الصوتية إلى مجلد \"التسجيلات الصوتية\".",
+ "Downloading audio from the video...": "تحميل الصوت من الفيديو...",
+ "Audio downloaded!": "تحميل الصوت!",
+ "An error occurred:": "حدث خطأ:",
+ "Separating audio...": "فصل الصوت...",
+ "File moved successfully.": "تم نقل الملف بنجاح.",
+ "Finished!": "انتهى!",
+ "The source file does not exist.": "الملف المصدر غير موجود.",
+ "Error moving the file:": "خطأ في نقل الملف:",
+ "Downloading {name} from drive": "تنزيل {name} من محرك الأقراص",
+ "The attempt to download using Drive didn't work": "لم تنجح محاولة التنزيل باستخدام Drive",
+ "Error downloading the file: {str(e)}": "حدث خطأ أثناء تنزيل الملف: {str(e)}",
+ "Downloading {name} from mega": "تنزيل {name} من ميجا",
+ "Downloading {name} from basic url": "تنزيل {name} من عنوان url الأساسي",
+ "Download Audio": "تحميل الصوت",
+ "Download audios of any format for use in inference (recommended for mobile users)": "تنزيل صوتيات بأي تنسيق لاستخدامها في الاستدلال (موصى به لمستخدمي الأجهزة المحمولة)",
+ "Any ConnectionResetErrors post-conversion are irrelevant and purely visual; they can be ignored.\n": "تعتبر أي أخطاء في ConnectionResetErrors بعد التحويل غير ذات صلة ومرئية بحتة؛ يمكن تجاهلها.",
+ "Processed audio saved at: ": "تم حفظ الصوت المعالج في:",
+ "Conversion complete!": "اكتمل التحويل!",
+ "Reverb": "تردد",
+ "Compressor": "ضاغط",
+ "Noise Gate": "بوابة الضجيج",
+ "Volume": "مقدار",
+ "Drag the audio here and click the Refresh button": "اسحب الصوت هنا وانقر على زر التحديث",
+ "Select the generated audio": "حدد الصوت الذي تم إنشاؤه"
+}
\ No newline at end of file
diff --git a/i18n/en_US.json b/i18n/en_US.json
new file mode 100644
index 000000000..a3c8736fb
--- /dev/null
+++ b/i18n/en_US.json
@@ -0,0 +1,239 @@
+{
+ "Unfortunately, there is no compatible GPU available to support your training.": "Unfortunately, there is no compatible GPU available to support your training.",
+ "Yes": "Yes",
+ "Select your dataset.": "Select your dataset.",
+ "Update list.": "Update list.",
+ "Download Model": "Download Model",
+ "Download Backup": "Download Backup",
+ "Download Dataset": "Download Dataset",
+ "Download": "Download",
+ "Url:": "Url:",
+ "Build the index before saving.": "Build the index before saving.",
+ "Save your model once the training ends.": "Save your model once the training ends.",
+ "Save type": "Save type",
+ "Save model": "Save model",
+ "Choose the method": "Choose the method",
+ "Save all": "Save all",
+ "Save D and G": "Save D and G",
+ "Save voice": "Save voice",
+ "Downloading the file: ": "Downloading the file: ",
+ "Stop training": "Stop training",
+ "Too many users have recently viewed or downloaded this file": "Too many users have recently viewed or downloaded this file",
+ "Cannot get file from this private link": "Cannot get file from this private link",
+ "Full download": "Full download",
+ "An error occurred downloading": "An error occurred downloading",
+ "Model saved successfully": "Model saved successfully",
+ "Saving the model...": "Saving the model...",
+ "Saved without index...": "Saved without index...",
+ "model_name": "model_name",
+ "Saved without inference model...": "Saved without inference model...",
+ "An error occurred saving the model": "An error occurred saving the model",
+ "The model you want to save does not exist, be sure to enter the correct name.": "The model you want to save does not exist, be sure to enter the correct name.",
+ "The file could not be downloaded.": "The file could not be downloaded.",
+ "Unzip error.": "Unzip error.",
+ "Path to your added.index file (if it didn't automatically find it)": "Path to your added.index file (if it didn't automatically find it)",
+ "It has been downloaded successfully.": "It has been downloaded successfully.",
+ "Proceeding with the extraction...": "Proceeding with the extraction...",
+ "The Backup has been uploaded successfully.": "The Backup has been uploaded successfully.",
+ "The Dataset has been loaded successfully.": "The Dataset has been loaded successfully.",
+ "The Model has been loaded successfully.": "The Model has been loaded successfully.",
+ "It is used to download your inference models.": "It is used to download your inference models.",
+ "It is used to download your training backups.": "It is used to download your training backups.",
+ "Download the dataset with the audios in a compatible format (.wav/.flac) to train your model.": "Download the dataset with the audios in a compatible format (.wav/.flac) to train your model.",
+ "No relevant file was found to upload.": "No relevant file was found to upload.",
+ "The model works for inference, and has the .index file.": "The model works for inference, and has the .index file.",
+ "The model works for inference, but it doesn't have the .index file.": "The model works for inference, but it doesn't have the .index file.",
+ "This may take a few minutes, please wait...": "This may take a few minutes, please wait...",
+ "Resources": "Resources",
+ "Step 1: Processing data": "Step 1: Processing data",
+ "Step 2a: Skipping pitch extraction": "Step 2a: Skipping pitch extraction",
+ "Step 2b: Extracting features": "Step 2b: Extracting features",
+ "Step 3a: Model training started": "Step 3a: Model training started",
+ "Step 4: Export lowest points on a graph of the model": "Step 4: Export lowest points on a graph of the model",
+ "Training is done, check train.log": "Training is done, check train.log",
+ "All processes have been completed!": "All processes have been completed!",
+ "Model Inference": "Model Inference",
+ "Inferencing voice:": "Inferencing voice:",
+ "Model_Name": "Model_Name",
+ "Dataset_Name": "Dataset_Name",
+ "Whether the model has pitch guidance.": "Whether the model has pitch guidance.",
+ "Whether to save only the latest .ckpt file to save hard drive space": "Whether to save only the latest .ckpt file to save hard drive space",
+ "Cache all training sets to GPU memory. Caching small datasets (less than 10 minutes) can speed up training": "Cache all training sets to GPU memory. Caching small datasets (less than 10 minutes) can speed up training",
+ "Save a small final model to the 'weights' folder at each save point": "Save a small final model to the 'weights' folder at each save point",
+ "Refresh voice list, index path and audio files": "Refresh voice list, index path and audio files",
+ "Unload voice to save GPU memory:": "Unload voice to save GPU memory:",
+ "Select Speaker/Singer ID:": "Select Speaker/Singer ID:",
+ "Recommended +12 key for male to female conversion, and -12 key for female to male conversion. If the sound range goes too far and the voice is distorted, you can also adjust it to the appropriate range by yourself.": "Recommended +12 key for male to female conversion, and -12 key for female to male conversion. If the sound range goes too far and the voice is distorted, you can also adjust it to the appropriate range by yourself.",
+ "Transpose (integer, number Fof semitones, raise by an octave: 12, lower by an octave: -12):": "Transpose (integer, number of semitones, raise by an octave: 12, lower by an octave: -12):",
+ "Enter the path of the audio file to be processed (default is the correct format example):": "Enter the path of the audio file to be processed (default is the correct format example):",
+ "Select the pitch extraction algorithm:": "Select the pitch extraction algorithm:",
+ "Mangio-Crepe Hop Length (Only applies to mangio-crepe): Hop length refers to the time it takes for the speaker to jump to a dramatic pitch. Lower hop lengths take more time to infer but are more pitch accurate.": "Mangio-Crepe Hop Length (Only applies to mangio-crepe): Hop length refers to the time it takes for the speaker to jump to a dramatic pitch. Lower hop lengths take more time to infer but are more pitch accurate.",
+ "Feature search dataset file path": "Feature search dataset file path",
+ "If >=3: apply median filtering to the harvested pitch results. The value represents the filter radius and can reduce breathiness.": "If >=3: apply median filtering to the harvested pitch results. The value represents the filter radius and can reduce breathiness.",
+ "Path to the feature index file. Leave blank to use the selected result from the dropdown:": "Path to the feature index file. Leave blank to use the selected result from the dropdown:",
+ "Auto-detect index path and select from the dropdown": "Auto-detect index path and select from the dropdown",
+ "Path to feature file:": "Path to feature file:",
+ "Search feature ratio:": "Search feature ratio:",
+ "Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling:": "Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling:",
+ "Use the volume envelope of the input to replace or mix with the volume envelope of the output. The closer the ratio is to 1, the more the output envelope is used:": "Use the volume envelope of the input to replace or mix with the volume envelope of the output. The closer the ratio is to 1, the more the output envelope is used:",
+ "Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy:": "Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy:",
+ "F0 curve file (optional). One pitch per line. Replaces the default F0 and pitch modulation:": "F0 curve file (optional). One pitch per line. Replaces the default F0 and pitch modulation:",
+ "Convert": "Convert",
+ "Output information": "Output information",
+ "Export audio (click on the three dots in the lower right corner to download)": "Export audio (click on the three dots in the lower right corner to download)",
+ "Batch conversion. Enter the folder containing the audio files to be converted or upload multiple audio files. The converted audio will be output in the specified folder (default: 'opt').": "Batch conversion. Enter the folder containing the audio files to be converted or upload multiple audio files. The converted audio will be output in the specified folder (default: 'opt').",
+ "Specify output folder:": "Specify output folder:",
+ "Enter the path of the audio folder to be processed (copy it from the address bar of the file manager):": "Enter the path of the audio folder to be processed (copy it from the address bar of the file manager):",
+ "You can also input audio files in batches. Choose one of the two options. Priority is given to reading from the folder.": "You can also input audio files in batches. Choose one of the two options. Priority is given to reading from the folder.",
+ "Export file format": "Export file format",
+ "UVR5": "UVR5",
+ "Enter the path of the audio folder to be processed:": "Enter the path of the audio folder to be processed:",
+ "Model": "Model",
+ "Vocal Extraction Aggressive": "Vocal Extraction Aggressive",
+ "Specify the output folder for vocals:": "Specify the output folder for vocals:",
+ "Specify the output folder for accompaniment:": "Specify the output folder for accompaniment:",
+ "Train": "Train",
+ "Enter the model name:": "Enter the model name:",
+ "Target sample rate:": "Target sample rate:",
+ "Whether the model has pitch guidance (required for singing, optional for speech):": "Whether the model has pitch guidance (required for singing, optional for speech):",
+ "Version": "Version",
+ "Number of CPU processes used for pitch extraction and data processing:": "Number of CPU processes used for pitch extraction and data processing:",
+ "Enter the path of the training folder:": "Enter the path of the training folder:",
+ "Please specify the model ID:": "Please specify the model ID:",
+ "Auto detect audio path and select from the dropdown:": "Auto detect audio path and select from the dropdown:",
+ "Add audio's name to the path to the audio file to be processed (default is the correct format example) Remove the path to use an audio from the dropdown list:": "Add audio's name to the path to the audio file to be processed (default is the correct format example) Remove the path to use an audio from the dropdown list:",
+ "Advanced Settings": "Advanced Settings",
+ "Settings": "Settings",
+ "Status": "Status",
+ "Process data": "Process data",
+ "Drag your audio here and hit the refresh button": "Drag your audio here and hit the refresh button",
+ "Or record an audio.": "Or record an audio.",
+ "Formant shift inference audio": "Formant shift inference audio",
+ "Used for male to female and vice-versa conversions": "Used for male to female and vice-versa conversions",
+ "Please provide the GPU index(es) separated by '-', like 0-1-2 for using GPUs 0, 1, and 2:": "Please provide the GPU index(es) separated by '-', like 0-1-2 for using GPUs 0, 1, and 2:",
+ "GPU Information": "GPU Information",
+ "Feature extraction": "Feature extraction",
+ "Save frequency:": "Save frequency:",
+ "Training epochs:": "Training epochs:",
+ "Batch size per GPU:": "Batch size per GPU:",
+ "Save only the latest '.ckpt' file to save disk space:": "Save only the latest '.ckpt' file to save disk space:",
+ "No": "No",
+ "Save a small final model to the 'weights' folder at each save point:": "Save a small final model to the 'weights' folder at each save point:",
+ "Load pre-trained base model G path:": "Load pre-trained base model G path:",
+ "Load pre-trained base model D path:": "Load pre-trained base model D path:",
+ "Train model": "Train model",
+ "Train feature index": "Train feature index",
+ "One-click training": "One-click training",
+ "Processing": "Processing",
+ "Model fusion, can be used to test timbre fusion": "Model fusion, can be used to test timbre fusion",
+ "Path to Model A:": "Path to Model A:",
+ "Path to Model B:": "Path to Model B:",
+ "Weight for Model A:": "Weight for Model A:",
+ "Whether the model has pitch guidance:": "Whether the model has pitch guidance:",
+ "Model information to be placed:": "Model information to be placed:",
+ "Saved model name (without extension):": "Saved model name (without extension):",
+ "Model architecture version:": "Model architecture version:",
+ "Fusion": "Fusion",
+ "Modify model information": "Modify model information",
+ "Path to Model:": "Path to Model:",
+ "Model information to be modified:": "Model information to be modified:",
+ "Save file name:": "Save file name:",
+ "Modify": "Modify",
+ "View model information": "View model information",
+ "View": "View",
+ "Model extraction (enter the path of the large file model under the 'logs' folder). This is useful if you want to stop training halfway and manually extract and save a small model file, or if you want to test an intermediate model:": "Model extraction (enter the path of the large file model under the 'logs' folder). This is useful if you want to stop training halfway and manually extract and save a small model file, or if you want to test an intermediate model:",
+ "Save name:": "Save name:",
+ "Whether the model has pitch guidance (1: yes, 0: no):": "Whether the model has pitch guidance (1: yes, 0: no):",
+ "Extract": "Extract",
+ "Export Onnx": "Export Onnx",
+ "RVC Model Path:": "RVC Model Path:",
+ "Onnx Export Path:": "Onnx Export Path:",
+ "MoeVS Model": "MoeVS Model",
+ "Export Onnx Model": "Export Onnx Model",
+ "Load model": "Load model",
+ "Hubert Model": "Hubert Model",
+ "Select the .pth file": "Select the .pth file",
+ "Select the .index file": "Select the .index file",
+ "Select the .npy file": "Select the .npy file",
+ "Input device": "Input device",
+ "Output device": "Output device",
+ "Audio device (please use the same type of driver)": "Audio device (please use the same type of driver)",
+ "Response threshold": "Response threshold",
+ "Pitch settings": "Pitch settings",
+ "Whether to use note names instead of their hertz value. E.G. [C5, D6] instead of [523.25, 1174.66]Hz": "Whether to use note names instead of their hertz value. E.G. [C5, D6] instead of [523.25, 1174.66]Hz",
+ "Index Rate": "Index Rate",
+ "General settings": "General settings",
+ "Sample length": "Sample length",
+ "Fade length": "Fade length",
+ "Extra inference time": "Extra inference time",
+ "Input noise reduction": "Input noise reduction",
+ "Output noise reduction": "Output noise reduction",
+ "Performance settings": "Performance settings",
+ "Start audio conversion": "Start audio conversion",
+ "Stop audio conversion": "Stop audio conversion",
+ "Inference time (ms):": "Inference time (ms):",
+ "Select the pth file": "Select the pth file",
+ "Select the index file": "Select the index file",
+ "The hubert model path must not contain Chinese characters": "The hubert model path must not contain Chinese characters",
+ "The pth file path must not contain Chinese characters.": "The pth file path must not contain Chinese characters.",
+ "The index file path must not contain Chinese characters.": "The index file path must not contain Chinese characters.",
+ "Step algorithm": "Step algorithm",
+ "Number of epoch processes": "Number of epoch processes",
+ "Lowest points export": "Lowest points export",
+ "How many lowest points to save": "How many lowest points to save",
+ "Export lowest points of a model": "Export lowest points of a model",
+ "Output models": "Output models",
+ "Stats of selected models": "Stats of selected models",
+ "Custom f0 [Root pitch] File": "Custom f0 [Root pitch] File",
+ "Min pitch": "Min pitch",
+ "Specify minimal pitch for inference [HZ]": "Specify minimal pitch for inference [HZ]",
+ "Specify minimal pitch for inference [NOTE][OCTAVE]": "Specify minimal pitch for inference [NOTE][OCTAVE]",
+ "Max pitch": "Max pitch",
+ "Specify max pitch for inference [HZ]": "Specify max pitch for inference [HZ]",
+ "Specify max pitch for inference [NOTE][OCTAVE]": "Specify max pitch for inference [NOTE][OCTAVE]",
+ "Browse presets for formanting": "Browse presets for formanting",
+ "Presets are located in formantshiftcfg/ folder": "Presets are located in formantshiftcfg/ folder",
+ "Default value is 1.0": "Default value is 1.0",
+ "Quefrency for formant shifting": "Quefrency for formant shifting",
+ "Timbre for formant shifting": "Timbre for formant shifting",
+ "Apply": "Apply",
+ "Single": "Single",
+ "Batch": "Batch",
+ "Separate YouTube tracks": "Separate YouTube tracks",
+ "Download audio from a YouTube video and automatically separate the vocal and instrumental tracks": "Download audio from a YouTube video and automatically separate the vocal and instrumental tracks",
+ "Extra": "Extra",
+ "Merge": "Merge",
+ "Merge your generated audios with the instrumental": "Merge your generated audios with the instrumental",
+ "Choose your instrumental": "Choose your instrumental",
+ "Choose the generated audio": "Choose the generated audio",
+ "Combine": "Combine",
+ "Download and Separate": "Download and Separate",
+ "Enter the youtube link": "Enter the youtube link",
+ "This section contains some extra utilities that often may be in experimental phases": "This section contains some extra utilities that often may be in experimental phases",
+ "Merge Audios": "Merge Audios",
+ "Audio files have been moved to the 'audios' folder.": "Audio files have been moved to the 'audios' folder.",
+ "Downloading audio from the video...": "Downloading audio from the video...",
+ "Audio downloaded!": "Audio downloaded!",
+ "An error occurred:": "An error occurred:",
+ "Separating audio...": "Separating audio...",
+ "File moved successfully.": "File moved successfully.",
+ "Finished!": "Finished!",
+ "The source file does not exist.": "The source file does not exist.",
+ "Error moving the file:": "Error moving the file:",
+ "Downloading {name} from drive": "Downloading {name} from drive",
+ "The attempt to download using Drive didn't work": "The attempt to download using Drive didn't work",
+ "Error downloading the file: {str(e)}": "Error downloading the file: {str(e)}",
+ "Downloading {name} from mega": "Downloading {name} from mega",
+ "Downloading {name} from basic url": "Downloading {name} from basic url",
+ "Download Audio": "Download Audio",
+ "Download audios of any format for use in inference (recommended for mobile users)": "Download audios of any format for use in inference (recommended for mobile users)",
+ "Any ConnectionResetErrors post-conversion are irrelevant and purely visual; they can be ignored.\n": "Any ConnectionResetErrors post-conversion are irrelevant and purely visual; they can be ignored.\n",
+ "Processed audio saved at: ": "Processed audio saved at: ",
+ "Conversion complete!": "Conversion complete!",
+ "Reverb": "Reverb",
+ "Compressor": "Compressor",
+ "Noise Gate": "Noise Gate",
+ "Volume": "Volume",
+ "Drag the audio here and click the Refresh button": "Drag the audio here and click the Refresh button",
+ "Select the generated audio": "Select the generated audio"
+}
diff --git a/i18n/es_ES.json b/i18n/es_ES.json
new file mode 100644
index 000000000..40e27a267
--- /dev/null
+++ b/i18n/es_ES.json
@@ -0,0 +1,239 @@
+{
+ "Unfortunately, there is no compatible GPU available to support your training.": "Lamentablemente, no hay una GPU compatible disponible para respaldar tu formación.",
+ "Yes": "Sí",
+ "Select your dataset.": "Seleccione su dataset.",
+ "Update list.": "Actualizar lista",
+ "Download Model": "Descargar modelo",
+ "Download Backup": "Descargar copias de seguridad",
+ "Download Dataset": "Descargar dataset",
+ "Download": "Descargar",
+ "Url:": "Enlace:",
+ "Build the index before saving.": "Genere el índice antes de guardarlo.",
+ "Save your model once the training ends.": "Guarde su modelo una vez que finalice la capacitación.",
+ "Save type": "Guardar tipo",
+ "Save model": "Guardar modelo",
+ "Choose the method": "Elige el método",
+ "Save all": "Guardar todo",
+ "Save D and G": "Guardar D y G",
+ "Save voice": "Guardar voz",
+ "Downloading the file: ": "Descargando el archivo:",
+ "Stop training": "Detener entrenamiento",
+ "Too many users have recently viewed or downloaded this file": "Demasiados usuarios han visto o descargado recientemente este archivo",
+ "Cannot get file from this private link": "No se puede obtener el archivo de este enlace privado",
+ "Full download": "Descarga completa",
+ "An error occurred downloading": "Se ha producido un error al descargar",
+ "Model saved successfully": "Modelo guardado correctamente",
+ "Saving the model...": "Guardando el modelo...",
+ "Saved without index...": "Guardado sin indexar...",
+ "model_name": "Nombre del modelo,",
+ "Saved without inference model...": "Guardado sin modelo de inferencia...",
+ "An error occurred saving the model": "Se ha producido un error al guardar el modelo",
+ "The model you want to save does not exist, be sure to enter the correct name.": "El modelo que desea guardar no existe, asegúrese de introducir el nombre correcto.",
+ "The file could not be downloaded.": "No se pudo bajar el archivo",
+ "Unzip error.": "Error al descomprimir.",
+ "Path to your added.index file (if it didn't automatically find it)": "Ruta a su archivo added.index (si no lo encontró automáticamente)",
+ "It has been downloaded successfully.": "Se ha descargado con éxito.",
+ "Proceeding with the extraction...": "Continuando con la extracción...",
+ "The Backup has been uploaded successfully.": "La copia de seguridad se ha cargado correctamente.",
+ "The Dataset has been loaded successfully.": "El dataset se ha cargado correctamente.",
+ "The Model has been loaded successfully.": "El modelo se ha cargado correctamente.",
+ "It is used to download your inference models.": "Se utiliza para descargar sus modelos de inferencia.",
+ "It is used to download your training backups.": "Se utiliza para descargar las copias de seguridad de su formación.",
+ "Download the dataset with the audios in a compatible format (.wav/.flac) to train your model.": "Descargue el dataset con los audios en un formato compatible (.wav/.flac) para entrenar su modelo.",
+ "No relevant file was found to upload.": "No se ha encontrado ningún archivo relevante para cargar.",
+ "The model works for inference, and has the .index file.": "El modelo funciona para inferencia y tiene el archivo .index.",
+ "The model works for inference, but it doesn't have the .index file.": "El modelo funciona para inferencia, pero no tiene el archivo .index.",
+ "This may take a few minutes, please wait...": "Esto puede tardar unos minutos, espere por favor.",
+ "Resources": "Recursos",
+ "Step 1: Processing data": "Paso 1: Procesamiento de datos",
+ "Step 2a: Skipping pitch extraction": "Paso 2a: Omitir la extracción de brea",
+ "Step 2b: Extracting features": "Paso 2b: Extracción de funciones",
+ "Step 3a: Model training started": "Paso 3a: Capacitación modelo iniciada",
+ "Step 4: Export lowest points on a graph of the model": "Paso 4: Exporte los puntos más bajos en un gráfico del modelo",
+ "Training is done, check train.log": "El entrenamiento ha finalizado, revisa train.log.",
+ "All processes have been completed!": "¡Todos los procesos se han completado!",
+ "Model Inference": "Modelo de inferencia",
+ "Inferencing voice:": "Voz de inferencia:",
+ "Model_Name": "Nombre_Modelo",
+ "Dataset_Name": "Nombre_Dataset",
+ "Whether the model has pitch guidance.": "Si el modelo tiene guía de tono.",
+ "Whether to save only the latest .ckpt file to save hard drive space": "Si guardar solo el último archivo .ckpt para ahorrar espacio en el disco duro",
+ "Cache all training sets to GPU memory. Caching small datasets (less than 10 minutes) can speed up training": "Almacene en caché todos los conjuntos de entrenamiento en la memoria de la GPU. El almacenamiento en caché de pequeños conjuntos de datos (menos de 10 minutos) puede acelerar la capacitación",
+ "Save a small final model to the 'weights' folder at each save point": "Guarde un pequeño modelo final en la carpeta \"pesos\" en cada punto de guardado",
+ "Refresh voice list, index path and audio files": "Actualiza la lista de voz, la ruta de indexación y los archivos de audio",
+ "Unload voice to save GPU memory:": "Descargue la voz para guardar la memoria de la GPU:",
+ "Select Speaker/Singer ID:": "Seleccionar ID de orador/cantante:",
+ "Recommended +12 key for male to female conversion, and -12 key for female to male conversion. If the sound range goes too far and the voice is distorted, you can also adjust it to the appropriate range by yourself.": "Se recomienda la tecla +12 para la conversión de hombre a mujer y la tecla -12 para la conversión de mujer a hombre. Si el rango de sonido va demasiado lejos y la voz está distorsionada, también puede ajustarlo al rango apropiado usted mismo.",
+ "Transpose (integer, number Fof semitones, raise by an octave: 12, lower by an octave: -12):": "Transponer (entero, número de semitonos, elevar una octava: 12, bajar una octava: -12):",
+ "Enter the path of the audio file to be processed (default is the correct format example):": "Introduzca la ruta del archivo de audio a procesar (el ejemplo de formato correcto es el predeterminado):",
+ "Select the pitch extraction algorithm:": "Seleccione el algoritmo de extracción de pitch:",
+ "Mangio-Crepe Hop Length (Only applies to mangio-crepe): Hop length refers to the time it takes for the speaker to jump to a dramatic pitch. Lower hop lengths take more time to infer but are more pitch accurate.": "Longitud de salto de mangio-crepe (solo se aplica a mangio-crepe): la longitud de salto se refiere al tiempo que tarda el hablante en saltar a un tono dramático. Las longitudes de salto más bajas tardan más tiempo en inferirse, pero son más precisas en el tono.",
+ "Feature search dataset file path": "Ruta del archivo del dataset de búsqueda de funciones",
+ "If >=3: apply median filtering to the harvested pitch results. The value represents the filter radius and can reduce breathiness.": "Si >=3: aplicar filtrado de mediana a los resultados de brea cosechada. El valor representa el radio del filtro y puede reducir la transpiración.",
+ "Path to the feature index file. Leave blank to use the selected result from the dropdown:": "Ruta al archivo de indexación de entidades. Déjelo en blanco para usar el resultado seleccionado del menú desplegable:",
+ "Auto-detect index path and select from the dropdown": "Detectar automáticamente la ruta de indexación y seleccionar en el menú desplegable",
+ "Path to feature file:": "Ruta del archivo de características:",
+ "Search feature ratio:": "Proporción de función de búsqueda:",
+ "Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling:": "Vuelva a muestrear el audio de salida en el posprocesamiento a la frecuencia de muestreo final. Establecer en 0 para no volver a muestrear:",
+ "Use the volume envelope of the input to replace or mix with the volume envelope of the output. The closer the ratio is to 1, the more the output envelope is used:": "Utilice la envolvente de volumen de la entrada para reemplazar o mezclar con la envolvente de volumen de la salida. Cuanto más cerca esté la relación a 1, más se utilizará la envolvente de salida:",
+ "Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy:": "Proteja las consonantes sordas y los sonidos de la respiración para evitar artefactos como el desgarro en la música electrónica. Establecer en 0.5 para desactivar. Disminuya el valor para aumentar la protección, pero puede reducir la precisión de la indexación:",
+ "F0 curve file (optional). One pitch per line. Replaces the default F0 and pitch modulation:": "Archivo de curva F0 (opcional). Un paso por línea. Sustituye la modulación de paso y F0 predeterminada:",
+ "Convert": "Convertir",
+ "Output information": "Información de salida",
+ "Export audio (click on the three dots in the lower right corner to download)": "Exportar audio (haga clic en los tres puntos en la esquina inferior derecha para descargar)",
+ "Batch conversion. Enter the folder containing the audio files to be converted or upload multiple audio files. The converted audio will be output in the specified folder (default: 'opt').": "Conversión por lotes. Introduzca la carpeta que contiene los archivos de audio a convertir o cargue varios archivos de audio. El audio convertido se emitirá en la carpeta especificada (predeterminado: 'opt').",
+ "Specify output folder:": "Especificar carpeta de salida:",
+ "Enter the path of the audio folder to be processed (copy it from the address bar of the file manager):": "Introduzca la ruta de la carpeta de audio a procesar (cópiela desde la barra de direcciones del gestor de archivos):",
+ "You can also input audio files in batches. Choose one of the two options. Priority is given to reading from the folder.": "También puede introducir archivos de audio en lotes. Elige una de las dos opciones. Se da prioridad a la lectura de la carpeta.",
+ "Export file format": "Formato de archivo de exportación",
+ "UVR5": "UVR5",
+ "Enter the path of the audio folder to be processed:": "Introduzca la ruta de la carpeta de audio a procesar:",
+ "Model": "Modelo",
+ "Vocal Extraction Aggressive": "Extracción vocal agresiva",
+ "Specify the output folder for vocals:": "Especifique la carpeta de salida para las voces:",
+ "Specify the output folder for accompaniment:": "Especifique la carpeta de salida para el acompañamiento:",
+ "Train": "Entrenar",
+ "Enter the model name:": "Introducir el nombre de modelo:",
+ "Target sample rate:": "Tasa de muestreo objetivo:",
+ "Whether the model has pitch guidance (required for singing, optional for speech):": "Si el modelo tiene guía de tono (requerido para cantar, opcional para el habla):",
+ "Version": "Versión",
+ "Number of CPU processes used for pitch extraction and data processing:": "Número de procesos de CPU utilizados para la extracción de pitch y el procesamiento de datos:",
+ "Enter the path of the training folder:": "Introduzca la ruta de la carpeta de formación:",
+ "Please specify the model ID:": "Por favor, especifique el ID del modelo:",
+ "Auto detect audio path and select from the dropdown:": "Detectar automáticamente la ruta de audio y seleccionar en el menú desplegable:",
+ "Add audio's name to the path to the audio file to be processed (default is the correct format example) Remove the path to use an audio from the dropdown list:": "Agregue el nombre del audio a la ruta del archivo de audio que se procesará (el ejemplo de formato correcto es el predeterminado) Elimine la ruta para usar un audio de la lista desplegable:",
+ "Advanced Settings": "Ajustes avanzados",
+ "Settings": "Configuración",
+ "Status": "Estado",
+ "Process data": "Datos del proceso",
+ "Drag your audio here and hit the refresh button": "Arrastra tu audio aquí y pulsa el botón de actualización",
+ "Or record an audio.": "O graba un audio.",
+ "Formant shift inference audio": "Audio de inferencia de desplazamiento formante",
+ "Used for male to female and vice-versa conversions": "Se utiliza para conversiones de hombre a mujer y viceversa",
+ "Please provide the GPU index(es) separated by '-', like 0-1-2 for using GPUs 0, 1, and 2:": "Proporcione los índices de la GPU separados por '-', como 0-1-2 para usar las GPU 0, 1 y 2:",
+ "GPU Information": "Información de la GPU",
+ "Feature extraction": "extracción de característicos",
+ "Save frequency:": "Guardar frecuencia:",
+ "Training epochs:": "Épocas de formación:",
+ "Batch size per GPU:": "Tamaño de lote por GPU:",
+ "Save only the latest '.ckpt' file to save disk space:": "Guarde solo el último archivo '.ckpt' para ahorrar espacio en disco:",
+ "No": "No",
+ "Save a small final model to the 'weights' folder at each save point:": "Guarde un pequeño modelo final en la carpeta 'pesos' en cada punto de guardado:",
+ "Load pre-trained base model G path:": "Cargar ruta base modelo G pre-entrenada:",
+ "Load pre-trained base model D path:": "Ruta del modelo D base de carga pre-entrenada:",
+ "Train model": "Modelo de tren",
+ "Train feature index": "Índice de características del tren",
+ "One-click training": "Capacitación con un solo clic",
+ "Processing": "Procesamiento",
+ "Model fusion, can be used to test timbre fusion": "Modelo de fusión, se puede utilizar para probar la fusión de timbre",
+ "Path to Model A:": "Ruta al Modelo A:",
+ "Path to Model B:": "Ruta al Modelo B:",
+ "Weight for Model A:": "Peso para el modelo A:",
+ "Whether the model has pitch guidance:": "Si el modelo tiene guía de tono:",
+ "Model information to be placed:": "Información del modelo a colocar:",
+ "Saved model name (without extension):": "Nombre de modelo guardado (sin extensión):",
+ "Model architecture version:": "Versión de la arquitectura del modelo:",
+ "Fusion": "Fusión",
+ "Modify model information": "Modificar información del modelo",
+ "Path to Model:": "Ruta al modelo:",
+ "Model information to be modified:": "Información del modelo a modificar:",
+ "Save file name:": "Guardar nombre de archivo:",
+ "Modify": "Modificar",
+ "View model information": "Información del modelo",
+ "View": "Ver",
+ "Model extraction (enter the path of the large file model under the 'logs' folder). This is useful if you want to stop training halfway and manually extract and save a small model file, or if you want to test an intermediate model:": "Extracción del modelo (introduzca la ruta del modelo de archivo grande en la carpeta 'logs'). Esto es útil si desea dejar de entrenar a mitad de camino y extraer y guardar manualmente un archivo de modelo pequeño, o si desea probar un modelo intermedio:",
+ "Save name:": "Guardar Nombre",
+ "Whether the model has pitch guidance (1: yes, 0: no):": "Si el modelo tiene guía de tono (1: sí, 0: no):",
+ "Extract": "Extraer",
+ "Export Onnx": "Exportar Onnx",
+ "RVC Model Path:": "Ruta del modelo RVC:",
+ "Onnx Export Path:": "Ruta de exportación Onnx:",
+ "MoeVS Model": "Modelo MoeVS",
+ "Export Onnx Model": "Exportar modelo Onnx",
+ "Load model": "Cargar modelo",
+ "Hubert Model": "Modelo Hubert",
+ "Select the .pth file": "Seleccione un archivo.",
+ "Select the .index file": "Seleccione el archivo .index",
+ "Select the .npy file": "Seleccione un archivo.",
+ "Input device": "Dispositivo de entrada",
+ "Output device": "Dispositivo de salida",
+ "Audio device (please use the same type of driver)": "Dispositivo de audio (utilice el mismo tipo de controlador)",
+ "Response threshold": "Umbral de respuesta",
+ "Pitch settings": "Ajustes de tono",
+ "Whether to use note names instead of their hertz value. E.G. [C5, D6] instead of [523.25, 1174.66]Hz": "Si se deben usar nombres de notas en lugar de su valor de hercios. POR EJEMPLO, [C5, D6] en lugar de [523,25, 1174,66]Hz",
+ "Index Rate": "Tasa de Índice",
+ "General settings": "Configuración general",
+ "Sample length": "Longitud de la muestra",
+ "Fade length": "Longitud del desvanecimiento",
+ "Extra inference time": "Tiempo de inferencia adicional",
+ "Input noise reduction": "Reducción de ruido de entrada",
+ "Output noise reduction": "Reducción de ruido de salida",
+ "Performance settings": "Ajustes de rendimiento",
+ "Start audio conversion": "Iniciar conversión de audio",
+ "Stop audio conversion": "Detener conversión de audio",
+ "Inference time (ms):": "Tiempo de inferencia (ms):",
+ "Select the pth file": "Seleccione un archivo.",
+ "Select the index file": "Selecciona el archivo de indexación",
+ "The hubert model path must not contain Chinese characters": "La ruta del modelo hubert no debe contener caracteres chinos",
+ "The pth file path must not contain Chinese characters.": "La ruta del archivo pth no debe contener caracteres chinos.",
+ "The index file path must not contain Chinese characters.": "La ruta del archivo de indexación no debe contener caracteres chinos.",
+ "Step algorithm": "Algoritmo de pasos",
+ "Number of epoch processes": "Número de procesos de Epoch",
+ "Lowest points export": "Exportación de puntos más bajos",
+ "How many lowest points to save": "Cuántos puntos más bajos ahorrar",
+ "Export lowest points of a model": "Exportar los puntos más bajos de un modelo",
+ "Output models": "Modelos de salida",
+ "Stats of selected models": "Estadísticas de los modelos seleccionados",
+ "Custom f0 [Root pitch] File": "Archivo personalizado f0 [Root pitch]",
+ "Min pitch": "Min pitch",
+ "Specify minimal pitch for inference [HZ]": "Especificar paso mínimo para inferencia [HZ]",
+ "Specify minimal pitch for inference [NOTE][OCTAVE]": "Especifique el tono mínimo para la inferencia [NOTA][OCTAVA]",
+ "Max pitch": "Tono máximo",
+ "Specify max pitch for inference [HZ]": "Especifique el paso máximo para la inferencia [HZ]",
+ "Specify max pitch for inference [NOTE][OCTAVE]": "Especifique el tono máximo para la inferencia [NOTA][OCTAVA]",
+ "Browse presets for formanting": "Examinar ajustes preestablecidos para formatear",
+ "Presets are located in formantshiftcfg/ folder": "Los ajustes preestablecidos se encuentran en formantshiftcfg/ folder",
+ "Default value is 1.0": "El valor por defecto es 1.",
+ "Quefrency for formant shifting": "Quefrencia para desplazamiento de formantes",
+ "Timbre for formant shifting": "Timbre para el cambio de formantes",
+ "Apply": "Aplicar",
+ "Single": "Soltero",
+ "Batch": "Lote",
+ "Separate YouTube tracks": "Pistas separadas de YouTube",
+ "Download audio from a YouTube video and automatically separate the vocal and instrumental tracks": "Descarga audio de un vídeo de YouTube y separa automáticamente las pistas vocales e instrumentales",
+ "Extra": "Extra",
+ "Merge": "Combinar",
+ "Merge your generated audios with the instrumental": "Combina tus audios generados con el instrumental",
+ "Choose your instrumental": "Elige tu instrumental",
+ "Choose the generated audio": "Elige el audio generado",
+ "Combine": "Combinar",
+ "Download and Separate": "Descargar y separar",
+ "Enter the youtube link": "Introduce el enlace de youtube",
+ "This section contains some extra utilities that often may be in experimental phases": "Esta sección contiene algunas utilidades adicionales que a menudo pueden estar en fases experimentales",
+ "Merge Audios": "Combinar audios",
+ "Audio files have been moved to the 'audios' folder.": "Los archivos de audio se han movido a la carpeta 'audios'.",
+ "Downloading audio from the video...": "Descargando audio del vídeo...",
+ "Audio downloaded!": "¡Audio descargado!",
+ "An error occurred:": "Se ha producido un error:",
+ "Separating audio...": "Separando audio...",
+ "File moved successfully.": "El artículo se ha movido correctamente",
+ "Finished!": "¡Listo!",
+ "The source file does not exist.": "El archivo no existe.",
+ "Error moving the file:": "Error al mover el archivo:",
+ "Downloading {name} from drive": "Descargando {name} de la unidad",
+ "The attempt to download using Drive didn't work": "El intento de descarga con Drive no ha funcionado",
+ "Error downloading the file: {str(e)}": "Error al descargar el archivo: {str(e)}",
+ "Downloading {name} from mega": "Descargando {name} de mega",
+ "Downloading {name} from basic url": "Descargando {name} desde la URL básica",
+ "Download Audio": "Descargar Audio",
+ "Download audios of any format for use in inference (recommended for mobile users)": "Descargar audios de cualquier formato para su uso en inferencia (recomendado para usuarios móviles)",
+ "Any ConnectionResetErrors post-conversion are irrelevant and purely visual; they can be ignored.\n": "Cualquier posconversión de ConnectionResetErrors es irrelevante y puramente visual; se puede ignorar.\n",
+ "Processed audio saved at: ": "Audio procesado guardado en:",
+ "Conversion complete!": "Actualización finalizada",
+ "Reverb": "Reverberación",
+ "Compressor": "Compresor",
+ "Noise Gate": "Puerta de ruido",
+ "Volume": "Volumen",
+ "Drag the audio here and click the Refresh button": "Arrastre el audio aquí y haga clic en el botón Actualizar",
+ "Select the generated audio": "Selecciona el audio generado"
+}
\ No newline at end of file
diff --git a/i18n/id_ID.json b/i18n/id_ID.json
new file mode 100644
index 000000000..14e7bcc85
--- /dev/null
+++ b/i18n/id_ID.json
@@ -0,0 +1,239 @@
+{
+ "Unfortunately, there is no compatible GPU available to support your training.": "Sayangnya, tidak ada GPU kompatibel yang tersedia untuk mendukung pelatihan Anda.",
+ "Yes": "Ya",
+ "Select your dataset.": "Pilih kumpulan data Anda.",
+ "Update list.": "Perbarui daftar.",
+ "Download Model": "Unduh Model",
+ "Download Backup": "Unduh Cadangan",
+ "Download Dataset": "Unduh Kumpulan Data",
+ "Download": "Unduh",
+ "Url:": "URL:",
+ "Build the index before saving.": "Bangun indeks sebelum menyimpan.",
+ "Save your model once the training ends.": "Simpan model Anda setelah pelatihan berakhir.",
+ "Save type": "Simpan jenis",
+ "Save model": "Simpan modelnya",
+ "Choose the method": "Pilih metodenya",
+ "Save all": "Simpan semua",
+ "Save D and G": "Simpan D dan G",
+ "Save voice": "Simpan suara",
+ "Downloading the file: ": "Mengunduh file:",
+ "Stop training": "Hentikan pelatihan",
+ "Too many users have recently viewed or downloaded this file": "Terlalu banyak pengguna yang baru-baru ini melihat atau mengunduh file ini",
+ "Cannot get file from this private link": "Tidak dapat memperoleh file dari tautan pribadi ini",
+ "Full download": "Unduhan penuh",
+ "An error occurred downloading": "Terjadi kesalahan saat mengunduh",
+ "Model saved successfully": "Model berhasil disimpan",
+ "Saving the model...": "Menyimpan model...",
+ "Saved without index...": "Disimpan tanpa indeks...",
+ "model_name": "nama model",
+ "Saved without inference model...": "Disimpan tanpa model inferensi...",
+ "An error occurred saving the model": "Terjadi kesalahan saat menyimpan model",
+ "The model you want to save does not exist, be sure to enter the correct name.": "Model yang ingin disimpan tidak ada, pastikan memasukkan nama yang benar.",
+ "The file could not be downloaded.": "File tidak dapat diunduh.",
+ "Unzip error.": "Kesalahan buka zip.",
+ "Path to your added.index file (if it didn't automatically find it)": "Jalur ke file add.index Anda (jika tidak menemukannya secara otomatis)",
+ "It has been downloaded successfully.": "Itu telah berhasil diunduh.",
+ "Proceeding with the extraction...": "Melanjutkan ekstraksi...",
+ "The Backup has been uploaded successfully.": "Cadangan telah berhasil diunggah.",
+ "The Dataset has been loaded successfully.": "Dataset telah berhasil dimuat.",
+ "The Model has been loaded successfully.": "Model telah berhasil dimuat.",
+ "It is used to download your inference models.": "Ini digunakan untuk mengunduh model inferensi Anda.",
+ "It is used to download your training backups.": "Ini digunakan untuk mengunduh cadangan pelatihan Anda.",
+ "Download the dataset with the audios in a compatible format (.wav/.flac) to train your model.": "Unduh kumpulan data dengan audio dalam format yang kompatibel (.wav/.flac) untuk melatih model Anda.",
+ "No relevant file was found to upload.": "Tidak ada file relevan yang ditemukan untuk diunggah.",
+ "The model works for inference, and has the .index file.": "Model ini berfungsi untuk inferensi, dan memiliki file .index.",
+ "The model works for inference, but it doesn't have the .index file.": "Model ini berfungsi untuk inferensi, tetapi tidak memiliki file .index.",
+ "This may take a few minutes, please wait...": "Ini mungkin memakan waktu beberapa menit, harap tunggu...",
+ "Resources": "Sumber daya",
+ "Step 1: Processing data": "Langkah 1: Memproses data",
+ "Step 2a: Skipping pitch extraction": "Langkah 2a: Melewatkan ekstraksi nada",
+ "Step 2b: Extracting features": "Langkah 2b: Mengekstraksi fitur",
+ "Step 3a: Model training started": "Langkah 3a: Pelatihan model dimulai",
+ "Step 4: Export lowest points on a graph of the model": "Langkah 4: Ekspor titik terendah pada grafik model",
+ "Training is done, check train.log": "Pelatihan selesai, periksa train.log",
+ "All processes have been completed!": "Semua proses telah selesai!",
+ "Model Inference": "Inferensi Model",
+ "Inferencing voice:": "Menyimpulkan suara:",
+ "Model_Name": "Nama model",
+ "Dataset_Name": "Kumpulan Data_Nama",
+ "Whether the model has pitch guidance.": "Apakah model memiliki panduan nada.",
+ "Whether to save only the latest .ckpt file to save hard drive space": "Apakah hanya menyimpan file .ckpt terbaru untuk menghemat ruang hard drive",
+ "Cache all training sets to GPU memory. Caching small datasets (less than 10 minutes) can speed up training": "Simpan semua set pelatihan ke memori GPU. Menyimpan kumpulan data kecil (kurang dari 10 menit) dapat mempercepat pelatihan",
+ "Save a small final model to the 'weights' folder at each save point": "Simpan model akhir kecil ke folder 'bobot' di setiap titik penyimpanan",
+ "Refresh voice list, index path and audio files": "Segarkan daftar suara, jalur indeks, dan file audio",
+ "Unload voice to save GPU memory:": "Bongkar suara untuk menghemat memori GPU:",
+ "Select Speaker/Singer ID:": "Pilih ID Pembicara/Penyanyi:",
+ "Recommended +12 key for male to female conversion, and -12 key for female to male conversion. If the sound range goes too far and the voice is distorted, you can also adjust it to the appropriate range by yourself.": "Direkomendasikan kunci +12 untuk konversi pria ke wanita, dan -12 kunci untuk konversi wanita ke pria. Jika rentang suara terlalu jauh dan suaranya terdistorsi, Anda juga dapat menyesuaikannya sendiri ke rentang yang sesuai.",
+ "Transpose (integer, number Fof semitones, raise by an octave: 12, lower by an octave: -12):": "Transpos (bilangan bulat, jumlah seminada, dinaikkan satu oktaf: 12, diturunkan satu oktaf: -12):",
+ "Enter the path of the audio file to be processed (default is the correct format example):": "Masukkan jalur file audio yang akan diproses (defaultnya adalah contoh format yang benar):",
+ "Select the pitch extraction algorithm:": "Pilih algoritma ekstraksi nada:",
+ "Mangio-Crepe Hop Length (Only applies to mangio-crepe): Hop length refers to the time it takes for the speaker to jump to a dramatic pitch. Lower hop lengths take more time to infer but are more pitch accurate.": "Panjang Hop Mangio-Crepe (Hanya berlaku untuk mangio-crepe): Panjang hop mengacu pada waktu yang dibutuhkan pembicara untuk melompat ke nada dramatis. Panjang lompatan yang lebih rendah membutuhkan lebih banyak waktu untuk menyimpulkan tetapi nadanya lebih akurat.",
+ "Feature search dataset file path": "Jalur file kumpulan data pencarian fitur",
+ "If >=3: apply median filtering to the harvested pitch results. The value represents the filter radius and can reduce breathiness.": "Jika >=3: terapkan pemfilteran median pada hasil pitch yang dipanen. Nilai tersebut mewakili radius filter dan dapat mengurangi sesak napas.",
+ "Path to the feature index file. Leave blank to use the selected result from the dropdown:": "Jalur ke file indeks fitur. Biarkan kosong untuk menggunakan hasil yang dipilih dari dropdown:",
+ "Auto-detect index path and select from the dropdown": "Deteksi jalur indeks secara otomatis dan pilih dari dropdown",
+ "Path to feature file:": "Jalur ke file fitur:",
+ "Search feature ratio:": "Rasio fitur pencarian:",
+ "Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling:": "Sampel ulang audio keluaran dalam pasca-pemrosesan ke laju sampel akhir. Setel ke 0 tanpa pengambilan sampel ulang:",
+ "Use the volume envelope of the input to replace or mix with the volume envelope of the output. The closer the ratio is to 1, the more the output envelope is used:": "Gunakan amplop volume masukan untuk menggantikan atau mencampur dengan amplop volume keluaran. Semakin dekat rasionya ke 1, semakin banyak amplop keluaran yang digunakan:",
+ "Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy:": "Lindungi konsonan tak bersuara dan bunyi napas untuk mencegah artefak seperti robek pada musik elektronik. Setel ke 0,5 untuk menonaktifkan. Turunkan nilainya untuk meningkatkan perlindungan, namun hal ini dapat mengurangi akurasi pengindeksan:",
+ "F0 curve file (optional). One pitch per line. Replaces the default F0 and pitch modulation:": "File kurva F0 (opsional). Satu nada per baris. Menggantikan F0 dan modulasi nada default:",
+ "Convert": "Mengubah",
+ "Output information": "Informasi keluaran",
+ "Export audio (click on the three dots in the lower right corner to download)": "Ekspor audio (klik tiga titik di pojok kanan bawah untuk mengunduh)",
+ "Batch conversion. Enter the folder containing the audio files to be converted or upload multiple audio files. The converted audio will be output in the specified folder (default: 'opt').": "Konversi batch. Masuk ke folder yang berisi file audio yang akan dikonversi atau unggah beberapa file audio. Audio yang dikonversi akan dikeluarkan di folder yang ditentukan (default: 'opt').",
+ "Specify output folder:": "Tentukan folder keluaran:",
+ "Enter the path of the audio folder to be processed (copy it from the address bar of the file manager):": "Masukkan jalur folder audio yang akan diproses (salin dari bilah alamat pengelola file):",
+ "You can also input audio files in batches. Choose one of the two options. Priority is given to reading from the folder.": "Anda juga dapat memasukkan file audio secara berkelompok. Pilih salah satu dari dua opsi. Prioritas diberikan untuk membaca dari folder.",
+ "Export file format": "Ekspor format file",
+ "UVR5": "UVR5",
+ "Enter the path of the audio folder to be processed:": "Masukkan jalur folder audio yang akan diproses:",
+ "Model": "Model",
+ "Vocal Extraction Aggressive": "Ekstraksi Vokal Agresif",
+ "Specify the output folder for vocals:": "Tentukan folder keluaran untuk vokal:",
+ "Specify the output folder for accompaniment:": "Tentukan folder keluaran untuk pengiring:",
+ "Train": "Kereta",
+ "Enter the model name:": "Masukkan nama model:",
+ "Target sample rate:": "Tingkat sampel target:",
+ "Whether the model has pitch guidance (required for singing, optional for speech):": "Apakah model memiliki panduan nada (wajib untuk bernyanyi, opsional untuk berbicara):",
+ "Version": "Versi: kapan",
+ "Number of CPU processes used for pitch extraction and data processing:": "Jumlah proses CPU yang digunakan untuk ekstraksi nada dan pemrosesan data:",
+ "Enter the path of the training folder:": "Masukkan jalur folder pelatihan:",
+ "Please specify the model ID:": "Silakan tentukan ID model:",
+ "Auto detect audio path and select from the dropdown:": "Deteksi otomatis jalur audio dan pilih dari dropdown:",
+ "Add audio's name to the path to the audio file to be processed (default is the correct format example) Remove the path to use an audio from the dropdown list:": "Tambahkan nama audio ke jalur ke file audio yang akan diproses (standarnya adalah contoh format yang benar) Hapus jalur untuk menggunakan audio dari daftar dropdown:",
+ "Advanced Settings": "Pengaturan lanjutan",
+ "Settings": "Pengaturan",
+ "Status": "Status",
+ "Process data": "Data proses",
+ "Drag your audio here and hit the refresh button": "Seret audio Anda ke sini dan tekan tombol segarkan",
+ "Or record an audio.": "Atau rekam audio.",
+ "Formant shift inference audio": "Audio inferensi pergeseran formant",
+ "Used for male to female and vice-versa conversions": "Digunakan untuk konversi pria ke wanita dan sebaliknya",
+ "Please provide the GPU index(es) separated by '-', like 0-1-2 for using GPUs 0, 1, and 2:": "Harap berikan indeks GPU yang dipisahkan dengan '-', seperti 0-1-2 untuk menggunakan GPU 0, 1, dan 2:",
+ "GPU Information": "Informasi GPU",
+ "Feature extraction": "Ekstraksi fitur",
+ "Save frequency:": "Simpan frekuensi:",
+ "Training epochs:": "Zaman pelatihan:",
+ "Batch size per GPU:": "Ukuran batch per GPU:",
+ "Save only the latest '.ckpt' file to save disk space:": "Simpan hanya file '.ckpt' terbaru untuk menghemat ruang disk:",
+ "No": "TIDAK",
+ "Save a small final model to the 'weights' folder at each save point:": "Simpan model akhir kecil ke folder 'bobot' di setiap titik penyimpanan:",
+ "Load pre-trained base model G path:": "Memuat jalur G model dasar terlatih:",
+ "Load pre-trained base model D path:": "Memuat jalur model D dasar yang telah dilatih sebelumnya:",
+ "Train model": "Model kereta api",
+ "Train feature index": "Indeks fitur kereta",
+ "One-click training": "Pelatihan sekali klik",
+ "Processing": "Pengolahan",
+ "Model fusion, can be used to test timbre fusion": "Fusi model, dapat digunakan untuk menguji fusi timbre",
+ "Path to Model A:": "Jalan menuju Model A:",
+ "Path to Model B:": "Jalan menuju Model B:",
+ "Weight for Model A:": "Berat untuk Model A:",
+ "Whether the model has pitch guidance:": "Apakah model memiliki panduan nada:",
+ "Model information to be placed:": "Informasi model yang akan ditempatkan:",
+ "Saved model name (without extension):": "Nama model yang disimpan (tanpa ekstensi):",
+ "Model architecture version:": "Versi arsitektur model:",
+ "Fusion": "Fusi",
+ "Modify model information": "Ubah informasi model",
+ "Path to Model:": "Jalur Menuju Model:",
+ "Model information to be modified:": "Informasi model yang akan dimodifikasi:",
+ "Save file name:": "Simpan nama file:",
+ "Modify": "Memodifikasi",
+ "View model information": "Lihat informasi model",
+ "View": "Melihat",
+ "Model extraction (enter the path of the large file model under the 'logs' folder). This is useful if you want to stop training halfway and manually extract and save a small model file, or if you want to test an intermediate model:": "Ekstraksi model (masukkan jalur model file besar di bawah folder 'logs'). Ini berguna jika Anda ingin menghentikan pelatihan di tengah jalan dan mengekstrak serta menyimpan file model kecil secara manual, atau jika Anda ingin menguji model perantara:",
+ "Save name:": "Simpan nama:",
+ "Whether the model has pitch guidance (1: yes, 0: no):": "Apakah model memiliki panduan nada (1: ya, 0: tidak):",
+ "Extract": "Ekstrak",
+ "Export Onnx": "Ekspor Onnx",
+ "RVC Model Path:": "Jalur Model RVC:",
+ "Onnx Export Path:": "Jalur Ekspor Onnx:",
+ "MoeVS Model": "Model MoeVS",
+ "Export Onnx Model": "Ekspor Model Onnx",
+ "Load model": "Model beban",
+ "Hubert Model": "Model Hubert",
+ "Select the .pth file": "Pilih file .pth",
+ "Select the .index file": "Pilih file .index",
+ "Select the .npy file": "Pilih file .npy",
+ "Input device": "Alat input",
+ "Output device": "Perangkat keluaran",
+ "Audio device (please use the same type of driver)": "Perangkat audio (harap gunakan jenis driver yang sama)",
+ "Response threshold": "Ambang batas respons",
+ "Pitch settings": "Pengaturan nada",
+ "Whether to use note names instead of their hertz value. E.G. [C5, D6] instead of [523.25, 1174.66]Hz": "Apakah akan menggunakan nama nada dan bukan nilai hertznya. MISALNYA. [C5, D6] bukannya [523.25, 1174.66]Hz",
+ "Index Rate": "Tingkat Indeks",
+ "General settings": "Pengaturan Umum",
+ "Sample length": "Panjang sampel",
+ "Fade length": "Panjang pudar",
+ "Extra inference time": "Waktu inferensi ekstra",
+ "Input noise reduction": "Pengurangan kebisingan masukan",
+ "Output noise reduction": "Pengurangan kebisingan keluaran",
+ "Performance settings": "Pengaturan kinerja",
+ "Start audio conversion": "Mulai konversi audio",
+ "Stop audio conversion": "Hentikan konversi audio",
+ "Inference time (ms):": "Waktu inferensi (ms):",
+ "Select the pth file": "Pilih file pth",
+ "Select the index file": "Pilih file indeks",
+ "The hubert model path must not contain Chinese characters": "Jalur model hubert tidak boleh berisi karakter China",
+ "The pth file path must not contain Chinese characters.": "Jalur file pth tidak boleh berisi karakter Cina.",
+ "The index file path must not contain Chinese characters.": "Jalur file indeks tidak boleh berisi karakter Cina.",
+ "Step algorithm": "Algoritma langkah",
+ "Number of epoch processes": "Jumlah proses zaman",
+ "Lowest points export": "Ekspor poin terendah",
+ "How many lowest points to save": "Berapa banyak poin terendah yang harus disimpan",
+ "Export lowest points of a model": "Ekspor titik terendah suatu model",
+ "Output models": "Model keluaran",
+ "Stats of selected models": "Statistik model yang dipilih",
+ "Custom f0 [Root pitch] File": "File f0 [Root pitch] khusus",
+ "Min pitch": "nada minimal",
+ "Specify minimal pitch for inference [HZ]": "Tentukan nada minimal untuk inferensi [HZ]",
+ "Specify minimal pitch for inference [NOTE][OCTAVE]": "Tentukan nada minimal untuk inferensi [CATATAN][OCTAVE]",
+ "Max pitch": "Nada maksimal",
+ "Specify max pitch for inference [HZ]": "Tentukan nada maksimal untuk inferensi [HZ]",
+ "Specify max pitch for inference [NOTE][OCTAVE]": "Tentukan nada maksimal untuk inferensi [CATATAN][OCTAVE]",
+ "Browse presets for formanting": "Telusuri preset untuk pembentukan",
+ "Presets are located in formantshiftcfg/ folder": "Preset terletak di folder formantshiftcfg/",
+ "Default value is 1.0": "Nilai defaultnya adalah 1,0",
+ "Quefrency for formant shifting": "Quefrency untuk pergeseran formant",
+ "Timbre for formant shifting": "Timbre untuk pergeseran formant",
+ "Apply": "Menerapkan",
+ "Single": "Lajang",
+ "Batch": "Kelompok",
+ "Separate YouTube tracks": "Pisahkan trek YouTube",
+ "Download audio from a YouTube video and automatically separate the vocal and instrumental tracks": "Unduh audio dari video YouTube dan pisahkan trek vokal dan instrumental secara otomatis",
+ "Extra": "Tambahan",
+ "Merge": "Menggabungkan",
+ "Merge your generated audios with the instrumental": "Gabungkan audio yang Anda hasilkan dengan instrumental",
+ "Choose your instrumental": "Pilih instrumen Anda",
+ "Choose the generated audio": "Pilih audio yang dihasilkan",
+ "Combine": "Menggabungkan",
+ "Download and Separate": "Unduh dan Pisahkan",
+ "Enter the youtube link": "Masukkan tautan youtube",
+ "This section contains some extra utilities that often may be in experimental phases": "Bagian ini berisi beberapa utilitas tambahan yang mungkin sering berada dalam tahap percobaan",
+ "Merge Audios": "Gabungkan Audio",
+ "Audio files have been moved to the 'audios' folder.": "File audio telah dipindahkan ke folder 'audios'.",
+ "Downloading audio from the video...": "Mengunduh audio dari video...",
+ "Audio downloaded!": "Unduhan audio!",
+ "An error occurred:": "Terjadi kesalahan:",
+ "Separating audio...": "Memisahkan audio...",
+ "File moved successfully.": "File berhasil dipindahkan.",
+ "Finished!": "Selesai!",
+ "The source file does not exist.": "File sumber tidak ada.",
+ "Error moving the file:": "Kesalahan saat memindahkan file:",
+ "Downloading {name} from drive": "Mengunduh {name} dari drive",
+ "The attempt to download using Drive didn't work": "Upaya mengunduh menggunakan Drive tidak berhasil",
+ "Error downloading the file: {str(e)}": "Kesalahan saat mengunduh berkas: {str(e)}",
+ "Downloading {name} from mega": "Mengunduh {name} dari mega",
+ "Downloading {name} from basic url": "Mengunduh {name} dari url dasar",
+ "Download Audio": "Unduh Audio",
+ "Download audios of any format for use in inference (recommended for mobile users)": "Mengunduh audio dalam format apa pun untuk digunakan dalam inferensi (disarankan untuk pengguna seluler)",
+ "Any ConnectionResetErrors post-conversion are irrelevant and purely visual; they can be ignored.\n": "Setiap ConnectionResetErrors pasca-konversi tidak relevan dan murni visual; mereka dapat diabaikan.",
+ "Processed audio saved at: ": "Audio yang diproses disimpan di:",
+ "Conversion complete!": "Konversi selesai!",
+ "Reverb": "Berkumandang",
+ "Compressor": "Kompresor",
+ "Noise Gate": "Gerbang Kebisingan",
+ "Volume": "Volume",
+ "Drag the audio here and click the Refresh button": "Seret audio ke sini dan klik tombol Refresh",
+ "Select the generated audio": "Pilih audio yang dihasilkan"
+}
\ No newline at end of file
diff --git a/i18n/locale_diff.py b/i18n/locale_diff.py
new file mode 100644
index 000000000..387ddfe1b
--- /dev/null
+++ b/i18n/locale_diff.py
@@ -0,0 +1,48 @@
+import json
+import os
+from collections import OrderedDict
+
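+# Keep every locale JSON in sync with en_US.json: add missing keys (using the
+# English text as a placeholder), drop extra keys, and normalize key order.
+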
+# Define the standard file name
+standard_file = "en_US.json"
+
+# Find all JSON files in the directory
+dir_path = "./"
+languages = [
+ f for f in os.listdir(dir_path) if f.endswith(".json") and f != standard_file
+]
+
+# Load the standard file
+with open(standard_file, "r", encoding="utf-8") as f:
+ standard_data = json.load(f, object_pairs_hook=OrderedDict)
+
+# Loop through each language file
+for lang_file in languages:
+ # Load the language file
+ with open(lang_file, "r", encoding="utf-8") as f:
+ lang_data = json.load(f, object_pairs_hook=OrderedDict)
+
+ # Keys present in the standard file but missing from this language file
+ diff = set(standard_data.keys()) - set(lang_data.keys())
+
+ miss = set(lang_data.keys()) - set(standard_data.keys())  # extra keys not in the standard file
+
+ # Add any missing keys to the language file
+ for key in diff:
+ lang_data[key] = key
+
+ # Delete any extra keys from the language file
+ for key in miss:
+ del lang_data[key]
+
+ # Sort the keys of the language file to match the order of the standard file
+ lang_data = OrderedDict(
+ sorted(lang_data.items(), key=lambda x: list(standard_data.keys()).index(x[0]))
+ )
+
+ # Save the updated language file
+ with open(lang_file, "w", encoding="utf-8") as f:
+ json.dump(lang_data, f, ensure_ascii=False, indent=4)
+ f.write("\n")
diff --git a/i18n/pt_PT.json b/i18n/pt_PT.json
new file mode 100644
index 000000000..b5ac5e8e6
--- /dev/null
+++ b/i18n/pt_PT.json
@@ -0,0 +1,239 @@
+{
+ "Unfortunately, there is no compatible GPU available to support your training.": "Infelizmente, não há GPU compatível disponível para apoiar o seu treinamento.",
+ "Yes": "Sim",
+ "Select your dataset.": "Selecione seu conjunto de dados.",
+ "Update list.": "Lista de atualização.",
+ "Download Model": "Baixar modelo",
+ "Download Backup": "Baixar cópia de segurança",
+ "Download Dataset": "Baixar conjunto de dados",
+ "Download": "Download",
+ "Url:": "URL:",
+ "Build the index before saving.": "Crie o índice antes de salvar.",
+ "Save your model once the training ends.": "Salve seu modelo quando o treinamento terminar.",
+ "Save type": "Salvar tipo",
+ "Save model": "Salvar modelo",
+ "Choose the method": "Escolha o método",
+ "Save all": "Salvar tudo",
+ "Save D and G": "Salve D e G",
+ "Save voice": "Salvar voz",
+ "Downloading the file: ": "Baixando o arquivo:",
+ "Stop training": "Pare de treinar",
+ "Too many users have recently viewed or downloaded this file": "Muitos usuários visualizaram ou baixaram este arquivo recentemente",
+ "Cannot get file from this private link": "Não é possível obter o arquivo deste link privado",
+ "Full download": "Download completo",
+ "An error occurred downloading": "Ocorreu um erro ao baixar",
+ "Model saved successfully": "Modelo salvo com sucesso",
+ "Saving the model...": "Salvando o modelo...",
+ "Saved without index...": "Salvo sem índice...",
+ "model_name": "nome_modelo",
+ "Saved without inference model...": "Salvo sem modelo de inferência...",
+ "An error occurred saving the model": "Ocorreu um erro ao salvar o modelo",
+ "The model you want to save does not exist, be sure to enter the correct name.": "O modelo que você deseja salvar não existe, certifique-se de inserir o nome correto.",
+ "The file could not be downloaded.": "O arquivo não pôde ser baixado.",
+ "Unzip error.": "Erro ao descompactar.",
+ "Path to your added.index file (if it didn't automatically find it)": "Caminho para o seu arquivo add.index (se não o encontrou automaticamente)",
+ "It has been downloaded successfully.": "Ele foi baixado com sucesso.",
+ "Proceeding with the extraction...": "Prosseguindo com a extração...",
+ "The Backup has been uploaded successfully.": "O backup foi carregado com sucesso.",
+ "The Dataset has been loaded successfully.": "O conjunto de dados foi carregado com sucesso.",
+ "The Model has been loaded successfully.": "O modelo foi carregado com sucesso.",
+ "It is used to download your inference models.": "Ele é usado para baixar seus modelos de inferência.",
+ "It is used to download your training backups.": "Ele é usado para baixar seus backups de treinamento.",
+ "Download the dataset with the audios in a compatible format (.wav/.flac) to train your model.": "Baixe o conjunto de dados com os áudios em formato compatível (.wav/.flac) para treinar seu modelo.",
+ "No relevant file was found to upload.": "Nenhum arquivo relevante foi encontrado para upload.",
+ "The model works for inference, and has the .index file.": "O modelo funciona para inferência e possui o arquivo .index.",
+ "The model works for inference, but it doesn't have the .index file.": "O modelo funciona para inferência, mas não possui o arquivo .index.",
+ "This may take a few minutes, please wait...": "Isso pode levar alguns minutos, aguarde...",
+ "Resources": "Recursos",
+ "Step 1: Processing data": "Etapa 1: Processamento de dados",
+ "Step 2a: Skipping pitch extraction": "Passo 2a: Ignorando a extração do tom",
+ "Step 2b: Extracting features": "Etapa 2b: Extraindo recursos",
+ "Step 3a: Model training started": "Etapa 3a: treinamento do modelo iniciado",
+ "Step 4: Export lowest points on a graph of the model": "Etapa 4: Exportar os pontos mais baixos em um gráfico do modelo",
+ "Training is done, check train.log": "O treinamento foi concluído, verifique train.log",
+ "All processes have been completed!": "Todos os processos foram concluídos!",
+ "Model Inference": "Inferência de modelo",
+ "Inferencing voice:": "Inferência de voz:",
+ "Model_Name": "Nome_modelo",
+ "Dataset_Name": "Conjunto de dados_Nome",
+ "Whether the model has pitch guidance.": "Se o modelo tem orientação de pitch.",
+ "Whether to save only the latest .ckpt file to save hard drive space": "Se deseja salvar apenas o arquivo .ckpt mais recente para economizar espaço no disco rígido",
+ "Cache all training sets to GPU memory. Caching small datasets (less than 10 minutes) can speed up training": "Armazene em cache todos os conjuntos de treinamento na memória da GPU. Armazenar pequenos conjuntos de dados em cache (menos de 10 minutos) pode acelerar o treinamento",
+ "Save a small final model to the 'weights' folder at each save point": "Salve um pequeno modelo final na pasta 'pesos' em cada ponto de salvamento",
+ "Refresh voice list, index path and audio files": "Atualizar lista de voz, caminho de índice e arquivos de áudio",
+ "Unload voice to save GPU memory:": "Descarregue a voz para economizar memória da GPU:",
+ "Select Speaker/Singer ID:": "Selecione o ID do palestrante/cantor:",
+ "Recommended +12 key for male to female conversion, and -12 key for female to male conversion. If the sound range goes too far and the voice is distorted, you can also adjust it to the appropriate range by yourself.": "Chave recomendada +12 para conversão de homem para mulher e chave -12 para conversão de mulher para homem. Se o alcance do som for muito longe e a voz estiver distorcida, você também poderá ajustá-lo para o alcance apropriado.",
+ "Transpose (integer, number Fof semitones, raise by an octave: 12, lower by an octave: -12):": "Transpor (inteiro, número de semitons, aumentar uma oitava: 12, diminuir uma oitava: -12):",
+ "Enter the path of the audio file to be processed (default is the correct format example):": "Digite o caminho do arquivo de áudio a ser processado (o padrão é o exemplo de formato correto):",
+ "Select the pitch extraction algorithm:": "Selecione o algoritmo de extração de pitch:",
+ "Mangio-Crepe Hop Length (Only applies to mangio-crepe): Hop length refers to the time it takes for the speaker to jump to a dramatic pitch. Lower hop lengths take more time to infer but are more pitch accurate.": "Comprimento do salto do Mangio-Crepe (aplica-se apenas ao mangio-crepe): A duração do salto refere-se ao tempo que leva para o locutor saltar para um tom dramático. Comprimentos de salto mais baixos levam mais tempo para serem inferidos, mas são mais precisos no tom.",
+ "Feature search dataset file path": "Caminho do arquivo do conjunto de dados de pesquisa de recursos",
+ "If >=3: apply median filtering to the harvested pitch results. The value represents the filter radius and can reduce breathiness.": "Se >=3: aplique filtragem mediana aos resultados de pitch colhidos. O valor representa o raio do filtro e pode reduzir a soprosidade.",
+ "Path to the feature index file. Leave blank to use the selected result from the dropdown:": "Caminho para o arquivo de índice de recursos. Deixe em branco para usar o resultado selecionado no menu suspenso:",
+ "Auto-detect index path and select from the dropdown": "Detecte automaticamente o caminho do índice e selecione no menu suspenso",
+ "Path to feature file:": "Caminho para o arquivo de recurso:",
+ "Search feature ratio:": "Proporção de recursos de pesquisa:",
+ "Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling:": "Faça uma nova amostragem do áudio de saída no pós-processamento para a taxa de amostragem final. Defina como 0 para nenhuma reamostragem:",
+ "Use the volume envelope of the input to replace or mix with the volume envelope of the output. The closer the ratio is to 1, the more the output envelope is used:": "Use o envelope de volume da entrada para substituir ou mixar com o envelope de volume da saída. Quanto mais próxima a proporção estiver de 1, mais o envelope de saída será usado:",
+ "Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy:": "Proteja consoantes surdas e sons respiratórios para evitar artefatos como lacrimejamento na música eletrônica. Defina como 0,5 para desativar. Diminua o valor para aumentar a proteção, mas poderá reduzir a precisão da indexação:",
+ "F0 curve file (optional). One pitch per line. Replaces the default F0 and pitch modulation:": "Arquivo de curva F0 (opcional). Um tom por linha. Substitui o F0 padrão e a modulação de pitch:",
+ "Convert": "Converter",
+ "Output information": "Informações de saída",
+ "Export audio (click on the three dots in the lower right corner to download)": "Exportar áudio (clique nos três pontos no canto inferior direito para fazer o download)",
+ "Batch conversion. Enter the folder containing the audio files to be converted or upload multiple audio files. The converted audio will be output in the specified folder (default: 'opt').": "Conversão em lote. Entre na pasta que contém os arquivos de áudio a serem convertidos ou carregue vários arquivos de áudio. O áudio convertido será enviado para a pasta especificada (padrão: 'opt').",
+ "Specify output folder:": "Especifique a pasta de saída:",
+ "Enter the path of the audio folder to be processed (copy it from the address bar of the file manager):": "Digite o caminho da pasta de áudio a ser processada (copie-o da barra de endereço do gerenciador de arquivos):",
+ "You can also input audio files in batches. Choose one of the two options. Priority is given to reading from the folder.": "Você também pode inserir arquivos de áudio em lotes. Escolha uma das duas opções. É dada prioridade à leitura da pasta.",
+ "Export file format": "Exportar formato de arquivo",
+ "UVR5": "UVR5",
+ "Enter the path of the audio folder to be processed:": "Digite o caminho da pasta de áudio a ser processada:",
+ "Model": "Modelo",
+ "Vocal Extraction Aggressive": "Extração Vocal Agressiva",
+ "Specify the output folder for vocals:": "Especifique a pasta de saída para vocais:",
+ "Specify the output folder for accompaniment:": "Especifique a pasta de saída para acompanhamento:",
+ "Train": "Trem",
+ "Enter the model name:": "Digite o nome do modelo:",
+ "Target sample rate:": "Taxa de amostragem desejada:",
+ "Whether the model has pitch guidance (required for singing, optional for speech):": "Se o modelo possui orientação de tom (obrigatório para canto, opcional para fala):",
+ "Version": "Versão",
+ "Number of CPU processes used for pitch extraction and data processing:": "Número de processos de CPU usados para extração de pitch e processamento de dados:",
+ "Enter the path of the training folder:": "Digite o caminho da pasta de treinamento:",
+ "Please specify the model ID:": "Especifique o ID do modelo:",
+ "Auto detect audio path and select from the dropdown:": "Detecte automaticamente o caminho de áudio e selecione no menu suspenso:",
+ "Add audio's name to the path to the audio file to be processed (default is the correct format example) Remove the path to use an audio from the dropdown list:": "Adicione o nome do áudio ao caminho do arquivo de áudio a ser processado (o padrão é o exemplo de formato correto) Remova o caminho para usar um áudio da lista suspensa:",
+ "Advanced Settings": "Configurações avançadas",
+ "Settings": "Configurações",
+ "Status": "Status",
+ "Process data": "Processar dados",
+ "Drag your audio here and hit the refresh button": "Arraste seu áudio aqui e aperte o botão atualizar",
+ "Or record an audio.": "Ou grave um áudio.",
+ "Formant shift inference audio": "Áudio de inferência de mudança de formante",
+ "Used for male to female and vice-versa conversions": "Usado para conversões de homem para mulher e vice-versa",
+ "Please provide the GPU index(es) separated by '-', like 0-1-2 for using GPUs 0, 1, and 2:": "Forneça os índices de GPU separados por '-', como 0-1-2 para usar GPUs 0, 1 e 2:",
+ "GPU Information": "Informações da GPU",
+ "Feature extraction": "Extração de recursos",
+ "Save frequency:": "Salvar frequência:",
+ "Training epochs:": "Épocas de treinamento:",
+ "Batch size per GPU:": "Tamanho do lote por GPU:",
+ "Save only the latest '.ckpt' file to save disk space:": "Salve apenas o arquivo '.ckpt' mais recente para economizar espaço em disco:",
+ "No": "Não",
+ "Save a small final model to the 'weights' folder at each save point:": "Salve um pequeno modelo final na pasta 'pesos' em cada ponto de salvamento:",
+ "Load pre-trained base model G path:": "Carregar caminho G do modelo base pré-treinado:",
+ "Load pre-trained base model D path:": "Carregar caminho D do modelo base pré-treinado:",
+ "Train model": "Modelo de trem",
+ "Train feature index": "Índice de recursos de trem",
+ "One-click training": "Treinamento com um clique",
+ "Processing": "Em processamento",
+ "Model fusion, can be used to test timbre fusion": "Fusão de modelos, pode ser usada para testar a fusão de timbres",
+ "Path to Model A:": "Caminho para o modelo A:",
+ "Path to Model B:": "Caminho para o modelo B:",
+ "Weight for Model A:": "Peso para o Modelo A:",
+ "Whether the model has pitch guidance:": "Se o modelo tem orientação de pitch:",
+ "Model information to be placed:": "Informações do modelo a ser colocado:",
+ "Saved model name (without extension):": "Nome do modelo salvo (sem extensão):",
+ "Model architecture version:": "Versão da arquitetura do modelo:",
+ "Fusion": "Fusão",
+ "Modify model information": "Modificar informações do modelo",
+ "Path to Model:": "Caminho para o modelo:",
+ "Model information to be modified:": "Informações do modelo a serem modificadas:",
+ "Save file name:": "Salvar nome do arquivo:",
+ "Modify": "Modificar",
+ "View model information": "Ver informações do modelo",
+ "View": "Visualizar",
+ "Model extraction (enter the path of the large file model under the 'logs' folder). This is useful if you want to stop training halfway and manually extract and save a small model file, or if you want to test an intermediate model:": "Extração de modelo (insira o caminho do modelo de arquivo grande na pasta 'logs'). Isso é útil se você quiser interromper o treinamento no meio e extrair e salvar manualmente um arquivo de modelo pequeno, ou se quiser testar um modelo intermediário:",
+ "Save name:": "Salvar nome:",
+ "Whether the model has pitch guidance (1: yes, 0: no):": "Se o modelo possui orientação de pitch (1: sim, 0: não):",
+ "Extract": "Extrair",
+ "Export Onnx": "Exportar Onnx",
+ "RVC Model Path:": "Caminho do modelo RVC:",
+ "Onnx Export Path:": "Caminho de exportação Onnx:",
+ "MoeVS Model": "Modelo MoeVS",
+ "Export Onnx Model": "Exportar modelo Onnx",
+ "Load model": "Modelo de carga",
+ "Hubert Model": "Modelo Hubert",
+ "Select the .pth file": "Selecione o arquivo .pth",
+ "Select the .index file": "Selecione o arquivo .index",
+ "Select the .npy file": "Selecione o arquivo .npy",
+ "Input device": "Dispositivo de entrada",
+ "Output device": "Dispositivo de saída",
+ "Audio device (please use the same type of driver)": "Dispositivo de áudio (use o mesmo tipo de driver)",
+ "Response threshold": "Limite de resposta",
+ "Pitch settings": "Configurações de tom",
+ "Whether to use note names instead of their hertz value. E.G. [C5, D6] instead of [523.25, 1174.66]Hz": "Se deve usar nomes de notas em vez de seu valor em hertz. POR EXEMPLO. [C5, D6] em vez de [523,25, 1174,66] Hz",
+ "Index Rate": "Taxa de índice",
+ "General settings": "Configurações Gerais",
+ "Sample length": "Comprimento da amostra",
+ "Fade length": "Comprimento do esmaecimento",
+ "Extra inference time": "Tempo extra de inferência",
+ "Input noise reduction": "Redução de ruído de entrada",
+ "Output noise reduction": "Redução de ruído de saída",
+ "Performance settings": "Configurações de desempenho",
+ "Start audio conversion": "Iniciar conversão de áudio",
+ "Stop audio conversion": "Pare a conversão de áudio",
+ "Inference time (ms):": "Tempo de inferência (ms):",
+ "Select the pth file": "Selecione o arquivo pth",
+ "Select the index file": "Selecione o arquivo de índice",
+ "The hubert model path must not contain Chinese characters": "O caminho do modelo Hubert não deve conter caracteres chineses",
+ "The pth file path must not contain Chinese characters.": "O caminho do arquivo pth não deve conter caracteres chineses.",
+ "The index file path must not contain Chinese characters.": "O caminho do arquivo de índice não deve conter caracteres chineses.",
+ "Step algorithm": "Algoritmo de etapas",
+ "Number of epoch processes": "Número de processos de época",
+ "Lowest points export": "Exportação de pontos mais baixos",
+ "How many lowest points to save": "Quantos pontos mais baixos salvar",
+ "Export lowest points of a model": "Exportar os pontos mais baixos de um modelo",
+ "Output models": "Modelos de saída",
+ "Stats of selected models": "Estatísticas dos modelos selecionados",
+ "Custom f0 [Root pitch] File": "Arquivo f0 [inclinação da raiz] personalizado",
+ "Min pitch": "Passo mínimo",
+ "Specify minimal pitch for inference [HZ]": "Especifique o tom mínimo para inferência [HZ]",
+ "Specify minimal pitch for inference [NOTE][OCTAVE]": "Especifique o tom mínimo para inferência [NOTA][OCTAVE]",
+ "Max pitch": "Tom máximo",
+ "Specify max pitch for inference [HZ]": "Especifique o tom máximo para inferência [HZ]",
+ "Specify max pitch for inference [NOTE][OCTAVE]": "Especifique o tom máximo para inferência [NOTE][OCTAVE]",
+ "Browse presets for formanting": "Procure predefinições para formatação",
+ "Presets are located in formantshiftcfg/ folder": "As predefinições estão localizadas na pasta formantshiftcfg/",
+ "Default value is 1.0": "O valor padrão é 1,0",
+ "Quefrency for formant shifting": "Quefrency para mudança de formantes",
+ "Timbre for formant shifting": "Timbre para mudança de formantes",
+ "Apply": "Aplicar",
+ "Single": "Solteiro",
+ "Batch": "Lote",
+ "Separate YouTube tracks": "Faixas separadas do YouTube",
+ "Download audio from a YouTube video and automatically separate the vocal and instrumental tracks": "Baixe o áudio de um vídeo do YouTube e separe automaticamente as faixas vocais e instrumentais",
+ "Extra": "Extra",
+ "Merge": "Mesclar",
+ "Merge your generated audios with the instrumental": "Mescle seus áudios gerados com o instrumental",
+ "Choose your instrumental": "Escolha seu instrumental",
+ "Choose the generated audio": "Escolha o áudio gerado",
+ "Combine": "Combinar",
+ "Download and Separate": "Baixe e separe",
+ "Enter the youtube link": "Digite o link do youtube",
+ "This section contains some extra utilities that often may be in experimental phases": "Esta seção contém alguns utilitários extras que muitas vezes podem estar em fases experimentais",
+ "Merge Audios": "Mesclar áudios",
+ "Audio files have been moved to the 'audios' folder.": "Os arquivos de áudio foram movidos para a pasta ‘audios’.",
+ "Downloading audio from the video...": "Baixando o áudio do vídeo...",
+ "Audio downloaded!": "Baixar áudio!",
+ "An error occurred:": "Um erro ocorreu:",
+ "Separating audio...": "Separando áudio...",
+ "File moved successfully.": "Arquivo movido com sucesso.",
+ "Finished!": "Finalizado!",
+ "The source file does not exist.": "O arquivo de origem não existe.",
+ "Error moving the file:": "Erro ao mover o arquivo:",
+ "Downloading {name} from drive": "Baixando {name} da unidade",
+ "The attempt to download using Drive didn't work": "A tentativa de download usando o Drive não funcionou",
+ "Error downloading the file: {str(e)}": "Erro ao baixar o arquivo: {str(e)}",
+ "Downloading {name} from mega": "Baixando {nome} do mega",
+ "Downloading {name} from basic url": "Baixando {nome} do URL básico",
+ "Download Audio": "Baixar áudio",
+ "Download audios of any format for use in inference (recommended for mobile users)": "Baixe áudios de qualquer formato para uso em inferência (recomendado para usuários móveis)",
+ "Any ConnectionResetErrors post-conversion are irrelevant and purely visual; they can be ignored.\n": "Qualquer ConnectionResetErrors pós-conversão é irrelevante e puramente visual; eles podem ser ignorados.",
+ "Processed audio saved at: ": "Áudio processado salvo em:",
+ "Conversion complete!": "Conversão concluída!",
+ "Reverb": "Ressonância",
+ "Compressor": "Compressor",
+ "Noise Gate": "Portão de Ruído",
+ "Volume": "Volume",
+ "Drag the audio here and click the Refresh button": "Arraste o áudio aqui e clique no botão Atualizar",
+ "Select the generated audio": "Selecione o áudio gerado"
+}
\ No newline at end of file
diff --git a/i18n/ru_RU.json b/i18n/ru_RU.json
new file mode 100644
index 000000000..5fd65e520
--- /dev/null
+++ b/i18n/ru_RU.json
@@ -0,0 +1,239 @@
+{
+ "Unfortunately, there is no compatible GPU available to support your training.": "К сожалению, для вашего обучения не существует совместимого графического процессора.",
+ "Yes": "Да",
+ "Select your dataset.": "Выберите свой набор данных.",
+ "Update list.": "Обновить список.",
+ "Download Model": "Скачать модель",
+ "Download Backup": "Скачать резервную копию",
+ "Download Dataset": "Скачать набор данных",
+ "Download": "Скачать",
+ "Url:": "URL:",
+ "Build the index before saving.": "Создайте индекс перед сохранением.",
+ "Save your model once the training ends.": "Сохраните свою модель после окончания обучения.",
+ "Save type": "Тип сохранения",
+ "Save model": "Сохранить модель",
+ "Choose the method": "Выберите метод",
+ "Save all": "Сохранить все",
+ "Save D and G": "Спасите D и G",
+ "Save voice": "Сохранить голос",
+ "Downloading the file: ": "Скачиваем файл:",
+ "Stop training": "Прекратить тренировку",
+ "Too many users have recently viewed or downloaded this file": "Слишком много пользователей недавно просмотрели или скачали этот файл.",
+ "Cannot get file from this private link": "Невозможно получить файл по этой частной ссылке",
+ "Full download": "Полная загрузка",
+ "An error occurred downloading": "Произошла ошибка при загрузке",
+ "Model saved successfully": "Модель успешно сохранена",
+ "Saving the model...": "Сохраняем модель...",
+ "Saved without index...": "Сохранено без индекса...",
+ "model_name": "название модели",
+ "Saved without inference model...": "Сохранено без модели вывода...",
+ "An error occurred saving the model": "Произошла ошибка при сохранении модели.",
+ "The model you want to save does not exist, be sure to enter the correct name.": "Модель, которую вы хотите сохранить, не существует. Обязательно введите правильное имя.",
+ "The file could not be downloaded.": "Не удалось загрузить файл.",
+ "Unzip error.": "Ошибка разархивирования.",
+ "Path to your added.index file (if it didn't automatically find it)": "Путь к файлу add.index (если он не был найден автоматически)",
+ "It has been downloaded successfully.": "Он был успешно загружен.",
+ "Proceeding with the extraction...": "Приступаем к извлечению...",
+ "The Backup has been uploaded successfully.": "Резервная копия успешно загружена.",
+ "The Dataset has been loaded successfully.": "Набор данных успешно загружен.",
+ "The Model has been loaded successfully.": "Модель успешно загружена.",
+ "It is used to download your inference models.": "Он используется для загрузки ваших моделей вывода.",
+ "It is used to download your training backups.": "Он используется для загрузки резервных копий тренировок.",
+ "Download the dataset with the audios in a compatible format (.wav/.flac) to train your model.": "Загрузите набор данных со звуками в совместимом формате (.wav/.flac), чтобы обучить свою модель.",
+ "No relevant file was found to upload.": "Не найден соответствующий файл для загрузки.",
+ "The model works for inference, and has the .index file.": "Модель работает для вывода и имеет файл .index.",
+ "The model works for inference, but it doesn't have the .index file.": "Модель работает для вывода, но у нее нет файла .index.",
+ "This may take a few minutes, please wait...": "Это может занять несколько минут, пожалуйста, подождите...",
+ "Resources": "Ресурсы",
+ "Step 1: Processing data": "Шаг 1: Обработка данных",
+ "Step 2a: Skipping pitch extraction": "Шаг 2а: Пропуск извлечения высоты тона",
+ "Step 2b: Extracting features": "Шаг 2б: Извлечение объектов",
+ "Step 3a: Model training started": "Шаг 3а: Начало обучения модели",
+ "Step 4: Export lowest points on a graph of the model": "Шаг 4. Экспортируйте самые низкие точки на графике модели.",
+ "Training is done, check train.log": "Обучение завершено, проверьте train.log",
+ "All processes have been completed!": "Все процессы завершены!",
+ "Model Inference": "Вывод модели",
+ "Inferencing voice:": "Выводящий голос:",
+ "Model_Name": "Название модели",
+ "Dataset_Name": "Имя_набора данных",
+ "Whether the model has pitch guidance.": "Имеет ли модель наведение по тангажу.",
+ "Whether to save only the latest .ckpt file to save hard drive space": "Сохранять ли только последний файл .ckpt для экономии места на жестком диске",
+ "Cache all training sets to GPU memory. Caching small datasets (less than 10 minutes) can speed up training": "Кэшируйте все обучающие наборы в память графического процессора. Кэширование небольших наборов данных (менее 10 минут) может ускорить обучение.",
+ "Save a small final model to the 'weights' folder at each save point": "Сохраняйте небольшую окончательную модель в папке «веса» в каждой точке сохранения.",
+ "Refresh voice list, index path and audio files": "Обновить список голосов, индексный путь и аудиофайлы.",
+ "Unload voice to save GPU memory:": "Выгрузите голос, чтобы сэкономить память графического процессора:",
+ "Select Speaker/Singer ID:": "Выберите идентификатор докладчика/певца:",
+ "Recommended +12 key for male to female conversion, and -12 key for female to male conversion. If the sound range goes too far and the voice is distorted, you can also adjust it to the appropriate range by yourself.": "Рекомендуемый ключ +12 для преобразования мужчины в женщину и ключ -12 для преобразования женщины в мужчину. Если звуковой диапазон заходит слишком далеко и голос искажается, вы также можете самостоятельно настроить его на соответствующий диапазон.",
+ "Transpose (integer, number Fof semitones, raise by an octave: 12, lower by an octave: -12):": "Транспонирование (целое число, количество полутонов, повышение на октаву: 12, понижение на октаву: -12):",
+ "Enter the path of the audio file to be processed (default is the correct format example):": "Введите путь к аудиофайлу, который необходимо обработать (по умолчанию — правильный пример формата):",
+ "Select the pitch extraction algorithm:": "Выберите алгоритм извлечения высоты звука:",
+ "Mangio-Crepe Hop Length (Only applies to mangio-crepe): Hop length refers to the time it takes for the speaker to jump to a dramatic pitch. Lower hop lengths take more time to infer but are more pitch accurate.": "Длина прыжка с манжио-крепом (применяется только к крепу с манжио): длина прыжка означает время, необходимое говорящему, чтобы перейти на драматический тон. Для определения более низкой длины скачка требуется больше времени, но они более точны по высоте.",
+ "Feature search dataset file path": "Путь к файлу набора данных поиска объектов",
+ "If >=3: apply median filtering to the harvested pitch results. The value represents the filter radius and can reduce breathiness.": "Если >=3: применить медианную фильтрацию к собранным результатам высоты тона. Значение представляет собой радиус фильтра и может уменьшить одышку.",
+ "Path to the feature index file. Leave blank to use the selected result from the dropdown:": "Путь к индексному файлу объекта. Оставьте поле пустым, чтобы использовать выбранный результат из раскрывающегося списка:",
+ "Auto-detect index path and select from the dropdown": "Автоматическое определение пути к индексу и выбор из раскрывающегося списка.",
+ "Path to feature file:": "Путь к файлу объекта:",
+ "Search feature ratio:": "Соотношение функций поиска:",
+ "Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling:": "Повторно дискретизируйте выходной звук при постобработке до окончательной частоты дискретизации. Установите значение 0, чтобы не выполнять повторную выборку:",
+ "Use the volume envelope of the input to replace or mix with the volume envelope of the output. The closer the ratio is to 1, the more the output envelope is used:": "Используйте огибающую громкости входа для замены или смешивания с огибающей громкости выхода. Чем ближе соотношение к 1, тем больше используется выходная огибающая:",
+ "Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy:": "Защищайте глухие согласные и звуки дыхания, чтобы предотвратить появление таких артефактов, как разрывы в электронной музыке. Установите значение 0,5, чтобы отключить. Уменьшите значение, чтобы повысить защиту, но это может снизить точность индексации:",
+ "F0 curve file (optional). One pitch per line. Replaces the default F0 and pitch modulation:": "Файл кривой F0 (опционально). Один шаг на строку. Заменяет стандартную F0 и модуляцию высоты тона:",
+ "Convert": "Конвертировать",
+ "Output information": "Выходная информация",
+ "Export audio (click on the three dots in the lower right corner to download)": "Экспортируйте аудио (нажмите на три точки в правом нижнем углу, чтобы загрузить)",
+ "Batch conversion. Enter the folder containing the audio files to be converted or upload multiple audio files. The converted audio will be output in the specified folder (default: 'opt').": "Пакетное преобразование. Введите папку, содержащую аудиофайлы, которые нужно преобразовать, или загрузите несколько аудиофайлов. Конвертированный звук будет выводиться в указанную папку (по умолчанию: «opt»).",
+ "Specify output folder:": "Укажите выходную папку:",
+ "Enter the path of the audio folder to be processed (copy it from the address bar of the file manager):": "Введите путь к папке аудио, подлежащей обработке (скопируйте его из адресной строки файлового менеджера):",
+ "You can also input audio files in batches. Choose one of the two options. Priority is given to reading from the folder.": "Вы также можете вводить аудиофайлы в пакетном режиме. Выберите один из двух вариантов. Приоритет отдается чтению из папки.",
+ "Export file format": "Формат файла экспорта",
+ "UVR5": "УВР5",
+ "Enter the path of the audio folder to be processed:": "Введите путь к аудиопапке, которую необходимо обработать:",
+ "Model": "Модель",
+ "Vocal Extraction Aggressive": "Извлечение вокала агрессивное",
+ "Specify the output folder for vocals:": "Укажите выходную папку для вокала:",
+ "Specify the output folder for accompaniment:": "Укажите выходную папку для аккомпанемента:",
+ "Train": "Тренироваться",
+ "Enter the model name:": "Введите название модели:",
+ "Target sample rate:": "Целевая частота дискретизации:",
+ "Whether the model has pitch guidance (required for singing, optional for speech):": "Имеет ли модель управление высотой тона (обязательно для пения, необязательно для речи):",
+ "Version": "Версия",
+ "Number of CPU processes used for pitch extraction and data processing:": "Количество процессов ЦП, используемых для извлечения высоты звука и обработки данных:",
+ "Enter the path of the training folder:": "Введите путь к папке обучения:",
+ "Please specify the model ID:": "Пожалуйста, укажите идентификатор модели:",
+ "Auto detect audio path and select from the dropdown:": "Автоматическое определение пути аудио и выбор из раскрывающегося списка:",
+ "Add audio's name to the path to the audio file to be processed (default is the correct format example) Remove the path to use an audio from the dropdown list:": "Добавьте имя аудио к пути к обрабатываемому аудиофайлу (по умолчанию используется правильный пример формата). Удалите путь для использования аудио из раскрывающегося списка:",
+ "Advanced Settings": "Расширенные настройки",
+ "Settings": "Настройки",
+ "Status": "Положение дел",
+ "Process data": "Данные обработки",
+ "Drag your audio here and hit the refresh button": "Перетащите сюда свой аудиофайл и нажмите кнопку «Обновить».",
+ "Or record an audio.": "Или записать звук.",
+ "Formant shift inference audio": "Звук вывода формантного сдвига",
+ "Used for male to female and vice-versa conversions": "Используется для преобразования мужского и женского пола и наоборот.",
+ "Please provide the GPU index(es) separated by '-', like 0-1-2 for using GPUs 0, 1, and 2:": "Укажите индексы графического процессора, разделенные знаком «-», например 0-1-2, для использования графических процессоров 0, 1 и 2:",
+ "GPU Information": "Информация о графическом процессоре",
+ "Feature extraction": "Извлечение признаков",
+ "Save frequency:": "Частота сохранения:",
+ "Training epochs:": "Эпохи обучения:",
+ "Batch size per GPU:": "Размер пакета на графический процессор:",
+ "Save only the latest '.ckpt' file to save disk space:": "Сохраните только последний файл «.ckpt», чтобы сэкономить место на диске:",
+ "No": "Нет",
+ "Save a small final model to the 'weights' folder at each save point:": "Сохраните небольшую окончательную модель в папке «веса» в каждой точке сохранения:",
+ "Load pre-trained base model G path:": "Загрузите предварительно обученную базовую модель G-путь:",
+ "Load pre-trained base model D path:": "Загрузите путь D предварительно обученной базовой модели:",
+ "Train model": "Модель поезда",
+ "Train feature index": "Индекс характеристик поезда",
+ "One-click training": "Обучение в один клик",
+ "Processing": "Обработка",
+ "Model fusion, can be used to test timbre fusion": "Модель Fusion, можно использовать для проверки синтеза тембров.",
+ "Path to Model A:": "Путь к модели А:",
+ "Path to Model B:": "Путь к модели Б:",
+ "Weight for Model A:": "Вес модели А:",
+ "Whether the model has pitch guidance:": "Имеет ли модель наведение по тангажу:",
+ "Model information to be placed:": "Информация о модели, которую необходимо разместить:",
+ "Saved model name (without extension):": "Сохраненное название модели (без расширения):",
+ "Model architecture version:": "Версия архитектуры модели:",
+ "Fusion": "Слияние",
+ "Modify model information": "Изменить информацию о модели",
+ "Path to Model:": "Путь к модели:",
+ "Model information to be modified:": "Информация о модели, которую необходимо изменить:",
+ "Save file name:": "Имя файла сохранения:",
+ "Modify": "Изменить",
+ "View model information": "Просмотр информации о модели",
+ "View": "Вид",
+ "Model extraction (enter the path of the large file model under the 'logs' folder). This is useful if you want to stop training halfway and manually extract and save a small model file, or if you want to test an intermediate model:": "Извлечение модели (введите путь к модели большого файла в папке «logs»). Это полезно, если вы хотите остановить обучение на полпути и вручную извлечь и сохранить небольшой файл модели или если вы хотите протестировать промежуточную модель:",
+ "Save name:": "Сохранить имя:",
+ "Whether the model has pitch guidance (1: yes, 0: no):": "Имеет ли модель управление по тангажу (1: да, 0: нет):",
+ "Extract": "Извлекать",
+ "Export Onnx": "Экспортировать Onnx",
+ "RVC Model Path:": "Путь модели RVC:",
+ "Onnx Export Path:": "Путь экспорта Onnx:",
+ "MoeVS Model": "Модель МоэВС",
+ "Export Onnx Model": "Экспорт модели Onnx",
+ "Load model": "Загрузить модель",
+ "Hubert Model": "Хьюберт Модель",
+ "Select the .pth file": "Выберите файл .pth.",
+ "Select the .index file": "Выберите файл .index.",
+ "Select the .npy file": "Выберите файл .npy.",
+ "Input device": "Устройство ввода",
+ "Output device": "Устройство вывода",
+ "Audio device (please use the same type of driver)": "Аудиоустройство (пожалуйста, используйте драйвер того же типа)",
+ "Response threshold": "Порог ответа",
+ "Pitch settings": "Настройки высоты тона",
+ "Whether to use note names instead of their hertz value. E.G. [C5, D6] instead of [523.25, 1174.66]Hz": "Использовать ли названия нот вместо их значения в герцах. НАПРИМЕР. [C5, D6] вместо [523,25, 1174,66] Гц",
+ "Index Rate": "Индексная ставка",
+ "General settings": "Общие настройки",
+ "Sample length": "Длина образца",
+ "Fade length": "Длина затухания",
+ "Extra inference time": "Дополнительное время вывода",
+ "Input noise reduction": "Снижение входного шума",
+ "Output noise reduction": "Снижение выходного шума",
+ "Performance settings": "Настройки производительности",
+ "Start audio conversion": "Начать преобразование аудио",
+ "Stop audio conversion": "Остановить преобразование аудио",
+ "Inference time (ms):": "Время вывода (мс):",
+ "Select the pth file": "Выберите pth-файл",
+ "Select the index file": "Выберите индексный файл",
+ "The hubert model path must not contain Chinese characters": "Путь модели Хьюберта не должен содержать китайские символы.",
+ "The pth file path must not contain Chinese characters.": "Путь к файлу pth не должен содержать китайских символов.",
+ "The index file path must not contain Chinese characters.": "Путь к индексному файлу не должен содержать китайских символов.",
+ "Step algorithm": "Пошаговый алгоритм",
+ "Number of epoch processes": "Количество эпохальных процессов",
+ "Lowest points export": "Экспорт наименьших баллов",
+ "How many lowest points to save": "Сколько самых низких баллов нужно сохранить",
+ "Export lowest points of a model": "Экспортировать самые низкие точки модели",
+ "Output models": "Выходные модели",
+ "Stats of selected models": "Статистика выбранных моделей",
+ "Custom f0 [Root pitch] File": "Пользовательский файл f0 [Шаг основного тона]",
+ "Min pitch": "Минимальный шаг",
+ "Specify minimal pitch for inference [HZ]": "Укажите минимальный шаг для вывода [Гц]",
+ "Specify minimal pitch for inference [NOTE][OCTAVE]": "Укажите минимальный шаг для вывода [NOTE][OCTAVE]",
+ "Max pitch": "Максимальный шаг",
+ "Specify max pitch for inference [HZ]": "Укажите максимальную высоту звука для вывода [Гц]",
+ "Specify max pitch for inference [NOTE][OCTAVE]": "Укажите максимальный шаг для вывода [ПРИМЕЧАНИЕ][ОКТАВА]",
+ "Browse presets for formanting": "Просмотр пресетов для форматирования",
+ "Presets are located in formantshiftcfg/ folder": "Пресеты находятся в папке formantshiftcfg/.",
+ "Default value is 1.0": "Значение по умолчанию — 1,0.",
+ "Quefrency for formant shifting": "Quefrency для сдвига форманты",
+ "Timbre for formant shifting": "Тембр для смещения форманты",
+ "Apply": "Применять",
+ "Single": "Одинокий",
+ "Batch": "Партия",
+ "Separate YouTube tracks": "Отдельные треки YouTube",
+ "Download audio from a YouTube video and automatically separate the vocal and instrumental tracks": "Загрузите аудио из видео YouTube и автоматически разделите вокальные и инструментальные дорожки.",
+ "Extra": "Дополнительный",
+ "Merge": "Объединить",
+ "Merge your generated audios with the instrumental": "Объедините сгенерированные аудио с инструментальной композицией.",
+ "Choose your instrumental": "Выберите свой инструментал",
+ "Choose the generated audio": "Выберите сгенерированный звук",
+ "Combine": "Объединить",
+ "Download and Separate": "Скачать и отделить",
+ "Enter the youtube link": "Введите ссылку на ютуб",
+ "This section contains some extra utilities that often may be in experimental phases": "Этот раздел содержит некоторые дополнительные утилиты, которые часто могут находиться на экспериментальной стадии.",
+ "Merge Audios": "Объединить аудио",
+ "Audio files have been moved to the 'audios' folder.": "Аудиофайлы перемещены в папку «audios».",
+ "Downloading audio from the video...": "Загрузка звука из видео...",
+ "Audio downloaded!": "Аудио скачать!",
+ "An error occurred:": "Произошла ошибка:",
+ "Separating audio...": "Разделение звука...",
+ "File moved successfully.": "Файл успешно перемещен.",
+ "Finished!": "Законченный!",
+ "The source file does not exist.": "Исходный файл не существует.",
+ "Error moving the file:": "Ошибка перемещения файла:",
+ "Downloading {name} from drive": "Загрузка {name} с диска",
+ "The attempt to download using Drive didn't work": "Попытка скачать с Диска не удалась.",
+ "Error downloading the file: {str(e)}": "Ошибка загрузки файла: {str(e)}",
+ "Downloading {name} from mega": "Скачиваю {name} из мега",
+ "Downloading {name} from basic url": "Загрузка {name} с основного URL",
+ "Download Audio": "Скачать аудио",
+ "Download audios of any format for use in inference (recommended for mobile users)": "Загрузите аудио любого формата для использования в умозаключениях (рекомендуется для мобильных пользователей)",
+ "Any ConnectionResetErrors post-conversion are irrelevant and purely visual; they can be ignored.\n": "Любые пост-преобразования ConnectionResetErrors не имеют значения и являются чисто визуальными; их можно игнорировать.",
+ "Processed audio saved at: ": "Обработанный звук сохранен по адресу:",
+ "Conversion complete!": "Преобразование завершено!",
+ "Reverb": "Реверберация",
+ "Compressor": "Компрессор",
+ "Noise Gate": "Шумовые ворота",
+ "Volume": "Объем",
+ "Drag the audio here and click the Refresh button": "Перетащите аудио сюда и нажмите кнопку «Обновить».",
+ "Select the generated audio": "Выберите сгенерированный звук"
+}
\ No newline at end of file
diff --git a/i18n/ur_UR.json b/i18n/ur_UR.json
new file mode 100644
index 000000000..5d6c358d4
--- /dev/null
+++ b/i18n/ur_UR.json
@@ -0,0 +1,239 @@
+{
+ "Unfortunately, there is no compatible GPU available to support your training.": "بدقسمتی سے، آپ کی تربیت کو سپورٹ کرنے کے لیے کوئی ہم آہنگ GPU دستیاب نہیں ہے۔",
+ "Yes": "جی ہاں",
+ "Select your dataset.": "اپنا ڈیٹا سیٹ منتخب کریں۔",
+ "Update list.": "فہرست کو اپ ڈیٹ کریں۔",
+ "Download Model": "ماڈل ڈاؤن لوڈ کریں۔",
+ "Download Backup": "بیک اپ ڈاؤن لوڈ کریں۔",
+ "Download Dataset": "ڈیٹا سیٹ ڈاؤن لوڈ کریں۔",
+ "Download": "ڈاؤن لوڈ کریں",
+ "Url:": "یو آر ایل:",
+ "Build the index before saving.": "محفوظ کرنے سے پہلے انڈیکس بنائیں۔",
+ "Save your model once the training ends.": "ٹریننگ ختم ہونے کے بعد اپنے ماڈل کو محفوظ کریں۔",
+ "Save type": "قسم محفوظ کریں۔",
+ "Save model": "ماڈل کو محفوظ کریں۔",
+ "Choose the method": "طریقہ منتخب کریں۔",
+ "Save all": "محفوظ کریں",
+ "Save D and G": "ڈی اور جی کو محفوظ کریں۔",
+ "Save voice": "آواز محفوظ کریں۔",
+ "Downloading the file: ": "فائل ڈاؤن لوڈ کرنا:",
+ "Stop training": "تربیت بند کرو",
+ "Too many users have recently viewed or downloaded this file": "بہت سارے صارفین نے حال ہی میں اس فائل کو دیکھا یا ڈاؤن لوڈ کیا ہے۔",
+ "Cannot get file from this private link": "اس نجی لنک سے فائل حاصل نہیں کی جا سکتی",
+ "Full download": "مکمل ڈاؤن لوڈ",
+ "An error occurred downloading": "ڈاؤن لوڈ کرنے میں ایک خرابی پیش آگئی",
+ "Model saved successfully": "ماڈل کامیابی سے محفوظ ہو گیا۔",
+ "Saving the model...": "ماڈل محفوظ ہو رہا ہے...",
+ "Saved without index...": "انڈیکس کے بغیر محفوظ کیا گیا...",
+ "model_name": "ماڈل_نام",
+ "Saved without inference model...": "بغیر کسی اندازہ کے ماڈل کے محفوظ کیا گیا...",
+ "An error occurred saving the model": "ماڈل کو محفوظ کرنے میں ایک خرابی پیش آگئی",
+ "The model you want to save does not exist, be sure to enter the correct name.": "آپ جس ماڈل کو محفوظ کرنا چاہتے ہیں وہ موجود نہیں ہے، درست نام ضرور درج کریں۔",
+ "The file could not be downloaded.": "فائل ڈاؤن لوڈ نہیں ہو سکی۔",
+ "Unzip error.": "ان زپ کی خرابی۔",
+ "Path to your added.index file (if it didn't automatically find it)": "آپ کی add.index فائل کا راستہ (اگر یہ خود بخود اسے نہیں مل پاتی ہے)",
+ "It has been downloaded successfully.": "اسے کامیابی کے ساتھ ڈاؤن لوڈ کر لیا گیا ہے۔",
+ "Proceeding with the extraction...": "نکالنے کے ساتھ آگے بڑھ رہا ہے...",
+ "The Backup has been uploaded successfully.": "بیک اپ کامیابی کے ساتھ اپ لوڈ ہو گیا ہے۔",
+ "The Dataset has been loaded successfully.": "ڈیٹا سیٹ کامیابی کے ساتھ لوڈ ہو گیا ہے۔",
+ "The Model has been loaded successfully.": "ماڈل کامیابی کے ساتھ لوڈ ہو گیا ہے۔",
+ "It is used to download your inference models.": "یہ آپ کے انفرنس ماڈلز کو ڈاؤن لوڈ کرنے کے لیے استعمال ہوتا ہے۔",
+ "It is used to download your training backups.": "یہ آپ کے تربیتی بیک اپ کو ڈاؤن لوڈ کرنے کے لیے استعمال ہوتا ہے۔",
+ "Download the dataset with the audios in a compatible format (.wav/.flac) to train your model.": "اپنے ماڈل کو تربیت دینے کے لیے ڈیٹاسیٹ کو آڈیوز کے ساتھ مطابقت پذیر فارمیٹ (.wav/.flac) میں ڈاؤن لوڈ کریں۔",
+ "No relevant file was found to upload.": "اپ لوڈ کرنے کے لیے کوئی متعلقہ فائل نہیں ملی۔",
+ "The model works for inference, and has the .index file.": "ماڈل تخمینہ کے لیے کام کرتا ہے، اور اس میں .index فائل ہے۔",
+ "The model works for inference, but it doesn't have the .index file.": "ماڈل تخمینہ کے لیے کام کرتا ہے، لیکن اس میں .index فائل نہیں ہے۔",
+ "This may take a few minutes, please wait...": "اس میں کچھ منٹ لگ سکتے ہیں، براہ کرم انتظار کریں...",
+ "Resources": "حوالہ جات",
+ "Step 1: Processing data": "مرحلہ 1: ڈیٹا پر کارروائی کرنا",
+ "Step 2a: Skipping pitch extraction": "مرحلہ 2a: پچ نکالنا چھوڑنا",
+ "Step 2b: Extracting features": "مرحلہ 2b: خصوصیات کو نکالنا",
+ "Step 3a: Model training started": "مرحلہ 3a: ماڈل ٹریننگ شروع ہوئی۔",
+ "Step 4: Export lowest points on a graph of the model": "مرحلہ 4: ماڈل کے گراف پر سب سے کم پوائنٹس برآمد کریں۔",
+ "Training is done, check train.log": "ٹریننگ ہو چکی ہے، ٹرین ڈاٹ لاگ چیک کریں۔",
+ "All processes have been completed!": "تمام عمل مکمل ہو چکے ہیں!",
+ "Model Inference": "ماڈل کا اندازہ",
+ "Inferencing voice:": "اندازہ لگانے والی آواز:",
+ "Model_Name": "ماڈل_نام",
+ "Dataset_Name": "ڈیٹا سیٹ_نام",
+ "Whether the model has pitch guidance.": "آیا ماڈل میں پچ گائیڈنس ہے۔",
+ "Whether to save only the latest .ckpt file to save hard drive space": "آیا ہارڈ ڈرائیو کی جگہ بچانے کے لیے صرف تازہ ترین .ckpt فائل کو محفوظ کرنا ہے۔",
+ "Cache all training sets to GPU memory. Caching small datasets (less than 10 minutes) can speed up training": "تمام تربیتی سیٹوں کو GPU میموری میں کیش کریں۔ چھوٹے ڈیٹا سیٹس (10 منٹ سے کم) کیشنگ ٹریننگ کو تیز کر سکتی ہے۔",
+ "Save a small final model to the 'weights' folder at each save point": "ہر سیو پوائنٹ پر ایک چھوٹا فائنل ماڈل 'وزن' فولڈر میں محفوظ کریں۔",
+ "Refresh voice list, index path and audio files": "آواز کی فہرست، انڈیکس پاتھ اور آڈیو فائلوں کو ریفریش کریں۔",
+ "Unload voice to save GPU memory:": "GPU میموری کو بچانے کے لیے آواز اتاریں:",
+ "Select Speaker/Singer ID:": "اسپیکر/گلوکار کی شناخت منتخب کریں:",
+ "Recommended +12 key for male to female conversion, and -12 key for female to male conversion. If the sound range goes too far and the voice is distorted, you can also adjust it to the appropriate range by yourself.": "مرد سے خاتون کی تبدیلی کے لیے تجویز کردہ +12 کلید، اور عورت سے مرد کی تبدیلی کے لیے -12 کلید۔ اگر آواز کی حد بہت دور جاتی ہے اور آواز بگڑ جاتی ہے، تو آپ اسے خود بھی مناسب رینج میں ایڈجسٹ کر سکتے ہیں۔",
+ "Transpose (integer, number Fof semitones, raise by an octave: 12, lower by an octave: -12):": "ٹرانسپوز (انٹیجر، سیمیٹونز کی تعداد، ایک آکٹیو سے بڑھائیں: 12، ایک آکٹیو سے کم: -12):",
+ "Enter the path of the audio file to be processed (default is the correct format example):": "کارروائی کی جانے والی آڈیو فائل کا راستہ درج کریں (پہلے سے طے شدہ فارمیٹ کی صحیح مثال ہے):",
+ "Select the pitch extraction algorithm:": "پچ نکالنے کا الگورتھم منتخب کریں:",
+ "Mangio-Crepe Hop Length (Only applies to mangio-crepe): Hop length refers to the time it takes for the speaker to jump to a dramatic pitch. Lower hop lengths take more time to infer but are more pitch accurate.": "Mangio-Crepe Hop Length (صرف mangio-crepe پر لاگو ہوتا ہے): ہاپ کی لمبائی سے مراد وہ وقت ہوتا ہے جو اسپیکر کو ڈرامائی انداز میں چھلانگ لگانے میں لگتا ہے۔ نچلی ہاپ کی لمبائی کا اندازہ لگانے میں زیادہ وقت لگتا ہے لیکن پچ زیادہ درست ہوتی ہے۔",
+ "Feature search dataset file path": "فیچر سرچ ڈیٹاسیٹ فائل پاتھ",
+ "If >=3: apply median filtering to the harvested pitch results. The value represents the filter radius and can reduce breathiness.": "اگر >=3: کٹائی ہوئی پچ کے نتائج پر میڈین فلٹرنگ لگائیں۔ قدر فلٹر کے رداس کی نمائندگی کرتی ہے اور سانس لینے میں کمی کر سکتی ہے۔",
+ "Path to the feature index file. Leave blank to use the selected result from the dropdown:": "فیچر انڈیکس فائل کا راستہ۔ ڈراپ ڈاؤن سے منتخب کردہ نتیجہ کو استعمال کرنے کے لیے خالی چھوڑ دیں:",
+ "Auto-detect index path and select from the dropdown": "انڈیکس پاتھ کا خود بخود پتہ لگائیں اور ڈراپ ڈاؤن سے منتخب کریں۔",
+ "Path to feature file:": "فیچر فائل کا راستہ:",
+ "Search feature ratio:": "تلاش کی خصوصیت کا تناسب:",
+ "Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling:": "پوسٹ پروسیسنگ میں آؤٹ پٹ آڈیو کو حتمی نمونے کی شرح پر دوبارہ نمونہ دیں۔ دوبارہ نمونے لینے کے لیے 0 پر سیٹ کریں:",
+ "Use the volume envelope of the input to replace or mix with the volume envelope of the output. The closer the ratio is to 1, the more the output envelope is used:": "آؤٹ پٹ کے والیوم لفافے کو تبدیل کرنے یا ملانے کے لیے ان پٹ کے والیوم لفافے کا استعمال کریں۔ تناسب 1 کے جتنا قریب ہوگا، اتنا ہی زیادہ آؤٹ پٹ لفافہ استعمال ہوتا ہے:",
+ "Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy:": "الیکٹرونک میوزک میں پھاڑنے جیسے فن پاروں کو روکنے کے لیے بے آواز تلفظ اور سانس کی آوازوں کی حفاظت کریں۔ غیر فعال کرنے کے لیے 0.5 پر سیٹ کریں۔ تحفظ کو بڑھانے کے لیے قدر کو کم کریں، لیکن یہ اشاریہ سازی کی درستگی کو کم کر سکتا ہے:",
+ "F0 curve file (optional). One pitch per line. Replaces the default F0 and pitch modulation:": "F0 وکر فائل (اختیاری)۔ فی لائن ایک پچ۔ پہلے سے طے شدہ F0 اور پچ ماڈیولیشن کو بدل دیتا ہے:",
+ "Convert": "تبدیل کریں",
+ "Output information": "آؤٹ پٹ کی معلومات",
+ "Export audio (click on the three dots in the lower right corner to download)": "آڈیو برآمد کریں (ڈاؤن لوڈ کرنے کے لیے نیچے دائیں کونے میں تین نقطوں پر کلک کریں)",
+ "Batch conversion. Enter the folder containing the audio files to be converted or upload multiple audio files. The converted audio will be output in the specified folder (default: 'opt').": "بیچ کی تبدیلی۔ وہ فولڈر درج کریں جس میں آڈیو فائلیں تبدیل کی جائیں یا متعدد آڈیو فائلیں اپ لوڈ کریں۔ تبدیل شدہ آڈیو مخصوص فولڈر میں آؤٹ پٹ ہو گا (پہلے سے طے شدہ: 'opt')۔",
+ "Specify output folder:": "آؤٹ پٹ فولڈر کی وضاحت کریں:",
+ "Enter the path of the audio folder to be processed (copy it from the address bar of the file manager):": "آڈیو فولڈر کا راستہ درج کریں جس پر کارروائی کی جائے (اسے فائل مینیجر کے ایڈریس بار سے کاپی کریں):",
+ "You can also input audio files in batches. Choose one of the two options. Priority is given to reading from the folder.": "آپ آڈیو فائلوں کو بیچوں میں بھی ڈال سکتے ہیں۔ دو آپشنز میں سے ایک کا انتخاب کریں۔ فولڈر سے پڑھنے کو ترجیح دی جاتی ہے۔",
+ "Export file format": "فائل کی شکل برآمد کریں۔",
+ "UVR5": "UVR5",
+ "Enter the path of the audio folder to be processed:": "جس آڈیو فولڈر پر کارروائی کی جائے گی اس کا راستہ درج کریں:",
+ "Model": "ماڈل",
+ "Vocal Extraction Aggressive": "آواز نکالنا جارحانہ",
+ "Specify the output folder for vocals:": "آواز کے لیے آؤٹ پٹ فولڈر کی وضاحت کریں:",
+ "Specify the output folder for accompaniment:": "ساتھ کے لیے آؤٹ پٹ فولڈر کی وضاحت کریں:",
+ "Train": "ٹرین",
+ "Enter the model name:": "ماڈل کا نام درج کریں:",
+ "Target sample rate:": "ہدف نمونہ کی شرح:",
+ "Whether the model has pitch guidance (required for singing, optional for speech):": "آیا ماڈل میں پچ گائیڈنس ہے (گانے کے لیے ضروری، تقریر کے لیے اختیاری):",
+ "Version": "ورژن",
+ "Number of CPU processes used for pitch extraction and data processing:": "پچ نکالنے اور ڈیٹا پروسیسنگ کے لیے استعمال ہونے والے CPU عملوں کی تعداد:",
+ "Enter the path of the training folder:": "ٹریننگ فولڈر کا راستہ درج کریں:",
+ "Please specify the model ID:": "براہ کرم ماڈل ID کی وضاحت کریں:",
+ "Auto detect audio path and select from the dropdown:": "آڈیو پاتھ کا خود بخود پتہ لگائیں اور ڈراپ ڈاؤن سے منتخب کریں:",
+ "Add audio's name to the path to the audio file to be processed (default is the correct format example) Remove the path to use an audio from the dropdown list:": "آڈیو فائل کے راستے میں آڈیو کا نام شامل کریں جس پر کارروائی کی جائے (پہلے سے طے شدہ فارمیٹ کی صحیح مثال ہے) ڈراپ ڈاؤن فہرست سے آڈیو استعمال کرنے کے لیے راستے کو ہٹا دیں:",
+ "Advanced Settings": "اعلی درجے کی ترتیبات",
+ "Settings": "ترتیبات",
+ "Status": "حالت",
+ "Process data": "ڈیٹا پر کارروائی کریں۔",
+ "Drag your audio here and hit the refresh button": "اپنے آڈیو کو یہاں گھسیٹیں اور ریفریش بٹن کو دبائیں۔",
+ "Or record an audio.": "یا آڈیو ریکارڈ کریں۔",
+ "Formant shift inference audio": "فارمینٹ شفٹ انفرنس آڈیو",
+ "Used for male to female and vice-versa conversions": "مرد سے عورت اور اس کے برعکس تبادلوں کے لیے استعمال کیا جاتا ہے۔",
+ "Please provide the GPU index(es) separated by '-', like 0-1-2 for using GPUs 0, 1, and 2:": "براہ کرم '-' سے الگ کردہ GPU انڈیکس فراہم کریں، جیسے GPUs 0، 1، اور 2 استعمال کرنے کے لیے 0-1-2:",
+ "GPU Information": "GPU کی معلومات",
+ "Feature extraction": "خصوصیت کا اخراج",
+ "Save frequency:": "تعدد کو محفوظ کریں:",
+ "Training epochs:": "تربیتی دور:",
+ "Batch size per GPU:": "بیچ سائز فی GPU:",
+ "Save only the latest '.ckpt' file to save disk space:": "ڈسک کی جگہ بچانے کے لیے صرف تازہ ترین '.ckpt' فائل کو محفوظ کریں:",
+ "No": "نہیں",
+ "Save a small final model to the 'weights' folder at each save point:": "ہر سیو پوائنٹ پر ایک چھوٹا فائنل ماڈل 'وزن' فولڈر میں محفوظ کریں:",
+ "Load pre-trained base model G path:": "پہلے سے تربیت یافتہ بیس ماڈل جی پاتھ لوڈ کریں:",
+ "Load pre-trained base model D path:": "پہلے سے تربیت یافتہ بیس ماڈل ڈی پاتھ لوڈ کریں:",
+ "Train model": "ٹرین ماڈل",
+ "Train feature index": "ٹرین فیچر انڈیکس",
+ "One-click training": "ایک کلک کی تربیت",
+ "Processing": "پروسیسنگ",
+ "Model fusion, can be used to test timbre fusion": "ماڈل فیوژن، ٹمبر فیوژن کو جانچنے کے لیے استعمال کیا جا سکتا ہے۔",
+ "Path to Model A:": "ماڈل A کا راستہ:",
+ "Path to Model B:": "ماڈل B کا راستہ:",
+ "Weight for Model A:": "ماڈل A کے لیے وزن:",
+ "Whether the model has pitch guidance:": "آیا ماڈل میں پچ گائیڈنس ہے:",
+ "Model information to be placed:": "ماڈل کی معلومات رکھی جائے گی:",
+ "Saved model name (without extension):": "محفوظ کردہ ماڈل کا نام (بغیر توسیع کے):",
+ "Model architecture version:": "ماڈل آرکیٹیکچر ورژن:",
+ "Fusion": "امتزاج",
+ "Modify model information": "ماڈل کی معلومات میں ترمیم کریں۔",
+ "Path to Model:": "ماڈل کا راستہ:",
+ "Model information to be modified:": "ماڈل کی معلومات میں ترمیم کی جائے گی:",
+ "Save file name:": "فائل کا نام محفوظ کریں:",
+ "Modify": "ترمیم کریں۔",
+ "View model information": "ماڈل کی معلومات دیکھیں",
+ "View": "دیکھیں",
+ "Model extraction (enter the path of the large file model under the 'logs' folder). This is useful if you want to stop training halfway and manually extract and save a small model file, or if you want to test an intermediate model:": "ماڈل نکالنا ('لاگز' فولڈر کے نیچے بڑی فائل ماڈل کا راستہ داخل کریں)۔ یہ مفید ہے اگر آپ تربیت کو آدھے راستے سے روکنا چاہتے ہیں اور دستی طور پر ایک چھوٹی ماڈل فائل کو نکالنا اور محفوظ کرنا چاہتے ہیں، یا اگر آپ انٹرمیڈیٹ ماڈل کی جانچ کرنا چاہتے ہیں:",
+ "Save name:": "نام محفوظ کریں:",
+ "Whether the model has pitch guidance (1: yes, 0: no):": "آیا ماڈل میں پچ گائیڈنس ہے (1: ہاں، 0: نہیں):",
+ "Extract": "نکالنا",
+ "Export Onnx": "Onnx برآمد کریں۔",
+ "RVC Model Path:": "RVC ماڈل کا راستہ:",
+ "Onnx Export Path:": "Onnx برآمد کا راستہ:",
+ "MoeVS Model": "MoeVS ماڈل",
+ "Export Onnx Model": "Onnx ماڈل برآمد کریں۔",
+ "Load model": "لوڈ ماڈل",
+ "Hubert Model": "ہیوبرٹ ماڈل",
+ "Select the .pth file": ".pth فائل کو منتخب کریں۔",
+ "Select the .index file": ".index فائل کو منتخب کریں۔",
+ "Select the .npy file": ".npy فائل کو منتخب کریں۔",
+ "Input device": "ان پٹ ڈیوائس",
+ "Output device": "آؤٹ پٹ ڈیوائس",
+ "Audio device (please use the same type of driver)": "آڈیو ڈیوائس (براہ کرم ایک ہی قسم کا ڈرائیور استعمال کریں)",
+ "Response threshold": "جوابی حد",
+ "Pitch settings": "پچ کی ترتیبات",
+ "Whether to use note names instead of their hertz value. E.G. [C5, D6] instead of [523.25, 1174.66]Hz": "آیا نوٹ کے نام ان کی ہرٹز قدر کے بجائے استعمال کیے جائیں۔ ای جی [C5, D6] بجائے [523.25, 1174.66]Hz",
+ "Index Rate": "انڈیکس ریٹ",
+ "General settings": "عام ترتیبات",
+ "Sample length": "نمونہ کی لمبائی",
+ "Fade length": "دھندلا لمبائی",
+ "Extra inference time": "اضافی تخمینہ کا وقت",
+ "Input noise reduction": "ان پٹ شور کی کمی",
+ "Output noise reduction": "آؤٹ پٹ شور کی کمی",
+ "Performance settings": "کارکردگی کی ترتیبات",
+ "Start audio conversion": "آڈیو کی تبدیلی شروع کریں۔",
+ "Stop audio conversion": "آڈیو تبادلوں کو روکیں۔",
+ "Inference time (ms):": "انفرنس ٹائم (ms):",
+ "Select the pth file": "pth فائل کو منتخب کریں۔",
+ "Select the index file": "انڈیکس فائل کو منتخب کریں۔",
+ "The hubert model path must not contain Chinese characters": "ہیوبرٹ ماڈل پاتھ میں چینی حروف نہیں ہونے چاہئیں",
+ "The pth file path must not contain Chinese characters.": "pth فائل کا راستہ چینی حروف پر مشتمل نہیں ہونا چاہیے۔",
+ "The index file path must not contain Chinese characters.": "انڈیکس فائل کا راستہ چینی حروف پر مشتمل نہیں ہونا چاہیے۔",
+ "Step algorithm": "مرحلہ الگورتھم",
+ "Number of epoch processes": "عہد کے عمل کی تعداد",
+ "Lowest points export": "کم ترین پوائنٹس کی برآمد",
+ "How many lowest points to save": "کتنے کم پوائنٹس کو بچانا ہے۔",
+ "Export lowest points of a model": "ماڈل کے سب سے کم پوائنٹس برآمد کریں۔",
+ "Output models": "آؤٹ پٹ ماڈلز",
+ "Stats of selected models": "منتخب ماڈلز کے اعدادوشمار",
+ "Custom f0 [Root pitch] File": "اپنی مرضی کے مطابق f0 [روٹ پچ] فائل",
+ "Min pitch": "منٹ پچ",
+ "Specify minimal pitch for inference [HZ]": "تخمینہ کے لیے کم سے کم پچ کی وضاحت کریں [HZ]",
+ "Specify minimal pitch for inference [NOTE][OCTAVE]": "قیاس کے لیے کم سے کم پچ کی وضاحت کریں [NOTE][OCTAVE]",
+ "Max pitch": "زیادہ سے زیادہ پچ",
+ "Specify max pitch for inference [HZ]": "تخمینہ کے لیے زیادہ سے زیادہ پچ کی وضاحت کریں [HZ]",
+ "Specify max pitch for inference [NOTE][OCTAVE]": "تخمینہ کے لیے زیادہ سے زیادہ پچ کی وضاحت کریں [NOTE][OCTAVE]",
+ "Browse presets for formanting": "فارمیٹنگ کے لیے پیش سیٹوں کو براؤز کریں۔",
+ "Presets are located in formantshiftcfg/ folder": "presets formantshiftcfg/ فولڈر میں واقع ہیں۔",
+ "Default value is 1.0": "پہلے سے طے شدہ قدر 1.0 ہے۔",
+ "Quefrency for formant shifting": "فارمینٹ شفٹنگ کے لیے Quefrency",
+ "Timbre for formant shifting": "فارمینٹ شفٹنگ کے لیے ٹمبر",
+ "Apply": "درخواست دیں",
+ "Single": "سنگل",
+ "Batch": "بیچ",
+ "Separate YouTube tracks": "یوٹیوب ٹریکس کو الگ کریں۔",
+ "Download audio from a YouTube video and automatically separate the vocal and instrumental tracks": "یوٹیوب ویڈیو سے آڈیو ڈاؤن لوڈ کریں اور خودکار طور پر آواز اور ساز کے ٹریک کو الگ کریں۔",
+ "Extra": "اضافی",
+ "Merge": "ضم",
+ "Merge your generated audios with the instrumental": "اپنے تیار کردہ آڈیوز کو انسٹرومینٹل کے ساتھ ضم کریں۔",
+ "Choose your instrumental": "اپنے آلے کا انتخاب کریں۔",
+ "Choose the generated audio": "تیار کردہ آڈیو کا انتخاب کریں۔",
+ "Combine": "یکجا",
+ "Download and Separate": "ڈاؤن لوڈ کریں اور الگ کریں۔",
+ "Enter the youtube link": "یوٹیوب کا لنک درج کریں۔",
+ "This section contains some extra utilities that often may be in experimental phases": "اس حصے میں کچھ اضافی افادیتیں ہیں جو اکثر تجرباتی مراحل میں ہو سکتی ہیں۔",
+ "Merge Audios": "آڈیوز کو ضم کریں۔",
+ "Audio files have been moved to the 'audios' folder.": "آڈیو فائلوں کو 'آڈیوز' فولڈر میں منتقل کر دیا گیا ہے۔",
+ "Downloading audio from the video...": "ویڈیو سے آڈیو ڈاؤن لوڈ ہو رہا ہے...",
+ "Audio downloaded!": "آڈیو ڈاؤن لوڈ!",
+ "An error occurred:": "ایک خرابی آگئی:",
+ "Separating audio...": "آڈیو کو الگ کیا جا رہا ہے...",
+ "File moved successfully.": "فائل کامیابی سے منتقل ہو گئی۔",
+ "Finished!": "ختم!",
+ "The source file does not exist.": "سورس فائل موجود نہیں ہے۔",
+ "Error moving the file:": "فائل کو منتقل کرنے میں خرابی:",
+ "Downloading {name} from drive": "ڈرائیو سے {name} ڈاؤن لوڈ ہو رہا ہے۔",
+ "The attempt to download using Drive didn't work": "Drive کا استعمال کرتے ہوئے ڈاؤن لوڈ کرنے کی کوشش نے کام نہیں کیا۔",
+ "Error downloading the file: {str(e)}": "فائل ڈاؤن لوڈ کرنے میں خرابی: {str(e)}",
+ "Downloading {name} from mega": "میگا سے {name} ڈاؤن لوڈ ہو رہا ہے۔",
+ "Downloading {name} from basic url": "بنیادی url سے {name} ڈاؤن لوڈ ہو رہا ہے۔",
+ "Download Audio": "آڈیو ڈاؤن لوڈ کریں۔",
+ "Download audios of any format for use in inference (recommended for mobile users)": "کسی بھی فارمیٹ کے آڈیوز کو قیاس میں استعمال کرنے کے لیے ڈاؤن لوڈ کریں (موبائل صارفین کے لیے تجویز کردہ)",
+ "Any ConnectionResetErrors post-conversion are irrelevant and purely visual; they can be ignored.\n": "تبدیلی کے بعد کی کوئی بھی ConnectionResetErrors غیر متعلقہ اور خالصتاً بصری ہیں۔ انہیں نظر انداز کیا جا سکتا ہے.",
+ "Processed audio saved at: ": "پروسیس شدہ آڈیو کو محفوظ کیا گیا:",
+ "Conversion complete!": "تبدیلی مکمل!",
+ "Reverb": "Reverb",
+ "Compressor": "کمپریسر",
+ "Noise Gate": "شور گیٹ",
+ "Volume": "حجم",
+ "Drag the audio here and click the Refresh button": "آڈیو کو یہاں گھسیٹیں اور ریفریش بٹن پر کلک کریں۔",
+ "Select the generated audio": "تیار کردہ آڈیو کو منتخب کریں۔"
+}
\ No newline at end of file
diff --git a/i18n/zh_CN.json b/i18n/zh_CN.json
new file mode 100644
index 000000000..adef58b0f
--- /dev/null
+++ b/i18n/zh_CN.json
@@ -0,0 +1,239 @@
+{
+ "Unfortunately, there is no compatible GPU available to support your training.": "不幸的是,没有可用的兼容 GPU 来支持您的训练。",
+ "Yes": "是的",
+ "Select your dataset.": "选择您的数据集。",
+ "Update list.": "更新列表。",
+ "Download Model": "下载模型",
+ "Download Backup": "下载备份",
+ "Download Dataset": "下载数据集",
+ "Download": "下载",
+ "Url:": "网址:",
+ "Build the index before saving.": "保存前构建索引。",
+ "Save your model once the training ends.": "训练结束后保存您的模型。",
+ "Save type": "保存类型",
+ "Save model": "保存模型",
+ "Choose the method": "选择方法",
+ "Save all": "保存全部",
+ "Save D and G": "保存D和G",
+ "Save voice": "保存语音",
+ "Downloading the file: ": "下载文件:",
+ "Stop training": "停止训练",
+ "Too many users have recently viewed or downloaded this file": "最近有太多用户查看或下载了此文件",
+ "Cannot get file from this private link": "无法从此私人链接获取文件",
+ "Full download": "完整下载",
+ "An error occurred downloading": "下载时发生错误",
+ "Model saved successfully": "模型保存成功",
+ "Saving the model...": "保存模型...",
+ "Saved without index...": "保存时没有索引...",
+ "model_name": "型号名称",
+ "Saved without inference model...": "保存时没有推理模型...",
+ "An error occurred saving the model": "保存模型时出错",
+ "The model you want to save does not exist, be sure to enter the correct name.": "您要保存的模型不存在,请务必输入正确的名称。",
+ "The file could not be downloaded.": "无法下载该文件。",
+ "Unzip error.": "解压错误。",
+ "Path to your added.index file (if it didn't automatically find it)": "添加的.index 文件的路径(如果没有自动找到它)",
+ "It has been downloaded successfully.": "已经下载成功了。",
+ "Proceeding with the extraction...": "继续提取...",
+ "The Backup has been uploaded successfully.": "备份已成功上传。",
+ "The Dataset has been loaded successfully.": "数据集已成功加载。",
+ "The Model has been loaded successfully.": "模型已成功加载。",
+ "It is used to download your inference models.": "它用于下载您的推理模型。",
+ "It is used to download your training backups.": "它用于下载您的训练备份。",
+ "Download the dataset with the audios in a compatible format (.wav/.flac) to train your model.": "下载包含兼容格式 (.wav/.flac) 音频的数据集来训练您的模型。",
+ "No relevant file was found to upload.": "没有找到相关文件可以上传。",
+ "The model works for inference, and has the .index file.": "该模型用于推理,并具有 .index 文件。",
+ "The model works for inference, but it doesn't have the .index file.": "该模型适用于推理,但没有 .index 文件。",
+ "This may take a few minutes, please wait...": "这可能需要几分钟,请稍候...",
+ "Resources": "资源",
+ "Step 1: Processing data": "步骤一:处理数据",
+ "Step 2a: Skipping pitch extraction": "步骤 2a:跳过音调提取",
+ "Step 2b: Extracting features": "步骤2b:提取特征",
+ "Step 3a: Model training started": "步骤3a:模型训练开始",
+ "Step 4: Export lowest points on a graph of the model": "步骤 4:导出模型图表上的最低点",
+ "Training is done, check train.log": "训练完成,查看train.log",
+ "All processes have been completed!": "所有流程已完成!",
+ "Model Inference": "模型推理",
+ "Inferencing voice:": "推理语音:",
+ "Model_Name": "型号名称",
+ "Dataset_Name": "数据集_名称",
+ "Whether the model has pitch guidance.": "模型是否有俯仰引导。",
+ "Whether to save only the latest .ckpt file to save hard drive space": "是否仅保存最新的.ckpt文件以节省硬盘空间",
+ "Cache all training sets to GPU memory. Caching small datasets (less than 10 minutes) can speed up training": "将所有训练集缓存到 GPU 内存。缓存小数据集(少于 10 分钟)可以加快训练速度",
+ "Save a small final model to the 'weights' folder at each save point": "在每个保存点将一个小的最终模型保存到“权重”文件夹中",
+ "Refresh voice list, index path and audio files": "刷新语音列表、索引路径和音频文件",
+ "Unload voice to save GPU memory:": "卸载语音以节省 GPU 内存:",
+ "Select Speaker/Singer ID:": "选择演讲者/歌手 ID:",
+ "Recommended +12 key for male to female conversion, and -12 key for female to male conversion. If the sound range goes too far and the voice is distorted, you can also adjust it to the appropriate range by yourself.": "建议+12键用于男性到女性的转换,-12键用于女性到男性的转换。如果音域走得太远,声音失真,也可以自行调整到合适的音域。",
+ "Transpose (integer, number Fof semitones, raise by an octave: 12, lower by an octave: -12):": "移调(整数,半音数,升高八度:12,降低八度:-12):",
+ "Enter the path of the audio file to be processed (default is the correct format example):": "输入要处理的音频文件的路径(默认为正确格式示例):",
+ "Select the pitch extraction algorithm:": "选择音高提取算法:",
+ "Mangio-Crepe Hop Length (Only applies to mangio-crepe): Hop length refers to the time it takes for the speaker to jump to a dramatic pitch. Lower hop lengths take more time to infer but are more pitch accurate.": "Mangio-Crepe 跳跃长度(仅适用于 mangio-crepe):跳跃长度是指说话者跳跃到戏剧性音高所需的时间。较短的跳跃长度需要更多的时间来推断,但音高更准确。",
+ "Feature search dataset file path": "特征搜索数据集文件路径",
+ "If >=3: apply median filtering to the harvested pitch results. The value represents the filter radius and can reduce breathiness.": "如果 >=3:对收获的音高结果应用中值滤波。该值代表过滤半径,可以减少呼吸味。",
+ "Path to the feature index file. Leave blank to use the selected result from the dropdown:": "功能索引文件的路径。留空以使用下拉列表中选定的结果:",
+ "Auto-detect index path and select from the dropdown": "自动检测索引路径并从下拉列表中选择",
+ "Path to feature file:": "功能文件的路径:",
+ "Search feature ratio:": "搜索特征比例:",
+ "Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling:": "在后处理中将输出音频重新采样到最终采样率。设置为 0 表示不重采样:",
+ "Use the volume envelope of the input to replace or mix with the volume envelope of the output. The closer the ratio is to 1, the more the output envelope is used:": "使用输入的音量包络来替换或与输出的音量包络混合。该比率越接近 1,使用的输出包络就越多:",
+ "Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy:": "保护清辅音和呼吸音,以防止电子音乐中出现撕裂等伪影。设置为 0.5 以禁用。减小该值可增强保护,但可能会降低索引精度:",
+ "F0 curve file (optional). One pitch per line. Replaces the default F0 and pitch modulation:": "F0 曲线文件(可选)。每行一个音高。替换默认的 F0 和音调调制:",
+ "Convert": "转变",
+ "Output information": "输出信息",
+ "Export audio (click on the three dots in the lower right corner to download)": "导出音频(点击右下角三点即可下载)",
+ "Batch conversion. Enter the folder containing the audio files to be converted or upload multiple audio files. The converted audio will be output in the specified folder (default: 'opt').": "批量转换。输入包含要转换的音频文件的文件夹或上传多个音频文件。转换后的音频将输出到指定文件夹(默认:“opt”)。",
+ "Specify output folder:": "指定输出文件夹:",
+ "Enter the path of the audio folder to be processed (copy it from the address bar of the file manager):": "输入要处理的音频文件夹路径(从文件管理器地址栏复制):",
+ "You can also input audio files in batches. Choose one of the two options. Priority is given to reading from the folder.": "您还可以批量输入音频文件。选择两个选项之一。优先从文件夹中读取。",
+ "Export file format": "导出文件格式",
+ "UVR5": "紫外线5",
+ "Enter the path of the audio folder to be processed:": "输入要处理的音频文件夹路径:",
+ "Model": "模型",
+ "Vocal Extraction Aggressive": "声音提取 攻击性",
+ "Specify the output folder for vocals:": "指定人声的输出文件夹:",
+ "Specify the output folder for accompaniment:": "指定伴奏的输出文件夹:",
+ "Train": "火车",
+ "Enter the model name:": "输入型号名称:",
+ "Target sample rate:": "目标采样率:",
+ "Whether the model has pitch guidance (required for singing, optional for speech):": "模型是否有音调引导(唱歌时需要,语音时可选):",
+ "Version": "版本",
+ "Number of CPU processes used for pitch extraction and data processing:": "用于音高提取和数据处理的CPU进程数:",
+ "Enter the path of the training folder:": "输入训练文件夹的路径:",
+ "Please specify the model ID:": "请指定型号 ID:",
+ "Auto detect audio path and select from the dropdown:": "自动检测音频路径并从下拉列表中选择:",
+ "Add audio's name to the path to the audio file to be processed (default is the correct format example) Remove the path to use an audio from the dropdown list:": "将音频的名称添加到要处理的音频文件的路径中(默认是正确的格式示例)从下拉列表中删除使用音频的路径:",
+ "Advanced Settings": "高级设置",
+ "Settings": "设置",
+ "Status": "地位",
+ "Process data": "处理数据",
+ "Drag your audio here and hit the refresh button": "将音频拖到此处并点击刷新按钮",
+ "Or record an audio.": "或者录制音频。",
+ "Formant shift inference audio": "共振峰移位推断音频",
+ "Used for male to female and vice-versa conversions": "用于男性到女性的转换,反之亦然",
+ "Please provide the GPU index(es) separated by '-', like 0-1-2 for using GPUs 0, 1, and 2:": "请提供以“-”分隔的 GPU 索引,例如使用 GPU 0、1 和 2 时为 0-1-2:",
+ "GPU Information": "GPU信息",
+ "Feature extraction": "特征提取",
+ "Save frequency:": "保存频率:",
+ "Training epochs:": "训练时期:",
+ "Batch size per GPU:": "每个 GPU 的批量大小:",
+ "Save only the latest '.ckpt' file to save disk space:": "仅保存最新的“.ckpt”文件以节省磁盘空间:",
+ "No": "不",
+ "Save a small final model to the 'weights' folder at each save point:": "在每个保存点将一个小的最终模型保存到“权重”文件夹中:",
+ "Load pre-trained base model G path:": "加载预训练的基础模型G路径:",
+ "Load pre-trained base model D path:": "加载预训练的基础模型D路径:",
+ "Train model": "火车模型",
+ "Train feature index": "列车特征指标",
+ "One-click training": "一键培训",
+ "Processing": "加工",
+ "Model fusion, can be used to test timbre fusion": "模型融合,可用于测试音色融合",
+ "Path to Model A:": "模型 A 的路径:",
+ "Path to Model B:": "模型 B 的路径:",
+ "Weight for Model A:": "A 型重量:",
+ "Whether the model has pitch guidance:": "模型是否有俯仰引导:",
+ "Model information to be placed:": "需放置的型号信息:",
+ "Saved model name (without extension):": "保存的模型名称(不带扩展名):",
+ "Model architecture version:": "模型架构版本:",
+ "Fusion": "融合",
+ "Modify model information": "修改型号信息",
+ "Path to Model:": "模型路径:",
+ "Model information to be modified:": "待修改型号信息:",
+ "Save file name:": "保存文件名:",
+ "Modify": "调整",
+ "View model information": "查看型号信息",
+ "View": "看法",
+ "Model extraction (enter the path of the large file model under the 'logs' folder). This is useful if you want to stop training halfway and manually extract and save a small model file, or if you want to test an intermediate model:": "模型提取(输入“logs”文件夹下大文件模型的路径)。如果您想中途停止训练并手动提取并保存一个小模型文件,或者如果您想测试中间模型,这非常有用:",
+ "Save name:": "保存名称:",
+ "Whether the model has pitch guidance (1: yes, 0: no):": "模型是否有俯仰引导(1:有,0:无):",
+ "Extract": "提炼",
+ "Export Onnx": "导出Onnx",
+ "RVC Model Path:": "RVC模型路径:",
+ "Onnx Export Path:": "Onnx 导出路径:",
+ "MoeVS Model": "MoeVS模型",
+ "Export Onnx Model": "导出 Onnx 模型",
+ "Load model": "负载模型",
+ "Hubert Model": "休伯特模型",
+ "Select the .pth file": "选择 .pth 文件",
+ "Select the .index file": "选择.index文件",
+ "Select the .npy file": "选择.npy 文件",
+ "Input device": "输入设备",
+ "Output device": "输出设备",
+ "Audio device (please use the same type of driver)": "音频设备(请使用同类型驱动程序)",
+ "Response threshold": "反应阈值",
+ "Pitch settings": "音调设置",
+ "Whether to use note names instead of their hertz value. E.G. [C5, D6] instead of [523.25, 1174.66]Hz": "是否使用音符名称而不是赫兹值。例如。 [C5,D6]而不是[523.25,1174.66]Hz",
+ "Index Rate": "指数率",
+ "General settings": "常规设置",
+ "Sample length": "样品长度",
+ "Fade length": "淡入淡出长度",
+ "Extra inference time": "额外的推理时间",
+ "Input noise reduction": "输入噪声降低",
+ "Output noise reduction": "输出噪声降低",
+ "Performance settings": "性能设置",
+ "Start audio conversion": "开始音频转换",
+ "Stop audio conversion": "停止音频转换",
+ "Inference time (ms):": "推理时间(毫秒):",
+ "Select the pth file": "选择.pth文件",
+ "Select the index file": "选择索引文件",
+ "The hubert model path must not contain Chinese characters": "hubert模型路径不能包含中文字符",
+ "The pth file path must not contain Chinese characters.": "pth文件路径不能包含中文字符。",
+ "The index file path must not contain Chinese characters.": "索引文件路径不能包含中文字符。",
+ "Step algorithm": "步进算法",
+ "Number of epoch processes": "纪元进程数",
+ "Lowest points export": "最低点导出",
+ "How many lowest points to save": "保存多少个最低点",
+ "Export lowest points of a model": "导出模型的最低点",
+ "Output models": "输出型号",
+ "Stats of selected models": "所选模型的统计数据",
+ "Custom f0 [Root pitch] File": "自定义 f0 [根音] 文件",
+ "Min pitch": "最小间距",
+ "Specify minimal pitch for inference [HZ]": "指定推理的最小间距 [HZ]",
+ "Specify minimal pitch for inference [NOTE][OCTAVE]": "指定推理的最小间距 [NOTE][OCTAVE]",
+ "Max pitch": "最大螺距",
+ "Specify max pitch for inference [HZ]": "指定推理的最大间距 [HZ]",
+ "Specify max pitch for inference [NOTE][OCTAVE]": "指定推理的最大音高 [NOTE][OCTAVE]",
+ "Browse presets for formanting": "浏览共振峰预设",
+ "Presets are located in formantshiftcfg/ folder": "预设位于formantshiftcfg/文件夹中",
+ "Default value is 1.0": "默认值为 1.0",
+ "Quefrency for formant shifting": "共振峰移位频率",
+ "Timbre for formant shifting": "共振峰转换的音色",
+ "Apply": "申请",
+ "Single": "单身的",
+ "Batch": "批",
+ "Separate YouTube tracks": "单独的 YouTube 曲目",
+ "Download audio from a YouTube video and automatically separate the vocal and instrumental tracks": "从 YouTube 视频下载音频并自动分离人声和器乐曲目",
+ "Extra": "额外的",
+ "Merge": "合并",
+ "Merge your generated audios with the instrumental": "将生成的音频与乐器合并",
+ "Choose your instrumental": "选择您的乐器",
+ "Choose the generated audio": "选择生成的音频",
+ "Combine": "结合",
+ "Download and Separate": "下载并分离",
+ "Enter the youtube link": "输入 YouTube 链接",
+ "This section contains some extra utilities that often may be in experimental phases": "本节包含一些通常可能处于实验阶段的额外实用程序",
+ "Merge Audios": "合并音频",
+ "Audio files have been moved to the 'audios' folder.": "音频文件已移至“audios”文件夹。",
+ "Downloading audio from the video...": "正在从视频下载音频...",
+ "Audio downloaded!": "音频下载!",
+ "An error occurred:": "发生错误:",
+ "Separating audio...": "分离音频...",
+ "File moved successfully.": "文件移动成功。",
+ "Finished!": "完成的!",
+ "The source file does not exist.": "源文件不存在。",
+ "Error moving the file:": "移动文件时出错:",
+ "Downloading {name} from drive": "正在从驱动器下载 {name}",
+ "The attempt to download using Drive didn't work": "尝试使用云端硬盘下载失败",
+ "Error downloading the file: {str(e)}": "下载文件时出错:{str(e)}",
+ "Downloading {name} from mega": "正在从 mega 下载 {name}",
+ "Downloading {name} from basic url": "从基本网址下载 {name}",
+ "Download Audio": "下载音频",
+ "Download audios of any format for use in inference (recommended for mobile users)": "下载任何格式的音频用于推理(推荐移动用户)",
+ "Any ConnectionResetErrors post-conversion are irrelevant and purely visual; they can be ignored.\n": "转换后的任何 ConnectionResetErrors 都是无关紧要的并且纯粹是视觉上的;它们可以被忽略。",
+ "Processed audio saved at: ": "处理后的音频保存在:",
+ "Conversion complete!": "转换完成!",
+ "Reverb": "混响",
+ "Compressor": "压缩机",
+ "Noise Gate": "噪声门",
+ "Volume": "体积",
+ "Drag the audio here and click the Refresh button": "将音频拖至此处并单击刷新按钮",
+ "Select the generated audio": "选择生成的音频"
+}
\ No newline at end of file
diff --git a/images/icon.png b/images/icon.png
new file mode 100644
index 000000000..f9aae65bb
Binary files /dev/null and b/images/icon.png differ
diff --git a/infer-web.py b/infer-web.py
new file mode 100644
index 000000000..6d5da8900
--- /dev/null
+++ b/infer-web.py
@@ -0,0 +1,2473 @@
+import sys
+from shutil import rmtree
+import shutil
+import json # Mangio fork using json for preset saving
+import datetime
+import unicodedata
+from glob import glob1
+from signal import SIGTERM
+import librosa
+import os
+now_dir = os.getcwd()
+sys.path.append(now_dir)
+import lib.globals.globals as rvc_globals
+from LazyImport import lazyload
+
+math = lazyload('math')
+
+import traceback
+import warnings
+tensorlowest = lazyload('tensorlowest')
+import faiss
+ffmpeg = lazyload('ffmpeg')
+
+np = lazyload("numpy")
+torch = lazyload('torch')
+re = lazyload('regex')
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+os.environ["OPENBLAS_NUM_THREADS"] = "1"
+os.environ["no_proxy"] = "localhost, 127.0.0.1, ::1"
+import logging
+from random import shuffle
+from subprocess import Popen
+import easy_infer
+import audioEffects
+gr = lazyload("gradio")
+SF = lazyload("soundfile")
+SFWrite = SF.write
+from config import Config
+from fairseq import checkpoint_utils
+from i18n import I18nAuto
+from lib.infer_pack.models import (
+ SynthesizerTrnMs256NSFsid,
+ SynthesizerTrnMs256NSFsid_nono,
+ SynthesizerTrnMs768NSFsid,
+ SynthesizerTrnMs768NSFsid_nono,
+)
+from lib.infer_pack.models_onnx import SynthesizerTrnMsNSFsidM
+from infer_uvr5 import _audio_pre_, _audio_pre_new
+from MDXNet import MDXNetDereverb
+from my_utils import load_audio
+from train.process_ckpt import change_info, extract_small_model, merge, show_info
+from vc_infer_pipeline import VC
+from sklearn.cluster import MiniBatchKMeans
+
+import time
+import threading
+
+from shlex import quote as SQuote
+
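+# RQuote shell-quotes any value (after str() coercion) before it is interpolated into the os.system/Popen command strings below.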
+RQuote = lambda val: SQuote(str(val))
+
+tmp = os.path.join(now_dir, "TEMP")
+runtime_dir = os.path.join(now_dir, "runtime/Lib/site-packages")
+directories = ['logs', 'audios', 'datasets', 'weights']
+
+rmtree(tmp, ignore_errors=True)
+rmtree(os.path.join(runtime_dir, "infer_pack"), ignore_errors=True)
+rmtree(os.path.join(runtime_dir, "uvr5_pack"), ignore_errors=True)
+
+os.makedirs(tmp, exist_ok=True)
+for folder in directories:
+ os.makedirs(os.path.join(now_dir, folder), exist_ok=True)
+
+os.environ["TEMP"] = tmp
+warnings.filterwarnings("ignore")
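+# Fixed seed (kept from upstream RVC) so runs are reproducible.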
+torch.manual_seed(114514)
+logging.getLogger("numba").setLevel(logging.WARNING)
+os.makedirs("csvdb", exist_ok=True)  # make sure the stop-flag directory exists before creating the file
+try:
+ file = open('csvdb/stop.csv', 'x')
+ file.close()
+except FileExistsError: pass
+
+global DoFormant, Quefrency, Timbre
+
+DoFormant = rvc_globals.DoFormant
+Quefrency = rvc_globals.Quefrency
+Timbre = rvc_globals.Timbre
+
+config = Config()
+i18n = I18nAuto()
+i18n.print()
+ngpu = torch.cuda.device_count()
+gpu_infos = []
+mem = []
+if_gpu_ok = False
+
+keywords = ["10", "16", "20", "30", "40", "A2", "A3", "A4", "P4", "A50", "500", "A60",
+ "70", "80", "90", "M4", "T4", "TITAN"]
+
+if torch.cuda.is_available() or ngpu != 0:
+ for i in range(ngpu):
+ gpu_name = torch.cuda.get_device_name(i).upper()
+ if any(keyword in gpu_name for keyword in keywords):
+ if_gpu_ok = True
+ gpu_infos.append("%s\t%s" % (i, gpu_name))
+ mem.append(int(torch.cuda.get_device_properties(i).total_memory / 1e9 + 0.4))
+
+gpu_info = "\n".join(gpu_infos) if if_gpu_ok and gpu_infos else i18n("Unfortunately, there is no compatible GPU available to support your training.")
+default_batch_size = min(mem) // 2 if if_gpu_ok and gpu_infos else 1
+gpus = "-".join(i[0] for i in gpu_infos)
+
+hubert_model = None
+
+def load_hubert():
+ global hubert_model
+ models, _, _ = checkpoint_utils.load_model_ensemble_and_task(["hubert_base.pt"], suffix="")
+ hubert_model = models[0].to(config.device)
+
+ if config.is_half:
+ hubert_model = hubert_model.half()
+
+ hubert_model.eval()
+
+datasets_root = "datasets"
+weight_root = "weights"
+weight_uvr5_root = "uvr5_weights"
+index_root = "logs"
+fshift_root = "formantshiftcfg"
+audio_root = "audios"
+audio_others_root = "audio-others"
+
+sup_audioext = {'wav', 'mp3', 'flac', 'ogg', 'opus',
+ 'm4a', 'mp4', 'aac', 'alac', 'wma',
+ 'aiff', 'webm', 'ac3'}
+
+names = [os.path.join(root, file)
+ for root, _, files in os.walk(weight_root)
+ for file in files
+ if file.endswith((".pth", ".onnx"))]
+
+indexes_list = [os.path.join(root, name)
+ for root, _, files in os.walk(index_root, topdown=False)
+ for name in files
+ if name.endswith(".index") and "trained" not in name]
+
+audio_paths = [os.path.join(root, name)
+ for root, _, files in os.walk(audio_root, topdown=False)
+ for name in files
+ if name.endswith(tuple(sup_audioext))]
+
+audio_others_paths = [os.path.join(root, name)
+ for root, _, files in os.walk(audio_others_root, topdown=False)
+ for name in files
+ if name.endswith(tuple(sup_audioext))]
+
+uvr5_names = [name.replace(".pth", "")
+ for name in os.listdir(weight_uvr5_root)
+ if name.endswith(".pth") or "onnx" in name]
+
+check_for_name = lambda: sorted(names)[0] if names else ''
+
+datasets=[]
+for foldername in os.listdir(os.path.join(now_dir, datasets_root)):
+ if "." not in foldername:
+ datasets.append(os.path.join(easy_infer.find_folder_parent(".","pretrained"),"datasets",foldername))
+
+def get_dataset():
+ if len(datasets) > 0:
+ return sorted(datasets)[0]
+ else:
+ return ''
+
+def update_dataset_list(name):
+ new_datasets = []
+ for foldername in os.listdir(os.path.join(now_dir, datasets_root)):
+ if "." not in foldername:
+ new_datasets.append(os.path.join(easy_infer.find_folder_parent(".","pretrained"),"datasets",foldername))
+ return gr.Dropdown.update(choices=new_datasets)
+
+def get_indexes():
+ indexes_list = [
+ os.path.join(dirpath, filename)
+ for dirpath, _, filenames in os.walk(index_root)
+ for filename in filenames
+ if filename.endswith(".index") and "trained" not in filename
+ ]
+
+ return indexes_list if indexes_list else ''
+
+def get_fshift_presets():
+ fshift_presets_list = [
+ os.path.join(dirpath, filename)
+ for dirpath, _, filenames in os.walk(fshift_root)
+ for filename in filenames
+ if filename.endswith(".txt")
+ ]
+
+ return fshift_presets_list if fshift_presets_list else ''
+
+import soundfile as sf
+
+def generate_output_path(output_folder, base_name, extension):
+ # Generate a unique name for the output file
+ index = 1
+ while True:
+ output_path = os.path.join(output_folder, f"{base_name}_{index}.{extension}")
+ if not os.path.exists(output_path):
+ return output_path
+ index += 1
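+# e.g. generate_output_path("audio-outputs", "combined_audio", "wav") yields "audio-outputs/combined_audio_1.wav", or _2, _3, ... if taken.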
+
+def combine_and_save_audios(audio1_path, audio2_path, output_path, volume_factor_audio1, volume_factor_audio2):
+ audio1, sr1 = librosa.load(audio1_path, sr=None)
+ audio2, sr2 = librosa.load(audio2_path, sr=None)
+
+ # Align the sample rates
+ if sr1 != sr2:
+ if sr1 > sr2:
+ audio2 = librosa.resample(audio2, orig_sr=sr2, target_sr=sr1)
+ else:
+ audio1 = librosa.resample(audio1, orig_sr=sr1, target_sr=sr2)
+ sr1 = sr2 # both signals are now at sr2; reuse sr1 as the output rate
+
+ # Trim both audios to the same length
+ target_length = min(len(audio1), len(audio2))
+ audio1 = librosa.util.fix_length(audio1, size=target_length)
+ audio2 = librosa.util.fix_length(audio2, size=target_length)
+
+ # Scale each audio by its gain factor
+ audio1 *= volume_factor_audio1
+ audio2 *= volume_factor_audio2
+
+ # Mix the two audios
+ combined_audio = audio1 + audio2
+
+ sf.write(output_path, combined_audio, sr1)
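+# Note: the mix is written at the common (higher) sample rate; summed peaks can exceed [-1, 1] and may clip on export.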
+
+
+# Conversion function invoked by the UI button
+def audio_combined(audio1_path, audio2_path, volume_factor_audio1=1.0, volume_factor_audio2=1.0, reverb_enabled=False, compressor_enabled=False, noise_gate_enabled=False):
+ output_folder = os.path.join(now_dir, "audio-outputs")
+ os.makedirs(output_folder, exist_ok=True)
+
+ # Generate unique names for the output files
+ base_name = "combined_audio"
+ extension = "wav"
+ output_path = generate_output_path(output_folder, base_name, extension)
+ print(reverb_enabled)
+ print(compressor_enabled)
+ print(noise_gate_enabled)
+
+ if reverb_enabled or compressor_enabled or noise_gate_enabled:
+ # Process the first audio with the enabled effects
+ base_name = "effect_audio"
+ output_path = generate_output_path(output_folder, base_name, extension)
+ processed_audio_path = audioEffects.process_audio(audio2_path, output_path, reverb_enabled, compressor_enabled, noise_gate_enabled)
+ base_name = "combined_audio"
+ output_path = generate_output_path(output_folder, base_name, extension)
+ # Mix the processed audio with the first audio via combine_and_save_audios
+ combine_and_save_audios(audio1_path, processed_audio_path, output_path, volume_factor_audio1, volume_factor_audio2)
+
+ return i18n("Conversion complete!"), output_path
+ else:
+ base_name = "combined_audio"
+ output_path = generate_output_path(output_folder, base_name, extension)
+ # No effects enabled; mix the raw audios directly
+ combine_and_save_audios(audio1_path, audio2_path, output_path, volume_factor_audio1, volume_factor_audio2)
+
+ return i18n("Conversion complete!"), output_path
+
+
+def vc_single(
+ sid: str,
+ input_audio_path0: str,
+ input_audio_path1: str,
+ f0_up_key: int,
+ f0_file: str,
+ f0_method: str,
+ file_index: str,
+ file_index2: str,
+ index_rate: float,
+ filter_radius: int,
+ resample_sr: int,
+ rms_mix_rate: float,
+ protect: float,
+ crepe_hop_length: int,
+ f0_min: int,
+ note_min: str,
+ f0_max: int,
+ note_max: str,
+):
+ global total_time
+ total_time = 0
+ start_time = time.time()
+ global tgt_sr, net_g, vc, hubert_model, version
+ if not input_audio_path0 and not input_audio_path1:
+ return "You need to upload an audio", None
+
+ if (not os.path.exists(input_audio_path0)) and (not os.path.exists(os.path.join(now_dir, input_audio_path0))):
+ return "Audio was not properly selected or doesn't exist", None
+
+ input_audio_path1 = input_audio_path1 or input_audio_path0
+ print(f"\nStarting inference for '{os.path.basename(input_audio_path1)}'")
+ print("-------------------")
+
+ f0_up_key = int(f0_up_key)
+
+ if rvc_globals.NotesOrHertz and f0_method != 'rmvpe':
+ f0_min = note_to_hz(note_min) if note_min else 50
+ f0_max = note_to_hz(note_max) if note_max else 1100
+ print(f"Converted min pitch freq - {f0_min}\n"
+ f"Converted max pitch freq - {f0_max}")
+ else:
+ f0_min = f0_min or 50
+ f0_max = f0_max or 1100
+ try:
+ print(f"Attempting to load {input_audio_path1}....")
+ audio = load_audio(input_audio_path1,
+ 16000,
+ DoFormant=rvc_globals.DoFormant,
+ Quefrency=rvc_globals.Quefrency,
+ Timbre=rvc_globals.Timbre)
+
+ audio_max = np.abs(audio).max() / 0.95
+ if audio_max > 1:
+ audio /= audio_max
+
+ times = [0, 0, 0]
+ if not hubert_model:
+ print("Loading hubert for the first time...")
+ load_hubert()
+
+ try:
+ if_f0 = cpt.get("f0", 1)
+ except NameError:
+ message = "Model was not properly selected"
+ print(message)
+ return message, None
+
+ file_index = (
+ file_index.strip(" ").strip('"').strip("\n").strip('"').strip(" ").replace("trained", "added")
+ ) if file_index != "" else file_index2
+
+ try:
+ audio_opt = vc.pipeline(
+ hubert_model,
+ net_g,
+ sid,
+ audio,
+ input_audio_path1,
+ times,
+ f0_up_key,
+ f0_method,
+ file_index,
+ index_rate,
+ if_f0,
+ filter_radius,
+ tgt_sr,
+ resample_sr,
+ rms_mix_rate,
+ version,
+ protect,
+ crepe_hop_length,
+ f0_file=f0_file,
+ f0_min=f0_min,
+ f0_max=f0_max
+ )
+ except AssertionError:
+ message = "Mismatching index version detected (v1 with v2, or v2 with v1)."
+ print(message)
+ return message, None
+ except NameError:
+ message = "RVC libraries are still loading. Please try again in a few seconds."
+ print(message)
+ return message, None
+
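+ # Chained comparison: resample only when resample_sr >= 16000 and it differs from tgt_sr.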
+ if tgt_sr != resample_sr >= 16000:
+ tgt_sr = resample_sr
+
+ index_info = "Using index:%s." % file_index if os.path.exists(file_index) else "Index not used."
+
+ end_time = time.time()
+ total_time = end_time - start_time
+
+ return f"Success.\n {index_info}\nTime:\n npy:{times[0]}, f0:{times[1]}, infer:{times[2]}\nTotal Time: {total_time} seconds", (tgt_sr, audio_opt)
+ except:
+ info = traceback.format_exc()
+ print(info)
+ return info, (None, None)
+
+def vc_multi(
+ sid,
+ dir_path,
+ opt_root,
+ paths,
+ f0_up_key,
+ f0_method,
+ file_index,
+ file_index2,
+ index_rate,
+ filter_radius,
+ resample_sr,
+ rms_mix_rate,
+ protect,
+ format1,
+ crepe_hop_length,
+ f0_min,
+ note_min,
+ f0_max,
+ note_max,
+):
+ if rvc_globals.NotesOrHertz and f0_method != 'rmvpe':
+ f0_min = note_to_hz(note_min) if note_min else 50
+ f0_max = note_to_hz(note_max) if note_max else 1100
+ print(f"Converted min pitch freq - {f0_min}\n"
+ f"Converted max pitch freq - {f0_max}")
+ else:
+ f0_min = f0_min or 50
+ f0_max = f0_max or 1100
+
+ try:
+ dir_path, opt_root = [x.strip(" ").strip('"').strip("\n").strip('"').strip(" ") for x in [dir_path, opt_root]]
+ os.makedirs(opt_root, exist_ok=True)
+
+ paths = [os.path.join(dir_path, name) for name in os.listdir(dir_path)] if dir_path else [path.name for path in paths]
+ infos = []
+
+ for path in paths:
+ info, opt = vc_single(sid, path, None, f0_up_key, None, f0_method, file_index, file_index2, index_rate, filter_radius,
+ resample_sr, rms_mix_rate, protect, crepe_hop_length, f0_min, note_min, f0_max, note_max)
+
+ if "Success" in info:
+ try:
+ tgt_sr, audio_opt = opt
+ base_name = os.path.splitext(os.path.basename(path))[0]
+ output_path = f"{opt_root}/{base_name}.{format1}"
+ path, extension = output_path if format1 in ["wav", "flac", "mp3", "ogg", "aac", "m4a"] else f"{output_path}.wav", format1
+ SFWrite(path, audio_opt, tgt_sr)
+ #sys.stdout.write("\nFile Written Successfully with SFWrite") # Debugging print
+ if os.path.exists(path) and extension not in ["wav", "flac", "mp3", "ogg", "aac", "m4a"]:
+ sys.stdout.write(f"Running command: ffmpeg -i {RQuote(path)} -vn {RQuote(path[:-4] + '.' + extension)} -q:a 2 -y")
+ os.system(f"ffmpeg -i {RQuote(path)} -vn {RQuote(path[:-4] + '.' + extension)} -q:a 2 -y")
+ #print(f"\nFile Converted to {extension} using ffmpeg") # Debugging print
+ except:
+ info += traceback.format_exc()
+ print(f"\nException encountered: {info}") # Debugging print
+ infos.append(f"{os.path.basename(path)}->{info}")
+ yield "\n".join(infos)
+ yield "\n".join(infos)
+ except:
+ yield traceback.format_exc()
+
+
+def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format0):
+ infos = []
+ try:
+ inp_root, save_root_vocal, save_root_ins = [x.strip(" ").strip('"').strip("\n").strip('"').strip(" ") for x in [inp_root, save_root_vocal, save_root_ins]]
+
+ pre_fun = MDXNetDereverb(15) if model_name == "onnx_dereverb_By_FoxJoy" else (_audio_pre_ if "DeEcho" not in model_name else _audio_pre_new)(
+ agg=int(agg),
+ model_path=os.path.join(weight_uvr5_root, model_name + ".pth"),
+ device=config.device,
+ is_half=config.is_half,
+ )
+
+ paths = [os.path.join(inp_root, name) for name in os.listdir(inp_root)] if inp_root else [path.name for path in paths]
+
+ for path in paths:
+ inp_path = os.path.join(inp_root, path)
+ need_reformat, done = 1, 0
+
+ try:
+ info = ffmpeg.probe(inp_path, cmd="ffprobe")
+ if info["streams"][0]["channels"] == 2 and info["streams"][0]["sample_rate"] == "44100":
+ need_reformat = 0
+ pre_fun._path_audio_(inp_path, save_root_ins, save_root_vocal, format0)
+ done = 1
+ except:
+ traceback.print_exc()
+
+ if need_reformat:
+ tmp_path = f"{tmp}/{os.path.basename(RQuote(inp_path))}.reformatted.wav"
+ os.system(f"ffmpeg -i {RQuote(inp_path)} -vn -acodec pcm_s16le -ac 2 -ar 44100 {RQuote(tmp_path)} -y")
+ inp_path = tmp_path
+
+ try:
+ if not done:
+ pre_fun._path_audio_(inp_path, save_root_ins, save_root_vocal, format0)
+ infos.append(f"{os.path.basename(inp_path)}->Success")
+ yield "\n".join(infos)
+ except:
+ infos.append(f"{os.path.basename(inp_path)}->{traceback.format_exc()}")
+ yield "\n".join(infos)
+ except:
+ infos.append(traceback.format_exc())
+ yield "\n".join(infos)
+ finally:
+ try:
+ if model_name == "onnx_dereverb_By_FoxJoy":
+ del pre_fun.pred.model
+ del pre_fun.pred.model_
+ else:
+ del pre_fun.model
+
+ del pre_fun
+ except: traceback.print_exc()
+
+ print("clean_empty_cache")
+
+ if torch.cuda.is_available(): torch.cuda.empty_cache()
+
+ yield "\n".join(infos)
+
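+# Load the voice model selected in the UI; an empty sid unloads everything to free GPU memory.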
+def get_vc(sid, to_return_protect0, to_return_protect1):
+ global n_spk, tgt_sr, net_g, vc, cpt, version, hubert_model
+ if not sid:
+ if hubert_model is not None:
+ print("clean_empty_cache")
+ del net_g, n_spk, vc, hubert_model, tgt_sr
+ hubert_model = net_g = n_spk = vc = tgt_sr = None
+ if torch.cuda.is_available():
+ torch.cuda.empty_cache()
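+ # Upstream RVC rebuilds and immediately deletes a throwaway generator here; without it the old graph is reportedly not released in full.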
+ if_f0, version = cpt.get("f0", 1), cpt.get("version", "v1")
+ net_g = (SynthesizerTrnMs256NSFsid if version == "v1" else SynthesizerTrnMs768NSFsid)(
+ *cpt["config"], is_half=config.is_half) if if_f0 == 1 else (SynthesizerTrnMs256NSFsid_nono if version == "v1" else SynthesizerTrnMs768NSFsid_nono)(*cpt["config"])
+ del net_g, cpt
+ if torch.cuda.is_available():
+ torch.cuda.empty_cache()
+ cpt = None
+ return ({"visible": False, "__type__": "update"},) * 3
+
+ print(f"loading {sid}")
+ cpt = torch.load(sid, map_location="cpu")
+ tgt_sr = cpt["config"][-1]
+ cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]
+
+ if cpt.get("f0", 1) == 0:
+ to_return_protect0 = to_return_protect1 = {"visible": False, "value": 0.5, "__type__": "update"}
+ else:
+ to_return_protect0 = {"visible": True, "value": to_return_protect0, "__type__": "update"}
+ to_return_protect1 = {"visible": True, "value": to_return_protect1, "__type__": "update"}
+
+ version = cpt.get("version", "v1")
+ net_g = (SynthesizerTrnMs256NSFsid if version == "v1" else SynthesizerTrnMs768NSFsid)(
+ *cpt["config"], is_half=config.is_half) if cpt.get("f0", 1) == 1 else (SynthesizerTrnMs256NSFsid_nono if version == "v1" else SynthesizerTrnMs768NSFsid_nono)(*cpt["config"])
+ del net_g.enc_q
+
+ print(net_g.load_state_dict(cpt["weight"], strict=False))
+ net_g.eval().to(config.device)
+ net_g = net_g.half() if config.is_half else net_g.float()
+
+ vc = VC(tgt_sr, config)
+ n_spk = cpt["config"][-3]
+
+ return (
+ {"visible": False, "maximum": n_spk, "__type__": "update"},
+ to_return_protect0,
+ to_return_protect1
+ )
+
+
+def change_choices():
+ names = [os.path.join(root, file)
+ for root, _, files in os.walk(weight_root)
+ for file in files
+ if file.endswith((".pth", ".onnx"))]
+ indexes_list = [os.path.join(root, name) for root, _, files in os.walk(index_root, topdown=False) for name in files if name.endswith(".index") and "trained" not in name]
+ audio_paths = [os.path.join(audio_root, file) for file in os.listdir(os.path.join(now_dir, "audios"))]
+
+
+ return (
+ {"choices": sorted(names), "__type__": "update"},
+ {"choices": sorted(indexes_list), "__type__": "update"},
+ {"choices": sorted(audio_paths), "__type__": "update"}
+ )
+def change_choices3():
+
+ audio_paths = [os.path.join(audio_root, file) for file in os.listdir(os.path.join(now_dir, "audios"))]
+ audio_others_paths = [os.path.join(audio_others_root, file) for file in os.listdir(os.path.join(now_dir, "audio-others"))]
+
+
+ return (
+ {"choices": sorted(audio_others_paths), "__type__": "update"},
+ {"choices": sorted(audio_paths), "__type__": "update"}
+ )
+
+sr_dict = {
+ "32k": 32000,
+ "40k": 40000,
+ "48k": 48000,
+}
+
+def if_done(done, p):
+ while p.poll() is None:
+ time.sleep(0.5)
+
+ done[0] = True
+
+def if_done_multi(done, ps):
+ while not all(p.poll() is not None for p in ps):
+ time.sleep(0.5)
+ done[0] = True
+
+def formant_enabled(cbox, qfrency, tmbre):
+ global DoFormant, Quefrency, Timbre
+
+ DoFormant = cbox
+ Quefrency = qfrency
+ Timbre = tmbre
+
+ rvc_globals.DoFormant = cbox
+ rvc_globals.Quefrency = qfrency
+ rvc_globals.Timbre = tmbre
+
+ visibility_update = {"visible": DoFormant, "__type__": "update"}
+
+ return (
+ {"value": DoFormant, "__type__": "update"},
+ ) + (visibility_update,) * 6
+
+
+def formant_apply(qfrency, tmbre):
+ global Quefrency, Timbre, DoFormant
+
+ Quefrency = qfrency
+ Timbre = tmbre
+ DoFormant = True
+
+ rvc_globals.DoFormant = True
+ rvc_globals.Quefrency = qfrency
+ rvc_globals.Timbre = tmbre
+
+ return ({"value": Quefrency, "__type__": "update"}, {"value": Timbre, "__type__": "update"})
+
+def update_fshift_presets(preset, qfrency, tmbre):
+
+ if preset:
+ with open(preset, 'r') as p:
+ content = p.readlines()
+ qfrency, tmbre = content[0].strip(), content[1].strip()
+
+ formant_apply(qfrency, tmbre)
+ else:
+ qfrency, tmbre = preset_apply(preset, qfrency, tmbre)
+
+ return (
+ {"choices": get_fshift_presets(), "__type__": "update"},
+ {"value": qfrency, "__type__": "update"},
+ {"value": tmbre, "__type__": "update"},
+ )
+
+def preprocess_dataset(trainset_dir, exp_dir, sr, n_p):
+ sr = sr_dict[sr]
+
+ log_dir = os.path.join(now_dir, "logs", exp_dir)
+ log_file = os.path.join(log_dir, "preprocess.log")
+
+ os.makedirs(log_dir, exist_ok=True)
+
+ with open(log_file, "w") as f: pass
+
+ cmd = (
+ f"{config.python_cmd} "
+ "trainset_preprocess_pipeline_print.py "
+ f"{trainset_dir} "
+ f"{RQuote(sr)} "
+ f"{RQuote(n_p)} "
+ f"{log_dir} "
+ f"{RQuote(config.noparallel)}"
+ )
+ print(cmd)
+
+ p = Popen(cmd, shell=True)
+ done = [False]
+
+ threading.Thread(target=if_done, args=(done,p,)).start()
+
+ while not done[0]:
+ with open(log_file, "r") as f:
+ yield f.read()
+ time.sleep(1)
+
+ with open(log_file, "r") as f:
+ log = f.read()
+
+ print(log)
+ yield log
+
+def extract_f0_feature(gpus, n_p, f0method, if_f0, exp_dir, version19, echl):
+ gpus = gpus.split("-")
+ log_dir = f"{now_dir}/logs/{exp_dir}"
+ log_file = f"{log_dir}/extract_f0_feature.log"
+ os.makedirs(log_dir, exist_ok=True)
+ with open(log_file, "w") as f: pass
+
+ if if_f0:
+ cmd = (
+ f"{config.python_cmd} extract_f0_print.py {log_dir} "
+ f"{RQuote(n_p)} {RQuote(f0method)} {RQuote(echl)}"
+ )
+ print(cmd)
+ p = Popen(cmd, shell=True, cwd=now_dir)
+ done = [False]
+ threading.Thread(target=if_done, args=(done, p)).start()
+
+ while not done[0]:
+ with open(log_file, "r") as f:
+ yield f.read()
+ time.sleep(1)
+
+ leng = len(gpus)
+ ps = []
+
+ for idx, n_g in enumerate(gpus):
+ cmd = (
+ f"{config.python_cmd} extract_feature_print.py {RQuote(config.device)} "
+ f"{RQuote(leng)} {RQuote(idx)} {RQuote(n_g)} {log_dir} {RQuote(version19)}"
+ )
+ print(cmd)
+ p = Popen(cmd, shell=True, cwd=now_dir)
+ ps.append(p)
+
+ done = [False]
+ threading.Thread(target=if_done_multi, args=(done, ps)).start()
+
+ while not done[0]:
+ with open(log_file, "r") as f:
+ yield f.read()
+ time.sleep(1)
+
+ with open(log_file, "r") as f:
+ log = f.read()
+
+ print(log)
+ yield log
+
+def change_sr2(sr2, if_f0_3, version19):
+ path_str = "" if version19 == "v1" else "_v2"
+ f0_str = "f0" if if_f0_3 else ""
+ model_paths = {"G": "", "D": ""}
+
+ for model_type in model_paths:
+ file_path = f"pretrained{path_str}/{f0_str}{model_type}{sr2}.pth"
+ if os.access(file_path, os.F_OK):
+ model_paths[model_type] = file_path
+ else:
+ print(f"{file_path} doesn't exist, will not use pretrained model.")
+
+ return (model_paths["G"], model_paths["D"])
+
+
+def change_version19(sr2, if_f0_3, version19):
+ path_str = "" if version19 == "v1" else "_v2"
+ sr2 = "40k" if (sr2 == "32k" and version19 == "v1") else sr2
+ choices_update = {
+ "choices": ["40k", "48k"], "__type__": "update", "value": sr2
+ } if version19 == "v1" else {
+ "choices": ["40k", "48k", "32k"], "__type__": "update", "value": sr2}
+
+ f0_str = "f0" if if_f0_3 else ""
+ model_paths = {"G": "", "D": ""}
+
+ for model_type in model_paths:
+ file_path = f"pretrained{path_str}/{f0_str}{model_type}{sr2}.pth"
+ if os.access(file_path, os.F_OK):
+ model_paths[model_type] = file_path
+ else:
+ print(f"{file_path} doesn't exist, will not use pretrained model.")
+
+ return (model_paths["G"], model_paths["D"], choices_update)
+
+
+def change_f0(if_f0_3, sr2, version19): # f0method8,pretrained_G14,pretrained_D15
+ path_str = "" if version19 == "v1" else "_v2"
+
+ pth_format = "pretrained%s/f0%s%s.pth"
+ model_desc = { "G": "", "D": "" }
+
+ for model_type in model_desc:
+ file_path = pth_format % (path_str, model_type, sr2)
+ if os.access(file_path, os.F_OK):
+ model_desc[model_type] = file_path
+ else:
+ print(file_path, "doesn't exist, will not use pretrained model")
+
+ return (
+ {"visible": if_f0_3, "__type__": "update"},
+ model_desc["G"],
+ model_desc["D"],
+ {"visible": if_f0_3, "__type__": "update"}
+ )
+
+
+global log_interval
+
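+# Approximate the steps per epoch (ceil(#wavs / batch size)) so training logs roughly once per epoch.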
+def set_log_interval(exp_dir, batch_size12):
+ log_interval = 1
+ folder_path = os.path.join(exp_dir, "1_16k_wavs")
+
+ if os.path.isdir(folder_path):
+ wav_files_num = len(glob1(folder_path,"*.wav"))
+
+ if wav_files_num > 0:
+ log_interval = math.ceil(wav_files_num / batch_size12)
+ if log_interval > 1:
+ log_interval += 1
+
+ return log_interval
+
+global PID, PROCESS
+
+def click_train(
+ exp_dir1,
+ sr2,
+ if_f0_3,
+ spk_id5,
+ save_epoch10,
+ total_epoch11,
+ batch_size12,
+ if_save_latest13,
+ pretrained_G14,
+ pretrained_D15,
+ gpus16,
+ if_cache_gpu17,
+ if_save_every_weights18,
+ version19,
+):
+ with open('csvdb/stop.csv', 'w+') as file: file.write("False")
+ log_dir = os.path.join(now_dir, "logs", exp_dir1)
+
+ os.makedirs(log_dir, exist_ok=True)
+
+ gt_wavs_dir = os.path.join(log_dir, "0_gt_wavs")
+ feature_dim = "256" if version19 == "v1" else "768"
+
+ feature_dir = os.path.join(log_dir, f"3_feature{feature_dim}")
+
+ log_interval = set_log_interval(log_dir, batch_size12)
+
+ required_dirs = [gt_wavs_dir, feature_dir]
+
+ if if_f0_3:
+ f0_dir = f"{log_dir}/2a_f0"
+ f0nsf_dir = f"{log_dir}/2b-f0nsf"
+ required_dirs.extend([f0_dir, f0nsf_dir])
+
+ names = set(name.split(".")[0] for directory in required_dirs for name in os.listdir(directory))
+
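+ # Each filelist row is "gt_wav|feature_npy[|f0|f0nsf]|speaker_id"; generate_paths assembles the path fields for one clip.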
+ def generate_paths(name):
+ paths = [gt_wavs_dir, feature_dir]
+ if if_f0_3:
+ paths.extend([f0_dir, f0nsf_dir])
+ return '|'.join([path.replace('\\', '\\\\') + '/' + name + ('.wav.npy' if path in [f0_dir, f0nsf_dir] else '.wav' if path == gt_wavs_dir else '.npy') for path in paths])
+
+ opt = [f"{generate_paths(name)}|{spk_id5}" for name in names]
+ mute_dir = f"{now_dir}/logs/mute"
+
+ for _ in range(2):
+ mute_string = f"{mute_dir}/0_gt_wavs/mute{sr2}.wav|{mute_dir}/3_feature{feature_dim}/mute.npy"
+ if if_f0_3:
+ mute_string += f"|{mute_dir}/2a_f0/mute.wav.npy|{mute_dir}/2b-f0nsf/mute.wav.npy"
+ opt.append(mute_string+f"|{spk_id5}")
+
+ shuffle(opt)
+ with open(f"{log_dir}/filelist.txt", "w") as f:
+ f.write("\n".join(opt))
+
+ print("write filelist done")
+ print("use gpus:", gpus16)
+
+ if pretrained_G14 == "":
+ print("no pretrained Generator")
+ if pretrained_D15 == "":
+ print("no pretrained Discriminator")
+
+ G_train = f"-pg {pretrained_G14}" if pretrained_G14 else ""
+ D_train = f"-pd {pretrained_D15}" if pretrained_D15 else ""
+
+ cmd = (
+ f"{config.python_cmd} train_nsf_sim_cache_sid_load_pretrain.py -e {exp_dir1} -sr {sr2} -f0 {int(if_f0_3)} -bs {batch_size12}"
+ f" -g {gpus16 if gpus16 is not None else ''} -te {total_epoch11} -se {save_epoch10} {G_train} {D_train} -l {int(if_save_latest13)}"
+ f" -c {int(if_cache_gpu17)} -sw {int(if_save_every_weights18)} -v {version19} -li {log_interval}"
+ )
+
+ print(cmd)
+
+ global p
+ p = Popen(cmd, shell=True, cwd=now_dir)
+ global PID
+ PID = p.pid
+
+ p.wait()
+
+ return i18n("Training is done, check train.log"), {"visible": False, "__type__": "update"}, {"visible": True, "__type__": "update"}
+
+def train_index(exp_dir1, version19):
+ exp_dir = os.path.join(now_dir, 'logs', exp_dir1)
+ os.makedirs(exp_dir, exist_ok=True)
+
+ feature_dim = '256' if version19 == "v1" else '768'
+ feature_dir = os.path.join(exp_dir, f"3_feature{feature_dim}")
+
+ if not os.path.exists(feature_dir) or len(os.listdir(feature_dir)) == 0:
+ return "请先进行特征提取!"
+
+ npys = [np.load(os.path.join(feature_dir, name)) for name in sorted(os.listdir(feature_dir))]
+
+ big_npy = np.concatenate(npys, 0)
+ np.random.shuffle(big_npy)
+
+ infos = []
+ if big_npy.shape[0] > 2*10**5:
+ infos.append("Trying doing kmeans %s shape to 10k centers." % big_npy.shape[0])
+ yield "\n".join(infos)
+ try:
+ big_npy = MiniBatchKMeans(n_clusters=10000, verbose=True, batch_size=256 * config.n_cpu,
+ compute_labels=False,init="random").fit(big_npy).cluster_centers_
+ except Exception as e:
+ infos.append(str(e))
+ yield "\n".join(infos)
+
+ np.save(os.path.join(exp_dir, "total_fea.npy"), big_npy)
+
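+ # FAISS IVF heuristic: about 16*sqrt(N) centroids, capped so each centroid gets at least ~39 training vectors on average.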
+ n_ivf = min(int(16 * np.sqrt(big_npy.shape[0])), big_npy.shape[0] // 39)
+ infos.append("%s,%s" % (big_npy.shape, n_ivf))
+ yield "\n".join(infos)
+
+ index = faiss.index_factory(int(feature_dim), f"IVF{n_ivf},Flat")
+
+ index_ivf = faiss.extract_index_ivf(index)
+ index_ivf.nprobe = 1
+
+ index.train(big_npy)
+
+ index_file_base = f"{exp_dir}/trained_IVF{n_ivf}_Flat_nprobe_{index_ivf.nprobe}_{exp_dir1}_{version19}.index"
+ faiss.write_index(index, index_file_base)
+
+ infos.append("adding")
+ yield "\n".join(infos)
+
+ batch_size_add = 8192
+ for i in range(0, big_npy.shape[0], batch_size_add):
+ index.add(big_npy[i:i + batch_size_add])
+
+ index_file_base = f"{exp_dir}/added_IVF{n_ivf}_Flat_nprobe_{index_ivf.nprobe}_{exp_dir1}_{version19}.index"
+ faiss.write_index(index, index_file_base)
+
+ infos.append(f"Successful Index Construction,added_IVF{n_ivf}_Flat_nprobe_{index_ivf.nprobe}_{exp_dir1}_{version19}.index")
+ yield "\n".join(infos)
+
+def change_info_(ckpt_path):
+ train_log_path = os.path.join(os.path.dirname(ckpt_path), "train.log")
+
+ if not os.path.exists(train_log_path):
+ return {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"}
+
+ try:
+ with open(train_log_path, "r") as f:
+ info_line = next(f).strip()
+ info = eval(info_line.split("\t")[-1])
+
+ sr, f0 = info.get("sample_rate"), info.get("if_f0")
+ version = "v2" if info.get("version") == "v2" else "v1"
+
+ return sr, str(f0), version
+
+ except Exception as e:
+ print(f"Exception occurred: {str(e)}, Traceback: {traceback.format_exc()}")
+ return {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"}
+
+def export_onnx(model_path, exported_path):
+ device = torch.device("cpu")
+ checkpoint = torch.load(model_path, map_location=device)
+ vec_channels = 256 if checkpoint.get("version", "v1") == "v1" else 768
+
+ test_inputs = {
+ "phone": torch.rand(1, 200, vec_channels),
+ "phone_lengths": torch.LongTensor([200]),
+ "pitch": torch.randint(5, 255, (1, 200)),
+ "pitchf": torch.rand(1, 200),
+ "ds": torch.zeros(1).long(),
+ "rnd": torch.rand(1, 192, 200)
+ }
+
+ checkpoint["config"][-3] = checkpoint["weight"]["emb_g.weight"].shape[0]
+ net_g = SynthesizerTrnMsNSFsidM(*checkpoint["config"], is_half=False, version=checkpoint.get("version", "v1"))
+
+ net_g.load_state_dict(checkpoint["weight"], strict=False)
+ net_g = net_g.to(device)
+
+ dynamic_axes = {"phone": [1], "pitch": [1], "pitchf": [1], "rnd": [2]}
+
+ torch.onnx.export(
+ net_g,
+ tuple(value.to(device) for value in test_inputs.values()),
+ exported_path,
+ dynamic_axes=dynamic_axes,
+ do_constant_folding=False,
+ opset_version=13,
+ verbose=False,
+ input_names=list(test_inputs.keys()),
+ output_names=["audio"],
+ )
+ return "Finished"
+
+
+import scipy.io.wavfile as wavfile
+
+cli_current_page = "HOME"
+
+def cli_split_command(com):
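+    # Split a command line on whitespace while keeping double-quoted
+    # arguments intact, e.g. 'a "b c" d' -> ['a', 'b c', 'd'].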
+ exp = r'(?:(?<=\s)|^)"(.*?)"(?=\s|$)|(\S+)'
+ split_array = re.findall(exp, com)
+ split_array = [group[0] if group[0] else group[1] for group in split_array]
+ return split_array
+
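+# Drain a status generator for its side effects; all() consumes every
+# yielded item (stopping early only if one is None).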
+execute_generator_function = lambda genObject: all(x is not None for x in genObject)
+
+def cli_infer(com):
+    (model_name, source_audio_path, output_file_name, feature_index_path,
+     speaker_id, transposition, f0_method, crepe_hop_length,
+     harvest_median_filter, resample, mix, feature_ratio, protection_amnt,
+     _, f0_min, f0_max, do_formant) = cli_split_command(com)[:17]
+
+ speaker_id, crepe_hop_length, harvest_median_filter, resample = map(int, [speaker_id, crepe_hop_length, harvest_median_filter, resample])
+ transposition, mix, feature_ratio, protection_amnt = map(float, [transposition, mix, feature_ratio, protection_amnt])
+
+ if do_formant.lower() == 'false':
+ Quefrency = 1.0
+ Timbre = 1.0
+ else:
+ Quefrency, Timbre = map(float, cli_split_command(com)[17:19])
+
+ rvc_globals.DoFormant = do_formant.lower() == 'true'
+ rvc_globals.Quefrency = Quefrency
+ rvc_globals.Timbre = Timbre
+
+ output_message = 'Infer-CLI:'
+ output_path = f'audio-others/{output_file_name}'
+
+ print(f"{output_message} Starting the inference...")
+ vc_data = get_vc(model_name, protection_amnt, protection_amnt)
+ print(vc_data)
+
+ print(f"{output_message} Performing inference...")
+ conversion_data = vc_single(
+ speaker_id,
+ source_audio_path,
+ source_audio_path,
+ transposition,
+ None, # f0 file support not implemented
+ f0_method,
+ feature_index_path,
+ feature_index_path,
+ feature_ratio,
+ harvest_median_filter,
+ resample,
+ mix,
+ protection_amnt,
+ crepe_hop_length,
+ f0_min=f0_min,
+ note_min=None,
+ f0_max=f0_max,
+ note_max=None
+ )
+
+ if "Success." in conversion_data[0]:
+ print(f"{output_message} Inference succeeded. Writing to {output_path}...")
+ wavfile.write(output_path, conversion_data[1][0], conversion_data[1][1])
+ print(f"{output_message} Finished! Saved output to {output_path}")
+ else:
+ print(f"{output_message} Inference failed. Here's the traceback: {conversion_data[0]}")
+
+def cli_pre_process(com):
+    model_name, trainset_dir, sample_rate, num_processes = cli_split_command(com)[:4]
+    print("Pre-process: Starting...")
+    # preprocess_dataset expects (trainset_dir, exp_dir, sr, n_p), matching
+    # the "Process data" button wiring in the GUI section below.
+    execute_generator_function(
+        preprocess_dataset(trainset_dir, model_name, sample_rate, int(num_processes))
+    )
+    print("Pre-process: Finished")
+
+def cli_extract_feature(com):
+ model_name, gpus, num_processes, has_pitch_guidance, f0_method, crepe_hop_length, version = cli_split_command(com)
+
+ num_processes = int(num_processes)
+ has_pitch_guidance = bool(int(has_pitch_guidance))
+ crepe_hop_length = int(crepe_hop_length)
+
+    print(
+        f"Extract Feature Has Pitch: {has_pitch_guidance}\n"
+        f"Extract Feature Version: {version}\n"
+        "Feature Extraction: Starting..."
+    )
+ generator = extract_f0_feature(
+ gpus,
+ num_processes,
+ f0_method,
+ has_pitch_guidance,
+ model_name,
+ version,
+ crepe_hop_length
+ )
+ execute_generator_function(generator)
+ print("Feature Extraction: Finished")
+
+def cli_train(com):
+    com = cli_split_command(com)
+    model_name, sample_rate = com[0], com[1]
+    # args 3-12: pitch guidance, speaker id, save frequency, total epochs,
+    # batch size, gpu slot, save-latest, cache-to-gpu, save-small-weights
+    has_pitch_guidance = bool(int(com[2]))
+    speaker_id, save_epoch, total_epoch, batch_size = map(int, com[3:7])
+    gpu_slot = com[7]
+    save_latest, cache_gpu, save_weights = (bool(int(i)) for i in com[8:11])
+    version = com[11]
+
+    pretrained_base = "pretrained/" if version == "v1" else "pretrained_v2/"
+    g_pretrained_path = f"{pretrained_base}f0G{sample_rate}.pth"
+    d_pretrained_path = f"{pretrained_base}f0D{sample_rate}.pth"
+
+    print("Train-CLI: Training...")
+    # Argument order mirrors the click_train() wiring used by the GUI.
+    click_train(model_name, sample_rate, has_pitch_guidance, speaker_id,
+                save_epoch, total_epoch, batch_size, save_latest,
+                g_pretrained_path, d_pretrained_path, gpu_slot, cache_gpu,
+                save_weights, version)
+
+def cli_train_feature(com):
+ output_message = 'Train Feature Index-CLI'
+ print(f"{output_message}: Training... Please wait")
+ execute_generator_function(train_index(*cli_split_command(com)))
+ print(f"{output_message}: Done!")
+
+def cli_extract_model(com):
+ extract_small_model_process = extract_small_model(*cli_split_command(com))
+ print(
+ "Extract Small Model: Success!"
+ if extract_small_model_process == "Success."
+ else f"{extract_small_model_process}\nExtract Small Model: Failed!"
+ )
+
+def preset_apply(preset, qfer, tmbr):
+ if preset:
+ try:
+ with open(preset, 'r') as p:
+ content = p.read().splitlines()
+ qfer, tmbr = content[0], content[1]
+ formant_apply(qfer, tmbr)
+ except IndexError:
+ print("Error: File does not have enough lines to read 'qfer' and 'tmbr'")
+ except FileNotFoundError:
+ print("Error: File does not exist")
+ except Exception as e:
+ print("An unexpected error occurred", e)
+
+ return ({"value": qfer, "__type__": "update"}, {"value": tmbr, "__type__": "update"})
+
+def print_page_details():
+ page_description = {
+
+ 'HOME':
+ "\n go home : Takes you back to home with a navigation list."
+ "\n go infer : Takes you to inference command execution."
+ "\n go pre-process : Takes you to training step.1) pre-process command execution."
+ "\n go extract-feature : Takes you to training step.2) extract-feature command execution."
+ "\n go train : Takes you to training step.3) being or continue training command execution."
+ "\n go train-feature : Takes you to the train feature index command execution."
+ "\n go extract-model : Takes you to the extract small model command execution."
+
+ , 'INFER':
+ "\n arg 1) model name with .pth in ./weights: mi-test.pth"
+ "\n arg 2) source audio path: myFolder\\MySource.wav"
+ "\n arg 3) output file name to be placed in './audio-others': MyTest.wav"
+ "\n arg 4) feature index file path: logs/mi-test/added_IVF3042_Flat_nprobe_1.index"
+ "\n arg 5) speaker id: 0"
+ "\n arg 6) transposition: 0"
+ "\n arg 7) f0 method: harvest (pm, harvest, crepe, crepe-tiny, hybrid[x,x,x,x], mangio-crepe, mangio-crepe-tiny, rmvpe)"
+ "\n arg 8) crepe hop length: 160"
+ "\n arg 9) harvest median filter radius: 3 (0-7)"
+ "\n arg 10) post resample rate: 0"
+ "\n arg 11) mix volume envelope: 1"
+ "\n arg 12) feature index ratio: 0.78 (0-1)"
+ "\n arg 13) Voiceless Consonant Protection (Less Artifact): 0.33 (Smaller number = more protection. 0.50 means Dont Use.)"
+ "\n arg 14) Whether to formant shift the inference audio before conversion: False (if set to false, you can ignore setting the quefrency and timbre values for formanting)"
+ "\n arg 15)* Quefrency for formanting: 8.0 (no need to set if arg14 is False/false)"
+ "\n arg 16)* Timbre for formanting: 1.2 (no need to set if arg14 is False/false) \n"
+ "\nExample: mi-test.pth saudio/Sidney.wav myTest.wav logs/mi-test/added_index.index 0 -2 harvest 160 3 0 1 0.95 0.33 0.45 True 8.0 1.2"
+
+ , 'PRE-PROCESS':
+ "\n arg 1) Model folder name in ./logs: mi-test"
+ "\n arg 2) Trainset directory: mydataset (or) E:\\my-data-set"
+ "\n arg 3) Sample rate: 40k (32k, 40k, 48k)"
+ "\n arg 4) Number of CPU threads to use: 8 \n"
+ "\nExample: mi-test mydataset 40k 24"
+
+ , 'EXTRACT-FEATURE':
+ "\n arg 1) Model folder name in ./logs: mi-test"
+ "\n arg 2) Gpu card slot: 0 (0-1-2 if using 3 GPUs)"
+ "\n arg 3) Number of CPU threads to use: 8"
+ "\n arg 4) Has Pitch Guidance?: 1 (0 for no, 1 for yes)"
+ "\n arg 5) f0 Method: harvest (pm, harvest, dio, crepe)"
+ "\n arg 6) Crepe hop length: 128"
+ "\n arg 7) Version for pre-trained models: v2 (use either v1 or v2)\n"
+ "\nExample: mi-test 0 24 1 harvest 128 v2"
+
+ , 'TRAIN':
+ "\n arg 1) Model folder name in ./logs: mi-test"
+ "\n arg 2) Sample rate: 40k (32k, 40k, 48k)"
+ "\n arg 3) Has Pitch Guidance?: 1 (0 for no, 1 for yes)"
+ "\n arg 4) speaker id: 0"
+ "\n arg 5) Save epoch iteration: 50"
+ "\n arg 6) Total epochs: 10000"
+ "\n arg 7) Batch size: 8"
+ "\n arg 8) Gpu card slot: 0 (0-1-2 if using 3 GPUs)"
+ "\n arg 9) Save only the latest checkpoint: 0 (0 for no, 1 for yes)"
+ "\n arg 10) Whether to cache training set to vram: 0 (0 for no, 1 for yes)"
+ "\n arg 11) Save extracted small model every generation?: 0 (0 for no, 1 for yes)"
+ "\n arg 12) Model architecture version: v2 (use either v1 or v2)\n"
+ "\nExample: mi-test 40k 1 0 50 10000 8 0 0 0 0 v2"
+
+ , 'TRAIN-FEATURE':
+ "\n arg 1) Model folder name in ./logs: mi-test"
+ "\n arg 2) Model architecture version: v2 (use either v1 or v2)\n"
+ "\nExample: mi-test v2"
+
+ , 'EXTRACT-MODEL':
+ "\n arg 1) Model Path: logs/mi-test/G_168000.pth"
+ "\n arg 2) Model save name: MyModel"
+ "\n arg 3) Sample rate: 40k (32k, 40k, 48k)"
+ "\n arg 4) Has Pitch Guidance?: 1 (0 for no, 1 for yes)"
+ '\n arg 5) Model information: "My Model"'
+ "\n arg 6) Model architecture version: v2 (use either v1 or v2)\n"
+ '\nExample: logs/mi-test/G_168000.pth MyModel 40k 1 "Created by Cole Mangio" v2'
+
+ }
+
+ print(page_description.get(cli_current_page, 'Invalid page'))
+
+
+def change_page(page):
+ global cli_current_page
+ cli_current_page = page
+ return 0
+def execute_command(com):
+ command_to_page = {
+ "go home": "HOME",
+ "go infer": "INFER",
+ "go pre-process": "PRE-PROCESS",
+ "go extract-feature": "EXTRACT-FEATURE",
+ "go train": "TRAIN",
+ "go train-feature": "TRAIN-FEATURE",
+ "go extract-model": "EXTRACT-MODEL",
+ }
+
+ page_to_function = {
+ "INFER": cli_infer,
+ "PRE-PROCESS": cli_pre_process,
+ "EXTRACT-FEATURE": cli_extract_feature,
+ "TRAIN": cli_train,
+ "TRAIN-FEATURE": cli_train_feature,
+ "EXTRACT-MODEL": cli_extract_model,
+ }
+
+ if com in command_to_page:
+ return change_page(command_to_page[com])
+
+ if com[:3] == "go ":
+ print(f"page '{com[3:]}' does not exist!")
+ return 0
+
+ if cli_current_page in page_to_function:
+ page_to_function[cli_current_page](com)
+
+def cli_navigation_loop():
+ while True:
+ print(f"\nYou are currently in '{cli_current_page}':")
+ print_page_details()
+ print(f"{cli_current_page}: ", end="")
+ try: execute_command(input())
+ except Exception as e: print(f"An error occurred: {traceback.format_exc()}")
+
+if config.is_cli:
+ print(
+ "\n\nMangio-RVC-Fork v2 CLI App!\n"
+ "Welcome to the CLI version of RVC. Please read the documentation on https://github.com/Mangio621/Mangio-RVC-Fork (README.MD) to understand how to use this app.\n"
+ )
+ cli_navigation_loop()
+
+'''
+def get_presets():
+ data = None
+ with open('../inference-presets.json', 'r') as file:
+ data = json.load(file)
+ preset_names = []
+ for preset in data['presets']:
+ preset_names.append(preset['name'])
+
+ return preset_names
+'''
+
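+# Toggle the four manual pitch-bound controls: rmvpe ignores manual bounds,
+# and rvc_globals.NotesOrHertz selects note-name textboxes over Hz sliders.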
+def switch_pitch_controls(f0method0):
+ is_visible = f0method0 != 'rmvpe'
+
+ if rvc_globals.NotesOrHertz:
+ return (
+ {"visible": False, "__type__": "update"},
+ {"visible": is_visible, "__type__": "update"},
+ {"visible": False, "__type__": "update"},
+ {"visible": is_visible, "__type__": "update"}
+ )
+ else:
+ return (
+ {"visible": is_visible, "__type__": "update"},
+ {"visible": False, "__type__": "update"},
+ {"visible": is_visible, "__type__": "update"},
+ {"visible": False, "__type__": "update"}
+ )
+
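+# Find the best-matching .index file for the selected voice; the same path
+# is returned twice so both the single and batch index dropdowns update.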
+def match_index(sid0: str) -> tuple:
+    sid0strip = re.sub(r'\.(pth|onnx)$', '', sid0)
+ sid0name = os.path.split(sid0strip)[-1] # Extract only the name, not the directory
+
+ # Check if the sid0strip has the specific ending format _eXXX_sXXX
+ if re.match(r'.+_e\d+_s\d+$', sid0name):
+ base_model_name = sid0name.rsplit('_', 2)[0]
+ else:
+ base_model_name = sid0name
+
+ sid_directory = os.path.join(index_root, base_model_name)
+ directories_to_search = [sid_directory] if os.path.exists(sid_directory) else []
+ directories_to_search.append(index_root)
+
+ matching_index_files = []
+
+ for directory in directories_to_search:
+ for filename in os.listdir(directory):
+ if filename.endswith('.index') and 'trained' not in filename:
+ # Condition to match the name
+ name_match = any(name.lower() in filename.lower() for name in [sid0name, base_model_name])
+
+ # If in the specific directory, it's automatically a match
+ folder_match = directory == sid_directory
+
+ if name_match or folder_match:
+ index_path = os.path.join(directory, filename)
+ if index_path in indexes_list:
+ matching_index_files.append((index_path, os.path.getsize(index_path), ' ' not in filename))
+
+ if matching_index_files:
+ # Sort by favoring files without spaces and by size (largest size first)
+ matching_index_files.sort(key=lambda x: (-x[2], -x[1]))
+ best_match_index_path = matching_index_files[0][0]
+ return best_match_index_path, best_match_index_path
+
+ return '', ''
+def stoptraining(mim):
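+    # A truthy mim means "stop": flag csvdb/stop.csv, SIGTERM the training
+    # process, then swap the visibility of the Train/Stop buttons.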
+ if mim:
+ try:
+ with open('csvdb/stop.csv', 'w+') as file: file.write("True")
+ os.kill(PID, SIGTERM)
+ except Exception as e:
+ print(f"Couldn't click due to {e}")
+ return (
+ {"visible": True , "__type__": "update"},
+ {"visible": False, "__type__": "update"})
+ return (
+ {"visible": False, "__type__": "update"},
+ {"visible": True , "__type__": "update"})
+
+
+weights_dir = 'weights/'
+
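+# Convert a note name to its frequency in 12-tone equal temperament with
+# A4 = 440 Hz, e.g. note_to_hz("A4") -> 440.0, note_to_hz("C5") -> ~523.25.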
+def note_to_hz(note_name):
+ SEMITONES = {'C': -9, 'C#': -8, 'D': -7, 'D#': -6, 'E': -5, 'F': -4, 'F#': -3, 'G': -2, 'G#': -1, 'A': 0, 'A#': 1, 'B': 2}
+ pitch_class, octave = note_name[:-1], int(note_name[-1])
+ semitone = SEMITONES[pitch_class]
+ note_number = 12 * (octave - 4) + semitone
+ frequency = 440.0 * (2.0 ** (1.0/12)) ** note_number
+ return frequency
+
+def save_to_wav(record_button):
+    if record_button is None:
+        return
+    new_name = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") + '.wav'
+    new_path = os.path.join('./audios', new_name)
+    shutil.move(record_button, new_path)
+    return new_name
+
+def save_to_wav2(dropbox):
+ file_path = dropbox.name
+ target_path = os.path.join('audios', os.path.basename(file_path))
+
+ if os.path.exists(target_path):
+ os.remove(target_path)
+ print('Replacing old dropdown file...')
+
+ shutil.move(file_path, target_path)
+ return target_path
+
+def change_choices2():
+ return ""
+
+def GradioSetup(UTheme=gr.themes.Soft()):
+
+ default_weight = names[0] if names else ''
+
+ with gr.Blocks(theme='JohnSmith9982/small_and_pretty', title="Applio") as app:
+ gr.HTML(" 🍏 Applio (Mangio-RVC-Fork)
")
+ with gr.Tabs():
+ with gr.TabItem(i18n("Model Inference")):
+ with gr.Row():
+ sid0 = gr.Dropdown(label=i18n("Inferencing voice:"), choices=sorted(names), value=default_weight)
+ refresh_button = gr.Button(i18n("Refresh voice list, index path and audio files"), variant="primary")
+ clean_button = gr.Button(i18n("Unload voice to save GPU memory:"), variant="primary")
+ clean_button.click(fn=lambda: ({"value": "", "__type__": "update"}), inputs=[], outputs=[sid0])
+
+
+ with gr.TabItem(i18n("Single")):
+ with gr.Row():
+ spk_item = gr.Slider(
+ minimum=0,
+ maximum=2333,
+ step=1,
+ label=i18n("Select Speaker/Singer ID:"),
+ value=0,
+ visible=False,
+ interactive=True,
+ )
+
+
+ with gr.Group():
+ with gr.Row():
+ with gr.Column(): # First column for audio-related inputs
+ dropbox = gr.File(label=i18n("Enter the path of the training folder:"))
+ record_button=gr.Audio(source="microphone", label=i18n("Or record an audio."), type="filepath")
+ input_audio0 = gr.Textbox(
+ label=i18n("Manual path to the audio file to be processed"),
+ value=os.path.join(now_dir, "audios", "someguy.mp3"),
+ visible=False
+ )
+ input_audio1 = gr.Dropdown(
+ label=i18n("Auto detect audio path and select from the dropdown:"),
+ choices=sorted(audio_paths),
+ value='',
+ interactive=True,
+ )
+
+ input_audio1.select(fn=lambda:'',inputs=[],outputs=[input_audio0])
+ input_audio0.input(fn=lambda:'',inputs=[],outputs=[input_audio1])
+
+ dropbox.upload(fn=save_to_wav2, inputs=[dropbox], outputs=[input_audio0])
+ dropbox.upload(fn=change_choices2, inputs=[], outputs=[input_audio1])
+ record_button.change(fn=save_to_wav, inputs=[record_button], outputs=[input_audio0])
+ record_button.change(fn=change_choices2, inputs=[], outputs=[input_audio1])
+
+ best_match_index_path1, _ = match_index(sid0.value) # Get initial index from default sid0 (first voice model in list)
+
+ with gr.Column(): # Second column for pitch shift and other options
+ file_index2 = gr.Dropdown(
+ label=i18n("Auto-detect index path and select from the dropdown"),
+ choices=get_indexes(),
+ value=best_match_index_path1,
+ interactive=True,
+ allow_custom_value=True,
+ )
+ index_rate1 = gr.Slider(
+ minimum=0,
+ maximum=1,
+ label=i18n("Search feature ratio:"),
+ value=0.75,
+ interactive=True,
+ )
+ refresh_button.click(
+ fn=change_choices, inputs=[], outputs=[sid0, file_index2, input_audio1]
+ )
+ with gr.Column():
+ vc_transform0 = gr.Number(
+ label=i18n("Transpose (integer, number of semitones, raise by an octave: 12, lower by an octave: -12):"), value=0
+ )
+
+ # Create a checkbox for advanced settings
+ advanced_settings_checkbox = gr.Checkbox(
+ value=False,
+ label=i18n("Advanced Settings"),
+ interactive=True,
+ )
+
+ # Advanced settings container
+ with gr.Column(visible=False) as advanced_settings: # Initially hidden
+ with gr.Row(label = i18n("Advanced Settings"), open = False):
+ with gr.Column():
+ f0method0 = gr.Radio(
+ label=i18n(
+ "Select the pitch extraction algorithm:"
+ ),
+ choices=["pm", "harvest", "dio", "crepe", "crepe-tiny", "mangio-crepe", "mangio-crepe-tiny", "rmvpe", "rmvpe+"],
+ value="rmvpe+",
+ interactive=True,
+ )
+ crepe_hop_length = gr.Slider(
+ minimum=1,
+ maximum=512,
+ step=1,
+ label=i18n("Mangio-Crepe Hop Length (Only applies to mangio-crepe): Hop length refers to the time it takes for the speaker to jump to a dramatic pitch. Lower hop lengths take more time to infer but are more pitch accurate."),
+ value=120,
+ interactive=True,
+ visible=False,
+ )
+ filter_radius0 = gr.Slider(
+ minimum=0,
+ maximum=7,
+ label=i18n("If >=3: apply median filtering to the harvested pitch results. The value represents the filter radius and can reduce breathiness."),
+ value=3,
+ step=1,
+ interactive=True,
+ )
+
+ minpitch_slider = gr.Slider(
+ label = i18n("Min pitch"),
+ info = i18n("Specify minimal pitch for inference [HZ]"),
+ step = 0.1,
+ minimum = 1,
+ scale = 0,
+ value = 50,
+ maximum = 16000,
+ interactive = True,
+ visible = (not rvc_globals.NotesOrHertz) and (f0method0.value != 'rmvpe'),
+ )
+ minpitch_txtbox = gr.Textbox(
+ label = i18n("Min pitch"),
+ info = i18n("Specify minimal pitch for inference [NOTE][OCTAVE]"),
+ placeholder = "C5",
+ visible = (rvc_globals.NotesOrHertz) and (f0method0.value != 'rmvpe'),
+ interactive = True,
+ )
+
+ maxpitch_slider = gr.Slider(
+ label = i18n("Max pitch"),
+ info = i18n("Specify max pitch for inference [HZ]"),
+ step = 0.1,
+ minimum = 1,
+ scale = 0,
+ value = 1100,
+ maximum = 16000,
+ interactive = True,
+ visible = (not rvc_globals.NotesOrHertz) and (f0method0.value != 'rmvpe'),
+ )
+ maxpitch_txtbox = gr.Textbox(
+ label = i18n("Max pitch"),
+ info = i18n("Specify max pitch for inference [NOTE][OCTAVE]"),
+ placeholder = "C6",
+ visible = (rvc_globals.NotesOrHertz) and (f0method0.value != 'rmvpe'),
+ interactive = True,
+ )
+
+ with gr.Column():
+ file_index1 = gr.Textbox(
+ label=i18n("Feature search database file path"),
+ value="",
+ interactive=True,
+ )
+
+ with gr.Accordion(label = i18n("Custom f0 [Root pitch] File"), open = False):
+ f0_file = gr.File(label=i18n("F0 curve file (optional). One pitch per line. Replaces the default F0 and pitch modulation:"))
+
+ f0method0.change(
+ fn=lambda radio: (
+ {
+ "visible": radio in ['mangio-crepe', 'mangio-crepe-tiny'],
+ "__type__": "update"
+ }
+ ),
+ inputs=[f0method0],
+ outputs=[crepe_hop_length]
+ )
+
+ f0method0.change(
+ fn=switch_pitch_controls,
+ inputs=[f0method0],
+ outputs=[minpitch_slider, minpitch_txtbox,
+ maxpitch_slider, maxpitch_txtbox]
+ )
+
+ with gr.Column():
+ resample_sr0 = gr.Slider(
+ minimum=0,
+ maximum=48000,
+ label=i18n("Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling:"),
+ value=0,
+ step=1,
+ interactive=True,
+ )
+ rms_mix_rate0 = gr.Slider(
+ minimum=0,
+ maximum=1,
+ label=i18n("Use the volume envelope of the input to replace or mix with the volume envelope of the output. The closer the ratio is to 1, the more the output envelope is used:"),
+ value=0.25,
+ interactive=True,
+ )
+ protect0 = gr.Slider(
+ minimum=0,
+ maximum=0.5,
+ label=i18n(
+ "Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy:"
+ ),
+ value=0.33,
+ step=0.01,
+ interactive=True,
+ )
+ formanting = gr.Checkbox(
+ value=bool(DoFormant),
+ label=i18n("Formant shift inference audio"),
+ info=i18n("Used for male to female and vice-versa conversions"),
+ interactive=True,
+ visible=True,
+ )
+
+ formant_preset = gr.Dropdown(
+ value='',
+ choices=get_fshift_presets(),
+ label=i18n("Browse presets for formanting"),
+ info=i18n("Presets are located in formantshiftcfg/ folder"),
+ visible=bool(DoFormant),
+ )
+
+ formant_refresh_button = gr.Button(
+ value='\U0001f504',
+ visible=bool(DoFormant),
+ variant='primary',
+ )
+
+ qfrency = gr.Slider(
+ value=Quefrency,
+ info=i18n("Default value is 1.0"),
+ label=i18n("Quefrency for formant shifting"),
+ minimum=0.0,
+ maximum=16.0,
+ step=0.1,
+ visible=bool(DoFormant),
+ interactive=True,
+ )
+
+ tmbre = gr.Slider(
+ value=Timbre,
+ info=i18n("Default value is 1.0"),
+ label=i18n("Timbre for formant shifting"),
+ minimum=0.0,
+ maximum=16.0,
+ step=0.1,
+ visible=bool(DoFormant),
+ interactive=True,
+ )
+ frmntbut = gr.Button(i18n("Apply"), variant="primary", visible=bool(DoFormant))
+
+ formant_preset.change(fn=preset_apply, inputs=[formant_preset, qfrency, tmbre], outputs=[qfrency, tmbre])
+
+ formanting.change(fn=formant_enabled,inputs=[formanting,qfrency,tmbre],outputs=[formanting,qfrency,tmbre,frmntbut,formant_preset,formant_refresh_button])
+ frmntbut.click(fn=formant_apply,inputs=[qfrency, tmbre], outputs=[qfrency, tmbre])
+ formant_refresh_button.click(fn=update_fshift_presets,inputs=[formant_preset, qfrency, tmbre],outputs=[formant_preset, qfrency, tmbre])
+
+ # Function to toggle advanced settings
+ def toggle_advanced_settings(checkbox):
+ return {"visible": checkbox, "__type__": "update"}
+
+ # Attach the change event
+ advanced_settings_checkbox.change(
+ fn=toggle_advanced_settings,
+ inputs=[advanced_settings_checkbox],
+ outputs=[advanced_settings]
+ )
+
+
+ but0 = gr.Button(i18n("Convert"), variant="primary").style(full_width=True)
+
+ with gr.Row(): # Defines output info + output audio download after conversion
+ vc_output1 = gr.Textbox(label=i18n("Output information"))
+ vc_output2 = gr.Audio(label=i18n("Export audio (click on the three dots in the lower right corner to download)"))
+
+ with gr.Group(): # I think this defines the big convert button
+ with gr.Row():
+ but0.click(
+ vc_single,
+ [
+ spk_item,
+ input_audio0,
+ input_audio1,
+ vc_transform0,
+ f0_file,
+ f0method0,
+ file_index1,
+ file_index2,
+ index_rate1,
+ filter_radius0,
+ resample_sr0,
+ rms_mix_rate0,
+ protect0,
+ crepe_hop_length,
+ minpitch_slider, minpitch_txtbox,
+ maxpitch_slider, maxpitch_txtbox,
+ ],
+ [vc_output1, vc_output2],
+ )
+
+
+ with gr.TabItem(i18n("Batch")): # Dont Change
+ with gr.Group(): # Markdown explanation of batch inference
+ gr.Markdown(
+ value=i18n("Batch conversion. Enter the folder containing the audio files to be converted or upload multiple audio files. The converted audio will be output in the specified folder (default: 'opt').")
+ )
+ with gr.Row():
+ with gr.Column():
+ vc_transform1 = gr.Number(
+ label=i18n("Transpose (integer, number of semitones, raise by an octave: 12, lower by an octave: -12):"), value=0
+ )
+ opt_input = gr.Textbox(label=i18n("Specify output folder:"), value="opt")
+ with gr.Column():
+ file_index4 = gr.Dropdown(
+ label=i18n("Auto-detect index path and select from the dropdown"),
+ choices=get_indexes(),
+ value=best_match_index_path1,
+ interactive=True,
+ )
+ sid0.select(fn=match_index, inputs=[sid0], outputs=[file_index2, file_index4])
+
+ refresh_button.click(
+ fn=lambda: change_choices()[1],
+ inputs=[],
+ outputs=file_index4,
+ )
+ index_rate2 = gr.Slider(
+ minimum=0,
+ maximum=1,
+ label=i18n("Search feature ratio:"),
+ value=0.75,
+ interactive=True,
+ )
+ with gr.Row():
+ dir_input = gr.Textbox(
+ label=i18n("Enter the path of the audio folder to be processed (copy it from the address bar of the file manager):"),
+ value=os.path.join(now_dir, "audios"),
+ )
+ inputs = gr.File(
+ file_count="multiple", label=i18n("You can also input audio files in batches. Choose one of the two options. Priority is given to reading from the folder.")
+ )
+
+ with gr.Row():
+ with gr.Column():
+ # Create a checkbox for advanced batch settings
+ advanced_settings_batch_checkbox = gr.Checkbox(
+ value=False,
+ label=i18n("Advanced Settings"),
+ interactive=True,
+ )
+
+ # Advanced batch settings container
+ with gr.Row(visible=False) as advanced_settings_batch: # Initially hidden
+ with gr.Row(label = i18n("Advanced Settings"), open = False):
+ with gr.Column():
+ file_index3 = gr.Textbox(
+ label=i18n("Feature search database file path"),
+ value="",
+ interactive=True,
+ )
+
+ f0method1 = gr.Radio(
+ label=i18n(
+ "Select the pitch extraction algorithm:"
+ ),
+ choices=["pm", "harvest", "crepe", "rmvpe"],
+ value="rmvpe",
+ interactive=True,
+ )
+ filter_radius1 = gr.Slider(
+ minimum=0,
+ maximum=7,
+ label=i18n("If >=3: apply median filtering to the harvested pitch results. The value represents the filter radius and can reduce breathiness."),
+ value=3,
+ step=1,
+ interactive=True,
+ )
+
+ with gr.Row():
+ format1 = gr.Radio(
+ label=i18n("Export file format"),
+ choices=["wav", "flac", "mp3", "m4a"],
+ value="wav",
+ interactive=True,
+ )
+
+
+ with gr.Column():
+ resample_sr1 = gr.Slider(
+ minimum=0,
+ maximum=48000,
+ label=i18n("Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling:"),
+ value=0,
+ step=1,
+ interactive=True,
+ )
+ rms_mix_rate1 = gr.Slider(
+ minimum=0,
+ maximum=1,
+ label=i18n("Use the volume envelope of the input to replace or mix with the volume envelope of the output. The closer the ratio is to 1, the more the output envelope is used:"),
+ value=1,
+ interactive=True,
+ )
+ protect1 = gr.Slider(
+ minimum=0,
+ maximum=0.5,
+ label=i18n(
+ "Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy:"
+ ),
+ value=0.33,
+ step=0.01,
+ interactive=True,
+ )
+ vc_output3 = gr.Textbox(label=i18n("Output information"))
+ but1 = gr.Button(i18n("Convert"), variant="primary")
+ but1.click(
+ vc_multi,
+ [
+ spk_item,
+ dir_input,
+ opt_input,
+ inputs,
+ vc_transform1,
+ f0method1,
+ file_index3,
+ file_index4,
+ index_rate2,
+ filter_radius1,
+ resample_sr1,
+ rms_mix_rate1,
+ protect1,
+ format1,
+ crepe_hop_length,
+ minpitch_slider if (not rvc_globals.NotesOrHertz) else minpitch_txtbox,
+ maxpitch_slider if (not rvc_globals.NotesOrHertz) else maxpitch_txtbox,
+ ],
+ [vc_output3],
+ )
+
+ sid0.change(
+ fn=get_vc,
+ inputs=[sid0, protect0, protect1],
+ outputs=[spk_item, protect0, protect1],
+ )
+
+ spk_item, protect0, protect1 = get_vc(sid0.value, protect0, protect1) # Set VC parameters for the preloaded model
+
+ # Function to toggle advanced settings
+ def toggle_advanced_settings_batch(checkbox):
+ return {"visible": checkbox, "__type__": "update"}
+
+ # Attach the change event
+ advanced_settings_batch_checkbox.change(
+ fn=toggle_advanced_settings_batch,
+ inputs=[advanced_settings_batch_checkbox],
+ outputs=[advanced_settings_batch]
+ )
+
+
+ with gr.TabItem(i18n("Train")):
+ gr.Markdown(
+ value=i18n(
+ "Step 1: Processing data"
+ )
+ )
+ with gr.Row():
+ exp_dir1 = gr.Textbox(label=i18n("Enter the model name:"), value=i18n("model_name"))
+ sr2 = gr.Radio(
+ label=i18n("Target sample rate:"),
+ choices=["40k", "48k", "32k"],
+ value="40k",
+ interactive=True,
+ )
+ if_f0_3 = gr.Checkbox(
+ label=i18n("Whether the model has pitch guidance."),
+ value=True,
+ interactive=True,
+ )
+ version19 = gr.Radio(
+ label=i18n("Version"),
+ choices=["v1", "v2"],
+ value="v2",
+ interactive=True,
+ visible=True,
+ )
+ np7 = gr.Slider(
+ minimum=0,
+ maximum=config.n_cpu,
+ step=1,
+ label=i18n("Number of CPU processes used for pitch extraction and data processing:"),
+ value=int(np.ceil(config.n_cpu / 1.5)),
+ interactive=True,
+ )
+ with gr.Group():
+ gr.Markdown(
+ value=i18n(
+ "Step 2a: Skipping pitch extraction"
+ )
+ )
+ with gr.Row():
+ # trainset_dir4 = gr.Textbox(
+ # label=i18n("Enter the path of the training folder:"), value=os.path.join(now_dir, datasets_root)
+ # )
+ trainset_dir4 = gr.Dropdown(choices=sorted(datasets), label=i18n("Select your dataset."), value=get_dataset())
+ btn_update_dataset_list = gr.Button(i18n("Update list."), variant="primary")
+ spk_id5 = gr.Slider(
+ minimum=0,
+ maximum=4,
+ step=1,
+ label=i18n("Please specify the model ID:"),
+ value=0,
+ interactive=True,
+ )
+ btn_update_dataset_list.click(
+ easy_infer.update_dataset_list, [spk_id5], trainset_dir4
+ )
+ but1 = gr.Button(i18n("Process data"), variant="primary")
+ info1 = gr.Textbox(label=i18n("Output information"), value="")
+ but1.click(
+ preprocess_dataset, [trainset_dir4, exp_dir1, sr2, np7], [info1]
+ )
+ with gr.Group():
+ gr.Markdown(value=i18n("Step 2b: Extracting features"))
+ with gr.Row():
+ with gr.Column():
+ gpus6 = gr.Textbox(
+ label=i18n("Provide the GPU index(es) separated by '-', like 0-1-2 for using GPUs 0, 1, and 2:"),
+ value=gpus,
+ interactive=True,
+ )
+ gr.Textbox(label=i18n("GPU Information"), value=gpu_info)
+ with gr.Column():
+ f0method8 = gr.Radio(
+ label=i18n(
+ "Select the pitch extraction algorithm:"
+ ),
+ choices=["pm", "harvest", "dio", "crepe", "mangio-crepe", "rmvpe"],
+ # [ MANGIO ]: Fork feature: Crepe on f0 extraction for training.
+ value="rmvpe",
+ interactive=True,
+ )
+
+ extraction_crepe_hop_length = gr.Slider(
+ minimum=1,
+ maximum=512,
+ step=1,
+ label=i18n("Mangio-Crepe Hop Length (Only applies to mangio-crepe): Hop length refers to the time it takes for the speaker to jump to a dramatic pitch. Lower hop lengths take more time to infer but are more pitch accurate."),
+ value=64,
+ interactive=True,
+ visible=False,
+ )
+
+ f0method8.change(
+ fn=lambda radio: (
+ {
+ "visible": radio in ['mangio-crepe', 'mangio-crepe-tiny'],
+ "__type__": "update"
+ }
+ ),
+ inputs=[f0method8],
+ outputs=[extraction_crepe_hop_length]
+ )
+ but2 = gr.Button(i18n("Feature extraction"), variant="primary")
+ info2 = gr.Textbox(label=i18n("Output information"), value="", max_lines=8, interactive=False)
+ but2.click(
+ extract_f0_feature,
+ [gpus6, np7, f0method8, if_f0_3, exp_dir1, version19, extraction_crepe_hop_length],
+ [info2],
+ )
+ with gr.Group():
+ gr.Markdown(value=i18n("Step 3a: Model training started"))
+ with gr.Row():
+ save_epoch10 = gr.Slider(
+ minimum=1,
+ maximum=50,
+ step=1,
+ label=i18n("Save frequency:"),
+ value=10,
+ interactive=True,
+ visible=True,
+ )
+ total_epoch11 = gr.Slider(
+ minimum=1,
+ maximum=10000,
+ step=2,
+ label=i18n("Training epochs:"),
+ value=750,
+ interactive=True,
+ )
+ batch_size12 = gr.Slider(
+ minimum=1,
+ maximum=40,
+ step=1,
+ label=i18n("Batch size per GPU:"),
+ #value=default_batch_size,
+ value=20,
+ interactive=True,
+ )
+ if_save_latest13 = gr.Checkbox(
+ label=i18n("Whether to save only the latest .ckpt file to save hard drive space"),
+ value=True,
+ interactive=True,
+ )
+ if_cache_gpu17 = gr.Checkbox(
+ label=i18n("Cache all training sets to GPU memory. Caching small datasets (less than 10 minutes) can speed up training"),
+ value=False,
+ interactive=True,
+ )
+ if_save_every_weights18 = gr.Checkbox(
+ label=i18n("Save a small final model to the 'weights' folder at each save point"),
+ value=True,
+ interactive=True,
+ )
+ with gr.Row():
+ pretrained_G14 = gr.Textbox(
+ lines=2,
+ label=i18n("Load pre-trained base model G path:"),
+ value="pretrained_v2/f0G40k.pth",
+ interactive=True,
+ )
+ pretrained_D15 = gr.Textbox(
+ lines=2,
+ label=i18n("Load pre-trained base model D path:"),
+ value="pretrained_v2/f0D40k.pth",
+ interactive=True,
+ )
+ sr2.change(
+ change_sr2,
+ [sr2, if_f0_3, version19],
+ [pretrained_G14, pretrained_D15],
+ )
+ version19.change(
+ change_version19,
+ [sr2, if_f0_3, version19],
+ [pretrained_G14, pretrained_D15, sr2],
+ )
+ if_f0_3.change(
+ fn=change_f0,
+ inputs=[if_f0_3, sr2, version19],
+ outputs=[f0method8, pretrained_G14, pretrained_D15],
+ )
+ if_f0_3.change(fn=lambda radio: (
+ {
+ "visible": radio in ['mangio-crepe', 'mangio-crepe-tiny'],
+ "__type__": "update"
+ }
+ ), inputs=[f0method8], outputs=[extraction_crepe_hop_length])
+ gpus16 = gr.Textbox(
+ label=i18n("Provide the GPU index(es) separated by '-', like 0-1-2 for using GPUs 0, 1, and 2:"),
+ value=gpus,
+ interactive=True,
+ )
+ butstop = gr.Button(i18n("Stop training"),
+ variant='primary',
+ visible=False,
+ )
+ but3 = gr.Button(i18n("Train model"), variant="primary", visible=True)
+ but3.click(fn=stoptraining, inputs=[gr.Number(value=0, visible=False)], outputs=[but3, butstop])
+ butstop.click(fn=stoptraining, inputs=[gr.Number(value=1, visible=False)], outputs=[but3, butstop])
+
+ with gr.Column(scale=0):
+                        gr.Markdown(value="")
+ gr.Markdown(value="### " + i18n("Build the index before saving."))
+ but4 = gr.Button(i18n("Train feature index"), variant="primary")
+ gr.Markdown(value="### " + i18n("Save your model once the training ends."))
+ save_action = gr.Dropdown(label=i18n("Save type"), choices=[i18n("Save all"),i18n("Save D and G"),i18n("Save voice")], value=i18n("Choose the method"), interactive=True)
+ but7 = gr.Button(i18n("Save model"), variant="primary")
+
+
+ # but4 = gr.Button(i18n("Train feature index"), variant="primary")
+ info3 = gr.Textbox(label=i18n("Output information"), value="", max_lines=10)
+
+ if_save_every_weights18.change(
+ fn=lambda if_save_every_weights: (
+ {
+ "visible": if_save_every_weights,
+ "__type__": "update"
+ }
+ ),
+ inputs=[if_save_every_weights18],
+ outputs=[save_epoch10]
+ )
+
+ but3.click(
+ click_train,
+ [
+ exp_dir1,
+ sr2,
+ if_f0_3,
+ spk_id5,
+ save_epoch10,
+ total_epoch11,
+ batch_size12,
+ if_save_latest13,
+ pretrained_G14,
+ pretrained_D15,
+ gpus16,
+ if_cache_gpu17,
+ if_save_every_weights18,
+ version19,
+ ],
+ [info3, butstop, but3],
+ )
+
+ but4.click(train_index, [exp_dir1, version19], info3)
+ but7.click(easy_infer.save_model, [exp_dir1, save_action], info3)
+ with gr.Group():
+ gr.Markdown(value=i18n(
+ 'Step 4: Export lowest points on a graph of the model')
+ )
+
+ with gr.Row():
+ with gr.Accordion(label=i18n("Lowest points export")):
+
+ lowestval_weight_dir = gr.Textbox(visible=False)
+ ds = gr.Textbox(visible=False)
+ weights_dir1 = gr.Textbox(visible=False, value=weights_dir)
+
+
+ with gr.Row():
+ amntlastmdls = gr.Slider(
+ minimum=1,
+ maximum=25,
+ label=i18n('How many lowest points to save'),
+ value=3,
+ step=1,
+ interactive=True,
+ )
+ lpexport = gr.Button(
+ value=i18n('Export lowest points of a model'),
+ variant='primary',
+ )
+ lw_mdls = gr.File(
+ file_count="multiple",
+ label=i18n("Output models"),
+ interactive=False,
+ ) #####
+
+ with gr.Row():
+ infolpex = gr.Textbox(label=i18n("Output information"), value="", max_lines=10)
+ mdlbl = gr.Dataframe(label=i18n('Stats of selected models'), datatype='number', type='pandas')
+
+ lpexport.click(
+ lambda model_name: os.path.join("logs", model_name, "lowestvals"),
+ inputs=[exp_dir1],
+ outputs=[lowestval_weight_dir]
+ )
+
+ lpexport.click(fn=tensorlowest.main, inputs=[exp_dir1, save_epoch10, amntlastmdls], outputs=[ds])
+
+ ds.change(
+ fn=tensorlowest.selectweights,
+ inputs=[exp_dir1, ds, weights_dir1, lowestval_weight_dir],
+ outputs=[infolpex, lw_mdls, mdlbl],
+ )
+ with gr.TabItem(i18n("UVR5")): # UVR section
+ with gr.Group():
+ with gr.Row():
+ with gr.Column():
+ dir_wav_input = gr.Textbox(
+ label=i18n("Enter the path of the audio folder to be processed:"),
+ value=os.path.join(now_dir, "audios")
+ )
+ wav_inputs = gr.File(
+ file_count="multiple", label=i18n("You can also input audio files in batches. Choose one of the two options. Priority is given to reading from the folder.")
+ )
+ with gr.Column():
+ model_choose = gr.Dropdown(label=i18n("Model"), choices=uvr5_names)
+ agg = gr.Slider(
+ minimum=0,
+ maximum=20,
+ step=1,
+                            label="Vocal Extraction Aggressiveness",
+ value=10,
+ interactive=True,
+ visible=False,
+ )
+ opt_vocal_root = gr.Textbox(
+ label=i18n("Specify the output folder for vocals:"), value="opt"
+ )
+ opt_ins_root = gr.Textbox(
+ label=i18n("Specify the output folder for accompaniment:"), value="opt"
+ )
+ format0 = gr.Radio(
+ label=i18n("Export file format"),
+ choices=["wav", "flac", "mp3", "m4a"],
+ value="flac",
+ interactive=True,
+ )
+ but2 = gr.Button(i18n("Convert"), variant="primary")
+ vc_output4 = gr.Textbox(label=i18n("Output information"))
+ but2.click(
+ uvr,
+ [
+ model_choose,
+ dir_wav_input,
+ opt_vocal_root,
+ wav_inputs,
+ opt_ins_root,
+ agg,
+ format0,
+ ],
+ [vc_output4],
+ )
+
+ with gr.TabItem(i18n("Resources")):
+
+ easy_infer.download_model()
+ easy_infer.download_backup()
+ easy_infer.download_dataset(trainset_dir4)
+ easy_infer.youtube_separator()
+ with gr.TabItem(i18n("Extra")):
+ gr.Markdown(
+ value=i18n("This section contains some extra utilities that often may be in experimental phases")
+ )
+ with gr.TabItem(i18n("Merge Audios")):
+ with gr.Group():
+ gr.Markdown(
+ value="## " + i18n("Merge your generated audios with the instrumental")
+ )
+ gr.Markdown(value="",scale="-0.5",visible=True)
+ gr.Markdown(value="",scale="-0.5",visible=True)
+ with gr.Row():
+ with gr.Column():
+ dropbox = gr.File(label=i18n("Drag the audio here and click the Refresh button"))
+ input_audio1 = gr.Dropdown(
+ label=i18n("Choose your instrumental"),
+ choices=sorted(audio_others_paths),
+ value='',
+ interactive=True,
+ )
+ input_audio1_scale = gr.Slider(
+ minimum=0,
+ maximum=10,
+ label=i18n("Volume"),
+ value=1.00,
+ interactive=True,
+ )
+ input_audio3 = gr.Dropdown(
+ label=i18n("Select the generated audio"),
+ choices=sorted(audio_paths),
+ value='',
+ interactive=True,
+ )
+ input_audio3_scale = gr.Slider(
+ minimum=0,
+ maximum=10,
+ label=i18n("Volume"),
+ value=10.00,
+ interactive=True,
+ )
+ reverb_ = gr.Checkbox(
+ label=i18n("Reverb"),
+ value=False,
+ interactive=True,
+ )
+ compressor_ = gr.Checkbox(
+ label=i18n("Compressor"),
+ value=False,
+ interactive=True,
+ )
+ noise_gate_ = gr.Checkbox(
+ label=i18n("Noise Gate"),
+ value=False,
+ interactive=True,
+ )
+
+ butnone = gr.Button(i18n("Merge"), variant="primary").style(full_width=True)
+
+ vc_output1 = gr.Textbox(label=i18n("Output information"))
+ vc_output2 = gr.Audio(label=i18n("Export audio (click on the three dots in the lower right corner to download)"), type='filepath')
+
+ dropbox.upload(fn=save_to_wav2, inputs=[dropbox], outputs=[input_audio1])
+ dropbox.upload(fn=change_choices2, inputs=[], outputs=[input_audio1])
+
+ refresh_button.click(
+ fn=lambda: change_choices3(),
+ inputs=[],
+ outputs=[input_audio1, input_audio3],
+ )
+
+ butnone.click(
+ fn=audio_combined,
+ inputs=[input_audio1, input_audio3,input_audio1_scale,input_audio3_scale,reverb_,compressor_,noise_gate_],
+ outputs=[vc_output1, vc_output2]
+ )
+
+
+ with gr.TabItem(i18n("Processing")):
+ with gr.Group():
+ gr.Markdown(value=i18n("Model fusion, can be used to test timbre fusion"))
+ with gr.Row():
+ ckpt_a = gr.Textbox(label=i18n("Path to Model A:"), value="", interactive=True, placeholder="Path to your model A.")
+ ckpt_b = gr.Textbox(label=i18n("Path to Model B:"), value="", interactive=True, placeholder="Path to your model B.")
+ alpha_a = gr.Slider(
+ minimum=0,
+ maximum=1,
+ label=i18n("Weight for Model A:"),
+ value=0.5,
+ interactive=True,
+ )
+ with gr.Row():
+ sr_ = gr.Radio(
+ label=i18n("Target sample rate:"),
+ choices=["40k", "48k"],
+ value="40k",
+ interactive=True,
+ )
+ if_f0_ = gr.Checkbox(
+ label="Whether the model has pitch guidance.",
+ value=True,
+ interactive=True,
+ )
+ info__ = gr.Textbox(
+ label=i18n("Model information to be placed:"), value="", max_lines=8, interactive=True, placeholder="Model information to be placed."
+ )
+ name_to_save0 = gr.Textbox(
+ label=i18n("Saved model name (without extension):"),
+ value="",
+ placeholder="Name for saving.",
+ max_lines=1,
+ interactive=True,
+ )
+ version_2 = gr.Radio(
+ label=i18n("Model architecture version:"),
+ choices=["v1", "v2"],
+ value="v1",
+ interactive=True,
+ )
+ with gr.Row():
+ but6 = gr.Button(i18n("Fusion"), variant="primary")
+ info4 = gr.Textbox(label=i18n("Output information"), value="", max_lines=8)
+ but6.click(
+ merge,
+ [
+ ckpt_a,
+ ckpt_b,
+ alpha_a,
+ sr_,
+ if_f0_,
+ info__,
+ name_to_save0,
+ version_2,
+ ],
+ info4,
+ ) # def merge(path1,path2,alpha1,sr,f0,info):
+ with gr.Group():
+ gr.Markdown(value=i18n("Modify model information"))
+ with gr.Row(): ######
+ ckpt_path0 = gr.Textbox(
+ label=i18n("Path to Model:"), placeholder="Path to your Model.", value="", interactive=True
+ )
+ info_ = gr.Textbox(
+ label=i18n("Model information to be modified:"), value="", max_lines=8, interactive=True, placeholder="Model information to be changed."
+ )
+ name_to_save1 = gr.Textbox(
+ label=i18n("Save file name:"),
+ placeholder="Either leave empty or put in the Name of the Model to be saved.",
+ value="",
+ max_lines=8,
+ interactive=True,
+ )
+ with gr.Row():
+ but7 = gr.Button(i18n("Modify"), variant="primary")
+ info5 = gr.Textbox(label=i18n("Output information"), value="", max_lines=8)
+ but7.click(change_info, [ckpt_path0, info_, name_to_save1], info5)
+ with gr.Group():
+ gr.Markdown(value=i18n("View model information"))
+ with gr.Row():
+ ckpt_path1 = gr.Textbox(
+ label=i18n("Path to Model:"), value="", interactive=True, placeholder="Model path here."
+ )
+ but8 = gr.Button(i18n("View"), variant="primary")
+ info6 = gr.Textbox(label=i18n("Output information"), value="", max_lines=8)
+ but8.click(show_info, [ckpt_path1], info6)
+ with gr.Group():
+ gr.Markdown(
+ value=i18n(
+ "Model extraction (enter the path of the large file model under the 'logs' folder). This is useful if you want to stop training halfway and manually extract and save a small model file, or if you want to test an intermediate model:"
+ )
+ )
+ with gr.Row():
+ ckpt_path2 = gr.Textbox(
+ lines=3,
+ label=i18n("Path to Model:"),
+ value=os.path.join(now_dir, "logs", "[YOUR_MODEL]", "G_23333.pth"),
+ interactive=True,
+ )
+ save_name = gr.Textbox(
+ label=i18n("Save name:"), value="", interactive=True,
+ placeholder="Your filename here.",
+ )
+ sr__ = gr.Radio(
+ label=i18n("Target sample rate:"),
+ choices=["32k", "40k", "48k"],
+ value="40k",
+ interactive=True,
+ )
+ if_f0__ = gr.Checkbox(
+ label="Whether the model has pitch guidance.",
+ value=True,
+ interactive=True,
+ )
+ version_1 = gr.Radio(
+ label=i18n("Model architecture version:"),
+ choices=["v1", "v2"],
+ value="v2",
+ interactive=True,
+ )
+ info___ = gr.Textbox(
+ label=i18n("Model information to be placed:"), value="", max_lines=8, interactive=True, placeholder="Model info here."
+ )
+ but9 = gr.Button(i18n("Extract"), variant="primary")
+ info7 = gr.Textbox(label=i18n("Output information"), value="", max_lines=8)
+ ckpt_path2.change(
+ change_info_, [ckpt_path2], [sr__, if_f0__, version_1]
+ )
+ but9.click(
+ extract_small_model,
+ [ckpt_path2, save_name, sr__, if_f0__, info___, version_1],
+ info7,
+ )
+
+ with gr.TabItem(i18n("Settings")):
+ with gr.Row():
+ gr.Markdown(value=
+ i18n("Pitch settings")
+ )
+ noteshertz = gr.Checkbox(
+ label = i18n("Whether to use note names instead of their hertz value. E.G. [C5, D6] instead of [523.25, 1174.66]Hz"),
+ value = rvc_globals.NotesOrHertz,
+ interactive = True,
+ )
+
+            noteshertz.change(fn=lambda nhertz: setattr(rvc_globals, 'NotesOrHertz', nhertz), inputs=[noteshertz], outputs=[])
+
+ noteshertz.change(
+ fn=switch_pitch_controls,
+ inputs=[f0method0],
+ outputs=[
+ minpitch_slider, minpitch_txtbox,
+ maxpitch_slider, maxpitch_txtbox,]
+ )
+ return app
+
+def GradioRun(app):
+    share_gradio_link = config.iscolab or config.paperspace
+    concurrency_count = 511
+    max_size = 1022
+
+    # The Colab/Paperspace and local branches were identical apart from the
+    # favicon path separator; a forward-slash path also works on Windows.
+    app.queue(concurrency_count=concurrency_count, max_size=max_size).launch(
+        server_name="0.0.0.0",
+        inbrowser=not config.noautoopen,
+        server_port=config.listen_port,
+        quiet=True,
+        favicon_path="./images/icon.png",
+        share=share_gradio_link,
+    )
+
+if __name__ == "__main__":
+ if os.name == 'nt':
+ print(i18n("Any ConnectionResetErrors post-conversion are irrelevant and purely visual; they can be ignored.\n"))
+ app = GradioSetup(UTheme=config.grtheme)
+ GradioRun(app)
diff --git a/infer_batch_rvc.py b/infer_batch_rvc.py
new file mode 100644
index 000000000..4ba8e05fc
--- /dev/null
+++ b/infer_batch_rvc.py
@@ -0,0 +1,215 @@
+"""
+v1
+runtime\python.exe myinfer-v2-0528.py 0 "E:\codes\py39\RVC-beta\todo-songs" "E:\codes\py39\logs\mi-test\added_IVF677_Flat_nprobe_7.index" harvest "E:\codes\py39\RVC-beta\output" "E:\codes\py39\test-20230416b\weights\mi-test.pth" 0.66 cuda:0 True 3 0 1 0.33
+v2
+runtime\python.exe myinfer-v2-0528.py 0 "E:\codes\py39\RVC-beta\todo-songs" "E:\codes\py39\test-20230416b\logs\mi-test-v2\aadded_IVF677_Flat_nprobe_1_v2.index" harvest "E:\codes\py39\RVC-beta\output_v2" "E:\codes\py39\test-20230416b\weights\mi-test-v2.pth" 0.66 cuda:0 True 3 0 1 0.33
+"""
+import os, sys, pdb, torch
+
+now_dir = os.getcwd()
+sys.path.append(now_dir)
+import sys
+import torch
+import tqdm as tq
+from multiprocessing import cpu_count
+
+
+class Config:
+ def __init__(self, device, is_half):
+ self.device = device
+ self.is_half = is_half
+ self.n_cpu = 0
+ self.gpu_name = None
+ self.gpu_mem = None
+ self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()
+
+ def device_config(self) -> tuple:
+ if torch.cuda.is_available():
+ i_device = int(self.device.split(":")[-1])
+ self.gpu_name = torch.cuda.get_device_name(i_device)
+ if (
+ ("16" in self.gpu_name and "V100" not in self.gpu_name.upper())
+ or "P40" in self.gpu_name.upper()
+ or "1060" in self.gpu_name
+ or "1070" in self.gpu_name
+ or "1080" in self.gpu_name
+ ):
+ print("16系/10系显卡和P40强制单精度")
+ self.is_half = False
+ for config_file in ["32k.json", "40k.json", "48k.json"]:
+ with open(f"configs/{config_file}", "r") as f:
+ strr = f.read().replace("true", "false")
+ with open(f"configs/{config_file}", "w") as f:
+ f.write(strr)
+ with open("trainset_preprocess_pipeline_print.py", "r") as f:
+ strr = f.read().replace("3.7", "3.0")
+ with open("trainset_preprocess_pipeline_print.py", "w") as f:
+ f.write(strr)
+ else:
+ self.gpu_name = None
+ self.gpu_mem = int(
+ torch.cuda.get_device_properties(i_device).total_memory
+ / 1024
+ / 1024
+ / 1024
+ + 0.4
+ )
+ if self.gpu_mem <= 4:
+ with open("trainset_preprocess_pipeline_print.py", "r") as f:
+ strr = f.read().replace("3.7", "3.0")
+ with open("trainset_preprocess_pipeline_print.py", "w") as f:
+ f.write(strr)
+ elif torch.backends.mps.is_available():
+ print("没有发现支持的N卡, 使用MPS进行推理")
+ self.device = "mps"
+ else:
+ print("没有发现支持的N卡, 使用CPU进行推理")
+ self.device = "cpu"
+ self.is_half = True
+
+ if self.n_cpu == 0:
+ self.n_cpu = cpu_count()
+
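+        # x_pad / x_query / x_center / x_max size the padding and chunking
+        # windows used by the inference pipeline; smaller values reduce
+        # VRAM use at some cost to quality near chunk boundaries.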
+ if self.is_half:
+            # configuration for ~6 GB of VRAM
+ x_pad = 3
+ x_query = 10
+ x_center = 60
+ x_max = 65
+ else:
+            # configuration for ~5 GB of VRAM
+ x_pad = 1
+ x_query = 6
+ x_center = 38
+ x_max = 41
+
+        if self.gpu_mem is not None and self.gpu_mem <= 4:
+ x_pad = 1
+ x_query = 5
+ x_center = 30
+ x_max = 32
+
+ return x_pad, x_query, x_center, x_max
+
+
+f0up_key = sys.argv[1]
+input_path = sys.argv[2]
+index_path = sys.argv[3]
+f0method = sys.argv[4] # harvest or pm
+opt_path = sys.argv[5]
+model_path = sys.argv[6]
+index_rate = float(sys.argv[7])
+device = sys.argv[8]
+is_half = sys.argv[9].lower() != "false"
+filter_radius = int(sys.argv[10])
+resample_sr = int(sys.argv[11])
+rms_mix_rate = float(sys.argv[12])
+protect = float(sys.argv[13])
+print(sys.argv)
+config = Config(device, is_half)
+now_dir = os.getcwd()
+sys.path.append(now_dir)
+from vc_infer_pipeline import VC
+from lib.infer_pack.models import (
+ SynthesizerTrnMs256NSFsid,
+ SynthesizerTrnMs256NSFsid_nono,
+ SynthesizerTrnMs768NSFsid,
+ SynthesizerTrnMs768NSFsid_nono,
+)
+from my_utils import load_audio
+from fairseq import checkpoint_utils
+from scipy.io import wavfile
+
+hubert_model = None
+
+
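+# Load the HuBERT content encoder whose embeddings the synthesizer
+# conditions on; vc_single() calls this lazily on the first conversion.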
+def load_hubert():
+ global hubert_model
+ models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
+ ["hubert_base.pt"],
+ suffix="",
+ )
+ hubert_model = models[0]
+ hubert_model = hubert_model.to(device)
+ if is_half:
+ hubert_model = hubert_model.half()
+ else:
+ hubert_model = hubert_model.float()
+ hubert_model.eval()
+
+
+def vc_single(sid, input_audio, f0_up_key, f0_file, f0_method, file_index, index_rate):
+ global tgt_sr, net_g, vc, hubert_model, version
+ if input_audio is None:
+ return "You need to upload an audio", None
+ f0_up_key = int(f0_up_key)
+ audio = load_audio(input_audio, 16000)
+ times = [0, 0, 0]
+    if hubert_model is None:
+ load_hubert()
+ if_f0 = cpt.get("f0", 1)
+ # audio_opt=vc.pipeline(hubert_model,net_g,sid,audio,times,f0_up_key,f0_method,file_index,file_big_npy,index_rate,if_f0,f0_file=f0_file)
+ audio_opt = vc.pipeline(
+ hubert_model,
+ net_g,
+ sid,
+ audio,
+ input_audio,
+ times,
+ f0_up_key,
+ f0_method,
+ file_index,
+ index_rate,
+ if_f0,
+ filter_radius,
+ tgt_sr,
+ resample_sr,
+ rms_mix_rate,
+ version,
+ protect,
+ f0_file=f0_file,
+ )
+ print(times)
+ return audio_opt
+
+
+def get_vc(model_path):
+ global n_spk, tgt_sr, net_g, vc, cpt, device, is_half, version
+ print("loading pth %s" % model_path)
+ cpt = torch.load(model_path, map_location="cpu")
+ tgt_sr = cpt["config"][-1]
+ cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] # n_spk
+ if_f0 = cpt.get("f0", 1)
+ version = cpt.get("version", "v1")
+ if version == "v1":
+ if if_f0 == 1:
+ net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=is_half)
+ else:
+ net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
+ elif version == "v2":
+ if if_f0 == 1: #
+ net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=is_half)
+ else:
+ net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
+ del net_g.enc_q
+    print(net_g.load_state_dict(cpt["weight"], strict=False))  # without this line the weights don't load cleanly -- oddly enough
+ net_g.eval().to(device)
+ if is_half:
+ net_g = net_g.half()
+ else:
+ net_g = net_g.float()
+ vc = VC(tgt_sr, config)
+ n_spk = cpt["config"][-3]
+ # return {"visible": True,"maximum": n_spk, "__type__": "update"}
+
+
+get_vc(model_path)
+audios = os.listdir(input_path)
+for file in tq.tqdm(audios):
+ if file.endswith(".wav"):
+        file_path = os.path.join(input_path, file)
+        wav_opt = vc_single(
+            0, file_path, f0up_key, None, f0method, index_path, index_rate
+        )
+        out_path = os.path.join(opt_path, file)
+ wavfile.write(out_path, tgt_sr, wav_opt)
diff --git a/infer_uvr5.py b/infer_uvr5.py
new file mode 100644
index 000000000..8c8c05429
--- /dev/null
+++ b/infer_uvr5.py
@@ -0,0 +1,363 @@
+import os, sys, torch, warnings, pdb
+
+now_dir = os.getcwd()
+sys.path.append(now_dir)
+from json import load as ll
+
+warnings.filterwarnings("ignore")
+import librosa
+import importlib
+import numpy as np
+import hashlib, math
+from tqdm import tqdm
+from lib.uvr5_pack.lib_v5 import spec_utils
+from lib.uvr5_pack.utils import _get_name_params, inference
+from lib.uvr5_pack.lib_v5.model_param_init import ModelParameters
+import soundfile as sf
+from lib.uvr5_pack.lib_v5.nets_new import CascadedNet
+from lib.uvr5_pack.lib_v5 import nets_61968KB as nets
+
+
+class _audio_pre_:
+ def __init__(self, agg, model_path, device, is_half):
+ self.model_path = model_path
+ self.device = device
+ self.data = {
+ # Processing Options
+ "postprocess": False,
+ "tta": False,
+ # Constants
+ "window_size": 512,
+ "agg": agg,
+ "high_end_process": "mirroring",
+ }
+ mp = ModelParameters("lib/uvr5_pack/lib_v5/modelparams/4band_v2.json")
+ model = nets.CascadedASPPNet(mp.param["bins"] * 2)
+ cpk = torch.load(model_path, map_location="cpu")
+ model.load_state_dict(cpk)
+ model.eval()
+ if is_half:
+ model = model.half().to(device)
+ else:
+ model = model.to(device)
+
+ self.mp = mp
+ self.model = model
+
+ def _path_audio_(self, music_file, ins_root=None, vocal_root=None, format="flac"):
+ if ins_root is None and vocal_root is None:
+ return "No save root."
+ name = os.path.basename(music_file)
+ if ins_root is not None:
+ os.makedirs(ins_root, exist_ok=True)
+ if vocal_root is not None:
+ os.makedirs(vocal_root, exist_ok=True)
+ X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {}
+ bands_n = len(self.mp.param["band"])
+ # print(bands_n)
+ for d in range(bands_n, 0, -1):
+ bp = self.mp.param["band"][d]
+ if d == bands_n: # high-end band
+ (
+ X_wave[d],
+ _,
+ ) = librosa.core.load(
+ music_file,
+ bp["sr"],
+ False,
+ dtype=np.float32,
+ res_type=bp["res_type"],
+ )
+ if X_wave[d].ndim == 1:
+ X_wave[d] = np.asfortranarray([X_wave[d], X_wave[d]])
+ else: # lower bands
+ X_wave[d] = librosa.core.resample(
+ X_wave[d + 1],
+ self.mp.param["band"][d + 1]["sr"],
+ bp["sr"],
+ res_type=bp["res_type"],
+ )
+ # Stft of wave source
+ X_spec_s[d] = spec_utils.wave_to_spectrogram_mt(
+ X_wave[d],
+ bp["hl"],
+ bp["n_fft"],
+ self.mp.param["mid_side"],
+ self.mp.param["mid_side_b2"],
+ self.mp.param["reverse"],
+ )
+ # pdb.set_trace()
+ if d == bands_n and self.data["high_end_process"] != "none":
+ input_high_end_h = (bp["n_fft"] // 2 - bp["crop_stop"]) + (
+ self.mp.param["pre_filter_stop"] - self.mp.param["pre_filter_start"]
+ )
+ input_high_end = X_spec_s[d][
+ :, bp["n_fft"] // 2 - input_high_end_h : bp["n_fft"] // 2, :
+ ]
+
+ X_spec_m = spec_utils.combine_spectrograms(X_spec_s, self.mp)
+        aggressive_set = float(self.data["agg"] / 100)
+        aggressiveness = {
+            "value": aggressive_set,
+ "split_bin": self.mp.param["band"][1]["crop_stop"],
+ }
+ with torch.no_grad():
+ pred, X_mag, X_phase = inference(
+ X_spec_m, self.device, self.model, aggressiveness, self.data
+ )
+ # Postprocess
+ if self.data["postprocess"]:
+ pred_inv = np.clip(X_mag - pred, 0, np.inf)
+ pred = spec_utils.mask_silence(pred, pred_inv)
+ y_spec_m = pred * X_phase
+ v_spec_m = X_spec_m - y_spec_m
+
+ if ins_root is not None:
+ if self.data["high_end_process"].startswith("mirroring"):
+ input_high_end_ = spec_utils.mirroring(
+ self.data["high_end_process"], y_spec_m, input_high_end, self.mp
+ )
+ wav_instrument = spec_utils.cmb_spectrogram_to_wave(
+ y_spec_m, self.mp, input_high_end_h, input_high_end_
+ )
+ else:
+ wav_instrument = spec_utils.cmb_spectrogram_to_wave(y_spec_m, self.mp)
+ print("%s instruments done" % name)
+ if format in ["wav", "flac"]:
+ sf.write(
+ os.path.join(
+ ins_root,
+ "instrument_{}_{}.{}".format(name, self.data["agg"], format),
+ ),
+ (np.array(wav_instrument) * 32768).astype("int16"),
+ self.mp.param["sr"],
+ ) #
+ else:
+ path = os.path.join(
+ ins_root, "instrument_{}_{}.wav".format(name, self.data["agg"])
+ )
+ sf.write(
+ path,
+ (np.array(wav_instrument) * 32768).astype("int16"),
+ self.mp.param["sr"],
+ )
+ if os.path.exists(path):
+ os.system(
+ "ffmpeg -i %s -vn %s -q:a 2 -y"
+ % (path, path[:-4] + ".%s" % format)
+ )
+ if vocal_root is not None:
+ if self.data["high_end_process"].startswith("mirroring"):
+ input_high_end_ = spec_utils.mirroring(
+ self.data["high_end_process"], v_spec_m, input_high_end, self.mp
+ )
+ wav_vocals = spec_utils.cmb_spectrogram_to_wave(
+ v_spec_m, self.mp, input_high_end_h, input_high_end_
+ )
+ else:
+ wav_vocals = spec_utils.cmb_spectrogram_to_wave(v_spec_m, self.mp)
+ print("%s vocals done" % name)
+ if format in ["wav", "flac"]:
+ sf.write(
+ os.path.join(
+ vocal_root,
+ "vocal_{}_{}.{}".format(name, self.data["agg"], format),
+ ),
+ (np.array(wav_vocals) * 32768).astype("int16"),
+ self.mp.param["sr"],
+ )
+ else:
+ path = os.path.join(
+ vocal_root, "vocal_{}_{}.wav".format(name, self.data["agg"])
+ )
+ sf.write(
+ path,
+ (np.array(wav_vocals) * 32768).astype("int16"),
+ self.mp.param["sr"],
+ )
+ if os.path.exists(path):
+ os.system(
+ "ffmpeg -i %s -vn %s -q:a 2 -y"
+ % (path, path[:-4] + ".%s" % format)
+ )
+
+
+class _audio_pre_new:
+ def __init__(self, agg, model_path, device, is_half):
+ self.model_path = model_path
+ self.device = device
+ self.data = {
+ # Processing Options
+ "postprocess": False,
+ "tta": False,
+ # Constants
+ "window_size": 512,
+ "agg": agg,
+ "high_end_process": "mirroring",
+ }
+ mp = ModelParameters("lib/uvr5_pack/lib_v5/modelparams/4band_v3.json")
+ nout = 64 if "DeReverb" in model_path else 48
+ model = CascadedNet(mp.param["bins"] * 2, nout)
+ cpk = torch.load(model_path, map_location="cpu")
+ model.load_state_dict(cpk)
+ model.eval()
+ if is_half:
+ model = model.half().to(device)
+ else:
+ model = model.to(device)
+
+ self.mp = mp
+ self.model = model
+
+ def _path_audio_(
+ self, music_file, vocal_root=None, ins_root=None, format="flac"
+ ): # for the 3 VR models, the vocal and instrument outputs are swapped
+ if ins_root is None and vocal_root is None:
+ return "No save root."
+ name = os.path.basename(music_file)
+ if ins_root is not None:
+ os.makedirs(ins_root, exist_ok=True)
+ if vocal_root is not None:
+ os.makedirs(vocal_root, exist_ok=True)
+ X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {}
+ bands_n = len(self.mp.param["band"])
+ # print(bands_n)
+ for d in range(bands_n, 0, -1):
+ bp = self.mp.param["band"][d]
+ if d == bands_n: # high-end band
+ (
+ X_wave[d],
+ _,
+ ) = librosa.core.load(
+ music_file,
+ bp["sr"],
+ False,
+ dtype=np.float32,
+ res_type=bp["res_type"],
+ )
+ if X_wave[d].ndim == 1:
+ X_wave[d] = np.asfortranarray([X_wave[d], X_wave[d]])
+ else: # lower bands
+ X_wave[d] = librosa.core.resample(
+ X_wave[d + 1],
+ self.mp.param["band"][d + 1]["sr"],
+ bp["sr"],
+ res_type=bp["res_type"],
+ )
+ # Stft of wave source
+ X_spec_s[d] = spec_utils.wave_to_spectrogram_mt(
+ X_wave[d],
+ bp["hl"],
+ bp["n_fft"],
+ self.mp.param["mid_side"],
+ self.mp.param["mid_side_b2"],
+ self.mp.param["reverse"],
+ )
+ # pdb.set_trace()
+ if d == bands_n and self.data["high_end_process"] != "none":
+ input_high_end_h = (bp["n_fft"] // 2 - bp["crop_stop"]) + (
+ self.mp.param["pre_filter_stop"] - self.mp.param["pre_filter_start"]
+ )
+ input_high_end = X_spec_s[d][
+ :, bp["n_fft"] // 2 - input_high_end_h : bp["n_fft"] // 2, :
+ ]
+
+ X_spec_m = spec_utils.combine_spectrograms(X_spec_s, self.mp)
+ aggressive_set = float(self.data["agg"] / 100)
+ aggressiveness = {
+ "value": aggressive_set,
+ "split_bin": self.mp.param["band"][1]["crop_stop"],
+ }
+ with torch.no_grad():
+ pred, X_mag, X_phase = inference(
+ X_spec_m, self.device, self.model, aggressiveness, self.data
+ )
+ # Postprocess
+ if self.data["postprocess"]:
+ pred_inv = np.clip(X_mag - pred, 0, np.inf)
+ pred = spec_utils.mask_silence(pred, pred_inv)
+ y_spec_m = pred * X_phase
+ v_spec_m = X_spec_m - y_spec_m
+
+ if ins_root is not None:
+ if self.data["high_end_process"].startswith("mirroring"):
+ input_high_end_ = spec_utils.mirroring(
+ self.data["high_end_process"], y_spec_m, input_high_end, self.mp
+ )
+ wav_instrument = spec_utils.cmb_spectrogram_to_wave(
+ y_spec_m, self.mp, input_high_end_h, input_high_end_
+ )
+ else:
+ wav_instrument = spec_utils.cmb_spectrogram_to_wave(y_spec_m, self.mp)
+ print("%s instruments done" % name)
+ if format in ["wav", "flac"]:
+ sf.write(
+ os.path.join(
+ ins_root,
+ "instrument_{}_{}.{}".format(name, self.data["agg"], format),
+ ),
+ (np.array(wav_instrument) * 32768).astype("int16"),
+ self.mp.param["sr"],
+ )
+ else:
+ path = os.path.join(
+ ins_root, "instrument_{}_{}.wav".format(name, self.data["agg"])
+ )
+ sf.write(
+ path,
+ (np.array(wav_instrument) * 32768).astype("int16"),
+ self.mp.param["sr"],
+ )
+ if os.path.exists(path):
+ os.system(
+ "ffmpeg -i %s -vn %s -q:a 2 -y"
+ % (path, path[:-4] + ".%s" % format)
+ )
+ if vocal_root is not None:
+ if self.data["high_end_process"].startswith("mirroring"):
+ input_high_end_ = spec_utils.mirroring(
+ self.data["high_end_process"], v_spec_m, input_high_end, self.mp
+ )
+ wav_vocals = spec_utils.cmb_spectrogram_to_wave(
+ v_spec_m, self.mp, input_high_end_h, input_high_end_
+ )
+ else:
+ wav_vocals = spec_utils.cmb_spectrogram_to_wave(v_spec_m, self.mp)
+ print("%s vocals done" % name)
+ if format in ["wav", "flac"]:
+ sf.write(
+ os.path.join(
+ vocal_root,
+ "vocal_{}_{}.{}".format(name, self.data["agg"], format),
+ ),
+ (np.array(wav_vocals) * 32768).astype("int16"),
+ self.mp.param["sr"],
+ )
+ else:
+ path = os.path.join(
+ vocal_root, "vocal_{}_{}.wav".format(name, self.data["agg"])
+ )
+ sf.write(
+ path,
+ (np.array(wav_vocals) * 32768).astype("int16"),
+ self.mp.param["sr"],
+ )
+ if os.path.exists(path):
+ os.system(
+ "ffmpeg -i %s -vn %s -q:a 2 -y"
+ % (path, path[:-4] + ".%s" % format)
+ )
+
+
+if __name__ == "__main__":
+ device = "cuda"
+ is_half = True
+ # model_path = "uvr5_weights/2_HP-UVR.pth"
+ # model_path = "uvr5_weights/VR-DeEchoDeReverb.pth"
+ # model_path = "uvr5_weights/VR-DeEchoNormal.pth"
+ model_path = "uvr5_weights/DeEchoNormal.pth"
+ # pre_fun = _audio_pre_(model_path=model_path, device=device, is_half=True,agg=10)
+ pre_fun = _audio_pre_new(model_path=model_path, device=device, is_half=is_half, agg=10)
+ audio_path = "雪雪伴奏对消HP5.wav"
+ save_path = "opt"
+ pre_fun._path_audio_(audio_path, save_path, save_path)
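+ # Minimal CPU fallback sketch (assumption: no CUDA device available;
+ # half precision is CUDA-only here, so is_half must be False on CPU):
+ # pre_fun = _audio_pre_new(model_path=model_path, device="cpu", is_half=False, agg=10)
+ # pre_fun._path_audio_(audio_path, save_path, save_path, format="wav")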
diff --git a/inference-presets.json b/inference-presets.json
new file mode 100644
index 000000000..55ddab746
--- /dev/null
+++ b/inference-presets.json
@@ -0,0 +1,20 @@
+{
+ "presets": [
+ {
+ "name": "Default Preset",
+ "model": "",
+ "transpose": 0,
+ "audio_file": "",
+ "f0_method": "pm",
+ "crepe_hop_length": 160,
+ "median_filtering": 3,
+ "feature_path": "",
+ "auto_feature_path": "",
+ "search_feature_ratio": 0.88,
+ "resample": 0,
+ "volume_envelope": 1,
+ "protect_voiceless": 0.33,
+ "f0_file_path": ""
+ }
+ ]
+}
\ No newline at end of file
diff --git a/install_Applio.bat b/install_Applio.bat
new file mode 100644
index 000000000..e3c40e951
--- /dev/null
+++ b/install_Applio.bat
@@ -0,0 +1,206 @@
+@echo off
+Title Applio Installer
+chcp 65001 > nul
+setlocal
+color 0a
+
+:::
+::: _ _
+::: /\ | (_)
+::: / \ _ __ _ __ | |_ ___
+::: / /\ \ | '_ \| '_ \| | |/ _ \
+::: / ____ \| |_) | |_) | | | (_) |
+::: /_/ \_\ .__/| .__/|_|_|\___/
+::: | | | |
+::: |_| |_|
+:::
+::: Version 1.0.0 - Developed by Aitron
+:::
+
+set "repoUrl=https://github.com/IAHispano/Applio-RVC-Fork"
+set "repoFolder=Applio-RVC-Fork"
+set "fixesFolder=Fixes"
+set "localFixesPy=local_fixes.py"
+set "URL_BASE=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main"
+echo.
+cls
+
+echo NOTICE: Remember to install the Microsoft C++ Build Tools and the Redistributable before continuing.
+echo.
+echo Link 1: https://aka.ms/vs/17/release/vs_BuildTools.exe
+echo Link 2: https://aka.ms/vs/17/release/vc_redist.x64.exe
+echo.
+pause
+cls
+
+for /f "delims=: tokens=*" %%A in ('findstr /b ":::" "%~f0"') do @echo(%%A
+echo.
+
+echo Checking whether Git is installed...
+git --version > nul 2>&1
+if %errorlevel% equ 0 (
+ echo Git is installed. Continuing...
+) else (
+ echo Git is not installed. Exiting.
+ echo Press Enter to close this script and open the download page.
+ pause
+ start https://github.com/git-for-windows/git/releases/download/v2.42.0.windows.1/Git-2.42.0-64-bit.exe
+ exit
+)
+
+echo.
+
+echo Checking whether Python 3.9.8 is installed...
+for /f %%A in ('python -c "import sys; print(sys.version)" 2^>^&1') do (
+ set "py_version=%%A"
+)
+
+echo %py_version% | find "3.9.8" > nul
+if %errorlevel% equ 0 (
+ echo Python 3.9.8 is installed. Continuing...
+) else (
+ echo Python 3.9.8 is not installed. Exiting.
+ echo Press Enter to close this script and open the download page.
+ pause
+ start https://www.python.org/ftp/python/3.9.8/python-3.9.8-amd64.exe
+ exit
+)
+
+echo.
+cls
+
+echo Requirements satisfied, continuing...
+echo.
+
+echo Creating the folder for the repository...
+mkdir "%repoFolder%"
+cd "%repoFolder%"
+echo.
+
+echo Cloning the repository...
+git clone "%repoUrl%" .
+echo.
+
+echo Checking whether local_fixes.py exists in the Fixes folder...
+if exist "%fixesFolder%\%localFixesPy%" (
+ echo Running the file...
+ python "%fixesFolder%\%localFixesPy%"
+) else (
+ echo The file "%localFixesPy%" was not found in the "Fixes" folder.
+)
+
+echo Moving on to downloading the models...
+echo.
+
+echo NOTICE
+echo At this point it is recommended to disable the antivirus or firewall, since errors may occur while downloading the pretrained models.
+echo If you have disabled the antivirus or firewall, press the "Enter" key.
+pause
+cls
+
+echo Descargando la carpeta "pretrained"...
+cd "pretrained"
+curl -LJO "%URL_BASE%/pretrained/D32k.pth"
+curl -LJO "%URL_BASE%/pretrained/D40k.pth"
+curl -LJO "%URL_BASE%/pretrained/D48k.pth"
+curl -LJO "%URL_BASE%/pretrained/G32k.pth"
+curl -LJO "%URL_BASE%/pretrained/G40k.pth"
+curl -LJO "%URL_BASE%/pretrained/G48k.pth"
+curl -LJO "%URL_BASE%/pretrained/f0D32k.pth"
+curl -LJO "%URL_BASE%/pretrained/f0D40k.pth"
+curl -LJO "%URL_BASE%/pretrained/f0D48k.pth"
+curl -LJO "%URL_BASE%/pretrained/f0G32k.pth"
+curl -LJO "%URL_BASE%/pretrained/f0G40k.pth"
+curl -LJO "%URL_BASE%/pretrained/f0G48k.pth"
+cd ".."
+echo.
+cls
+
+echo Descargando la carpeta "pretrained_v2"...
+cd "pretrained_v2"
+curl -LJO "%URL_BASE%/pretrained_v2/D32k.pth"
+curl -LJO "%URL_BASE%/pretrained_v2/D40k.pth"
+curl -LJO "%URL_BASE%/pretrained_v2/D48k.pth"
+curl -LJO "%URL_BASE%/pretrained_v2/G32k.pth"
+curl -LJO "%URL_BASE%/pretrained_v2/G40k.pth"
+curl -LJO "%URL_BASE%/pretrained_v2/G48k.pth"
+curl -LJO "%URL_BASE%/pretrained_v2/f0D32k.pth"
+curl -LJO "%URL_BASE%/pretrained_v2/f0D40k.pth"
+curl -LJO "%URL_BASE%/pretrained_v2/f0D48k.pth"
+curl -LJO "%URL_BASE%/pretrained_v2/f0G32k.pth"
+curl -LJO "%URL_BASE%/pretrained_v2/f0G40k.pth"
+curl -LJO "%URL_BASE%/pretrained_v2/f0G48k.pth"
+cd ".."
+echo.
+cls
+
+echo Descargando la carpeta "uvr5_weights"...
+cd "uvr5_weights"
+curl -LJO "%URL_BASE%/uvr5_weights/HP2_all_vocals.pth"
+curl -LJO "%URL_BASE%/uvr5_weights/HP3_all_vocals.pth"
+curl -LJO "%URL_BASE%/uvr5_weights/HP5_only_main_vocal.pth"
+curl -LJO "%URL_BASE%/uvr5_weights/VR-DeEchoAggressive.pth"
+curl -LJO "%URL_BASE%/uvr5_weights/VR-DeEchoDeReverb.pth"
+curl -LJO "%URL_BASE%/uvr5_weights/VR-DeEchoNormal.pth"
+cd ".."
+echo.
+cls
+
+echo Downloading the rmvpe.pt file...
+curl -LJO "%URL_BASE%/rmvpe.pt"
+echo.
+cls
+
+echo Downloading the ffmpeg.exe file...
+curl -LJO "%URL_BASE%/ffmpeg.exe"
+echo.
+cls
+
+echo Downloading the ffprobe.exe file...
+curl -LJO "%URL_BASE%/ffprobe.exe"
+echo.
+echo Downloads complete. Proceeding with the dependencies.
+cls
+
+echo Do you have a GPU?
+echo This determines whether light (no GPU) or heavy (GPU) dependencies will be downloaded.
+echo.
+
+
+set /p op=Type "Yes" or "No":
+if "%op%"=="Yes" goto gpu
+if "%op%"=="No" goto non_gpu
+
+
+:gpu
+echo GPU selected, continuing...
+echo.
+echo Downloading the dependencies...
+echo.
+pip install -r requirements-gpu.txt
+pip uninstall torch torchvision torchaudio -y
+echo.
+echo NOTE: The computer may run slowly during this process; do not worry.
+echo.
+pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118
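+REM Note: the cu118 wheels assume an NVIDIA GPU whose driver supports CUDA 11.8.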
+endlocal
+echo.
+cls
+echo Applio has been downloaded!
+echo.
+echo Press Enter to exit.
+pause
+exit
+
+:non_gpu
+echo No GPU selected, continuing...
+echo.
+echo Downloading the dependencies...
+echo.
+pip install -r requirements.txt
+echo.
+echo Applio has been downloaded!
+endlocal
+echo.
+pause
+exit
diff --git a/lib/globals/globals.py b/lib/globals/globals.py
new file mode 100644
index 000000000..d0da59d56
--- /dev/null
+++ b/lib/globals/globals.py
@@ -0,0 +1,5 @@
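+# Shared runtime flags (assumption: read and mutated by the GUI and
+# inference code elsewhere in this fork). Quefrency and Timbre parametrize
+# the formant-shift feature; NotesOrHertz toggles the pitch display unit.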
+DoFormant: bool = False
+Quefrency: float = 8.0
+Timbre: float = 1.2
+
+NotesOrHertz: bool = False
\ No newline at end of file
diff --git a/lib/infer_pack/attentions.py b/lib/infer_pack/attentions.py
new file mode 100644
index 000000000..05501be18
--- /dev/null
+++ b/lib/infer_pack/attentions.py
@@ -0,0 +1,417 @@
+import math
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+from lib.infer_pack import commons
+from lib.infer_pack import modules
+from lib.infer_pack.modules import LayerNorm
+
+
+class Encoder(nn.Module):
+ def __init__(
+ self,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size=1,
+ p_dropout=0.0,
+ window_size=10,
+ **kwargs
+ ):
+ super().__init__()
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.window_size = window_size
+
+ self.drop = nn.Dropout(p_dropout)
+ self.attn_layers = nn.ModuleList()
+ self.norm_layers_1 = nn.ModuleList()
+ self.ffn_layers = nn.ModuleList()
+ self.norm_layers_2 = nn.ModuleList()
+ for i in range(self.n_layers):
+ self.attn_layers.append(
+ MultiHeadAttention(
+ hidden_channels,
+ hidden_channels,
+ n_heads,
+ p_dropout=p_dropout,
+ window_size=window_size,
+ )
+ )
+ self.norm_layers_1.append(LayerNorm(hidden_channels))
+ self.ffn_layers.append(
+ FFN(
+ hidden_channels,
+ hidden_channels,
+ filter_channels,
+ kernel_size,
+ p_dropout=p_dropout,
+ )
+ )
+ self.norm_layers_2.append(LayerNorm(hidden_channels))
+
+ def forward(self, x, x_mask):
+ attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
+ x = x * x_mask
+ for i in range(self.n_layers):
+ y = self.attn_layers[i](x, x, attn_mask)
+ y = self.drop(y)
+ x = self.norm_layers_1[i](x + y)
+
+ y = self.ffn_layers[i](x, x_mask)
+ y = self.drop(y)
+ x = self.norm_layers_2[i](x + y)
+ x = x * x_mask
+ return x
+
+
+class Decoder(nn.Module):
+ def __init__(
+ self,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size=1,
+ p_dropout=0.0,
+ proximal_bias=False,
+ proximal_init=True,
+ **kwargs
+ ):
+ super().__init__()
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.proximal_bias = proximal_bias
+ self.proximal_init = proximal_init
+
+ self.drop = nn.Dropout(p_dropout)
+ self.self_attn_layers = nn.ModuleList()
+ self.norm_layers_0 = nn.ModuleList()
+ self.encdec_attn_layers = nn.ModuleList()
+ self.norm_layers_1 = nn.ModuleList()
+ self.ffn_layers = nn.ModuleList()
+ self.norm_layers_2 = nn.ModuleList()
+ for i in range(self.n_layers):
+ self.self_attn_layers.append(
+ MultiHeadAttention(
+ hidden_channels,
+ hidden_channels,
+ n_heads,
+ p_dropout=p_dropout,
+ proximal_bias=proximal_bias,
+ proximal_init=proximal_init,
+ )
+ )
+ self.norm_layers_0.append(LayerNorm(hidden_channels))
+ self.encdec_attn_layers.append(
+ MultiHeadAttention(
+ hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout
+ )
+ )
+ self.norm_layers_1.append(LayerNorm(hidden_channels))
+ self.ffn_layers.append(
+ FFN(
+ hidden_channels,
+ hidden_channels,
+ filter_channels,
+ kernel_size,
+ p_dropout=p_dropout,
+ causal=True,
+ )
+ )
+ self.norm_layers_2.append(LayerNorm(hidden_channels))
+
+ def forward(self, x, x_mask, h, h_mask):
+ """
+ x: decoder input
+ h: encoder output
+ """
+ self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(
+ device=x.device, dtype=x.dtype
+ )
+ encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
+ x = x * x_mask
+ for i in range(self.n_layers):
+ y = self.self_attn_layers[i](x, x, self_attn_mask)
+ y = self.drop(y)
+ x = self.norm_layers_0[i](x + y)
+
+ y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
+ y = self.drop(y)
+ x = self.norm_layers_1[i](x + y)
+
+ y = self.ffn_layers[i](x, x_mask)
+ y = self.drop(y)
+ x = self.norm_layers_2[i](x + y)
+ x = x * x_mask
+ return x
+
+
+class MultiHeadAttention(nn.Module):
+ def __init__(
+ self,
+ channels,
+ out_channels,
+ n_heads,
+ p_dropout=0.0,
+ window_size=None,
+ heads_share=True,
+ block_length=None,
+ proximal_bias=False,
+ proximal_init=False,
+ ):
+ super().__init__()
+ assert channels % n_heads == 0
+
+ self.channels = channels
+ self.out_channels = out_channels
+ self.n_heads = n_heads
+ self.p_dropout = p_dropout
+ self.window_size = window_size
+ self.heads_share = heads_share
+ self.block_length = block_length
+ self.proximal_bias = proximal_bias
+ self.proximal_init = proximal_init
+ self.attn = None
+
+ self.k_channels = channels // n_heads
+ self.conv_q = nn.Conv1d(channels, channels, 1)
+ self.conv_k = nn.Conv1d(channels, channels, 1)
+ self.conv_v = nn.Conv1d(channels, channels, 1)
+ self.conv_o = nn.Conv1d(channels, out_channels, 1)
+ self.drop = nn.Dropout(p_dropout)
+
+ if window_size is not None:
+ n_heads_rel = 1 if heads_share else n_heads
+ rel_stddev = self.k_channels**-0.5
+ self.emb_rel_k = nn.Parameter(
+ torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
+ * rel_stddev
+ )
+ self.emb_rel_v = nn.Parameter(
+ torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
+ * rel_stddev
+ )
+
+ nn.init.xavier_uniform_(self.conv_q.weight)
+ nn.init.xavier_uniform_(self.conv_k.weight)
+ nn.init.xavier_uniform_(self.conv_v.weight)
+ if proximal_init:
+ with torch.no_grad():
+ self.conv_k.weight.copy_(self.conv_q.weight)
+ self.conv_k.bias.copy_(self.conv_q.bias)
+
+ def forward(self, x, c, attn_mask=None):
+ q = self.conv_q(x)
+ k = self.conv_k(c)
+ v = self.conv_v(c)
+
+ x, self.attn = self.attention(q, k, v, mask=attn_mask)
+
+ x = self.conv_o(x)
+ return x
+
+ def attention(self, query, key, value, mask=None):
+ # reshape [b, d, t] -> [b, n_h, t, d_k]
+ b, d, t_s, t_t = (*key.size(), query.size(2))
+ query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
+ key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
+ value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
+
+ scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
+ if self.window_size is not None:
+ assert (
+ t_s == t_t
+ ), "Relative attention is only available for self-attention."
+ key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
+ rel_logits = self._matmul_with_relative_keys(
+ query / math.sqrt(self.k_channels), key_relative_embeddings
+ )
+ scores_local = self._relative_position_to_absolute_position(rel_logits)
+ scores = scores + scores_local
+ if self.proximal_bias:
+ assert t_s == t_t, "Proximal bias is only available for self-attention."
+ scores = scores + self._attention_bias_proximal(t_s).to(
+ device=scores.device, dtype=scores.dtype
+ )
+ if mask is not None:
+ scores = scores.masked_fill(mask == 0, -1e4)
+ if self.block_length is not None:
+ assert (
+ t_s == t_t
+ ), "Local attention is only available for self-attention."
+ block_mask = (
+ torch.ones_like(scores)
+ .triu(-self.block_length)
+ .tril(self.block_length)
+ )
+ scores = scores.masked_fill(block_mask == 0, -1e4)
+ p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
+ p_attn = self.drop(p_attn)
+ output = torch.matmul(p_attn, value)
+ if self.window_size is not None:
+ relative_weights = self._absolute_position_to_relative_position(p_attn)
+ value_relative_embeddings = self._get_relative_embeddings(
+ self.emb_rel_v, t_s
+ )
+ output = output + self._matmul_with_relative_values(
+ relative_weights, value_relative_embeddings
+ )
+ output = (
+ output.transpose(2, 3).contiguous().view(b, d, t_t)
+ ) # [b, n_h, t_t, d_k] -> [b, d, t_t]
+ return output, p_attn
+
+ def _matmul_with_relative_values(self, x, y):
+ """
+ x: [b, h, l, m]
+ y: [h or 1, m, d]
+ ret: [b, h, l, d]
+ """
+ ret = torch.matmul(x, y.unsqueeze(0))
+ return ret
+
+ def _matmul_with_relative_keys(self, x, y):
+ """
+ x: [b, h, l, d]
+ y: [h or 1, m, d]
+ ret: [b, h, l, m]
+ """
+ ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
+ return ret
+
+ def _get_relative_embeddings(self, relative_embeddings, length):
+ max_relative_position = 2 * self.window_size + 1
+ # Pad first before slice to avoid using cond ops.
+ pad_length = max(length - (self.window_size + 1), 0)
+ slice_start_position = max((self.window_size + 1) - length, 0)
+ slice_end_position = slice_start_position + 2 * length - 1
+ if pad_length > 0:
+ padded_relative_embeddings = F.pad(
+ relative_embeddings,
+ commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
+ )
+ else:
+ padded_relative_embeddings = relative_embeddings
+ used_relative_embeddings = padded_relative_embeddings[
+ :, slice_start_position:slice_end_position
+ ]
+ return used_relative_embeddings
+
+ def _relative_position_to_absolute_position(self, x):
+ """
+ x: [b, h, l, 2*l-1]
+ ret: [b, h, l, l]
+ """
+ batch, heads, length, _ = x.size()
+ # Concat columns of pad to shift from relative to absolute indexing.
+ x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
+
+ # Concat extra elements so to add up to shape (len+1, 2*len-1).
+ x_flat = x.view([batch, heads, length * 2 * length])
+ x_flat = F.pad(
+ x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])
+ )
+
+ # Reshape and slice out the padded elements.
+ x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[
+ :, :, :length, length - 1 :
+ ]
+ return x_final
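+ # Shape walk-through (illustrative): for length = 3 the input is
+ # [b, h, 3, 5]; padding gives [b, h, 3, 6], flattening [b, h, 18],
+ # padding again [b, h, 20], and viewing that as [b, h, 4, 5] lets the
+ # slice [:, :, :3, 2:] read out absolute-position scores of shape [b, h, 3, 3].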
+
+ def _absolute_position_to_relative_position(self, x):
+ """
+ x: [b, h, l, l]
+ ret: [b, h, l, 2*l-1]
+ """
+ batch, heads, length, _ = x.size()
+ # pad along the column dimension
+ x = F.pad(
+ x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])
+ )
+ x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
+ # add 0's in the beginning that will skew the elements after reshape
+ x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
+ x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
+ return x_final
+
+ def _attention_bias_proximal(self, length):
+ """Bias for self-attention to encourage attention to close positions.
+ Args:
+ length: an integer scalar.
+ Returns:
+ a Tensor with shape [1, 1, length, length]
+ """
+ r = torch.arange(length, dtype=torch.float32)
+ diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
+ return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
+
+
+class FFN(nn.Module):
+ def __init__(
+ self,
+ in_channels,
+ out_channels,
+ filter_channels,
+ kernel_size,
+ p_dropout=0.0,
+ activation=None,
+ causal=False,
+ ):
+ super().__init__()
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.filter_channels = filter_channels
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.activation = activation
+ self.causal = causal
+
+ if causal:
+ self.padding = self._causal_padding
+ else:
+ self.padding = self._same_padding
+
+ self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
+ self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
+ self.drop = nn.Dropout(p_dropout)
+
+ def forward(self, x, x_mask):
+ x = self.conv_1(self.padding(x * x_mask))
+ if self.activation == "gelu":
+ x = x * torch.sigmoid(1.702 * x)
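+ # (x * sigmoid(1.702 * x) is the standard sigmoid approximation of GELU)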
+ else:
+ x = torch.relu(x)
+ x = self.drop(x)
+ x = self.conv_2(self.padding(x * x_mask))
+ return x * x_mask
+
+ def _causal_padding(self, x):
+ if self.kernel_size == 1:
+ return x
+ pad_l = self.kernel_size - 1
+ pad_r = 0
+ padding = [[0, 0], [0, 0], [pad_l, pad_r]]
+ x = F.pad(x, commons.convert_pad_shape(padding))
+ return x
+
+ def _same_padding(self, x):
+ if self.kernel_size == 1:
+ return x
+ pad_l = (self.kernel_size - 1) // 2
+ pad_r = self.kernel_size // 2
+ padding = [[0, 0], [0, 0], [pad_l, pad_r]]
+ x = F.pad(x, commons.convert_pad_shape(padding))
+ return x
diff --git a/lib/infer_pack/commons.py b/lib/infer_pack/commons.py
new file mode 100644
index 000000000..54470986f
--- /dev/null
+++ b/lib/infer_pack/commons.py
@@ -0,0 +1,166 @@
+import math
+import torch
+from torch.nn import functional as F
+
+
+def init_weights(m, mean=0.0, std=0.01):
+ classname = m.__class__.__name__
+ if classname.find("Conv") != -1:
+ m.weight.data.normal_(mean, std)
+
+
+def get_padding(kernel_size, dilation=1):
+ return int((kernel_size * dilation - dilation) / 2)
+
+
+def convert_pad_shape(pad_shape):
+ l = pad_shape[::-1]
+ pad_shape = [item for sublist in l for item in sublist]
+ return pad_shape
+
+
+def kl_divergence(m_p, logs_p, m_q, logs_q):
+ """KL(P||Q)"""
+ kl = (logs_q - logs_p) - 0.5
+ kl += (
+ 0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q)
+ )
+ return kl
+
+
+def rand_gumbel(shape):
+ """Sample from the Gumbel distribution, protect from overflows."""
+ uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
+ return -torch.log(-torch.log(uniform_samples))
+
+
+def rand_gumbel_like(x):
+ g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
+ return g
+
+
+def slice_segments(x, ids_str, segment_size=4):
+ ret = torch.zeros_like(x[:, :, :segment_size])
+ for i in range(x.size(0)):
+ idx_str = ids_str[i]
+ idx_end = idx_str + segment_size
+ ret[i] = x[i, :, idx_str:idx_end]
+ return ret
+
+
+def slice_segments2(x, ids_str, segment_size=4):
+ ret = torch.zeros_like(x[:, :segment_size])
+ for i in range(x.size(0)):
+ idx_str = ids_str[i]
+ idx_end = idx_str + segment_size
+ ret[i] = x[i, idx_str:idx_end]
+ return ret
+
+
+def rand_slice_segments(x, x_lengths=None, segment_size=4):
+ b, d, t = x.size()
+ if x_lengths is None:
+ x_lengths = t
+ ids_str_max = x_lengths - segment_size + 1
+ ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
+ ret = slice_segments(x, ids_str, segment_size)
+ return ret, ids_str
+
+
+def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
+ position = torch.arange(length, dtype=torch.float)
+ num_timescales = channels // 2
+ log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / (
+ num_timescales - 1
+ )
+ inv_timescales = min_timescale * torch.exp(
+ torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment
+ )
+ scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
+ signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
+ signal = F.pad(signal, [0, 0, 0, channels % 2])
+ signal = signal.view(1, channels, length)
+ return signal
+
+
+def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
+ b, channels, length = x.size()
+ signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
+ return x + signal.to(dtype=x.dtype, device=x.device)
+
+
+def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
+ b, channels, length = x.size()
+ signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
+ return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
+
+
+def subsequent_mask(length):
+ mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
+ return mask
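+# Example: subsequent_mask(3) has shape [1, 1, 3, 3] with lower-triangular
+# entries [[1, 0, 0], [1, 1, 0], [1, 1, 1]], i.e. step t attends only to steps <= t.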
+
+
+@torch.jit.script
+def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
+ n_channels_int = n_channels[0]
+ in_act = input_a + input_b
+ t_act = torch.tanh(in_act[:, :n_channels_int, :])
+ s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
+ acts = t_act * s_act
+ return acts
+
+
+def shift_1d(x):
+ x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
+ return x
+
+
+def sequence_mask(length, max_length=None):
+ if max_length is None:
+ max_length = length.max()
+ x = torch.arange(max_length, dtype=length.dtype, device=length.device)
+ return x.unsqueeze(0) < length.unsqueeze(1)
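+# Example: sequence_mask(torch.tensor([2, 3])) ->
+# [[True, True, False], [True, True, True]] (max_length defaults to 3).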
+
+
+def generate_path(duration, mask):
+ """
+ duration: [b, 1, t_x]
+ mask: [b, 1, t_y, t_x]
+ """
+ device = duration.device
+
+ b, _, t_y, t_x = mask.shape
+ cum_duration = torch.cumsum(duration, -1)
+
+ cum_duration_flat = cum_duration.view(b * t_x)
+ path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
+ path = path.view(b, t_x, t_y)
+ path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
+ path = path.unsqueeze(1).transpose(2, 3) * mask
+ return path
+
+
+def clip_grad_value_(parameters, clip_value, norm_type=2):
+ if isinstance(parameters, torch.Tensor):
+ parameters = [parameters]
+ parameters = list(filter(lambda p: p.grad is not None, parameters))
+ norm_type = float(norm_type)
+ if clip_value is not None:
+ clip_value = float(clip_value)
+
+ total_norm = 0
+ for p in parameters:
+ param_norm = p.grad.data.norm(norm_type)
+ total_norm += param_norm.item() ** norm_type
+ if clip_value is not None:
+ p.grad.data.clamp_(min=-clip_value, max=clip_value)
+ total_norm = total_norm ** (1.0 / norm_type)
+ return total_norm
diff --git a/lib/infer_pack/models.py b/lib/infer_pack/models.py
new file mode 100644
index 000000000..3665d03bc
--- /dev/null
+++ b/lib/infer_pack/models.py
@@ -0,0 +1,1142 @@
+import math
+import numpy as np
+import torch
+from torch import nn
+from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
+from torch.nn import functional as F
+from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
+from lib.infer_pack import modules
+from lib.infer_pack import attentions
+from lib.infer_pack import commons
+from lib.infer_pack.commons import init_weights, get_padding
+
+
+class TextEncoder256(nn.Module):
+ def __init__(
+ self,
+ out_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ f0=True,
+ ):
+ super().__init__()
+ self.out_channels = out_channels
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.emb_phone = nn.Linear(256, hidden_channels)
+ self.lrelu = nn.LeakyReLU(0.1, inplace=True)
+ if f0:
+ self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
+ self.encoder = attentions.Encoder(
+ hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
+ )
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+ def forward(self, phone, pitch, lengths):
+ if pitch is None:
+ x = self.emb_phone(phone)
+ else:
+ x = self.emb_phone(phone) + self.emb_pitch(pitch)
+ x = x * math.sqrt(self.hidden_channels) # [b, t, h]
+ x = self.lrelu(x)
+ x = torch.transpose(x, 1, -1) # [b, h, t]
+ x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
+ x.dtype
+ )
+ x = self.encoder(x * x_mask, x_mask)
+ stats = self.proj(x) * x_mask
+
+ m, logs = torch.split(stats, self.out_channels, dim=1)
+ return m, logs, x_mask
+
+
+class TextEncoder768(nn.Module):
+ def __init__(
+ self,
+ out_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ f0=True,
+ ):
+ super().__init__()
+ self.out_channels = out_channels
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.emb_phone = nn.Linear(768, hidden_channels)
+ self.lrelu = nn.LeakyReLU(0.1, inplace=True)
+ if f0:
+ self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
+ self.encoder = attentions.Encoder(
+ hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
+ )
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+ def forward(self, phone, pitch, lengths):
+ if pitch is None:
+ x = self.emb_phone(phone)
+ else:
+ x = self.emb_phone(phone) + self.emb_pitch(pitch)
+ x = x * math.sqrt(self.hidden_channels) # [b, t, h]
+ x = self.lrelu(x)
+ x = torch.transpose(x, 1, -1) # [b, h, t]
+ x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
+ x.dtype
+ )
+ x = self.encoder(x * x_mask, x_mask)
+ stats = self.proj(x) * x_mask
+
+ m, logs = torch.split(stats, self.out_channels, dim=1)
+ return m, logs, x_mask
+
+
+class ResidualCouplingBlock(nn.Module):
+ def __init__(
+ self,
+ channels,
+ hidden_channels,
+ kernel_size,
+ dilation_rate,
+ n_layers,
+ n_flows=4,
+ gin_channels=0,
+ ):
+ super().__init__()
+ self.channels = channels
+ self.hidden_channels = hidden_channels
+ self.kernel_size = kernel_size
+ self.dilation_rate = dilation_rate
+ self.n_layers = n_layers
+ self.n_flows = n_flows
+ self.gin_channels = gin_channels
+
+ self.flows = nn.ModuleList()
+ for i in range(n_flows):
+ self.flows.append(
+ modules.ResidualCouplingLayer(
+ channels,
+ hidden_channels,
+ kernel_size,
+ dilation_rate,
+ n_layers,
+ gin_channels=gin_channels,
+ mean_only=True,
+ )
+ )
+ self.flows.append(modules.Flip())
+
+ def forward(self, x, x_mask, g=None, reverse=False):
+ if not reverse:
+ for flow in self.flows:
+ x, _ = flow(x, x_mask, g=g, reverse=reverse)
+ else:
+ for flow in reversed(self.flows):
+ x = flow(x, x_mask, g=g, reverse=reverse)
+ return x
+
+ def remove_weight_norm(self):
+ for i in range(self.n_flows):
+ self.flows[i * 2].remove_weight_norm()
+
+
+class PosteriorEncoder(nn.Module):
+ def __init__(
+ self,
+ in_channels,
+ out_channels,
+ hidden_channels,
+ kernel_size,
+ dilation_rate,
+ n_layers,
+ gin_channels=0,
+ ):
+ super().__init__()
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.hidden_channels = hidden_channels
+ self.kernel_size = kernel_size
+ self.dilation_rate = dilation_rate
+ self.n_layers = n_layers
+ self.gin_channels = gin_channels
+
+ self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
+ self.enc = modules.WN(
+ hidden_channels,
+ kernel_size,
+ dilation_rate,
+ n_layers,
+ gin_channels=gin_channels,
+ )
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+ def forward(self, x, x_lengths, g=None):
+ x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
+ x.dtype
+ )
+ x = self.pre(x) * x_mask
+ x = self.enc(x, x_mask, g=g)
+ stats = self.proj(x) * x_mask
+ m, logs = torch.split(stats, self.out_channels, dim=1)
+ z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
+ return z, m, logs, x_mask
+
+ def remove_weight_norm(self):
+ self.enc.remove_weight_norm()
+
+
+class Generator(torch.nn.Module):
+ def __init__(
+ self,
+ initial_channel,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ gin_channels=0,
+ ):
+ super(Generator, self).__init__()
+ self.num_kernels = len(resblock_kernel_sizes)
+ self.num_upsamples = len(upsample_rates)
+ self.conv_pre = Conv1d(
+ initial_channel, upsample_initial_channel, 7, 1, padding=3
+ )
+ resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
+
+ self.ups = nn.ModuleList()
+ for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
+ self.ups.append(
+ weight_norm(
+ ConvTranspose1d(
+ upsample_initial_channel // (2**i),
+ upsample_initial_channel // (2 ** (i + 1)),
+ k,
+ u,
+ padding=(k - u) // 2,
+ )
+ )
+ )
+
+ self.resblocks = nn.ModuleList()
+ for i in range(len(self.ups)):
+ ch = upsample_initial_channel // (2 ** (i + 1))
+ for j, (k, d) in enumerate(
+ zip(resblock_kernel_sizes, resblock_dilation_sizes)
+ ):
+ self.resblocks.append(resblock(ch, k, d))
+
+ self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
+ self.ups.apply(init_weights)
+
+ if gin_channels != 0:
+ self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
+
+ def forward(self, x, g=None):
+ x = self.conv_pre(x)
+ if g is not None:
+ x = x + self.cond(g)
+
+ for i in range(self.num_upsamples):
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
+ x = self.ups[i](x)
+ xs = None
+ for j in range(self.num_kernels):
+ if xs is None:
+ xs = self.resblocks[i * self.num_kernels + j](x)
+ else:
+ xs += self.resblocks[i * self.num_kernels + j](x)
+ x = xs / self.num_kernels
+ x = F.leaky_relu(x)
+ x = self.conv_post(x)
+ x = torch.tanh(x)
+
+ return x
+
+ def remove_weight_norm(self):
+ for l in self.ups:
+ remove_weight_norm(l)
+ for l in self.resblocks:
+ l.remove_weight_norm()
+
+
+class SineGen(torch.nn.Module):
+ """Definition of sine generator
+ SineGen(samp_rate, harmonic_num = 0,
+ sine_amp = 0.1, noise_std = 0.003,
+ voiced_threshold = 0,
+ flag_for_pulse=False)
+ samp_rate: sampling rate in Hz
+ harmonic_num: number of harmonic overtones (default 0)
+ sine_amp: amplitude of sine waveform (default 0.1)
+ noise_std: std of Gaussian noise (default 0.003)
+ voiced_threshold: F0 threshold for U/V classification (default 0)
+ flag_for_pulse: whether this SineGen is used inside PulseGen (default False)
+ Note: when flag_for_pulse is True, the first time step of a voiced
+ segment is always sin(np.pi) or cos(0)
+ """
+
+ def __init__(
+ self,
+ samp_rate,
+ harmonic_num=0,
+ sine_amp=0.1,
+ noise_std=0.003,
+ voiced_threshold=0,
+ flag_for_pulse=False,
+ ):
+ super(SineGen, self).__init__()
+ self.sine_amp = sine_amp
+ self.noise_std = noise_std
+ self.harmonic_num = harmonic_num
+ self.dim = self.harmonic_num + 1
+ self.sampling_rate = samp_rate
+ self.voiced_threshold = voiced_threshold
+
+ def _f02uv(self, f0):
+ # generate uv signal
+ uv = torch.ones_like(f0)
+ uv = uv * (f0 > self.voiced_threshold)
+ return uv
+
+ def forward(self, f0, upp):
+ """sine_tensor, uv = forward(f0)
+ input F0: tensor(batchsize=1, length, dim=1)
+ f0 for unvoiced steps should be 0
+ output sine_tensor: tensor(batchsize=1, length, dim)
+ output uv: tensor(batchsize=1, length, 1)
+ """
+ with torch.no_grad():
+ f0 = f0[:, None].transpose(1, 2)
+ f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
+ # fundamental component
+ f0_buf[:, :, 0] = f0[:, :, 0]
+ for idx in np.arange(self.harmonic_num):
+ f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
+ idx + 2
+ ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
+ rad_values = (f0_buf / self.sampling_rate) % 1 # the % 1 means the harmonic products cannot be optimized away afterwards
+ rand_ini = torch.rand(
+ f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
+ )
+ rand_ini[:, 0] = 0
+ rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
+ tmp_over_one = torch.cumsum(rad_values, 1) # % 1 # applying % 1 here would keep the cumsum below from being optimized
+ tmp_over_one *= upp
+ tmp_over_one = F.interpolate(
+ tmp_over_one.transpose(2, 1),
+ scale_factor=upp,
+ mode="linear",
+ align_corners=True,
+ ).transpose(2, 1)
+ rad_values = F.interpolate(
+ rad_values.transpose(2, 1), scale_factor=upp, mode="nearest"
+ ).transpose(
+ 2, 1
+ )
+ tmp_over_one %= 1
+ tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
+ cumsum_shift = torch.zeros_like(rad_values)
+ cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
+ sine_waves = torch.sin(
+ torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi
+ )
+ sine_waves = sine_waves * self.sine_amp
+ uv = self._f02uv(f0)
+ uv = F.interpolate(
+ uv.transpose(2, 1), scale_factor=upp, mode="nearest"
+ ).transpose(2, 1)
+ noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
+ noise = noise_amp * torch.randn_like(sine_waves)
+ sine_waves = sine_waves * uv + noise
+ return sine_waves, uv, noise
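+ # Usage sketch (illustrative values): with 100 F0 frames and an
+ # upsampling factor of 480 at 48 kHz,
+ # SineGen(48000)(torch.full((1, 100), 220.0), upp=480)
+ # returns a (1, 48000, 1) harmonic source plus its U/V mask and noise.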
+
+
+class SourceModuleHnNSF(torch.nn.Module):
+ """SourceModule for hn-nsf
+ SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
+ add_noise_std=0.003, voiced_threshold=0)
+ sampling_rate: sampling_rate in Hz
+ harmonic_num: number of harmonic above F0 (default: 0)
+ sine_amp: amplitude of sine source signal (default: 0.1)
+ add_noise_std: std of additive Gaussian noise (default: 0.003)
+ note that amplitude of noise in unvoiced is decided
+ by sine_amp
+ voiced_threshold: threshold to set U/V given F0 (default: 0)
+ Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
+ F0_sampled (batchsize, length, 1)
+ Sine_source (batchsize, length, 1)
+ noise_source (batchsize, length, 1)
+ uv (batchsize, length, 1)
+ """
+
+ def __init__(
+ self,
+ sampling_rate,
+ harmonic_num=0,
+ sine_amp=0.1,
+ add_noise_std=0.003,
+ voiced_threshold=0,
+ is_half=True,
+ ):
+ super(SourceModuleHnNSF, self).__init__()
+
+ self.sine_amp = sine_amp
+ self.noise_std = add_noise_std
+ self.is_half = is_half
+ # to produce sine waveforms
+ self.l_sin_gen = SineGen(
+ sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshold
+ )
+
+ # to merge source harmonics into a single excitation
+ self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
+ self.l_tanh = torch.nn.Tanh()
+
+ def forward(self, x, upp=None):
+ sine_wavs, uv, _ = self.l_sin_gen(x, upp)
+ if self.is_half:
+ sine_wavs = sine_wavs.half()
+ sine_merge = self.l_tanh(self.l_linear(sine_wavs))
+ return sine_merge, None, None # noise, uv
+
+
+class GeneratorNSF(torch.nn.Module):
+ def __init__(
+ self,
+ initial_channel,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ gin_channels,
+ sr,
+ is_half=False,
+ ):
+ super(GeneratorNSF, self).__init__()
+ self.num_kernels = len(resblock_kernel_sizes)
+ self.num_upsamples = len(upsample_rates)
+
+ self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
+ self.m_source = SourceModuleHnNSF(
+ sampling_rate=sr, harmonic_num=0, is_half=is_half
+ )
+ self.noise_convs = nn.ModuleList()
+ self.conv_pre = Conv1d(
+ initial_channel, upsample_initial_channel, 7, 1, padding=3
+ )
+ resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
+
+ self.ups = nn.ModuleList()
+ for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
+ c_cur = upsample_initial_channel // (2 ** (i + 1))
+ self.ups.append(
+ weight_norm(
+ ConvTranspose1d(
+ upsample_initial_channel // (2**i),
+ upsample_initial_channel // (2 ** (i + 1)),
+ k,
+ u,
+ padding=(k - u) // 2,
+ )
+ )
+ )
+ if i + 1 < len(upsample_rates):
+ stride_f0 = np.prod(upsample_rates[i + 1 :])
+ self.noise_convs.append(
+ Conv1d(
+ 1,
+ c_cur,
+ kernel_size=stride_f0 * 2,
+ stride=stride_f0,
+ padding=stride_f0 // 2,
+ )
+ )
+ else:
+ self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
+
+ self.resblocks = nn.ModuleList()
+ for i in range(len(self.ups)):
+ ch = upsample_initial_channel // (2 ** (i + 1))
+ for j, (k, d) in enumerate(
+ zip(resblock_kernel_sizes, resblock_dilation_sizes)
+ ):
+ self.resblocks.append(resblock(ch, k, d))
+
+ self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
+ self.ups.apply(init_weights)
+
+ if gin_channels != 0:
+ self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
+
+ self.upp = np.prod(upsample_rates)
+
+ def forward(self, x, f0, g=None):
+ har_source, noi_source, uv = self.m_source(f0, self.upp)
+ har_source = har_source.transpose(1, 2)
+ x = self.conv_pre(x)
+ if g is not None:
+ x = x + self.cond(g)
+
+ for i in range(self.num_upsamples):
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
+ x = self.ups[i](x)
+ x_source = self.noise_convs[i](har_source)
+ x = x + x_source
+ xs = None
+ for j in range(self.num_kernels):
+ if xs is None:
+ xs = self.resblocks[i * self.num_kernels + j](x)
+ else:
+ xs += self.resblocks[i * self.num_kernels + j](x)
+ x = xs / self.num_kernels
+ x = F.leaky_relu(x)
+ x = self.conv_post(x)
+ x = torch.tanh(x)
+ return x
+
+ def remove_weight_norm(self):
+ for l in self.ups:
+ remove_weight_norm(l)
+ for l in self.resblocks:
+ l.remove_weight_norm()
+
+
+sr2sr = {
+ "32k": 32000,
+ "40k": 40000,
+ "48k": 48000,
+}
+
+
+class SynthesizerTrnMs256NSFsid(nn.Module):
+ def __init__(
+ self,
+ spec_channels,
+ segment_size,
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ spk_embed_dim,
+ gin_channels,
+ sr,
+ **kwargs
+ ):
+ super().__init__()
+ if type(sr) == type("strr"):
+ sr = sr2sr[sr]
+ self.spec_channels = spec_channels
+ self.inter_channels = inter_channels
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.resblock = resblock
+ self.resblock_kernel_sizes = resblock_kernel_sizes
+ self.resblock_dilation_sizes = resblock_dilation_sizes
+ self.upsample_rates = upsample_rates
+ self.upsample_initial_channel = upsample_initial_channel
+ self.upsample_kernel_sizes = upsample_kernel_sizes
+ self.segment_size = segment_size
+ self.gin_channels = gin_channels
+ # self.hop_length = hop_length#
+ self.spk_embed_dim = spk_embed_dim
+ self.enc_p = TextEncoder256(
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ )
+ self.dec = GeneratorNSF(
+ inter_channels,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ gin_channels=gin_channels,
+ sr=sr,
+ is_half=kwargs["is_half"],
+ )
+ self.enc_q = PosteriorEncoder(
+ spec_channels,
+ inter_channels,
+ hidden_channels,
+ 5,
+ 1,
+ 16,
+ gin_channels=gin_channels,
+ )
+ self.flow = ResidualCouplingBlock(
+ inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
+ )
+ self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
+ print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
+
+ def remove_weight_norm(self):
+ self.dec.remove_weight_norm()
+ self.flow.remove_weight_norm()
+ self.enc_q.remove_weight_norm()
+
+ def forward(
+ self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
+ ): # ds is the speaker id, shape [bs, 1]
+ # print(1,pitch.shape)#[bs,t]
+ g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]; the trailing 1 is t, broadcast over time
+ m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
+ z_p = self.flow(z, y_mask, g=g)
+ z_slice, ids_slice = commons.rand_slice_segments(
+ z, y_lengths, self.segment_size
+ )
+ # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)
+ pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
+ # print(-2,pitchf.shape,z_slice.shape)
+ o = self.dec(z_slice, pitchf, g=g)
+ return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
+
+ def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None):
+ g = self.emb_g(sid).unsqueeze(-1)
+ m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
+ z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
+ if rate:
+ head = int(z_p.shape[2] * rate)
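+ # rate in (0, 1] keeps only the newest `head` frames of the latent, mask
+ # and F0 track, so the decoder synthesizes just the tail of the utterance
+ # (a cheap partial-decode path; the classes below reuse the same trick).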
+ z_p = z_p[:, :, -head:]
+ x_mask = x_mask[:, :, -head:]
+ nsff0 = nsff0[:, -head:]
+ z = self.flow(z_p, x_mask, g=g, reverse=True)
+ o = self.dec(z * x_mask, nsff0, g=g)
+ return o, x_mask, (z, z_p, m_p, logs_p)
+
+
+class SynthesizerTrnMs768NSFsid(nn.Module):
+ def __init__(
+ self,
+ spec_channels,
+ segment_size,
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ spk_embed_dim,
+ gin_channels,
+ sr,
+ **kwargs
+ ):
+ super().__init__()
+ if type(sr) == type("strr"):
+ sr = sr2sr[sr]
+ self.spec_channels = spec_channels
+ self.inter_channels = inter_channels
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.resblock = resblock
+ self.resblock_kernel_sizes = resblock_kernel_sizes
+ self.resblock_dilation_sizes = resblock_dilation_sizes
+ self.upsample_rates = upsample_rates
+ self.upsample_initial_channel = upsample_initial_channel
+ self.upsample_kernel_sizes = upsample_kernel_sizes
+ self.segment_size = segment_size
+ self.gin_channels = gin_channels
+ # self.hop_length = hop_length#
+ self.spk_embed_dim = spk_embed_dim
+ self.enc_p = TextEncoder768(
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ )
+ self.dec = GeneratorNSF(
+ inter_channels,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ gin_channels=gin_channels,
+ sr=sr,
+ is_half=kwargs["is_half"],
+ )
+ self.enc_q = PosteriorEncoder(
+ spec_channels,
+ inter_channels,
+ hidden_channels,
+ 5,
+ 1,
+ 16,
+ gin_channels=gin_channels,
+ )
+ self.flow = ResidualCouplingBlock(
+ inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
+ )
+ self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
+ print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
+
+ def remove_weight_norm(self):
+ self.dec.remove_weight_norm()
+ self.flow.remove_weight_norm()
+ self.enc_q.remove_weight_norm()
+
+ def forward(
+ self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
+ ): # ds is the speaker id, shape [bs, 1]
+ # print(1,pitch.shape)#[bs,t]
+ g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]; the trailing 1 is t, broadcast over time
+ m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
+ z_p = self.flow(z, y_mask, g=g)
+ z_slice, ids_slice = commons.rand_slice_segments(
+ z, y_lengths, self.segment_size
+ )
+ # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)
+ pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
+ # print(-2,pitchf.shape,z_slice.shape)
+ o = self.dec(z_slice, pitchf, g=g)
+ return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
+
+ def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None):
+ g = self.emb_g(sid).unsqueeze(-1)
+ m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
+ z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
+ if rate:
+ head = int(z_p.shape[2] * rate)
+ z_p = z_p[:, :, -head:]
+ x_mask = x_mask[:, :, -head:]
+ nsff0 = nsff0[:, -head:]
+ z = self.flow(z_p, x_mask, g=g, reverse=True)
+ o = self.dec(z * x_mask, nsff0, g=g)
+ return o, x_mask, (z, z_p, m_p, logs_p)
+
+
+class SynthesizerTrnMs256NSFsid_nono(nn.Module):
+ def __init__(
+ self,
+ spec_channels,
+ segment_size,
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ spk_embed_dim,
+ gin_channels,
+ sr=None,
+ **kwargs
+ ):
+ super().__init__()
+ self.spec_channels = spec_channels
+ self.inter_channels = inter_channels
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.resblock = resblock
+ self.resblock_kernel_sizes = resblock_kernel_sizes
+ self.resblock_dilation_sizes = resblock_dilation_sizes
+ self.upsample_rates = upsample_rates
+ self.upsample_initial_channel = upsample_initial_channel
+ self.upsample_kernel_sizes = upsample_kernel_sizes
+ self.segment_size = segment_size
+ self.gin_channels = gin_channels
+ # self.hop_length = hop_length#
+ self.spk_embed_dim = spk_embed_dim
+ self.enc_p = TextEncoder256(
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ f0=False,
+ )
+ self.dec = Generator(
+ inter_channels,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ gin_channels=gin_channels,
+ )
+ self.enc_q = PosteriorEncoder(
+ spec_channels,
+ inter_channels,
+ hidden_channels,
+ 5,
+ 1,
+ 16,
+ gin_channels=gin_channels,
+ )
+ self.flow = ResidualCouplingBlock(
+ inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
+ )
+ self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
+ print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
+
+ def remove_weight_norm(self):
+ self.dec.remove_weight_norm()
+ self.flow.remove_weight_norm()
+ self.enc_q.remove_weight_norm()
+
+ def forward(self, phone, phone_lengths, y, y_lengths, ds): # ds is the speaker id, shape [bs, 1]
+ g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]; the trailing 1 is t, broadcast over time
+ m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
+ z_p = self.flow(z, y_mask, g=g)
+ z_slice, ids_slice = commons.rand_slice_segments(
+ z, y_lengths, self.segment_size
+ )
+ o = self.dec(z_slice, g=g)
+ return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
+
+ def infer(self, phone, phone_lengths, sid, rate=None):
+ g = self.emb_g(sid).unsqueeze(-1)
+ m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
+ z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
+ if rate:
+ head = int(z_p.shape[2] * rate)
+ z_p = z_p[:, :, -head:]
+ x_mask = x_mask[:, :, -head:]
+ z = self.flow(z_p, x_mask, g=g, reverse=True)
+ o = self.dec(z * x_mask, g=g)
+ return o, x_mask, (z, z_p, m_p, logs_p)
+
+
+class SynthesizerTrnMs768NSFsid_nono(nn.Module):
+ def __init__(
+ self,
+ spec_channels,
+ segment_size,
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ spk_embed_dim,
+ gin_channels,
+ sr=None,
+ **kwargs
+ ):
+ super().__init__()
+ self.spec_channels = spec_channels
+ self.inter_channels = inter_channels
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.resblock = resblock
+ self.resblock_kernel_sizes = resblock_kernel_sizes
+ self.resblock_dilation_sizes = resblock_dilation_sizes
+ self.upsample_rates = upsample_rates
+ self.upsample_initial_channel = upsample_initial_channel
+ self.upsample_kernel_sizes = upsample_kernel_sizes
+ self.segment_size = segment_size
+ self.gin_channels = gin_channels
+ # self.hop_length = hop_length#
+ self.spk_embed_dim = spk_embed_dim
+ self.enc_p = TextEncoder768(
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ f0=False,
+ )
+ self.dec = Generator(
+ inter_channels,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ gin_channels=gin_channels,
+ )
+ self.enc_q = PosteriorEncoder(
+ spec_channels,
+ inter_channels,
+ hidden_channels,
+ 5,
+ 1,
+ 16,
+ gin_channels=gin_channels,
+ )
+ self.flow = ResidualCouplingBlock(
+ inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
+ )
+ self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
+ print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
+
+ def remove_weight_norm(self):
+ self.dec.remove_weight_norm()
+ self.flow.remove_weight_norm()
+ self.enc_q.remove_weight_norm()
+
+ def forward(self, phone, phone_lengths, y, y_lengths, ds): # ds is the speaker id, shape [bs, 1]
+ g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]; the trailing 1 is t, broadcast over time
+ m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
+ z_p = self.flow(z, y_mask, g=g)
+ z_slice, ids_slice = commons.rand_slice_segments(
+ z, y_lengths, self.segment_size
+ )
+ o = self.dec(z_slice, g=g)
+ return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
+
+ def infer(self, phone, phone_lengths, sid, rate=None):
+ g = self.emb_g(sid).unsqueeze(-1)
+ m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
+ z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
+ if rate:
+ head = int(z_p.shape[2] * rate)
+ z_p = z_p[:, :, -head:]
+ x_mask = x_mask[:, :, -head:]
+ z = self.flow(z_p, x_mask, g=g, reverse=True)
+ o = self.dec(z * x_mask, g=g)
+ return o, x_mask, (z, z_p, m_p, logs_p)
+
+
+class MultiPeriodDiscriminator(torch.nn.Module):
+ def __init__(self, use_spectral_norm=False):
+ super(MultiPeriodDiscriminator, self).__init__()
+ periods = [2, 3, 5, 7, 11, 17]
+ # periods = [3, 5, 7, 11, 17, 23, 37]
+
+ discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
+ discs = discs + [
+ DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
+ ]
+ self.discriminators = nn.ModuleList(discs)
+
+ def forward(self, y, y_hat):
+ y_d_rs = []
+ y_d_gs = []
+ fmap_rs = []
+ fmap_gs = []
+ for i, d in enumerate(self.discriminators):
+ y_d_r, fmap_r = d(y)
+ y_d_g, fmap_g = d(y_hat)
+ # for j in range(len(fmap_r)):
+ # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
+ y_d_rs.append(y_d_r)
+ y_d_gs.append(y_d_g)
+ fmap_rs.append(fmap_r)
+ fmap_gs.append(fmap_g)
+
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
+
+
+class MultiPeriodDiscriminatorV2(torch.nn.Module):
+ def __init__(self, use_spectral_norm=False):
+ super(MultiPeriodDiscriminatorV2, self).__init__()
+ # periods = [2, 3, 5, 7, 11, 17]
+ periods = [2, 3, 5, 7, 11, 17, 23, 37]
+
+ discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
+ discs = discs + [
+ DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
+ ]
+ self.discriminators = nn.ModuleList(discs)
+
+ def forward(self, y, y_hat):
+ y_d_rs = []
+ y_d_gs = []
+ fmap_rs = []
+ fmap_gs = []
+ for i, d in enumerate(self.discriminators):
+ y_d_r, fmap_r = d(y)
+ y_d_g, fmap_g = d(y_hat)
+ # for j in range(len(fmap_r)):
+ # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
+ y_d_rs.append(y_d_r)
+ y_d_gs.append(y_d_g)
+ fmap_rs.append(fmap_r)
+ fmap_gs.append(fmap_g)
+
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
+
+
+class DiscriminatorS(torch.nn.Module):
+ def __init__(self, use_spectral_norm=False):
+ super(DiscriminatorS, self).__init__()
+ norm_f = spectral_norm if use_spectral_norm else weight_norm
+ self.convs = nn.ModuleList(
+ [
+ norm_f(Conv1d(1, 16, 15, 1, padding=7)),
+ norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
+ norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
+ norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
+ norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
+ norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
+ ]
+ )
+ self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
+
+ def forward(self, x):
+ fmap = []
+
+ for l in self.convs:
+ x = l(x)
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
+ fmap.append(x)
+ x = self.conv_post(x)
+ fmap.append(x)
+ x = torch.flatten(x, 1, -1)
+
+ return x, fmap
+
+
+class DiscriminatorP(torch.nn.Module):
+ def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
+ super(DiscriminatorP, self).__init__()
+ self.period = period
+ self.use_spectral_norm = use_spectral_norm
+ norm_f = spectral_norm if use_spectral_norm else weight_norm
+ self.convs = nn.ModuleList(
+ [
+ norm_f(
+ Conv2d(
+ 1,
+ 32,
+ (kernel_size, 1),
+ (stride, 1),
+ padding=(get_padding(kernel_size, 1), 0),
+ )
+ ),
+ norm_f(
+ Conv2d(
+ 32,
+ 128,
+ (kernel_size, 1),
+ (stride, 1),
+ padding=(get_padding(kernel_size, 1), 0),
+ )
+ ),
+ norm_f(
+ Conv2d(
+ 128,
+ 512,
+ (kernel_size, 1),
+ (stride, 1),
+ padding=(get_padding(kernel_size, 1), 0),
+ )
+ ),
+ norm_f(
+ Conv2d(
+ 512,
+ 1024,
+ (kernel_size, 1),
+ (stride, 1),
+ padding=(get_padding(kernel_size, 1), 0),
+ )
+ ),
+ norm_f(
+ Conv2d(
+ 1024,
+ 1024,
+ (kernel_size, 1),
+ 1,
+ padding=(get_padding(kernel_size, 1), 0),
+ )
+ ),
+ ]
+ )
+ self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
+
+ def forward(self, x):
+ fmap = []
+
+ # 1d to 2d
+ b, c, t = x.shape
+ if t % self.period != 0: # pad first
+ n_pad = self.period - (t % self.period)
+ x = F.pad(x, (0, n_pad), "reflect")
+ t = t + n_pad
+ x = x.view(b, c, t // self.period, self.period)
+
+ for l in self.convs:
+ x = l(x)
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
+ fmap.append(x)
+ x = self.conv_post(x)
+ fmap.append(x)
+ x = torch.flatten(x, 1, -1)
+
+ return x, fmap
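+
+
+ # Worked example of the 1d-to-2d reshape in DiscriminatorP.forward above
+ # (a sketch with assumed sizes): a length-100 signal with period 7 is
+ # reflect-padded by 5 samples to 105, then viewed as a 15x7 grid.
+ def _example_period_reshape():
+     import torch
+     import torch.nn.functional as F
+     x = torch.randn(1, 1, 100)  # [batch, channels, time]
+     period = 7
+     n_pad = period - (x.shape[2] % period)  # 7 - (100 % 7) = 5
+     x = F.pad(x, (0, n_pad), "reflect")  # time: 100 -> 105
+     return x.view(1, 1, x.shape[2] // period, period).shape  # [1, 1, 15, 7]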
diff --git a/lib/infer_pack/models_dml.py b/lib/infer_pack/models_dml.py
new file mode 100644
index 000000000..958d7b292
--- /dev/null
+++ b/lib/infer_pack/models_dml.py
@@ -0,0 +1,1124 @@
+import math, pdb, os
+from time import time as ttime
+import torch
+from torch import nn
+from torch.nn import functional as F
+from lib.infer_pack import modules
+from lib.infer_pack import attentions
+from lib.infer_pack import commons
+from lib.infer_pack.commons import init_weights, get_padding
+from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
+from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
+from lib.infer_pack.commons import init_weights
+import numpy as np
+from lib.infer_pack import commons
+
+
+class TextEncoder256(nn.Module):
+ def __init__(
+ self,
+ out_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ f0=True,
+ ):
+ super().__init__()
+ self.out_channels = out_channels
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.emb_phone = nn.Linear(256, hidden_channels)
+ self.lrelu = nn.LeakyReLU(0.1, inplace=True)
+ if f0:
+ self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
+ self.encoder = attentions.Encoder(
+ hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
+ )
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+ def forward(self, phone, pitch, lengths):
+ if pitch is None:
+ x = self.emb_phone(phone)
+ else:
+ x = self.emb_phone(phone) + self.emb_pitch(pitch)
+ x = x * math.sqrt(self.hidden_channels) # [b, t, h]
+ x = self.lrelu(x)
+ x = torch.transpose(x, 1, -1) # [b, h, t]
+ x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
+ x.dtype
+ )
+ x = self.encoder(x * x_mask, x_mask)
+ stats = self.proj(x) * x_mask
+
+ m, logs = torch.split(stats, self.out_channels, dim=1)
+ return m, logs, x_mask
+
+
+class TextEncoder768(nn.Module):
+ def __init__(
+ self,
+ out_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ f0=True,
+ ):
+ super().__init__()
+ self.out_channels = out_channels
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.emb_phone = nn.Linear(768, hidden_channels)
+ self.lrelu = nn.LeakyReLU(0.1, inplace=True)
+ if f0:
+ self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
+ self.encoder = attentions.Encoder(
+ hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
+ )
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+ def forward(self, phone, pitch, lengths):
+ if pitch is None:
+ x = self.emb_phone(phone)
+ else:
+ x = self.emb_phone(phone) + self.emb_pitch(pitch)
+ x = x * math.sqrt(self.hidden_channels) # [b, t, h]
+ x = self.lrelu(x)
+ x = torch.transpose(x, 1, -1) # [b, h, t]
+ x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
+ x.dtype
+ )
+ x = self.encoder(x * x_mask, x_mask)
+ stats = self.proj(x) * x_mask
+
+ m, logs = torch.split(stats, self.out_channels, dim=1)
+ return m, logs, x_mask
+
+
+class ResidualCouplingBlock(nn.Module):
+ def __init__(
+ self,
+ channels,
+ hidden_channels,
+ kernel_size,
+ dilation_rate,
+ n_layers,
+ n_flows=4,
+ gin_channels=0,
+ ):
+ super().__init__()
+ self.channels = channels
+ self.hidden_channels = hidden_channels
+ self.kernel_size = kernel_size
+ self.dilation_rate = dilation_rate
+ self.n_layers = n_layers
+ self.n_flows = n_flows
+ self.gin_channels = gin_channels
+
+ self.flows = nn.ModuleList()
+ for i in range(n_flows):
+ self.flows.append(
+ modules.ResidualCouplingLayer(
+ channels,
+ hidden_channels,
+ kernel_size,
+ dilation_rate,
+ n_layers,
+ gin_channels=gin_channels,
+ mean_only=True,
+ )
+ )
+ self.flows.append(modules.Flip())
+
+ def forward(self, x, x_mask, g=None, reverse=False):
+ if not reverse:
+ for flow in self.flows:
+ x, _ = flow(x, x_mask, g=g, reverse=reverse)
+ else:
+ for flow in reversed(self.flows):
+ x = flow(x, x_mask, g=g, reverse=reverse)
+ return x
+
+ def remove_weight_norm(self):
+ for i in range(self.n_flows):
+ self.flows[i * 2].remove_weight_norm()
+
+
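+ # Minimal invertibility sketch (illustrative, assumed sizes): the same block
+ # maps forward for training and, with reverse=True, back for inference; the
+ # round trip recovers the input up to float error.
+ def _example_flow_roundtrip():
+     import torch
+     flow = ResidualCouplingBlock(192, 192, 5, 1, 3)
+     x = torch.randn(1, 192, 50)
+     x_mask = torch.ones(1, 1, 50)
+     y = flow(x, x_mask)  # training direction
+     x_rec = flow(y, x_mask, reverse=True)  # inference direction
+     return torch.allclose(x, x_rec, atol=1e-4)
+
+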
+class PosteriorEncoder(nn.Module):
+ def __init__(
+ self,
+ in_channels,
+ out_channels,
+ hidden_channels,
+ kernel_size,
+ dilation_rate,
+ n_layers,
+ gin_channels=0,
+ ):
+ super().__init__()
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.hidden_channels = hidden_channels
+ self.kernel_size = kernel_size
+ self.dilation_rate = dilation_rate
+ self.n_layers = n_layers
+ self.gin_channels = gin_channels
+
+ self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
+ self.enc = modules.WN(
+ hidden_channels,
+ kernel_size,
+ dilation_rate,
+ n_layers,
+ gin_channels=gin_channels,
+ )
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+ def forward(self, x, x_lengths, g=None):
+ x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
+ x.dtype
+ )
+ x = self.pre(x) * x_mask
+ x = self.enc(x, x_mask, g=g)
+ stats = self.proj(x) * x_mask
+ m, logs = torch.split(stats, self.out_channels, dim=1)
+ z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
+ return z, m, logs, x_mask
+
+ def remove_weight_norm(self):
+ self.enc.remove_weight_norm()
+
+
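+ # The sampling in PosteriorEncoder.forward above is the standard
+ # reparameterization trick, z = m + eps * exp(logs) with eps ~ N(0, I).
+ # A minimal sketch with assumed shapes:
+ def _example_reparameterize():
+     import torch
+     m = torch.zeros(1, 192, 50)  # predicted mean
+     logs = torch.zeros(1, 192, 50)  # predicted log-std
+     z = m + torch.randn_like(m) * torch.exp(logs)
+     return z.shape  # torch.Size([1, 192, 50])
+
+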
+class Generator(torch.nn.Module):
+ def __init__(
+ self,
+ initial_channel,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ gin_channels=0,
+ ):
+ super(Generator, self).__init__()
+ self.num_kernels = len(resblock_kernel_sizes)
+ self.num_upsamples = len(upsample_rates)
+ self.conv_pre = Conv1d(
+ initial_channel, upsample_initial_channel, 7, 1, padding=3
+ )
+ resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
+
+ self.ups = nn.ModuleList()
+ for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
+ self.ups.append(
+ weight_norm(
+ ConvTranspose1d(
+ upsample_initial_channel // (2**i),
+ upsample_initial_channel // (2 ** (i + 1)),
+ k,
+ u,
+ padding=(k - u) // 2,
+ )
+ )
+ )
+
+ self.resblocks = nn.ModuleList()
+ for i in range(len(self.ups)):
+ ch = upsample_initial_channel // (2 ** (i + 1))
+ for j, (k, d) in enumerate(
+ zip(resblock_kernel_sizes, resblock_dilation_sizes)
+ ):
+ self.resblocks.append(resblock(ch, k, d))
+
+ self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
+ self.ups.apply(init_weights)
+
+ if gin_channels != 0:
+ self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
+
+ def forward(self, x, g=None):
+ x = self.conv_pre(x)
+ if g is not None:
+ x = x + self.cond(g)
+
+ for i in range(self.num_upsamples):
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
+ x = self.ups[i](x)
+ xs = None
+ for j in range(self.num_kernels):
+ if xs is None:
+ xs = self.resblocks[i * self.num_kernels + j](x)
+ else:
+ xs += self.resblocks[i * self.num_kernels + j](x)
+ x = xs / self.num_kernels
+ x = F.leaky_relu(x)
+ x = self.conv_post(x)
+ x = torch.tanh(x)
+
+ return x
+
+ def remove_weight_norm(self):
+ for l in self.ups:
+ remove_weight_norm(l)
+ for l in self.resblocks:
+ l.remove_weight_norm()
+
+
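+ # The generator's total upsampling factor is the product of upsample_rates
+ # and must equal the STFT hop length, so each latent frame expands to one
+ # hop of waveform. A quick check with hypothetical 40k-style values:
+ def _example_upsample_factor():
+     import numpy as np
+     upsample_rates = [10, 10, 2, 2]  # assumed config, not read from disk
+     return int(np.prod(upsample_rates))  # 400 samples per latent frame
+
+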
+class SineGen(torch.nn.Module):
+ """Definition of sine generator
+ SineGen(samp_rate, harmonic_num = 0,
+ sine_amp = 0.1, noise_std = 0.003,
+ voiced_threshold = 0,
+ flag_for_pulse=False)
+ samp_rate: sampling rate in Hz
+ harmonic_num: number of harmonic overtones (default 0)
+ sine_amp: amplitude of sine waveform (default 0.1)
+ noise_std: std of Gaussian noise (default 0.003)
+ voiced_threshold: F0 threshold for U/V classification (default 0)
+ flag_for_pulse: this SineGen is used inside PulseGen (default False)
+ Note: when flag_for_pulse is True, the first time step of a voiced
+ segment is always sin(np.pi) or cos(0)
+ """
+
+ def __init__(
+ self,
+ samp_rate,
+ harmonic_num=0,
+ sine_amp=0.1,
+ noise_std=0.003,
+ voiced_threshold=0,
+ flag_for_pulse=False,
+ ):
+ super(SineGen, self).__init__()
+ self.sine_amp = sine_amp
+ self.noise_std = noise_std
+ self.harmonic_num = harmonic_num
+ self.dim = self.harmonic_num + 1
+ self.sampling_rate = samp_rate
+ self.voiced_threshold = voiced_threshold
+
+ def _f02uv(self, f0):
+ # generate uv signal
+ uv = torch.ones_like(f0)
+ uv = uv * (f0 > self.voiced_threshold)
+ return uv.float()
+
+ def forward(self, f0, upp):
+ """sine_tensor, uv = forward(f0)
+ input F0: tensor(batchsize=1, length, dim=1)
+ f0 for unvoiced steps should be 0
+ output sine_tensor: tensor(batchsize=1, length, dim)
+ output uv: tensor(batchsize=1, length, 1)
+ """
+ with torch.no_grad():
+ f0 = f0[:, None].transpose(1, 2)
+ f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
+ # fundamental component
+ f0_buf[:, :, 0] = f0[:, :, 0]
+ for idx in np.arange(self.harmonic_num):
+ f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
+ idx + 2
+ ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
+ rad_values = (f0_buf / self.sampling_rate) % 1 # the % 1 means the harmonic products can no longer be optimized away afterwards
+ rand_ini = torch.rand(
+ f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
+ )
+ rand_ini[:, 0] = 0
+ rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
+ tmp_over_one = torch.cumsum(rad_values, 1) # % 1 # taking % 1 here would prevent optimizing the cumsum below
+ tmp_over_one *= upp
+ tmp_over_one = F.interpolate(
+ tmp_over_one.transpose(2, 1),
+ scale_factor=upp,
+ mode="linear",
+ align_corners=True,
+ ).transpose(2, 1)
+ rad_values = F.interpolate(
+ rad_values.transpose(2, 1), scale_factor=upp, mode="nearest"
+ ).transpose(
+ 2, 1
+ ) #######
+ tmp_over_one %= 1
+ tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
+ cumsum_shift = torch.zeros_like(rad_values)
+ cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
+ sine_waves = torch.sin(
+ torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi
+ )
+ sine_waves = sine_waves * self.sine_amp
+ uv = self._f02uv(f0)
+ uv = F.interpolate(
+ uv.transpose(2, 1), scale_factor=upp, mode="nearest"
+ ).transpose(2, 1)
+ noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
+ noise = noise_amp * torch.randn_like(sine_waves)
+ sine_waves = sine_waves * uv + noise
+ return sine_waves, uv, noise
+
+
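+ # Usage sketch for SineGen (illustrative; shapes follow the docstring, the
+ # concrete numbers are assumptions). upp is the per-frame upsampling factor,
+ # i.e. the product of the generator's upsample_rates.
+ def _example_sine_gen():
+     import torch
+     gen = SineGen(samp_rate=40000, harmonic_num=0)
+     f0 = torch.full((1, 100), 220.0)  # [batch, frames], a flat 220 Hz contour
+     sine, uv, noise = gen(f0, upp=400)  # 100 frames -> 40000 samples
+     return sine.shape, uv.shape  # both torch.Size([1, 40000, 1])
+
+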
+class SourceModuleHnNSF(torch.nn.Module):
+ """SourceModule for hn-nsf
+ SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
+ add_noise_std=0.003, voiced_threshod=0)
+ sampling_rate: sampling_rate in Hz
+ harmonic_num: number of harmonic above F0 (default: 0)
+ sine_amp: amplitude of sine source signal (default: 0.1)
+ add_noise_std: std of additive Gaussian noise (default: 0.003)
+ note that amplitude of noise in unvoiced is decided
+ by sine_amp
+ voiced_threshold: threshold to set U/V given F0 (default: 0)
+ Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
+ F0_sampled (batchsize, length, 1)
+ Sine_source (batchsize, length, 1)
+ noise_source (batchsize, length, 1)
+ uv (batchsize, length, 1)
+ """
+
+ def __init__(
+ self,
+ sampling_rate,
+ harmonic_num=0,
+ sine_amp=0.1,
+ add_noise_std=0.003,
+ voiced_threshod=0,
+ is_half=True,
+ ):
+ super(SourceModuleHnNSF, self).__init__()
+
+ self.sine_amp = sine_amp
+ self.noise_std = add_noise_std
+ self.is_half = is_half
+ # to produce sine waveforms
+ self.l_sin_gen = SineGen(
+ sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod
+ )
+
+ # to merge source harmonics into a single excitation
+ self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
+ self.l_tanh = torch.nn.Tanh()
+
+ def forward(self, x, upp=None):
+ sine_wavs, uv, _ = self.l_sin_gen(x, upp)
+ if self.is_half:
+ sine_wavs = sine_wavs.half()
+ sine_merge = self.l_tanh(self.l_linear(sine_wavs))
+ return sine_merge, None, None # noise, uv
+
+
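+ # Usage sketch (illustrative, assumed sizes): the source module collapses the
+ # per-harmonic sines into one excitation channel via a tanh-squashed linear
+ # layer; is_half=False keeps everything in float32 for a CPU run.
+ def _example_source_module():
+     import torch
+     src = SourceModuleHnNSF(sampling_rate=40000, harmonic_num=0, is_half=False)
+     f0 = torch.full((1, 100), 220.0)
+     sine_merge, _, _ = src(f0, upp=400)
+     return sine_merge.shape  # torch.Size([1, 40000, 1])
+
+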
+class GeneratorNSF(torch.nn.Module):
+ def __init__(
+ self,
+ initial_channel,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ gin_channels,
+ sr,
+ is_half=False,
+ ):
+ super(GeneratorNSF, self).__init__()
+ self.num_kernels = len(resblock_kernel_sizes)
+ self.num_upsamples = len(upsample_rates)
+
+ self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
+ self.m_source = SourceModuleHnNSF(
+ sampling_rate=sr, harmonic_num=0, is_half=is_half
+ )
+ self.noise_convs = nn.ModuleList()
+ self.conv_pre = Conv1d(
+ initial_channel, upsample_initial_channel, 7, 1, padding=3
+ )
+ resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
+
+ self.ups = nn.ModuleList()
+ for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
+ c_cur = upsample_initial_channel // (2 ** (i + 1))
+ self.ups.append(
+ weight_norm(
+ ConvTranspose1d(
+ upsample_initial_channel // (2**i),
+ upsample_initial_channel // (2 ** (i + 1)),
+ k,
+ u,
+ padding=(k - u) // 2,
+ )
+ )
+ )
+ if i + 1 < len(upsample_rates):
+ stride_f0 = np.prod(upsample_rates[i + 1 :])
+ self.noise_convs.append(
+ Conv1d(
+ 1,
+ c_cur,
+ kernel_size=stride_f0 * 2,
+ stride=stride_f0,
+ padding=stride_f0 // 2,
+ )
+ )
+ else:
+ self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
+
+ self.resblocks = nn.ModuleList()
+ for i in range(len(self.ups)):
+ ch = upsample_initial_channel // (2 ** (i + 1))
+ for j, (k, d) in enumerate(
+ zip(resblock_kernel_sizes, resblock_dilation_sizes)
+ ):
+ self.resblocks.append(resblock(ch, k, d))
+
+ self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
+ self.ups.apply(init_weights)
+
+ if gin_channels != 0:
+ self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
+
+ self.upp = np.prod(upsample_rates)
+
+ def forward(self, x, f0, g=None):
+ har_source, noi_source, uv = self.m_source(f0, self.upp)
+ har_source = har_source.transpose(1, 2)
+ x = self.conv_pre(x)
+ if g is not None:
+ x = x + self.cond(g)
+
+ for i in range(self.num_upsamples):
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
+ x = self.ups[i](x)
+ x_source = self.noise_convs[i](har_source)
+ x = x + x_source
+ xs = None
+ for j in range(self.num_kernels):
+ if xs is None:
+ xs = self.resblocks[i * self.num_kernels + j](x)
+ else:
+ xs += self.resblocks[i * self.num_kernels + j](x)
+ x = xs / self.num_kernels
+ x = F.leaky_relu(x)
+ x = self.conv_post(x)
+ x = torch.tanh(x)
+ return x
+
+ def remove_weight_norm(self):
+ for l in self.ups:
+ remove_weight_norm(l)
+ for l in self.resblocks:
+ l.remove_weight_norm()
+
+
+sr2sr = {
+ "32k": 32000,
+ "40k": 40000,
+ "48k": 48000,
+}
+
+
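+ # The synthesizer constructors below accept the sample rate either as a
+ # string key or an int; a trivial sketch of the resolution they perform:
+ def _example_resolve_sr(sr="40k"):
+     return sr2sr[sr] if isinstance(sr, str) else sr  # "40k" -> 40000
+
+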
+class SynthesizerTrnMs256NSFsid(nn.Module):
+ def __init__(
+ self,
+ spec_channels,
+ segment_size,
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ spk_embed_dim,
+ gin_channels,
+ sr,
+ **kwargs
+ ):
+ super().__init__()
+ if isinstance(sr, str):
+ sr = sr2sr[sr]
+ self.spec_channels = spec_channels
+ self.inter_channels = inter_channels
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.resblock = resblock
+ self.resblock_kernel_sizes = resblock_kernel_sizes
+ self.resblock_dilation_sizes = resblock_dilation_sizes
+ self.upsample_rates = upsample_rates
+ self.upsample_initial_channel = upsample_initial_channel
+ self.upsample_kernel_sizes = upsample_kernel_sizes
+ self.segment_size = segment_size
+ self.gin_channels = gin_channels
+ # self.hop_length = hop_length#
+ self.spk_embed_dim = spk_embed_dim
+ self.enc_p = TextEncoder256(
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ )
+ self.dec = GeneratorNSF(
+ inter_channels,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ gin_channels=gin_channels,
+ sr=sr,
+ is_half=kwargs["is_half"],
+ )
+ self.enc_q = PosteriorEncoder(
+ spec_channels,
+ inter_channels,
+ hidden_channels,
+ 5,
+ 1,
+ 16,
+ gin_channels=gin_channels,
+ )
+ self.flow = ResidualCouplingBlock(
+ inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
+ )
+ self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
+ print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
+
+ def remove_weight_norm(self):
+ self.dec.remove_weight_norm()
+ self.flow.remove_weight_norm()
+ self.enc_q.remove_weight_norm()
+
+ def forward(
+ self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
+ ): # here ds is the speaker id, shape [bs, 1]
+ # print(1,pitch.shape)#[bs,t]
+ g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]; the trailing 1 is the time axis, broadcast over t
+ m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
+ z_p = self.flow(z, y_mask, g=g)
+ z_slice, ids_slice = commons.rand_slice_segments(
+ z, y_lengths, self.segment_size
+ )
+ # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)
+ pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
+ # print(-2,pitchf.shape,z_slice.shape)
+ o = self.dec(z_slice, pitchf, g=g)
+ return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
+
+ def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None):
+ g = self.emb_g(sid).unsqueeze(-1)
+ m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
+ z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
+ z = self.flow(z_p, x_mask, g=g, reverse=True)
+ o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
+ return o, x_mask, (z, z_p, m_p, logs_p)
+
+
+class SynthesizerTrnMs768NSFsid(nn.Module):
+ def __init__(
+ self,
+ spec_channels,
+ segment_size,
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ spk_embed_dim,
+ gin_channels,
+ sr,
+ **kwargs
+ ):
+ super().__init__()
+ if isinstance(sr, str):
+ sr = sr2sr[sr]
+ self.spec_channels = spec_channels
+ self.inter_channels = inter_channels
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.resblock = resblock
+ self.resblock_kernel_sizes = resblock_kernel_sizes
+ self.resblock_dilation_sizes = resblock_dilation_sizes
+ self.upsample_rates = upsample_rates
+ self.upsample_initial_channel = upsample_initial_channel
+ self.upsample_kernel_sizes = upsample_kernel_sizes
+ self.segment_size = segment_size
+ self.gin_channels = gin_channels
+ # self.hop_length = hop_length#
+ self.spk_embed_dim = spk_embed_dim
+ self.enc_p = TextEncoder768(
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ )
+ self.dec = GeneratorNSF(
+ inter_channels,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ gin_channels=gin_channels,
+ sr=sr,
+ is_half=kwargs["is_half"],
+ )
+ self.enc_q = PosteriorEncoder(
+ spec_channels,
+ inter_channels,
+ hidden_channels,
+ 5,
+ 1,
+ 16,
+ gin_channels=gin_channels,
+ )
+ self.flow = ResidualCouplingBlock(
+ inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
+ )
+ self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
+ print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
+
+ def remove_weight_norm(self):
+ self.dec.remove_weight_norm()
+ self.flow.remove_weight_norm()
+ self.enc_q.remove_weight_norm()
+
+ def forward(
+ self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
+ ): # here ds is the speaker id, shape [bs, 1]
+ # print(1,pitch.shape)#[bs,t]
+ g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]; the trailing 1 is the time axis, broadcast over t
+ m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
+ z_p = self.flow(z, y_mask, g=g)
+ z_slice, ids_slice = commons.rand_slice_segments(
+ z, y_lengths, self.segment_size
+ )
+ # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)
+ pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
+ # print(-2,pitchf.shape,z_slice.shape)
+ o = self.dec(z_slice, pitchf, g=g)
+ return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
+
+ def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None):
+ g = self.emb_g(sid).unsqueeze(-1)
+ m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
+ z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
+ z = self.flow(z_p, x_mask, g=g, reverse=True)
+ o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
+ return o, x_mask, (z, z_p, m_p, logs_p)
+
+
+class SynthesizerTrnMs256NSFsid_nono(nn.Module):
+ def __init__(
+ self,
+ spec_channels,
+ segment_size,
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ spk_embed_dim,
+ gin_channels,
+ sr=None,
+ **kwargs
+ ):
+ super().__init__()
+ self.spec_channels = spec_channels
+ self.inter_channels = inter_channels
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.resblock = resblock
+ self.resblock_kernel_sizes = resblock_kernel_sizes
+ self.resblock_dilation_sizes = resblock_dilation_sizes
+ self.upsample_rates = upsample_rates
+ self.upsample_initial_channel = upsample_initial_channel
+ self.upsample_kernel_sizes = upsample_kernel_sizes
+ self.segment_size = segment_size
+ self.gin_channels = gin_channels
+ # self.hop_length = hop_length#
+ self.spk_embed_dim = spk_embed_dim
+ self.enc_p = TextEncoder256(
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ f0=False,
+ )
+ self.dec = Generator(
+ inter_channels,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ gin_channels=gin_channels,
+ )
+ self.enc_q = PosteriorEncoder(
+ spec_channels,
+ inter_channels,
+ hidden_channels,
+ 5,
+ 1,
+ 16,
+ gin_channels=gin_channels,
+ )
+ self.flow = ResidualCouplingBlock(
+ inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
+ )
+ self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
+ print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
+
+ def remove_weight_norm(self):
+ self.dec.remove_weight_norm()
+ self.flow.remove_weight_norm()
+ self.enc_q.remove_weight_norm()
+
+ def forward(self, phone, phone_lengths, y, y_lengths, ds): # here ds is the speaker id, shape [bs, 1]
+ g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]; the trailing 1 is the time axis, broadcast over t
+ m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
+ z_p = self.flow(z, y_mask, g=g)
+ z_slice, ids_slice = commons.rand_slice_segments(
+ z, y_lengths, self.segment_size
+ )
+ o = self.dec(z_slice, g=g)
+ return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
+
+ def infer(self, phone, phone_lengths, sid, max_len=None):
+ g = self.emb_g(sid).unsqueeze(-1)
+ m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
+ z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
+ z = self.flow(z_p, x_mask, g=g, reverse=True)
+ o = self.dec((z * x_mask)[:, :, :max_len], g=g)
+ return o, x_mask, (z, z_p, m_p, logs_p)
+
+
+class SynthesizerTrnMs768NSFsid_nono(nn.Module):
+ def __init__(
+ self,
+ spec_channels,
+ segment_size,
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ spk_embed_dim,
+ gin_channels,
+ sr=None,
+ **kwargs
+ ):
+ super().__init__()
+ self.spec_channels = spec_channels
+ self.inter_channels = inter_channels
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.resblock = resblock
+ self.resblock_kernel_sizes = resblock_kernel_sizes
+ self.resblock_dilation_sizes = resblock_dilation_sizes
+ self.upsample_rates = upsample_rates
+ self.upsample_initial_channel = upsample_initial_channel
+ self.upsample_kernel_sizes = upsample_kernel_sizes
+ self.segment_size = segment_size
+ self.gin_channels = gin_channels
+ # self.hop_length = hop_length#
+ self.spk_embed_dim = spk_embed_dim
+ self.enc_p = TextEncoder768(
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ f0=False,
+ )
+ self.dec = Generator(
+ inter_channels,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ gin_channels=gin_channels,
+ )
+ self.enc_q = PosteriorEncoder(
+ spec_channels,
+ inter_channels,
+ hidden_channels,
+ 5,
+ 1,
+ 16,
+ gin_channels=gin_channels,
+ )
+ self.flow = ResidualCouplingBlock(
+ inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
+ )
+ self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
+ print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
+
+ def remove_weight_norm(self):
+ self.dec.remove_weight_norm()
+ self.flow.remove_weight_norm()
+ self.enc_q.remove_weight_norm()
+
+ def forward(self, phone, phone_lengths, y, y_lengths, ds): # here ds is the speaker id, shape [bs, 1]
+ g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]; the trailing 1 is the time axis, broadcast over t
+ m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
+ z_p = self.flow(z, y_mask, g=g)
+ z_slice, ids_slice = commons.rand_slice_segments(
+ z, y_lengths, self.segment_size
+ )
+ o = self.dec(z_slice, g=g)
+ return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
+
+ def infer(self, phone, phone_lengths, sid, max_len=None):
+ g = self.emb_g(sid).unsqueeze(-1)
+ m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
+ z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
+ z = self.flow(z_p, x_mask, g=g, reverse=True)
+ o = self.dec((z * x_mask)[:, :, :max_len], g=g)
+ return o, x_mask, (z, z_p, m_p, logs_p)
+
+
+class MultiPeriodDiscriminator(torch.nn.Module):
+ def __init__(self, use_spectral_norm=False):
+ super(MultiPeriodDiscriminator, self).__init__()
+ periods = [2, 3, 5, 7, 11, 17]
+ # periods = [3, 5, 7, 11, 17, 23, 37]
+
+ discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
+ discs = discs + [
+ DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
+ ]
+ self.discriminators = nn.ModuleList(discs)
+
+ def forward(self, y, y_hat):
+ y_d_rs = []
+ y_d_gs = []
+ fmap_rs = []
+ fmap_gs = []
+ for i, d in enumerate(self.discriminators):
+ y_d_r, fmap_r = d(y)
+ y_d_g, fmap_g = d(y_hat)
+ # for j in range(len(fmap_r)):
+ # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
+ y_d_rs.append(y_d_r)
+ y_d_gs.append(y_d_g)
+ fmap_rs.append(fmap_r)
+ fmap_gs.append(fmap_g)
+
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
+
+
+class MultiPeriodDiscriminatorV2(torch.nn.Module):
+ def __init__(self, use_spectral_norm=False):
+ super(MultiPeriodDiscriminatorV2, self).__init__()
+ # periods = [2, 3, 5, 7, 11, 17]
+ periods = [2, 3, 5, 7, 11, 17, 23, 37]
+
+ discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
+ discs = discs + [
+ DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
+ ]
+ self.discriminators = nn.ModuleList(discs)
+
+ def forward(self, y, y_hat):
+ y_d_rs = []
+ y_d_gs = []
+ fmap_rs = []
+ fmap_gs = []
+ for i, d in enumerate(self.discriminators):
+ y_d_r, fmap_r = d(y)
+ y_d_g, fmap_g = d(y_hat)
+ # for j in range(len(fmap_r)):
+ # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
+ y_d_rs.append(y_d_r)
+ y_d_gs.append(y_d_g)
+ fmap_rs.append(fmap_r)
+ fmap_gs.append(fmap_g)
+
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
+
+
+class DiscriminatorS(torch.nn.Module):
+ def __init__(self, use_spectral_norm=False):
+ super(DiscriminatorS, self).__init__()
+ norm_f = spectral_norm if use_spectral_norm else weight_norm
+ self.convs = nn.ModuleList(
+ [
+ norm_f(Conv1d(1, 16, 15, 1, padding=7)),
+ norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
+ norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
+ norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
+ norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
+ norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
+ ]
+ )
+ self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
+
+ def forward(self, x):
+ fmap = []
+
+ for l in self.convs:
+ x = l(x)
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
+ fmap.append(x)
+ x = self.conv_post(x)
+ fmap.append(x)
+ x = torch.flatten(x, 1, -1)
+
+ return x, fmap
+
+
+class DiscriminatorP(torch.nn.Module):
+ def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
+ super(DiscriminatorP, self).__init__()
+ self.period = period
+ self.use_spectral_norm = use_spectral_norm
+ norm_f = spectral_norm if use_spectral_norm else weight_norm
+ self.convs = nn.ModuleList(
+ [
+ norm_f(
+ Conv2d(
+ 1,
+ 32,
+ (kernel_size, 1),
+ (stride, 1),
+ padding=(get_padding(kernel_size, 1), 0),
+ )
+ ),
+ norm_f(
+ Conv2d(
+ 32,
+ 128,
+ (kernel_size, 1),
+ (stride, 1),
+ padding=(get_padding(kernel_size, 1), 0),
+ )
+ ),
+ norm_f(
+ Conv2d(
+ 128,
+ 512,
+ (kernel_size, 1),
+ (stride, 1),
+ padding=(get_padding(kernel_size, 1), 0),
+ )
+ ),
+ norm_f(
+ Conv2d(
+ 512,
+ 1024,
+ (kernel_size, 1),
+ (stride, 1),
+ padding=(get_padding(kernel_size, 1), 0),
+ )
+ ),
+ norm_f(
+ Conv2d(
+ 1024,
+ 1024,
+ (kernel_size, 1),
+ 1,
+ padding=(get_padding(kernel_size, 1), 0),
+ )
+ ),
+ ]
+ )
+ self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
+
+ def forward(self, x):
+ fmap = []
+
+ # 1d to 2d
+ b, c, t = x.shape
+ if t % self.period != 0: # pad first
+ n_pad = self.period - (t % self.period)
+ x = F.pad(x, (0, n_pad), "reflect")
+ t = t + n_pad
+ x = x.view(b, c, t // self.period, self.period)
+
+ for l in self.convs:
+ x = l(x)
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
+ fmap.append(x)
+ x = self.conv_post(x)
+ fmap.append(x)
+ x = torch.flatten(x, 1, -1)
+
+ return x, fmap
diff --git a/lib/infer_pack/models_onnx.py b/lib/infer_pack/models_onnx.py
new file mode 100644
index 000000000..963e67b29
--- /dev/null
+++ b/lib/infer_pack/models_onnx.py
@@ -0,0 +1,819 @@
+import math, pdb, os
+from time import time as ttime
+import torch
+from torch import nn
+from torch.nn import functional as F
+from lib.infer_pack import modules
+from lib.infer_pack import attentions
+from lib.infer_pack import commons
+from lib.infer_pack.commons import init_weights, get_padding
+from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
+from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
+from lib.infer_pack.commons import init_weights
+import numpy as np
+from lib.infer_pack import commons
+
+
+class TextEncoder256(nn.Module):
+ def __init__(
+ self,
+ out_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ f0=True,
+ ):
+ super().__init__()
+ self.out_channels = out_channels
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.emb_phone = nn.Linear(256, hidden_channels)
+ self.lrelu = nn.LeakyReLU(0.1, inplace=True)
+ if f0:
+ self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
+ self.encoder = attentions.Encoder(
+ hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
+ )
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+ def forward(self, phone, pitch, lengths):
+ if pitch is None:
+ x = self.emb_phone(phone)
+ else:
+ x = self.emb_phone(phone) + self.emb_pitch(pitch)
+ x = x * math.sqrt(self.hidden_channels) # [b, t, h]
+ x = self.lrelu(x)
+ x = torch.transpose(x, 1, -1) # [b, h, t]
+ x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
+ x.dtype
+ )
+ x = self.encoder(x * x_mask, x_mask)
+ stats = self.proj(x) * x_mask
+
+ m, logs = torch.split(stats, self.out_channels, dim=1)
+ return m, logs, x_mask
+
+
+class TextEncoder768(nn.Module):
+ def __init__(
+ self,
+ out_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ f0=True,
+ ):
+ super().__init__()
+ self.out_channels = out_channels
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.emb_phone = nn.Linear(768, hidden_channels)
+ self.lrelu = nn.LeakyReLU(0.1, inplace=True)
+ if f0:
+ self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
+ self.encoder = attentions.Encoder(
+ hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
+ )
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+ def forward(self, phone, pitch, lengths):
+ if pitch is None:
+ x = self.emb_phone(phone)
+ else:
+ x = self.emb_phone(phone) + self.emb_pitch(pitch)
+ x = x * math.sqrt(self.hidden_channels) # [b, t, h]
+ x = self.lrelu(x)
+ x = torch.transpose(x, 1, -1) # [b, h, t]
+ x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
+ x.dtype
+ )
+ x = self.encoder(x * x_mask, x_mask)
+ stats = self.proj(x) * x_mask
+
+ m, logs = torch.split(stats, self.out_channels, dim=1)
+ return m, logs, x_mask
+
+
+class ResidualCouplingBlock(nn.Module):
+ def __init__(
+ self,
+ channels,
+ hidden_channels,
+ kernel_size,
+ dilation_rate,
+ n_layers,
+ n_flows=4,
+ gin_channels=0,
+ ):
+ super().__init__()
+ self.channels = channels
+ self.hidden_channels = hidden_channels
+ self.kernel_size = kernel_size
+ self.dilation_rate = dilation_rate
+ self.n_layers = n_layers
+ self.n_flows = n_flows
+ self.gin_channels = gin_channels
+
+ self.flows = nn.ModuleList()
+ for i in range(n_flows):
+ self.flows.append(
+ modules.ResidualCouplingLayer(
+ channels,
+ hidden_channels,
+ kernel_size,
+ dilation_rate,
+ n_layers,
+ gin_channels=gin_channels,
+ mean_only=True,
+ )
+ )
+ self.flows.append(modules.Flip())
+
+ def forward(self, x, x_mask, g=None, reverse=False):
+ if not reverse:
+ for flow in self.flows:
+ x, _ = flow(x, x_mask, g=g, reverse=reverse)
+ else:
+ for flow in reversed(self.flows):
+ x = flow(x, x_mask, g=g, reverse=reverse)
+ return x
+
+ def remove_weight_norm(self):
+ for i in range(self.n_flows):
+ self.flows[i * 2].remove_weight_norm()
+
+
+class PosteriorEncoder(nn.Module):
+ def __init__(
+ self,
+ in_channels,
+ out_channels,
+ hidden_channels,
+ kernel_size,
+ dilation_rate,
+ n_layers,
+ gin_channels=0,
+ ):
+ super().__init__()
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.hidden_channels = hidden_channels
+ self.kernel_size = kernel_size
+ self.dilation_rate = dilation_rate
+ self.n_layers = n_layers
+ self.gin_channels = gin_channels
+
+ self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
+ self.enc = modules.WN(
+ hidden_channels,
+ kernel_size,
+ dilation_rate,
+ n_layers,
+ gin_channels=gin_channels,
+ )
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+ def forward(self, x, x_lengths, g=None):
+ x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
+ x.dtype
+ )
+ x = self.pre(x) * x_mask
+ x = self.enc(x, x_mask, g=g)
+ stats = self.proj(x) * x_mask
+ m, logs = torch.split(stats, self.out_channels, dim=1)
+ z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
+ return z, m, logs, x_mask
+
+ def remove_weight_norm(self):
+ self.enc.remove_weight_norm()
+
+
+class Generator(torch.nn.Module):
+ def __init__(
+ self,
+ initial_channel,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ gin_channels=0,
+ ):
+ super(Generator, self).__init__()
+ self.num_kernels = len(resblock_kernel_sizes)
+ self.num_upsamples = len(upsample_rates)
+ self.conv_pre = Conv1d(
+ initial_channel, upsample_initial_channel, 7, 1, padding=3
+ )
+ resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
+
+ self.ups = nn.ModuleList()
+ for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
+ self.ups.append(
+ weight_norm(
+ ConvTranspose1d(
+ upsample_initial_channel // (2**i),
+ upsample_initial_channel // (2 ** (i + 1)),
+ k,
+ u,
+ padding=(k - u) // 2,
+ )
+ )
+ )
+
+ self.resblocks = nn.ModuleList()
+ for i in range(len(self.ups)):
+ ch = upsample_initial_channel // (2 ** (i + 1))
+ for j, (k, d) in enumerate(
+ zip(resblock_kernel_sizes, resblock_dilation_sizes)
+ ):
+ self.resblocks.append(resblock(ch, k, d))
+
+ self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
+ self.ups.apply(init_weights)
+
+ if gin_channels != 0:
+ self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
+
+ def forward(self, x, g=None):
+ x = self.conv_pre(x)
+ if g is not None:
+ x = x + self.cond(g)
+
+ for i in range(self.num_upsamples):
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
+ x = self.ups[i](x)
+ xs = None
+ for j in range(self.num_kernels):
+ if xs is None:
+ xs = self.resblocks[i * self.num_kernels + j](x)
+ else:
+ xs += self.resblocks[i * self.num_kernels + j](x)
+ x = xs / self.num_kernels
+ x = F.leaky_relu(x)
+ x = self.conv_post(x)
+ x = torch.tanh(x)
+
+ return x
+
+ def remove_weight_norm(self):
+ for l in self.ups:
+ remove_weight_norm(l)
+ for l in self.resblocks:
+ l.remove_weight_norm()
+
+
+class SineGen(torch.nn.Module):
+ """Definition of sine generator
+ SineGen(samp_rate, harmonic_num = 0,
+ sine_amp = 0.1, noise_std = 0.003,
+ voiced_threshold = 0,
+ flag_for_pulse=False)
+ samp_rate: sampling rate in Hz
+ harmonic_num: number of harmonic overtones (default 0)
+ sine_amp: amplitude of sine waveform (default 0.1)
+ noise_std: std of Gaussian noise (default 0.003)
+ voiced_threshold: F0 threshold for U/V classification (default 0)
+ flag_for_pulse: this SineGen is used inside PulseGen (default False)
+ Note: when flag_for_pulse is True, the first time step of a voiced
+ segment is always sin(np.pi) or cos(0)
+ """
+
+ def __init__(
+ self,
+ samp_rate,
+ harmonic_num=0,
+ sine_amp=0.1,
+ noise_std=0.003,
+ voiced_threshold=0,
+ flag_for_pulse=False,
+ ):
+ super(SineGen, self).__init__()
+ self.sine_amp = sine_amp
+ self.noise_std = noise_std
+ self.harmonic_num = harmonic_num
+ self.dim = self.harmonic_num + 1
+ self.sampling_rate = samp_rate
+ self.voiced_threshold = voiced_threshold
+
+ def _f02uv(self, f0):
+ # generate uv signal
+ uv = torch.ones_like(f0)
+ uv = uv * (f0 > self.voiced_threshold)
+ return uv
+
+ def forward(self, f0, upp):
+ """sine_tensor, uv = forward(f0)
+ input F0: tensor(batchsize=1, length, dim=1)
+ f0 for unvoiced steps should be 0
+ output sine_tensor: tensor(batchsize=1, length, dim)
+ output uv: tensor(batchsize=1, length, 1)
+ """
+ with torch.no_grad():
+ f0 = f0[:, None].transpose(1, 2)
+ f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
+ # fundamental component
+ f0_buf[:, :, 0] = f0[:, :, 0]
+ for idx in np.arange(self.harmonic_num):
+ f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
+ idx + 2
+ ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
+ rad_values = (f0_buf / self.sampling_rate) % 1 # the % 1 means the harmonic products can no longer be optimized away afterwards
+ rand_ini = torch.rand(
+ f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
+ )
+ rand_ini[:, 0] = 0
+ rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
+ tmp_over_one = torch.cumsum(rad_values, 1) # % 1 # taking % 1 here would prevent optimizing the cumsum below
+ tmp_over_one *= upp
+ tmp_over_one = F.interpolate(
+ tmp_over_one.transpose(2, 1),
+ scale_factor=upp,
+ mode="linear",
+ align_corners=True,
+ ).transpose(2, 1)
+ rad_values = F.interpolate(
+ rad_values.transpose(2, 1), scale_factor=upp, mode="nearest"
+ ).transpose(
+ 2, 1
+ ) #######
+ tmp_over_one %= 1
+ tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
+ cumsum_shift = torch.zeros_like(rad_values)
+ cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
+ sine_waves = torch.sin(
+ torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi
+ )
+ sine_waves = sine_waves * self.sine_amp
+ uv = self._f02uv(f0)
+ uv = F.interpolate(
+ uv.transpose(2, 1), scale_factor=upp, mode="nearest"
+ ).transpose(2, 1)
+ noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
+ noise = noise_amp * torch.randn_like(sine_waves)
+ sine_waves = sine_waves * uv + noise
+ return sine_waves, uv, noise
+
+
+class SourceModuleHnNSF(torch.nn.Module):
+ """SourceModule for hn-nsf
+ SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
+ add_noise_std=0.003, voiced_threshod=0)
+ sampling_rate: sampling_rate in Hz
+ harmonic_num: number of harmonic above F0 (default: 0)
+ sine_amp: amplitude of sine source signal (default: 0.1)
+ add_noise_std: std of additive Gaussian noise (default: 0.003)
+ note that amplitude of noise in unvoiced is decided
+ by sine_amp
+ voiced_threshold: threshold to set U/V given F0 (default: 0)
+ Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
+ F0_sampled (batchsize, length, 1)
+ Sine_source (batchsize, length, 1)
+ noise_source (batchsize, length, 1)
+ uv (batchsize, length, 1)
+ """
+
+ def __init__(
+ self,
+ sampling_rate,
+ harmonic_num=0,
+ sine_amp=0.1,
+ add_noise_std=0.003,
+ voiced_threshod=0,
+ is_half=True,
+ ):
+ super(SourceModuleHnNSF, self).__init__()
+
+ self.sine_amp = sine_amp
+ self.noise_std = add_noise_std
+ self.is_half = is_half
+ # to produce sine waveforms
+ self.l_sin_gen = SineGen(
+ sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod
+ )
+
+ # to merge source harmonics into a single excitation
+ self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
+ self.l_tanh = torch.nn.Tanh()
+
+ def forward(self, x, upp=None):
+ sine_wavs, uv, _ = self.l_sin_gen(x, upp)
+ if self.is_half:
+ sine_wavs = sine_wavs.half()
+ sine_merge = self.l_tanh(self.l_linear(sine_wavs))
+ return sine_merge, None, None # noise, uv
+
+
+class GeneratorNSF(torch.nn.Module):
+ def __init__(
+ self,
+ initial_channel,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ gin_channels,
+ sr,
+ is_half=False,
+ ):
+ super(GeneratorNSF, self).__init__()
+ self.num_kernels = len(resblock_kernel_sizes)
+ self.num_upsamples = len(upsample_rates)
+
+ self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
+ self.m_source = SourceModuleHnNSF(
+ sampling_rate=sr, harmonic_num=0, is_half=is_half
+ )
+ self.noise_convs = nn.ModuleList()
+ self.conv_pre = Conv1d(
+ initial_channel, upsample_initial_channel, 7, 1, padding=3
+ )
+ resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
+
+ self.ups = nn.ModuleList()
+ for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
+ c_cur = upsample_initial_channel // (2 ** (i + 1))
+ self.ups.append(
+ weight_norm(
+ ConvTranspose1d(
+ upsample_initial_channel // (2**i),
+ upsample_initial_channel // (2 ** (i + 1)),
+ k,
+ u,
+ padding=(k - u) // 2,
+ )
+ )
+ )
+ if i + 1 < len(upsample_rates):
+ stride_f0 = np.prod(upsample_rates[i + 1 :])
+ self.noise_convs.append(
+ Conv1d(
+ 1,
+ c_cur,
+ kernel_size=stride_f0 * 2,
+ stride=stride_f0,
+ padding=stride_f0 // 2,
+ )
+ )
+ else:
+ self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
+
+ self.resblocks = nn.ModuleList()
+ for i in range(len(self.ups)):
+ ch = upsample_initial_channel // (2 ** (i + 1))
+ for j, (k, d) in enumerate(
+ zip(resblock_kernel_sizes, resblock_dilation_sizes)
+ ):
+ self.resblocks.append(resblock(ch, k, d))
+
+ self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
+ self.ups.apply(init_weights)
+
+ if gin_channels != 0:
+ self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
+
+ self.upp = np.prod(upsample_rates)
+
+ def forward(self, x, f0, g=None):
+ har_source, noi_source, uv = self.m_source(f0, self.upp)
+ har_source = har_source.transpose(1, 2)
+ x = self.conv_pre(x)
+ if g is not None:
+ x = x + self.cond(g)
+
+ for i in range(self.num_upsamples):
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
+ x = self.ups[i](x)
+ x_source = self.noise_convs[i](har_source)
+ x = x + x_source
+ xs = None
+ for j in range(self.num_kernels):
+ if xs is None:
+ xs = self.resblocks[i * self.num_kernels + j](x)
+ else:
+ xs += self.resblocks[i * self.num_kernels + j](x)
+ x = xs / self.num_kernels
+ x = F.leaky_relu(x)
+ x = self.conv_post(x)
+ x = torch.tanh(x)
+ return x
+
+ def remove_weight_norm(self):
+ for l in self.ups:
+ remove_weight_norm(l)
+ for l in self.resblocks:
+ l.remove_weight_norm()
+
+
+sr2sr = {
+ "32k": 32000,
+ "40k": 40000,
+ "48k": 48000,
+}
+
+
+class SynthesizerTrnMsNSFsidM(nn.Module):
+ def __init__(
+ self,
+ spec_channels,
+ segment_size,
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ spk_embed_dim,
+ gin_channels,
+ sr,
+ version,
+ **kwargs
+ ):
+ super().__init__()
+ if isinstance(sr, str):
+ sr = sr2sr[sr]
+ self.spec_channels = spec_channels
+ self.inter_channels = inter_channels
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.resblock = resblock
+ self.resblock_kernel_sizes = resblock_kernel_sizes
+ self.resblock_dilation_sizes = resblock_dilation_sizes
+ self.upsample_rates = upsample_rates
+ self.upsample_initial_channel = upsample_initial_channel
+ self.upsample_kernel_sizes = upsample_kernel_sizes
+ self.segment_size = segment_size
+ self.gin_channels = gin_channels
+ # self.hop_length = hop_length#
+ self.spk_embed_dim = spk_embed_dim
+ if version == "v1":
+ self.enc_p = TextEncoder256(
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ )
+ else:
+ self.enc_p = TextEncoder768(
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ )
+ self.dec = GeneratorNSF(
+ inter_channels,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ gin_channels=gin_channels,
+ sr=sr,
+ is_half=kwargs["is_half"],
+ )
+ self.enc_q = PosteriorEncoder(
+ spec_channels,
+ inter_channels,
+ hidden_channels,
+ 5,
+ 1,
+ 16,
+ gin_channels=gin_channels,
+ )
+ self.flow = ResidualCouplingBlock(
+ inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
+ )
+ self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
+ self.speaker_map = None
+ print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
+
+ def remove_weight_norm(self):
+ self.dec.remove_weight_norm()
+ self.flow.remove_weight_norm()
+ self.enc_q.remove_weight_norm()
+
+ def construct_spkmixmap(self, n_speaker):
+ self.speaker_map = torch.zeros((n_speaker, 1, 1, self.gin_channels))
+ for i in range(n_speaker):
+ self.speaker_map[i] = self.emb_g(torch.LongTensor([[i]]))
+ self.speaker_map = self.speaker_map.unsqueeze(0)
+
+ def forward(self, phone, phone_lengths, pitch, nsff0, g, rnd, max_len=None):
+ if self.speaker_map is not None: # [N, S] * [S, B, 1, H]
+ g = g.reshape((g.shape[0], g.shape[1], 1, 1, 1)) # [N, S, B, 1, 1]
+ g = g * self.speaker_map # [N, S, B, 1, H]
+ g = torch.sum(g, dim=1) # [N, 1, B, 1, H]
+ g = g.transpose(0, -1).transpose(0, -2).squeeze(0) # [B, H, N]
+ else:
+ g = g.unsqueeze(0)
+ g = self.emb_g(g).transpose(1, 2)
+
+ m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
+ z_p = (m_p + torch.exp(logs_p) * rnd) * x_mask
+ z = self.flow(z_p, x_mask, g=g, reverse=True)
+ o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
+ return o
+
+
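+ # Sketch of the speaker-mix path above (illustrative; sizes are assumptions):
+ # after construct_spkmixmap(n_speaker), `g` is no longer an id tensor but
+ # per-frame mixing weights of shape [frames, n_speaker] blended against the
+ # cached embedding table.
+ def _example_speaker_mix_weights():
+     import torch
+     n_frames, n_speaker = 5, 3
+     g = torch.zeros(n_frames, n_speaker)
+     g[:, 0] = 0.7  # 70% of speaker 0 ...
+     g[:, 1] = 0.3  # ... mixed with 30% of speaker 1
+     return g  # each row sums to 1.0
+
+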
+class MultiPeriodDiscriminator(torch.nn.Module):
+ def __init__(self, use_spectral_norm=False):
+ super(MultiPeriodDiscriminator, self).__init__()
+ periods = [2, 3, 5, 7, 11, 17]
+ # periods = [3, 5, 7, 11, 17, 23, 37]
+
+ discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
+ discs = discs + [
+ DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
+ ]
+ self.discriminators = nn.ModuleList(discs)
+
+ def forward(self, y, y_hat):
+ y_d_rs = []
+ y_d_gs = []
+ fmap_rs = []
+ fmap_gs = []
+ for i, d in enumerate(self.discriminators):
+ y_d_r, fmap_r = d(y)
+ y_d_g, fmap_g = d(y_hat)
+ # for j in range(len(fmap_r)):
+ # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
+ y_d_rs.append(y_d_r)
+ y_d_gs.append(y_d_g)
+ fmap_rs.append(fmap_r)
+ fmap_gs.append(fmap_g)
+
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
+
+
+class MultiPeriodDiscriminatorV2(torch.nn.Module):
+ def __init__(self, use_spectral_norm=False):
+ super(MultiPeriodDiscriminatorV2, self).__init__()
+ # periods = [2, 3, 5, 7, 11, 17]
+ periods = [2, 3, 5, 7, 11, 17, 23, 37]
+
+ discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
+ discs = discs + [
+ DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
+ ]
+ self.discriminators = nn.ModuleList(discs)
+
+ def forward(self, y, y_hat):
+ y_d_rs = []
+ y_d_gs = []
+ fmap_rs = []
+ fmap_gs = []
+ for i, d in enumerate(self.discriminators):
+ y_d_r, fmap_r = d(y)
+ y_d_g, fmap_g = d(y_hat)
+ # for j in range(len(fmap_r)):
+ # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
+ y_d_rs.append(y_d_r)
+ y_d_gs.append(y_d_g)
+ fmap_rs.append(fmap_r)
+ fmap_gs.append(fmap_g)
+
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
+
+
+class DiscriminatorS(torch.nn.Module):
+ def __init__(self, use_spectral_norm=False):
+ super(DiscriminatorS, self).__init__()
+ norm_f = spectral_norm if use_spectral_norm else weight_norm
+ self.convs = nn.ModuleList(
+ [
+ norm_f(Conv1d(1, 16, 15, 1, padding=7)),
+ norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
+ norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
+ norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
+ norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
+ norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
+ ]
+ )
+ self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
+
+ def forward(self, x):
+ fmap = []
+
+ for l in self.convs:
+ x = l(x)
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
+ fmap.append(x)
+ x = self.conv_post(x)
+ fmap.append(x)
+ x = torch.flatten(x, 1, -1)
+
+ return x, fmap
+
+
+class DiscriminatorP(torch.nn.Module):
+ def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
+ super(DiscriminatorP, self).__init__()
+ self.period = period
+ self.use_spectral_norm = use_spectral_norm
+ norm_f = spectral_norm if use_spectral_norm else weight_norm
+ self.convs = nn.ModuleList(
+ [
+ norm_f(
+ Conv2d(
+ 1,
+ 32,
+ (kernel_size, 1),
+ (stride, 1),
+ padding=(get_padding(kernel_size, 1), 0),
+ )
+ ),
+ norm_f(
+ Conv2d(
+ 32,
+ 128,
+ (kernel_size, 1),
+ (stride, 1),
+ padding=(get_padding(kernel_size, 1), 0),
+ )
+ ),
+ norm_f(
+ Conv2d(
+ 128,
+ 512,
+ (kernel_size, 1),
+ (stride, 1),
+ padding=(get_padding(kernel_size, 1), 0),
+ )
+ ),
+ norm_f(
+ Conv2d(
+ 512,
+ 1024,
+ (kernel_size, 1),
+ (stride, 1),
+ padding=(get_padding(kernel_size, 1), 0),
+ )
+ ),
+ norm_f(
+ Conv2d(
+ 1024,
+ 1024,
+ (kernel_size, 1),
+ 1,
+ padding=(get_padding(kernel_size, 1), 0),
+ )
+ ),
+ ]
+ )
+ self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
+
+ def forward(self, x):
+ fmap = []
+
+ # 1d to 2d
+ b, c, t = x.shape
+ if t % self.period != 0: # pad first
+ n_pad = self.period - (t % self.period)
+ x = F.pad(x, (0, n_pad), "reflect")
+ t = t + n_pad
+ x = x.view(b, c, t // self.period, self.period)
+
+ for l in self.convs:
+ x = l(x)
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
+ fmap.append(x)
+ x = self.conv_post(x)
+ fmap.append(x)
+ x = torch.flatten(x, 1, -1)
+
+ return x, fmap
diff --git a/lib/infer_pack/modules.py b/lib/infer_pack/modules.py
new file mode 100644
index 000000000..c83289df7
--- /dev/null
+++ b/lib/infer_pack/modules.py
@@ -0,0 +1,522 @@
+import copy
+import math
+import numpy as np
+import scipy
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
+from torch.nn.utils import weight_norm, remove_weight_norm
+
+from lib.infer_pack import commons
+from lib.infer_pack.commons import init_weights, get_padding
+from lib.infer_pack.transforms import piecewise_rational_quadratic_transform
+
+
+LRELU_SLOPE = 0.1
+
+
+class LayerNorm(nn.Module):
+ def __init__(self, channels, eps=1e-5):
+ super().__init__()
+ self.channels = channels
+ self.eps = eps
+
+ self.gamma = nn.Parameter(torch.ones(channels))
+ self.beta = nn.Parameter(torch.zeros(channels))
+
+ def forward(self, x):
+ x = x.transpose(1, -1)
+ x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
+ return x.transpose(1, -1)
+
+
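+ # This LayerNorm normalizes [B, C, T] tensors over the channel axis by
+ # transposing to channel-last, applying F.layer_norm, and transposing back.
+ # Minimal sketch (assumed sizes):
+ def _example_channel_layernorm():
+     import torch
+     ln = LayerNorm(192)
+     x = torch.randn(2, 192, 50)  # [batch, channels, frames]
+     return ln(x).shape  # torch.Size([2, 192, 50])
+
+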
+class ConvReluNorm(nn.Module):
+ def __init__(
+ self,
+ in_channels,
+ hidden_channels,
+ out_channels,
+ kernel_size,
+ n_layers,
+ p_dropout,
+ ):
+ super().__init__()
+ self.in_channels = in_channels
+ self.hidden_channels = hidden_channels
+ self.out_channels = out_channels
+ self.kernel_size = kernel_size
+ self.n_layers = n_layers
+ self.p_dropout = p_dropout
+ assert n_layers > 1, "Number of layers should be larger than 1."
+
+ self.conv_layers = nn.ModuleList()
+ self.norm_layers = nn.ModuleList()
+ self.conv_layers.append(
+ nn.Conv1d(
+ in_channels, hidden_channels, kernel_size, padding=kernel_size // 2
+ )
+ )
+ self.norm_layers.append(LayerNorm(hidden_channels))
+ self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout))
+ for _ in range(n_layers - 1):
+ self.conv_layers.append(
+ nn.Conv1d(
+ hidden_channels,
+ hidden_channels,
+ kernel_size,
+ padding=kernel_size // 2,
+ )
+ )
+ self.norm_layers.append(LayerNorm(hidden_channels))
+ self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
+ self.proj.weight.data.zero_()
+ self.proj.bias.data.zero_()
+
+ def forward(self, x, x_mask):
+ x_org = x
+ for i in range(self.n_layers):
+ x = self.conv_layers[i](x * x_mask)
+ x = self.norm_layers[i](x)
+ x = self.relu_drop(x)
+ x = x_org + self.proj(x)
+ return x * x_mask
+
+
+class DDSConv(nn.Module):
+ """
+ Dilated and Depth-Separable Convolution
+ """
+
+ def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0):
+ super().__init__()
+ self.channels = channels
+ self.kernel_size = kernel_size
+ self.n_layers = n_layers
+ self.p_dropout = p_dropout
+
+ self.drop = nn.Dropout(p_dropout)
+ self.convs_sep = nn.ModuleList()
+ self.convs_1x1 = nn.ModuleList()
+ self.norms_1 = nn.ModuleList()
+ self.norms_2 = nn.ModuleList()
+ for i in range(n_layers):
+ dilation = kernel_size**i
+ padding = (kernel_size * dilation - dilation) // 2
+ self.convs_sep.append(
+ nn.Conv1d(
+ channels,
+ channels,
+ kernel_size,
+ groups=channels,
+ dilation=dilation,
+ padding=padding,
+ )
+ )
+ self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
+ self.norms_1.append(LayerNorm(channels))
+ self.norms_2.append(LayerNorm(channels))
+
+ def forward(self, x, x_mask, g=None):
+ if g is not None:
+ x = x + g
+ for i in range(self.n_layers):
+ y = self.convs_sep[i](x * x_mask)
+ y = self.norms_1[i](y)
+ y = F.gelu(y)
+ y = self.convs_1x1[i](y)
+ y = self.norms_2[i](y)
+ y = F.gelu(y)
+ y = self.drop(y)
+ x = x + y
+ return x * x_mask
+
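+# Editor's sketch (illustrative only). DDSConv stacks depthwise (groups=channels)
+# convolutions whose dilation grows as kernel_size**i, so n_layers of kernel
+# size k cover a receptive field of roughly k**n_layers frames while each layer
+# costs only the depthwise weights plus a 1x1 mixing conv:
+#
+#   import torch
+#   conv = DDSConv(channels=192, kernel_size=3, n_layers=3)
+#   x, mask = torch.randn(2, 192, 100), torch.ones(2, 1, 100)
+#   y = conv(x, mask)  # shape preserved: (2, 192, 100)
+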
+
+class WN(torch.nn.Module):
+ def __init__(
+ self,
+ hidden_channels,
+ kernel_size,
+ dilation_rate,
+ n_layers,
+ gin_channels=0,
+ p_dropout=0,
+ ):
+ super(WN, self).__init__()
+ assert kernel_size % 2 == 1
+ self.hidden_channels = hidden_channels
+ self.kernel_size = kernel_size
+ self.dilation_rate = dilation_rate
+ self.n_layers = n_layers
+ self.gin_channels = gin_channels
+ self.p_dropout = p_dropout
+
+ self.in_layers = torch.nn.ModuleList()
+ self.res_skip_layers = torch.nn.ModuleList()
+ self.drop = nn.Dropout(p_dropout)
+
+ if gin_channels != 0:
+ cond_layer = torch.nn.Conv1d(
+ gin_channels, 2 * hidden_channels * n_layers, 1
+ )
+ self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight")
+
+ for i in range(n_layers):
+ dilation = dilation_rate**i
+ padding = int((kernel_size * dilation - dilation) / 2)
+ in_layer = torch.nn.Conv1d(
+ hidden_channels,
+ 2 * hidden_channels,
+ kernel_size,
+ dilation=dilation,
+ padding=padding,
+ )
+ in_layer = torch.nn.utils.weight_norm(in_layer, name="weight")
+ self.in_layers.append(in_layer)
+
+ # the last layer feeds only the skip path, so no residual half is needed
+ if i < n_layers - 1:
+ res_skip_channels = 2 * hidden_channels
+ else:
+ res_skip_channels = hidden_channels
+
+ res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
+ res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight")
+ self.res_skip_layers.append(res_skip_layer)
+
+ def forward(self, x, x_mask, g=None, **kwargs):
+ output = torch.zeros_like(x)
+ n_channels_tensor = torch.IntTensor([self.hidden_channels])
+
+ if g is not None:
+ g = self.cond_layer(g)
+
+ for i in range(self.n_layers):
+ x_in = self.in_layers[i](x)
+ if g is not None:
+ cond_offset = i * 2 * self.hidden_channels
+ g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :]
+ else:
+ g_l = torch.zeros_like(x_in)
+
+ acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor)
+ acts = self.drop(acts)
+
+ res_skip_acts = self.res_skip_layers[i](acts)
+ if i < self.n_layers - 1:
+ res_acts = res_skip_acts[:, : self.hidden_channels, :]
+ x = (x + res_acts) * x_mask
+ output = output + res_skip_acts[:, self.hidden_channels :, :]
+ else:
+ output = output + res_skip_acts
+ return output * x_mask
+
+ def remove_weight_norm(self):
+ if self.gin_channels != 0:
+ torch.nn.utils.remove_weight_norm(self.cond_layer)
+ for l in self.in_layers:
+ torch.nn.utils.remove_weight_norm(l)
+ for l in self.res_skip_layers:
+ torch.nn.utils.remove_weight_norm(l)
+
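+# Editor's sketch (illustrative only). WN is the WaveNet-style stack: each
+# layer projects to 2*hidden_channels, which
+# commons.fused_add_tanh_sigmoid_multiply splits into a tanh half and a
+# sigmoid gate, and every layer's skip projection is summed into `output`.
+# With conditioning, cond_layer emits all layers' gate inputs at once and
+# g_l slices out layer i. A minimal smoke test:
+#
+#   import torch
+#   wn = WN(hidden_channels=192, kernel_size=5, dilation_rate=1, n_layers=4,
+#           gin_channels=256)
+#   x, mask = torch.randn(2, 192, 50), torch.ones(2, 1, 50)
+#   g = torch.randn(2, 256, 1)     # speaker embedding, broadcast over time
+#   out = wn(x, mask, g=g)         # (2, 192, 50)
+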
+
+class ResBlock1(torch.nn.Module):
+ def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
+ super(ResBlock1, self).__init__()
+ self.convs1 = nn.ModuleList(
+ [
+ weight_norm(
+ Conv1d(
+ channels,
+ channels,
+ kernel_size,
+ 1,
+ dilation=dilation[0],
+ padding=get_padding(kernel_size, dilation[0]),
+ )
+ ),
+ weight_norm(
+ Conv1d(
+ channels,
+ channels,
+ kernel_size,
+ 1,
+ dilation=dilation[1],
+ padding=get_padding(kernel_size, dilation[1]),
+ )
+ ),
+ weight_norm(
+ Conv1d(
+ channels,
+ channels,
+ kernel_size,
+ 1,
+ dilation=dilation[2],
+ padding=get_padding(kernel_size, dilation[2]),
+ )
+ ),
+ ]
+ )
+ self.convs1.apply(init_weights)
+
+ self.convs2 = nn.ModuleList(
+ [
+ weight_norm(
+ Conv1d(
+ channels,
+ channels,
+ kernel_size,
+ 1,
+ dilation=1,
+ padding=get_padding(kernel_size, 1),
+ )
+ ),
+ weight_norm(
+ Conv1d(
+ channels,
+ channels,
+ kernel_size,
+ 1,
+ dilation=1,
+ padding=get_padding(kernel_size, 1),
+ )
+ ),
+ weight_norm(
+ Conv1d(
+ channels,
+ channels,
+ kernel_size,
+ 1,
+ dilation=1,
+ padding=get_padding(kernel_size, 1),
+ )
+ ),
+ ]
+ )
+ self.convs2.apply(init_weights)
+
+ def forward(self, x, x_mask=None):
+ for c1, c2 in zip(self.convs1, self.convs2):
+ xt = F.leaky_relu(x, LRELU_SLOPE)
+ if x_mask is not None:
+ xt = xt * x_mask
+ xt = c1(xt)
+ xt = F.leaky_relu(xt, LRELU_SLOPE)
+ if x_mask is not None:
+ xt = xt * x_mask
+ xt = c2(xt)
+ x = xt + x
+ if x_mask is not None:
+ x = x * x_mask
+ return x
+
+ def remove_weight_norm(self):
+ for l in self.convs1:
+ remove_weight_norm(l)
+ for l in self.convs2:
+ remove_weight_norm(l)
+
+
+class ResBlock2(torch.nn.Module):
+ def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
+ super(ResBlock2, self).__init__()
+ self.convs = nn.ModuleList(
+ [
+ weight_norm(
+ Conv1d(
+ channels,
+ channels,
+ kernel_size,
+ 1,
+ dilation=dilation[0],
+ padding=get_padding(kernel_size, dilation[0]),
+ )
+ ),
+ weight_norm(
+ Conv1d(
+ channels,
+ channels,
+ kernel_size,
+ 1,
+ dilation=dilation[1],
+ padding=get_padding(kernel_size, dilation[1]),
+ )
+ ),
+ ]
+ )
+ self.convs.apply(init_weights)
+
+ def forward(self, x, x_mask=None):
+ for c in self.convs:
+ xt = F.leaky_relu(x, LRELU_SLOPE)
+ if x_mask is not None:
+ xt = xt * x_mask
+ xt = c(xt)
+ x = xt + x
+ if x_mask is not None:
+ x = x * x_mask
+ return x
+
+ def remove_weight_norm(self):
+ for l in self.convs:
+ remove_weight_norm(l)
+
+
+class Log(nn.Module):
+ def forward(self, x, x_mask, reverse=False, **kwargs):
+ if not reverse:
+ y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
+ logdet = torch.sum(-y, [1, 2])
+ return y, logdet
+ else:
+ x = torch.exp(x) * x_mask
+ return x
+
+
+class Flip(nn.Module):
+ def forward(self, x, *args, reverse=False, **kwargs):
+ x = torch.flip(x, [1])
+ if not reverse:
+ logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
+ return x, logdet
+ else:
+ return x
+
+
+class ElementwiseAffine(nn.Module):
+ def __init__(self, channels):
+ super().__init__()
+ self.channels = channels
+ self.m = nn.Parameter(torch.zeros(channels, 1))
+ self.logs = nn.Parameter(torch.zeros(channels, 1))
+
+ def forward(self, x, x_mask, reverse=False, **kwargs):
+ if not reverse:
+ y = self.m + torch.exp(self.logs) * x
+ y = y * x_mask
+ logdet = torch.sum(self.logs * x_mask, [1, 2])
+ return y, logdet
+ else:
+ x = (x - self.m) * torch.exp(-self.logs) * x_mask
+ return x
+
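+# Editor's sketch (illustrative only). Log, Flip and ElementwiseAffine all
+# follow the same flow protocol: forward returns (y, logdet) for the
+# likelihood, reverse returns only the inverted tensor, and the two
+# directions compose to the identity:
+#
+#   import torch
+#   flow = ElementwiseAffine(channels=4)
+#   x, mask = torch.rand(2, 4, 10), torch.ones(2, 1, 10)
+#   y, logdet = flow(x, mask)
+#   x_back = flow(y, mask, reverse=True)
+#   assert torch.allclose(x * mask, x_back, atol=1e-6)
+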
+
+class ResidualCouplingLayer(nn.Module):
+ def __init__(
+ self,
+ channels,
+ hidden_channels,
+ kernel_size,
+ dilation_rate,
+ n_layers,
+ p_dropout=0,
+ gin_channels=0,
+ mean_only=False,
+ ):
+ assert channels % 2 == 0, "channels should be divisible by 2"
+ super().__init__()
+ self.channels = channels
+ self.hidden_channels = hidden_channels
+ self.kernel_size = kernel_size
+ self.dilation_rate = dilation_rate
+ self.n_layers = n_layers
+ self.half_channels = channels // 2
+ self.mean_only = mean_only
+
+ self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
+ self.enc = WN(
+ hidden_channels,
+ kernel_size,
+ dilation_rate,
+ n_layers,
+ p_dropout=p_dropout,
+ gin_channels=gin_channels,
+ )
+ self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
+ self.post.weight.data.zero_()
+ self.post.bias.data.zero_()
+
+ def forward(self, x, x_mask, g=None, reverse=False):
+ x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
+ h = self.pre(x0) * x_mask
+ h = self.enc(h, x_mask, g=g)
+ stats = self.post(h) * x_mask
+ if not self.mean_only:
+ m, logs = torch.split(stats, [self.half_channels] * 2, 1)
+ else:
+ m = stats
+ logs = torch.zeros_like(m)
+
+ if not reverse:
+ x1 = m + x1 * torch.exp(logs) * x_mask
+ x = torch.cat([x0, x1], 1)
+ logdet = torch.sum(logs, [1, 2])
+ return x, logdet
+ else:
+ x1 = (x1 - m) * torch.exp(-logs) * x_mask
+ x = torch.cat([x0, x1], 1)
+ return x
+
+ def remove_weight_norm(self):
+ self.enc.remove_weight_norm()
+
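+# Editor's sketch (illustrative only). The coupling layer leaves x0 untouched,
+# predicts (m, logs) from it, and transforms only x1, so inversion needs no
+# network inverse: x1 = (x1 - m) * exp(-logs). With mean_only=True the
+# log-scale is fixed at zero and logdet vanishes:
+#
+#   import torch
+#   flow = ResidualCouplingLayer(channels=4, hidden_channels=8, kernel_size=5,
+#                                dilation_rate=1, n_layers=2, mean_only=True)
+#   x, mask = torch.randn(2, 4, 10), torch.ones(2, 1, 10)
+#   y, _ = flow(x, mask)
+#   assert torch.allclose(flow(y, mask, reverse=True), x, atol=1e-5)
+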
+
+class ConvFlow(nn.Module):
+ def __init__(
+ self,
+ in_channels,
+ filter_channels,
+ kernel_size,
+ n_layers,
+ num_bins=10,
+ tail_bound=5.0,
+ ):
+ super().__init__()
+ self.in_channels = in_channels
+ self.filter_channels = filter_channels
+ self.kernel_size = kernel_size
+ self.n_layers = n_layers
+ self.num_bins = num_bins
+ self.tail_bound = tail_bound
+ self.half_channels = in_channels // 2
+
+ self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
+ self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0)
+ self.proj = nn.Conv1d(
+ filter_channels, self.half_channels * (num_bins * 3 - 1), 1
+ )
+ self.proj.weight.data.zero_()
+ self.proj.bias.data.zero_()
+
+ def forward(self, x, x_mask, g=None, reverse=False):
+ x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
+ h = self.pre(x0)
+ h = self.convs(h, x_mask, g=g)
+ h = self.proj(h) * x_mask
+
+ b, c, t = x0.shape
+ h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, c*(3*num_bins-1), t] -> [b, c, t, 3*num_bins-1]
+
+ unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels)
+ unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt(
+ self.filter_channels
+ )
+ unnormalized_derivatives = h[..., 2 * self.num_bins :]
+
+ x1, logabsdet = piecewise_rational_quadratic_transform(
+ x1,
+ unnormalized_widths,
+ unnormalized_heights,
+ unnormalized_derivatives,
+ inverse=reverse,
+ tails="linear",
+ tail_bound=self.tail_bound,
+ )
+
+ x = torch.cat([x0, x1], 1) * x_mask
+ logdet = torch.sum(logabsdet * x_mask, [1, 2])
+ if not reverse:
+ return x, logdet
+ else:
+ return x
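+
+
+# Editor's sketch (illustrative only). ConvFlow predicts, per element of x1,
+# the (num_bins * 3 - 1) parameters of a monotonic rational quadratic spline
+# (num_bins widths, num_bins heights, num_bins - 1 interior derivatives)
+# conditioned on x0, and applies it via piecewise_rational_quadratic_transform;
+# reverse=True evaluates the exact inverse spline, mirroring the coupling
+# layer above:
+#
+#   import torch
+#   flow = ConvFlow(in_channels=2, filter_channels=16, kernel_size=3, n_layers=2)
+#   x, mask = torch.randn(2, 2, 10), torch.ones(2, 1, 10)
+#   y, logdet = flow(x, mask)
+#   x_back = flow(y, mask, reverse=True)  # recovers x up to float error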
diff --git a/lib/infer_pack/modules/F0Predictor/DioF0Predictor.py b/lib/infer_pack/modules/F0Predictor/DioF0Predictor.py
new file mode 100644
index 000000000..ee3171bcb
--- /dev/null
+++ b/lib/infer_pack/modules/F0Predictor/DioF0Predictor.py
@@ -0,0 +1,90 @@
+from lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor
+import pyworld
+import numpy as np
+
+
+class DioF0Predictor(F0Predictor):
+ def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
+ self.hop_length = hop_length
+ self.f0_min = f0_min
+ self.f0_max = f0_max
+ self.sampling_rate = sampling_rate
+
+ def interpolate_f0(self, f0):
+ """
+ Interpolate the F0 contour across unvoiced (zero) frames.
+ """
+
+ data = np.reshape(f0, (f0.size, 1))
+
+ vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
+ vuv_vector[data > 0.0] = 1.0
+ vuv_vector[data <= 0.0] = 0.0
+
+ ip_data = data
+
+ frame_number = data.size
+ last_value = 0.0
+ for i in range(frame_number):
+ if data[i] <= 0.0:
+ j = i + 1
+ for j in range(i + 1, frame_number):
+ if data[j] > 0.0:
+ break
+ if j < frame_number - 1:
+ if last_value > 0.0:
+ step = (data[j] - data[i - 1]) / float(j - i)
+ for k in range(i, j):
+ ip_data[k] = data[i - 1] + step * (k - i + 1)
+ else:
+ for k in range(i, j):
+ ip_data[k] = data[j]
+ else:
+ for k in range(i, frame_number):
+ ip_data[k] = last_value
+ else:
+ ip_data[i] = data[i] # this assignment may be an unnecessary copy
+ last_value = data[i]
+
+ return ip_data[:, 0], vuv_vector[:, 0]
+
+ def resize_f0(self, x, target_len):
+ source = np.array(x)
+ source[source < 0.001] = np.nan
+ target = np.interp(
+ np.arange(0, len(source) * target_len, len(source)) / target_len,
+ np.arange(0, len(source)),
+ source,
+ )
+ res = np.nan_to_num(target)
+ return res
+
+ def compute_f0(self, wav, p_len=None):
+ if p_len is None:
+ p_len = wav.shape[0] // self.hop_length
+ f0, t = pyworld.dio(
+ wav.astype(np.double),
+ fs=self.sampling_rate,
+ f0_floor=self.f0_min,
+ f0_ceil=self.f0_max,
+ frame_period=1000 * self.hop_length / self.sampling_rate,
+ )
+ f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
+ for index, pitch in enumerate(f0):
+ f0[index] = round(pitch, 1)
+ return self.interpolate_f0(self.resize_f0(f0, p_len))[0]
+
+ def compute_f0_uv(self, wav, p_len=None):
+ if p_len is None:
+ p_len = wav.shape[0] // self.hop_length
+ f0, t = pyworld.dio(
+ wav.astype(np.double),
+ fs=self.sampling_rate,
+ f0_floor=self.f0_min,
+ f0_ceil=self.f0_max,
+ frame_period=1000 * self.hop_length / self.sampling_rate,
+ )
+ f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
+ for index, pitch in enumerate(f0):
+ f0[index] = round(pitch, 1)
+ return self.interpolate_f0(self.resize_f0(f0, p_len))
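+
+
+# Editor's sketch (illustrative only; the input below is stand-in noise, not
+# real speech). compute_f0_uv returns the interpolated F0 track plus a
+# voiced/unvoiced mask, both resampled to p_len frames:
+#
+#   import numpy as np
+#   predictor = DioF0Predictor(hop_length=512, sampling_rate=44100)
+#   wav = np.random.randn(44100).astype(np.float64)  # 1 s of audio
+#   f0, uv = predictor.compute_f0_uv(wav)
+#   assert f0.shape == uv.shape == (44100 // 512,)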
diff --git a/lib/infer_pack/modules/F0Predictor/F0Predictor.py b/lib/infer_pack/modules/F0Predictor/F0Predictor.py
new file mode 100644
index 000000000..f56e49e7f
--- /dev/null
+++ b/lib/infer_pack/modules/F0Predictor/F0Predictor.py
@@ -0,0 +1,16 @@
+class F0Predictor(object):
+ def compute_f0(self, wav, p_len):
+ """
+ input: wav:[signal_length]
+ p_len:int
+ output: f0:[signal_length//hop_length]
+ """
+ pass
+
+ def compute_f0_uv(self, wav, p_len):
+ """
+ input: wav:[signal_length]
+ p_len:int
+ output: f0:[signal_length//hop_length],uv:[signal_length//hop_length]
+ """
+ pass
diff --git a/lib/infer_pack/modules/F0Predictor/HarvestF0Predictor.py b/lib/infer_pack/modules/F0Predictor/HarvestF0Predictor.py
new file mode 100644
index 000000000..b412ba281
--- /dev/null
+++ b/lib/infer_pack/modules/F0Predictor/HarvestF0Predictor.py
@@ -0,0 +1,86 @@
+from lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor
+import pyworld
+import numpy as np
+
+
+class HarvestF0Predictor(F0Predictor):
+ def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
+ self.hop_length = hop_length
+ self.f0_min = f0_min
+ self.f0_max = f0_max
+ self.sampling_rate = sampling_rate
+
+ def interpolate_f0(self, f0):
+ """
+ Interpolate the F0 contour across unvoiced (zero) frames.
+ """
+
+ data = np.reshape(f0, (f0.size, 1))
+
+ vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
+ vuv_vector[data > 0.0] = 1.0
+ vuv_vector[data <= 0.0] = 0.0
+
+ ip_data = data
+
+ frame_number = data.size
+ last_value = 0.0
+ for i in range(frame_number):
+ if data[i] <= 0.0:
+ j = i + 1
+ for j in range(i + 1, frame_number):
+ if data[j] > 0.0:
+ break
+ if j < frame_number - 1:
+ if last_value > 0.0:
+ step = (data[j] - data[i - 1]) / float(j - i)
+ for k in range(i, j):
+ ip_data[k] = data[i - 1] + step * (k - i + 1)
+ else:
+ for k in range(i, j):
+ ip_data[k] = data[j]
+ else:
+ for k in range(i, frame_number):
+ ip_data[k] = last_value
+ else:
+ ip_data[i] = data[i] # this assignment may be an unnecessary copy
+ last_value = data[i]
+
+ return ip_data[:, 0], vuv_vector[:, 0]
+
+ def resize_f0(self, x, target_len):
+ source = np.array(x)
+ source[source < 0.001] = np.nan
+ target = np.interp(
+ np.arange(0, len(source) * target_len, len(source)) / target_len,
+ np.arange(0, len(source)),
+ source,
+ )
+ res = np.nan_to_num(target)
+ return res
+
+ def compute_f0(self, wav, p_len=None):
+ if p_len is None:
+ p_len = wav.shape[0] // self.hop_length
+ f0, t = pyworld.harvest(
+ wav.astype(np.double),
+ fs=self.sampling_rate,
+ f0_floor=self.f0_min,
+ f0_ceil=self.f0_max,
+ frame_period=1000 * self.hop_length / self.sampling_rate,
+ )
+ f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
+ return self.interpolate_f0(self.resize_f0(f0, p_len))[0]
+
+ def compute_f0_uv(self, wav, p_len=None):
+ if p_len is None:
+ p_len = wav.shape[0] // self.hop_length
+ f0, t = pyworld.harvest(
+ wav.astype(np.double),
+ fs=self.sampling_rate,
+ f0_floor=self.f0_min,
+ f0_ceil=self.f0_max,
+ frame_period=1000 * self.hop_length / self.sampling_rate,
+ )
+ f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
+ return self.interpolate_f0(self.resize_f0(f0, p_len))
diff --git a/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py b/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py
new file mode 100644
index 000000000..b2c592527
--- /dev/null
+++ b/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py
@@ -0,0 +1,97 @@
+from lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor
+import parselmouth
+import numpy as np
+
+
+class PMF0Predictor(F0Predictor):
+ def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
+ self.hop_length = hop_length
+ self.f0_min = f0_min
+ self.f0_max = f0_max
+ self.sampling_rate = sampling_rate
+
+ def interpolate_f0(self, f0):
+ """
+ Interpolate the F0 contour across unvoiced (zero) frames.
+ """
+
+ data = np.reshape(f0, (f0.size, 1))
+
+ vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
+ vuv_vector[data > 0.0] = 1.0
+ vuv_vector[data <= 0.0] = 0.0
+
+ ip_data = data
+
+ frame_number = data.size
+ last_value = 0.0
+ for i in range(frame_number):
+ if data[i] <= 0.0:
+ j = i + 1
+ for j in range(i + 1, frame_number):
+ if data[j] > 0.0:
+ break
+ if j < frame_number - 1:
+ if last_value > 0.0:
+ step = (data[j] - data[i - 1]) / float(j - i)
+ for k in range(i, j):
+ ip_data[k] = data[i - 1] + step * (k - i + 1)
+ else:
+ for k in range(i, j):
+ ip_data[k] = data[j]
+ else:
+ for k in range(i, frame_number):
+ ip_data[k] = last_value
+ else:
+ ip_data[i] = data[i] # this assignment may be an unnecessary copy
+ last_value = data[i]
+
+ return ip_data[:, 0], vuv_vector[:, 0]
+
+ def compute_f0(self, wav, p_len=None):
+ x = wav
+ if p_len is None:
+ p_len = x.shape[0] // self.hop_length
+ else:
+ assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error"
+ time_step = self.hop_length / self.sampling_rate * 1000
+ f0 = (
+ parselmouth.Sound(x, self.sampling_rate)
+ .to_pitch_ac(
+ time_step=time_step / 1000,
+ voicing_threshold=0.6,
+ pitch_floor=self.f0_min,
+ pitch_ceiling=self.f0_max,
+ )
+ .selected_array["frequency"]
+ )
+
+ pad_size = (p_len - len(f0) + 1) // 2
+ if pad_size > 0 or p_len - len(f0) - pad_size > 0:
+ f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant")
+ f0, uv = self.interpolate_f0(f0)
+ return f0
+
+ def compute_f0_uv(self, wav, p_len=None):
+ x = wav
+ if p_len is None:
+ p_len = x.shape[0] // self.hop_length
+ else:
+ assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error"
+ time_step = self.hop_length / self.sampling_rate * 1000
+ f0 = (
+ parselmouth.Sound(x, self.sampling_rate)
+ .to_pitch_ac(
+ time_step=time_step / 1000,
+ voicing_threshold=0.6,
+ pitch_floor=self.f0_min,
+ pitch_ceiling=self.f0_max,
+ )
+ .selected_array["frequency"]
+ )
+
+ pad_size = (p_len - len(f0) + 1) // 2
+ if pad_size > 0 or p_len - len(f0) - pad_size > 0:
+ f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant")
+ f0, uv = self.interpolate_f0(f0)
+ return f0, uv
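+
+
+# Editor's sketch (illustrative only; stand-in noise input). Praat's
+# autocorrelation tracker typically returns slightly fewer frames than p_len,
+# so the contour is zero-padded symmetrically before interpolation:
+#
+#   import numpy as np
+#   predictor = PMF0Predictor(hop_length=512, sampling_rate=44100)
+#   f0, uv = predictor.compute_f0_uv(np.random.randn(44100))
+#   assert len(f0) == 44100 // 512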
diff --git a/lib/infer_pack/modules/F0Predictor/__init__.py b/lib/infer_pack/modules/F0Predictor/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/lib/infer_pack/onnx_inference.py b/lib/infer_pack/onnx_inference.py
new file mode 100644
index 000000000..6517853be
--- /dev/null
+++ b/lib/infer_pack/onnx_inference.py
@@ -0,0 +1,145 @@
+import onnxruntime
+import librosa
+import numpy as np
+import soundfile
+
+
+class ContentVec:
+ def __init__(self, vec_path="pretrained/vec-768-layer-12.onnx", device=None):
+ print("load model(s) from {}".format(vec_path))
+ if device == "cpu" or device is None:
+ providers = ["CPUExecutionProvider"]
+ elif device == "cuda":
+ providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
+ elif device == "dml":
+ providers = ["DmlExecutionProvider"]
+ else:
+ raise RuntimeError("Unsupported device")
+ self.model = onnxruntime.InferenceSession(vec_path, providers=providers)
+
+ def __call__(self, wav):
+ return self.forward(wav)
+
+ def forward(self, wav):
+ feats = wav
+ if feats.ndim == 2: # stereo input: average the two channels to mono
+ feats = feats.mean(-1)
+ assert feats.ndim == 1, feats.ndim
+ feats = np.expand_dims(np.expand_dims(feats, 0), 0)
+ onnx_input = {self.model.get_inputs()[0].name: feats}
+ logits = self.model.run(None, onnx_input)[0]
+ return logits.transpose(0, 2, 1)
+
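+# Editor's sketch (illustrative only; the .onnx path is the repo's documented
+# default, the input is stand-in noise). ContentVec expects 16 kHz mono float
+# audio and returns frame-level content features, (1, 768, n_frames) after the
+# transpose:
+#
+#   import numpy as np
+#   vec = ContentVec("pretrained/vec-768-layer-12.onnx", device="cpu")
+#   feats = vec(np.random.randn(16000).astype(np.float32))  # 1 s at 16 kHz
+#   print(feats.shape)  # (1, 768, n_frames)
+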
+
+def get_f0_predictor(f0_predictor, hop_length, sampling_rate, **kargs):
+ if f0_predictor == "pm":
+ from lib.infer_pack.modules.F0Predictor.PMF0Predictor import PMF0Predictor
+
+ f0_predictor_object = PMF0Predictor(
+ hop_length=hop_length, sampling_rate=sampling_rate
+ )
+ elif f0_predictor == "harvest":
+ from lib.infer_pack.modules.F0Predictor.HarvestF0Predictor import (
+ HarvestF0Predictor,
+ )
+
+ f0_predictor_object = HarvestF0Predictor(
+ hop_length=hop_length, sampling_rate=sampling_rate
+ )
+ elif f0_predictor == "dio":
+ from lib.infer_pack.modules.F0Predictor.DioF0Predictor import DioF0Predictor
+
+ f0_predictor_object = DioF0Predictor(
+ hop_length=hop_length, sampling_rate=sampling_rate
+ )
+ else:
+ raise Exception("Unknown f0 predictor")
+ return f0_predictor_object
+
+
+class OnnxRVC:
+ def __init__(
+ self,
+ model_path,
+ sr=40000,
+ hop_size=512,
+ vec_path="vec-768-layer-12",
+ device="cpu",
+ ):
+ vec_path = f"pretrained/{vec_path}.onnx"
+ self.vec_model = ContentVec(vec_path, device)
+ if device == "cpu" or device is None:
+ providers = ["CPUExecutionProvider"]
+ elif device == "cuda":
+ providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
+ elif device == "dml":
+ providers = ["DmlExecutionProvider"]
+ else:
+ raise RuntimeError("Unsupported device")
+ self.model = onnxruntime.InferenceSession(model_path, providers=providers)
+ self.sampling_rate = sr
+ self.hop_size = hop_size
+
+ def forward(self, hubert, hubert_length, pitch, pitchf, ds, rnd):
+ onnx_input = {
+ self.model.get_inputs()[0].name: hubert,
+ self.model.get_inputs()[1].name: hubert_length,
+ self.model.get_inputs()[2].name: pitch,
+ self.model.get_inputs()[3].name: pitchf,
+ self.model.get_inputs()[4].name: ds,
+ self.model.get_inputs()[5].name: rnd,
+ }
+ return (self.model.run(None, onnx_input)[0] * 32767).astype(np.int16)
+
+ def inference(
+ self,
+ raw_path,
+ sid,
+ f0_method="dio",
+ f0_up_key=0,
+ pad_time=0.5,
+ cr_threshold=0.02,
+ ):
+ f0_min = 50
+ f0_max = 1100
+ f0_mel_min = 1127 * np.log(1 + f0_min / 700)
+ f0_mel_max = 1127 * np.log(1 + f0_max / 700)
+ f0_predictor = get_f0_predictor(
+ f0_method,
+ hop_length=self.hop_size,
+ sampling_rate=self.sampling_rate,
+ threshold=cr_threshold,
+ )
+ wav, sr = librosa.load(raw_path, sr=self.sampling_rate)
+ org_length = len(wav)
+ if org_length / sr > 50.0:
+ raise RuntimeError("Input audio longer than 50 s is not supported")
+
+ wav16k = librosa.resample(wav, orig_sr=self.sampling_rate, target_sr=16000)
+
+ hubert = self.vec_model(wav16k)
+ hubert = np.repeat(hubert, 2, axis=2).transpose(0, 2, 1).astype(np.float32)
+ hubert_length = hubert.shape[1]
+
+ pitchf = f0_predictor.compute_f0(wav, hubert_length)
+ pitchf = pitchf * 2 ** (f0_up_key / 12)
+ pitch = pitchf.copy()
+ f0_mel = 1127 * np.log(1 + pitch / 700)
+ f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
+ f0_mel_max - f0_mel_min
+ ) + 1
+ f0_mel[f0_mel <= 1] = 1
+ f0_mel[f0_mel > 255] = 255
+ pitch = np.rint(f0_mel).astype(np.int64)
+
+ pitchf = pitchf.reshape(1, len(pitchf)).astype(np.float32)
+ pitch = pitch.reshape(1, len(pitch))
+ ds = np.array([sid]).astype(np.int64)
+
+ rnd = np.random.randn(1, 192, hubert_length).astype(np.float32)
+ hubert_length = np.array([hubert_length]).astype(np.int64)
+
+ out_wav = self.forward(hubert, hubert_length, pitch, pitchf, ds, rnd).squeeze()
+ out_wav = np.pad(out_wav, (0, 2 * self.hop_size), "constant")
+ return out_wav[0:org_length]
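+
+
+# Editor's sketch (illustrative only; model and audio paths are hypothetical).
+# inference() resamples the input to 16 kHz for ContentVec, doubles the feature
+# rate to match the vocoder hop, maps F0 to 1..255 coarse mel bins via
+# f0_mel = 1127 * ln(1 + f0 / 700), and runs the ONNX synthesizer with a fresh
+# noise prior:
+#
+#   import soundfile
+#   rvc = OnnxRVC("model.onnx", sr=40000, hop_size=512, device="cpu")
+#   audio = rvc.inference("input.wav", sid=0, f0_method="dio", f0_up_key=0)
+#   soundfile.write("out.wav", audio, 40000)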
diff --git a/lib/infer_pack/transforms.py b/lib/infer_pack/transforms.py
new file mode 100644
index 000000000..a11f799e0
--- /dev/null
+++ b/lib/infer_pack/transforms.py
@@ -0,0 +1,209 @@
+import torch
+from torch.nn import functional as F
+
+import numpy as np
+
+
+DEFAULT_MIN_BIN_WIDTH = 1e-3
+DEFAULT_MIN_BIN_HEIGHT = 1e-3
+DEFAULT_MIN_DERIVATIVE = 1e-3
+
+
+def piecewise_rational_quadratic_transform(
+ inputs,
+ unnormalized_widths,
+ unnormalized_heights,
+ unnormalized_derivatives,
+ inverse=False,
+ tails=None,
+ tail_bound=1.0,
+ min_bin_width=DEFAULT_MIN_BIN_WIDTH,
+ min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
+ min_derivative=DEFAULT_MIN_DERIVATIVE,
+):
+ if tails is None:
+ spline_fn = rational_quadratic_spline
+ spline_kwargs = {}
+ else:
+ spline_fn = unconstrained_rational_quadratic_spline
+ spline_kwargs = {"tails": tails, "tail_bound": tail_bound}
+
+ outputs, logabsdet = spline_fn(
+ inputs=inputs,
+ unnormalized_widths=unnormalized_widths,
+ unnormalized_heights=unnormalized_heights,
+ unnormalized_derivatives=unnormalized_derivatives,
+ inverse=inverse,
+ min_bin_width=min_bin_width,
+ min_bin_height=min_bin_height,
+ min_derivative=min_derivative,
+ **spline_kwargs
+ )
+ return outputs, logabsdet
+
+
+def searchsorted(bin_locations, inputs, eps=1e-6):
+ bin_locations[..., -1] += eps
+ return torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1
+
+
+def unconstrained_rational_quadratic_spline(
+ inputs,
+ unnormalized_widths,
+ unnormalized_heights,
+ unnormalized_derivatives,
+ inverse=False,
+ tails="linear",
+ tail_bound=1.0,
+ min_bin_width=DEFAULT_MIN_BIN_WIDTH,
+ min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
+ min_derivative=DEFAULT_MIN_DERIVATIVE,
+):
+ inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
+ outside_interval_mask = ~inside_interval_mask
+
+ outputs = torch.zeros_like(inputs)
+ logabsdet = torch.zeros_like(inputs)
+
+ if tails == "linear":
+ unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
+ constant = np.log(np.exp(1 - min_derivative) - 1)
+ unnormalized_derivatives[..., 0] = constant
+ unnormalized_derivatives[..., -1] = constant
+
+ outputs[outside_interval_mask] = inputs[outside_interval_mask]
+ logabsdet[outside_interval_mask] = 0
+ else:
+ raise RuntimeError("{} tails are not implemented.".format(tails))
+
+ (
+ outputs[inside_interval_mask],
+ logabsdet[inside_interval_mask],
+ ) = rational_quadratic_spline(
+ inputs=inputs[inside_interval_mask],
+ unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
+ unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
+ unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
+ inverse=inverse,
+ left=-tail_bound,
+ right=tail_bound,
+ bottom=-tail_bound,
+ top=tail_bound,
+ min_bin_width=min_bin_width,
+ min_bin_height=min_bin_height,
+ min_derivative=min_derivative,
+ )
+
+ return outputs, logabsdet
+
+
+def rational_quadratic_spline(
+ inputs,
+ unnormalized_widths,
+ unnormalized_heights,
+ unnormalized_derivatives,
+ inverse=False,
+ left=0.0,
+ right=1.0,
+ bottom=0.0,
+ top=1.0,
+ min_bin_width=DEFAULT_MIN_BIN_WIDTH,
+ min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
+ min_derivative=DEFAULT_MIN_DERIVATIVE,
+):
+ if torch.min(inputs) < left or torch.max(inputs) > right:
+ raise ValueError("Input to a transform is not within its domain")
+
+ num_bins = unnormalized_widths.shape[-1]
+
+ if min_bin_width * num_bins > 1.0:
+ raise ValueError("Minimal bin width too large for the number of bins")
+ if min_bin_height * num_bins > 1.0:
+ raise ValueError("Minimal bin height too large for the number of bins")
+
+ widths = F.softmax(unnormalized_widths, dim=-1)
+ widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
+ cumwidths = torch.cumsum(widths, dim=-1)
+ cumwidths = F.pad(cumwidths, pad=(1, 0), mode="constant", value=0.0)
+ cumwidths = (right - left) * cumwidths + left
+ cumwidths[..., 0] = left
+ cumwidths[..., -1] = right
+ widths = cumwidths[..., 1:] - cumwidths[..., :-1]
+
+ derivatives = min_derivative + F.softplus(unnormalized_derivatives)
+
+ heights = F.softmax(unnormalized_heights, dim=-1)
+ heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
+ cumheights = torch.cumsum(heights, dim=-1)
+ cumheights = F.pad(cumheights, pad=(1, 0), mode="constant", value=0.0)
+ cumheights = (top - bottom) * cumheights + bottom
+ cumheights[..., 0] = bottom
+ cumheights[..., -1] = top
+ heights = cumheights[..., 1:] - cumheights[..., :-1]
+
+ if inverse:
+ bin_idx = searchsorted(cumheights, inputs)[..., None]
+ else:
+ bin_idx = searchsorted(cumwidths, inputs)[..., None]
+
+ input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
+ input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
+
+ input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
+ delta = heights / widths
+ input_delta = delta.gather(-1, bin_idx)[..., 0]
+
+ input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
+ input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
+
+ input_heights = heights.gather(-1, bin_idx)[..., 0]
+
+ if inverse:
+ a = (inputs - input_cumheights) * (
+ input_derivatives + input_derivatives_plus_one - 2 * input_delta
+ ) + input_heights * (input_delta - input_derivatives)
+ b = input_heights * input_derivatives - (inputs - input_cumheights) * (
+ input_derivatives + input_derivatives_plus_one - 2 * input_delta
+ )
+ c = -input_delta * (inputs - input_cumheights)
+
+ discriminant = b.pow(2) - 4 * a * c
+ assert (discriminant >= 0).all()
+
+ root = (2 * c) / (-b - torch.sqrt(discriminant))
+ outputs = root * input_bin_widths + input_cumwidths
+
+ theta_one_minus_theta = root * (1 - root)
+ denominator = input_delta + (
+ (input_derivatives + input_derivatives_plus_one - 2 * input_delta)
+ * theta_one_minus_theta
+ )
+ derivative_numerator = input_delta.pow(2) * (
+ input_derivatives_plus_one * root.pow(2)
+ + 2 * input_delta * theta_one_minus_theta
+ + input_derivatives * (1 - root).pow(2)
+ )
+ logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
+
+ return outputs, -logabsdet
+ else:
+ theta = (inputs - input_cumwidths) / input_bin_widths
+ theta_one_minus_theta = theta * (1 - theta)
+
+ numerator = input_heights * (
+ input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta
+ )
+ denominator = input_delta + (
+ (input_derivatives + input_derivatives_plus_one - 2 * input_delta)
+ * theta_one_minus_theta
+ )
+ outputs = input_cumheights + numerator / denominator
+
+ derivative_numerator = input_delta.pow(2) * (
+ input_derivatives_plus_one * theta.pow(2)
+ + 2 * input_delta * theta_one_minus_theta
+ + input_derivatives * (1 - theta).pow(2)
+ )
+ logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
+
+ return outputs, logabsdet
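+
+
+# Editor's sketch (illustrative only). The spline is strictly monotonic, so
+# inverse=True recovers the inputs exactly and the two elementwise
+# log-determinants cancel:
+#
+#   import torch
+#   x = torch.rand(4, 10) * 8 - 4                 # inside the tail_bound interval
+#   w, h = torch.randn(4, 10, 10), torch.randn(4, 10, 10)
+#   d = torch.randn(4, 10, 9)                     # num_bins - 1 interior derivatives
+#   y, ld = piecewise_rational_quadratic_transform(
+#       x, w, h, d, tails="linear", tail_bound=5.0)
+#   x2, ld2 = piecewise_rational_quadratic_transform(
+#       y, w, h, d, inverse=True, tails="linear", tail_bound=5.0)
+#   assert torch.allclose(x, x2, atol=1e-4)
+#   assert torch.allclose(ld + ld2, torch.zeros_like(ld), atol=1e-4)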
diff --git a/lib/uvr5_pack/lib_v5/dataset.py b/lib/uvr5_pack/lib_v5/dataset.py
new file mode 100644
index 000000000..cfd01a174
--- /dev/null
+++ b/lib/uvr5_pack/lib_v5/dataset.py
@@ -0,0 +1,183 @@
+import os
+import random
+
+import numpy as np
+import torch
+import torch.utils.data
+from tqdm import tqdm
+
+from . import spec_utils
+
+
+class VocalRemoverValidationSet(torch.utils.data.Dataset):
+ def __init__(self, patch_list):
+ self.patch_list = patch_list
+
+ def __len__(self):
+ return len(self.patch_list)
+
+ def __getitem__(self, idx):
+ path = self.patch_list[idx]
+ data = np.load(path)
+
+ X, y = data["X"], data["y"]
+
+ X_mag = np.abs(X)
+ y_mag = np.abs(y)
+
+ return X_mag, y_mag
+
+
+def make_pair(mix_dir, inst_dir):
+ input_exts = [".wav", ".m4a", ".mp3", ".mp4", ".flac"]
+
+ X_list = sorted(
+ [
+ os.path.join(mix_dir, fname)
+ for fname in os.listdir(mix_dir)
+ if os.path.splitext(fname)[1] in input_exts
+ ]
+ )
+ y_list = sorted(
+ [
+ os.path.join(inst_dir, fname)
+ for fname in os.listdir(inst_dir)
+ if os.path.splitext(fname)[1] in input_exts
+ ]
+ )
+
+ filelist = list(zip(X_list, y_list))
+
+ return filelist
+
+
+def train_val_split(dataset_dir, split_mode, val_rate, val_filelist):
+ if split_mode == "random":
+ filelist = make_pair(
+ os.path.join(dataset_dir, "mixtures"),
+ os.path.join(dataset_dir, "instruments"),
+ )
+
+ random.shuffle(filelist)
+
+ if len(val_filelist) == 0:
+ val_size = int(len(filelist) * val_rate)
+ train_filelist = filelist[:-val_size]
+ val_filelist = filelist[-val_size:]
+ else:
+ train_filelist = [
+ pair for pair in filelist if list(pair) not in val_filelist
+ ]
+ elif split_mode == "subdirs":
+ if len(val_filelist) != 0:
+ raise ValueError(
+ "The `val_filelist` option is not available in `subdirs` mode"
+ )
+
+ train_filelist = make_pair(
+ os.path.join(dataset_dir, "training/mixtures"),
+ os.path.join(dataset_dir, "training/instruments"),
+ )
+
+ val_filelist = make_pair(
+ os.path.join(dataset_dir, "validation/mixtures"),
+ os.path.join(dataset_dir, "validation/instruments"),
+ )
+
+ return train_filelist, val_filelist
+
+
+def augment(X, y, reduction_rate, reduction_mask, mixup_rate, mixup_alpha):
+ perm = np.random.permutation(len(X))
+ for i, idx in enumerate(tqdm(perm)):
+ if np.random.uniform() < reduction_rate:
+ y[idx] = spec_utils.reduce_vocal_aggressively(
+ X[idx], y[idx], reduction_mask
+ )
+
+ if np.random.uniform() < 0.5:
+ # swap channel
+ X[idx] = X[idx, ::-1]
+ y[idx] = y[idx, ::-1]
+ if np.random.uniform() < 0.02:
+ # mono
+ X[idx] = X[idx].mean(axis=0, keepdims=True)
+ y[idx] = y[idx].mean(axis=0, keepdims=True)
+ if np.random.uniform() < 0.02:
+ # inst
+ X[idx] = y[idx]
+
+ if np.random.uniform() < mixup_rate and i < len(perm) - 1:
+ lam = np.random.beta(mixup_alpha, mixup_alpha)
+ X[idx] = lam * X[idx] + (1 - lam) * X[perm[i + 1]]
+ y[idx] = lam * y[idx] + (1 - lam) * y[perm[i + 1]]
+
+ return X, y
+
+
+def make_padding(width, cropsize, offset):
+ left = offset
+ roi_size = cropsize - left * 2
+ if roi_size == 0:
+ roi_size = cropsize
+ right = roi_size - (width % roi_size) + left
+
+ return left, right, roi_size
+
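+# Editor's sketch (illustrative only). make_padding sizes the margins so a
+# spectrogram of `width` frames splits into whole crops of `cropsize` frames
+# with `offset` frames of context on each side. Worked example:
+#
+#   left, right, roi_size = make_padding(width=1000, cropsize=256, offset=32)
+#   # left=32, roi_size=256-64=192, right=192-(1000%192)+32 = 192-40+32 = 184;
+#   # the padded width 1000+32+184 then tiles into windows stepping by roi_size
+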
+
+def make_training_set(filelist, cropsize, patches, sr, hop_length, n_fft, offset):
+ len_dataset = patches * len(filelist)
+
+ X_dataset = np.zeros((len_dataset, 2, n_fft // 2 + 1, cropsize), dtype=np.complex64)
+ y_dataset = np.zeros((len_dataset, 2, n_fft // 2 + 1, cropsize), dtype=np.complex64)
+
+ for i, (X_path, y_path) in enumerate(tqdm(filelist)):
+ X, y = spec_utils.cache_or_load(X_path, y_path, sr, hop_length, n_fft)
+ coef = np.max([np.abs(X).max(), np.abs(y).max()])
+ X, y = X / coef, y / coef
+
+ l, r, roi_size = make_padding(X.shape[2], cropsize, offset)
+ X_pad = np.pad(X, ((0, 0), (0, 0), (l, r)), mode="constant")
+ y_pad = np.pad(y, ((0, 0), (0, 0), (l, r)), mode="constant")
+
+ starts = np.random.randint(0, X_pad.shape[2] - cropsize, patches)
+ ends = starts + cropsize
+ for j in range(patches):
+ idx = i * patches + j
+ X_dataset[idx] = X_pad[:, :, starts[j] : ends[j]]
+ y_dataset[idx] = y_pad[:, :, starts[j] : ends[j]]
+
+ return X_dataset, y_dataset
+
+
+def make_validation_set(filelist, cropsize, sr, hop_length, n_fft, offset):
+ patch_list = []
+ patch_dir = "cs{}_sr{}_hl{}_nf{}_of{}".format(
+ cropsize, sr, hop_length, n_fft, offset
+ )
+ os.makedirs(patch_dir, exist_ok=True)
+
+ for i, (X_path, y_path) in enumerate(tqdm(filelist)):
+ basename = os.path.splitext(os.path.basename(X_path))[0]
+
+ X, y = spec_utils.cache_or_load(X_path, y_path, sr, hop_length, n_fft)
+ coef = np.max([np.abs(X).max(), np.abs(y).max()])
+ X, y = X / coef, y / coef
+
+ l, r, roi_size = make_padding(X.shape[2], cropsize, offset)
+ X_pad = np.pad(X, ((0, 0), (0, 0), (l, r)), mode="constant")
+ y_pad = np.pad(y, ((0, 0), (0, 0), (l, r)), mode="constant")
+
+ len_dataset = int(np.ceil(X.shape[2] / roi_size))
+ for j in range(len_dataset):
+ outpath = os.path.join(patch_dir, "{}_p{}.npz".format(basename, j))
+ start = j * roi_size
+ if not os.path.exists(outpath):
+ np.savez(
+ outpath,
+ X=X_pad[:, :, start : start + cropsize],
+ y=y_pad[:, :, start : start + cropsize],
+ )
+ patch_list.append(outpath)
+
+ return VocalRemoverValidationSet(patch_list)
diff --git a/lib/uvr5_pack/lib_v5/layers.py b/lib/uvr5_pack/lib_v5/layers.py
new file mode 100644
index 000000000..b82f06bb4
--- /dev/null
+++ b/lib/uvr5_pack/lib_v5/layers.py
@@ -0,0 +1,118 @@
+import torch
+from torch import nn
+import torch.nn.functional as F
+
+from . import spec_utils
+
+
+class Conv2DBNActiv(nn.Module):
+ def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
+ super(Conv2DBNActiv, self).__init__()
+ self.conv = nn.Sequential(
+ nn.Conv2d(
+ nin,
+ nout,
+ kernel_size=ksize,
+ stride=stride,
+ padding=pad,
+ dilation=dilation,
+ bias=False,
+ ),
+ nn.BatchNorm2d(nout),
+ activ(),
+ )
+
+ def __call__(self, x):
+ return self.conv(x)
+
+
+class SeperableConv2DBNActiv(nn.Module):
+ def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
+ super(SeperableConv2DBNActiv, self).__init__()
+ self.conv = nn.Sequential(
+ nn.Conv2d(
+ nin,
+ nin,
+ kernel_size=ksize,
+ stride=stride,
+ padding=pad,
+ dilation=dilation,
+ groups=nin,
+ bias=False,
+ ),
+ nn.Conv2d(nin, nout, kernel_size=1, bias=False),
+ nn.BatchNorm2d(nout),
+ activ(),
+ )
+
+ def __call__(self, x):
+ return self.conv(x)
+
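+# Editor's sketch (illustrative only). The "Seperable" [sic] block is a
+# depthwise (groups=nin) k x k conv followed by a 1x1 pointwise conv, so for
+# nin=nout=64, k=3 it needs 64*9 + 64*64 = 4672 weights instead of the
+# 64*64*9 = 36864 of a dense Conv2DBNActiv with the same receptive field:
+#
+#   import torch
+#   sep = SeperableConv2DBNActiv(64, 64, ksize=3, pad=1)
+#   y = sep(torch.randn(1, 64, 32, 32))  # shape preserved: (1, 64, 32, 32)
+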
+
+class Encoder(nn.Module):
+ def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU):
+ super(Encoder, self).__init__()
+ self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
+ self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ)
+
+ def __call__(self, x):
+ skip = self.conv1(x)
+ h = self.conv2(skip)
+
+ return h, skip
+
+
+class Decoder(nn.Module):
+ def __init__(
+ self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False
+ ):
+ super(Decoder, self).__init__()
+ self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
+ self.dropout = nn.Dropout2d(0.1) if dropout else None
+
+ def __call__(self, x, skip=None):
+ x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)
+ if skip is not None:
+ skip = spec_utils.crop_center(skip, x)
+ x = torch.cat([x, skip], dim=1)
+ h = self.conv(x)
+
+ if self.dropout is not None:
+ h = self.dropout(h)
+
+ return h
+
+
+class ASPPModule(nn.Module):
+ def __init__(self, nin, nout, dilations=(4, 8, 16), activ=nn.ReLU):
+ super(ASPPModule, self).__init__()
+ self.conv1 = nn.Sequential(
+ nn.AdaptiveAvgPool2d((1, None)),
+ Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ),
+ )
+ self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ)
+ self.conv3 = SeperableConv2DBNActiv(
+ nin, nin, 3, 1, dilations[0], dilations[0], activ=activ
+ )
+ self.conv4 = SeperableConv2DBNActiv(
+ nin, nin, 3, 1, dilations[1], dilations[1], activ=activ
+ )
+ self.conv5 = SeperableConv2DBNActiv(
+ nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
+ )
+ self.bottleneck = nn.Sequential(
+ Conv2DBNActiv(nin * 5, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)
+ )
+
+ def forward(self, x):
+ _, _, h, w = x.size()
+ feat1 = F.interpolate(
+ self.conv1(x), size=(h, w), mode="bilinear", align_corners=True
+ )
+ feat2 = self.conv2(x)
+ feat3 = self.conv3(x)
+ feat4 = self.conv4(x)
+ feat5 = self.conv5(x)
+ out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1)
+ bottle = self.bottleneck(out)
+ return bottle
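+
+
+# Editor's sketch (illustrative only). ASPP fuses one image-level branch
+# (pooled along the frequency axis, then upsampled back), one 1x1 branch and
+# three dilated separable branches; every branch preserves H x W, so the
+# concat is (N, nin * 5, H, W) before the bottleneck projects it to nout:
+#
+#   import torch
+#   aspp = ASPPModule(nin=32, nout=64)
+#   y = aspp(torch.randn(1, 32, 64, 128))  # -> (1, 64, 64, 128)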
diff --git a/lib/uvr5_pack/lib_v5/layers_123812KB .py b/lib/uvr5_pack/lib_v5/layers_123812KB .py
new file mode 100644
index 000000000..b82f06bb4
--- /dev/null
+++ b/lib/uvr5_pack/lib_v5/layers_123812KB .py
@@ -0,0 +1,118 @@
+import torch
+from torch import nn
+import torch.nn.functional as F
+
+from . import spec_utils
+
+
+class Conv2DBNActiv(nn.Module):
+ def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
+ super(Conv2DBNActiv, self).__init__()
+ self.conv = nn.Sequential(
+ nn.Conv2d(
+ nin,
+ nout,
+ kernel_size=ksize,
+ stride=stride,
+ padding=pad,
+ dilation=dilation,
+ bias=False,
+ ),
+ nn.BatchNorm2d(nout),
+ activ(),
+ )
+
+ def __call__(self, x):
+ return self.conv(x)
+
+
+class SeperableConv2DBNActiv(nn.Module):
+ def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
+ super(SeperableConv2DBNActiv, self).__init__()
+ self.conv = nn.Sequential(
+ nn.Conv2d(
+ nin,
+ nin,
+ kernel_size=ksize,
+ stride=stride,
+ padding=pad,
+ dilation=dilation,
+ groups=nin,
+ bias=False,
+ ),
+ nn.Conv2d(nin, nout, kernel_size=1, bias=False),
+ nn.BatchNorm2d(nout),
+ activ(),
+ )
+
+ def __call__(self, x):
+ return self.conv(x)
+
+
+class Encoder(nn.Module):
+ def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU):
+ super(Encoder, self).__init__()
+ self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
+ self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ)
+
+ def __call__(self, x):
+ skip = self.conv1(x)
+ h = self.conv2(skip)
+
+ return h, skip
+
+
+class Decoder(nn.Module):
+ def __init__(
+ self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False
+ ):
+ super(Decoder, self).__init__()
+ self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
+ self.dropout = nn.Dropout2d(0.1) if dropout else None
+
+ def __call__(self, x, skip=None):
+ x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)
+ if skip is not None:
+ skip = spec_utils.crop_center(skip, x)
+ x = torch.cat([x, skip], dim=1)
+ h = self.conv(x)
+
+ if self.dropout is not None:
+ h = self.dropout(h)
+
+ return h
+
+
+class ASPPModule(nn.Module):
+ def __init__(self, nin, nout, dilations=(4, 8, 16), activ=nn.ReLU):
+ super(ASPPModule, self).__init__()
+ self.conv1 = nn.Sequential(
+ nn.AdaptiveAvgPool2d((1, None)),
+ Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ),
+ )
+ self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ)
+ self.conv3 = SeperableConv2DBNActiv(
+ nin, nin, 3, 1, dilations[0], dilations[0], activ=activ
+ )
+ self.conv4 = SeperableConv2DBNActiv(
+ nin, nin, 3, 1, dilations[1], dilations[1], activ=activ
+ )
+ self.conv5 = SeperableConv2DBNActiv(
+ nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
+ )
+ self.bottleneck = nn.Sequential(
+ Conv2DBNActiv(nin * 5, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)
+ )
+
+ def forward(self, x):
+ _, _, h, w = x.size()
+ feat1 = F.interpolate(
+ self.conv1(x), size=(h, w), mode="bilinear", align_corners=True
+ )
+ feat2 = self.conv2(x)
+ feat3 = self.conv3(x)
+ feat4 = self.conv4(x)
+ feat5 = self.conv5(x)
+ out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1)
+ bottle = self.bottleneck(out)
+ return bottle
diff --git a/lib/uvr5_pack/lib_v5/layers_123821KB.py b/lib/uvr5_pack/lib_v5/layers_123821KB.py
new file mode 100644
index 000000000..b82f06bb4
--- /dev/null
+++ b/lib/uvr5_pack/lib_v5/layers_123821KB.py
@@ -0,0 +1,118 @@
+import torch
+from torch import nn
+import torch.nn.functional as F
+
+from . import spec_utils
+
+
+class Conv2DBNActiv(nn.Module):
+ def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
+ super(Conv2DBNActiv, self).__init__()
+ self.conv = nn.Sequential(
+ nn.Conv2d(
+ nin,
+ nout,
+ kernel_size=ksize,
+ stride=stride,
+ padding=pad,
+ dilation=dilation,
+ bias=False,
+ ),
+ nn.BatchNorm2d(nout),
+ activ(),
+ )
+
+ def __call__(self, x):
+ return self.conv(x)
+
+
+class SeperableConv2DBNActiv(nn.Module):
+ def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
+ super(SeperableConv2DBNActiv, self).__init__()
+ self.conv = nn.Sequential(
+ nn.Conv2d(
+ nin,
+ nin,
+ kernel_size=ksize,
+ stride=stride,
+ padding=pad,
+ dilation=dilation,
+ groups=nin,
+ bias=False,
+ ),
+ nn.Conv2d(nin, nout, kernel_size=1, bias=False),
+ nn.BatchNorm2d(nout),
+ activ(),
+ )
+
+ def __call__(self, x):
+ return self.conv(x)
+
+
+class Encoder(nn.Module):
+ def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU):
+ super(Encoder, self).__init__()
+ self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
+ self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ)
+
+ def __call__(self, x):
+ skip = self.conv1(x)
+ h = self.conv2(skip)
+
+ return h, skip
+
+
+class Decoder(nn.Module):
+ def __init__(
+ self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False
+ ):
+ super(Decoder, self).__init__()
+ self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
+ self.dropout = nn.Dropout2d(0.1) if dropout else None
+
+ def __call__(self, x, skip=None):
+ x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)
+ if skip is not None:
+ skip = spec_utils.crop_center(skip, x)
+ x = torch.cat([x, skip], dim=1)
+ h = self.conv(x)
+
+ if self.dropout is not None:
+ h = self.dropout(h)
+
+ return h
+
+
+class ASPPModule(nn.Module):
+ def __init__(self, nin, nout, dilations=(4, 8, 16), activ=nn.ReLU):
+ super(ASPPModule, self).__init__()
+ self.conv1 = nn.Sequential(
+ nn.AdaptiveAvgPool2d((1, None)),
+ Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ),
+ )
+ self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ)
+ self.conv3 = SeperableConv2DBNActiv(
+ nin, nin, 3, 1, dilations[0], dilations[0], activ=activ
+ )
+ self.conv4 = SeperableConv2DBNActiv(
+ nin, nin, 3, 1, dilations[1], dilations[1], activ=activ
+ )
+ self.conv5 = SeperableConv2DBNActiv(
+ nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
+ )
+ self.bottleneck = nn.Sequential(
+ Conv2DBNActiv(nin * 5, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)
+ )
+
+ def forward(self, x):
+ _, _, h, w = x.size()
+ feat1 = F.interpolate(
+ self.conv1(x), size=(h, w), mode="bilinear", align_corners=True
+ )
+ feat2 = self.conv2(x)
+ feat3 = self.conv3(x)
+ feat4 = self.conv4(x)
+ feat5 = self.conv5(x)
+ out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1)
+ bottle = self.bottleneck(out)
+ return bottle
diff --git a/lib/uvr5_pack/lib_v5/layers_33966KB.py b/lib/uvr5_pack/lib_v5/layers_33966KB.py
new file mode 100644
index 000000000..a38b7bb3a
--- /dev/null
+++ b/lib/uvr5_pack/lib_v5/layers_33966KB.py
@@ -0,0 +1,126 @@
+import torch
+from torch import nn
+import torch.nn.functional as F
+
+from . import spec_utils
+
+
+class Conv2DBNActiv(nn.Module):
+ def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
+ super(Conv2DBNActiv, self).__init__()
+ self.conv = nn.Sequential(
+ nn.Conv2d(
+ nin,
+ nout,
+ kernel_size=ksize,
+ stride=stride,
+ padding=pad,
+ dilation=dilation,
+ bias=False,
+ ),
+ nn.BatchNorm2d(nout),
+ activ(),
+ )
+
+ def __call__(self, x):
+ return self.conv(x)
+
+
+class SeperableConv2DBNActiv(nn.Module):
+ def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
+ super(SeperableConv2DBNActiv, self).__init__()
+ self.conv = nn.Sequential(
+ nn.Conv2d(
+ nin,
+ nin,
+ kernel_size=ksize,
+ stride=stride,
+ padding=pad,
+ dilation=dilation,
+ groups=nin,
+ bias=False,
+ ),
+ nn.Conv2d(nin, nout, kernel_size=1, bias=False),
+ nn.BatchNorm2d(nout),
+ activ(),
+ )
+
+ def __call__(self, x):
+ return self.conv(x)
+
+
+class Encoder(nn.Module):
+ def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU):
+ super(Encoder, self).__init__()
+ self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
+ self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ)
+
+ def __call__(self, x):
+ skip = self.conv1(x)
+ h = self.conv2(skip)
+
+ return h, skip
+
+
+class Decoder(nn.Module):
+ def __init__(
+ self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False
+ ):
+ super(Decoder, self).__init__()
+ self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
+ self.dropout = nn.Dropout2d(0.1) if dropout else None
+
+ def __call__(self, x, skip=None):
+ x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)
+ if skip is not None:
+ skip = spec_utils.crop_center(skip, x)
+ x = torch.cat([x, skip], dim=1)
+ h = self.conv(x)
+
+ if self.dropout is not None:
+ h = self.dropout(h)
+
+ return h
+
+
+class ASPPModule(nn.Module):
+ def __init__(self, nin, nout, dilations=(4, 8, 16, 32, 64), activ=nn.ReLU):
+ super(ASPPModule, self).__init__()
+ self.conv1 = nn.Sequential(
+ nn.AdaptiveAvgPool2d((1, None)),
+ Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ),
+ )
+ self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ)
+ self.conv3 = SeperableConv2DBNActiv(
+ nin, nin, 3, 1, dilations[0], dilations[0], activ=activ
+ )
+ self.conv4 = SeperableConv2DBNActiv(
+ nin, nin, 3, 1, dilations[1], dilations[1], activ=activ
+ )
+ self.conv5 = SeperableConv2DBNActiv(
+ nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
+ )
+ self.conv6 = SeperableConv2DBNActiv(
+ nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
+ )
+ self.conv7 = SeperableConv2DBNActiv(
+ nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
+ )
+ self.bottleneck = nn.Sequential(
+ Conv2DBNActiv(nin * 7, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)
+ )
+
+ def forward(self, x):
+ _, _, h, w = x.size()
+ feat1 = F.interpolate(
+ self.conv1(x), size=(h, w), mode="bilinear", align_corners=True
+ )
+ feat2 = self.conv2(x)
+ feat3 = self.conv3(x)
+ feat4 = self.conv4(x)
+ feat5 = self.conv5(x)
+ feat6 = self.conv6(x)
+ feat7 = self.conv7(x)
+ out = torch.cat((feat1, feat2, feat3, feat4, feat5, feat6, feat7), dim=1)
+ bottle = self.bottleneck(out)
+ return bottle
diff --git a/lib/uvr5_pack/lib_v5/layers_537227KB.py b/lib/uvr5_pack/lib_v5/layers_537227KB.py
new file mode 100644
index 000000000..a38b7bb3a
--- /dev/null
+++ b/lib/uvr5_pack/lib_v5/layers_537227KB.py
@@ -0,0 +1,126 @@
+import torch
+from torch import nn
+import torch.nn.functional as F
+
+from . import spec_utils
+
+
+class Conv2DBNActiv(nn.Module):
+ def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
+ super(Conv2DBNActiv, self).__init__()
+ self.conv = nn.Sequential(
+ nn.Conv2d(
+ nin,
+ nout,
+ kernel_size=ksize,
+ stride=stride,
+ padding=pad,
+ dilation=dilation,
+ bias=False,
+ ),
+ nn.BatchNorm2d(nout),
+ activ(),
+ )
+
+ def __call__(self, x):
+ return self.conv(x)
+
+
+class SeperableConv2DBNActiv(nn.Module):
+ def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
+ super(SeperableConv2DBNActiv, self).__init__()
+ self.conv = nn.Sequential(
+ nn.Conv2d(
+ nin,
+ nin,
+ kernel_size=ksize,
+ stride=stride,
+ padding=pad,
+ dilation=dilation,
+ groups=nin,
+ bias=False,
+ ),
+ nn.Conv2d(nin, nout, kernel_size=1, bias=False),
+ nn.BatchNorm2d(nout),
+ activ(),
+ )
+
+ def __call__(self, x):
+ return self.conv(x)
+
+
+class Encoder(nn.Module):
+ def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU):
+ super(Encoder, self).__init__()
+ self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
+ self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ)
+
+ def __call__(self, x):
+ skip = self.conv1(x)
+ h = self.conv2(skip)
+
+ return h, skip
+
+
+class Decoder(nn.Module):
+ def __init__(
+ self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False
+ ):
+ super(Decoder, self).__init__()
+ self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
+ self.dropout = nn.Dropout2d(0.1) if dropout else None
+
+ def __call__(self, x, skip=None):
+ x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)
+ if skip is not None:
+ skip = spec_utils.crop_center(skip, x)
+ x = torch.cat([x, skip], dim=1)
+ h = self.conv(x)
+
+ if self.dropout is not None:
+ h = self.dropout(h)
+
+ return h
+
+
+class ASPPModule(nn.Module):
+ def __init__(self, nin, nout, dilations=(4, 8, 16, 32, 64), activ=nn.ReLU):
+ super(ASPPModule, self).__init__()
+ self.conv1 = nn.Sequential(
+ nn.AdaptiveAvgPool2d((1, None)),
+ Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ),
+ )
+ self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ)
+ self.conv3 = SeperableConv2DBNActiv(
+ nin, nin, 3, 1, dilations[0], dilations[0], activ=activ
+ )
+ self.conv4 = SeperableConv2DBNActiv(
+ nin, nin, 3, 1, dilations[1], dilations[1], activ=activ
+ )
+ self.conv5 = SeperableConv2DBNActiv(
+ nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
+ )
+ self.conv6 = SeperableConv2DBNActiv(
+ nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
+ )
+ self.conv7 = SeperableConv2DBNActiv(
+ nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
+ )
+ self.bottleneck = nn.Sequential(
+ Conv2DBNActiv(nin * 7, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)
+ )
+
+ def forward(self, x):
+ _, _, h, w = x.size()
+ feat1 = F.interpolate(
+ self.conv1(x), size=(h, w), mode="bilinear", align_corners=True
+ )
+ feat2 = self.conv2(x)
+ feat3 = self.conv3(x)
+ feat4 = self.conv4(x)
+ feat5 = self.conv5(x)
+ feat6 = self.conv6(x)
+ feat7 = self.conv7(x)
+ out = torch.cat((feat1, feat2, feat3, feat4, feat5, feat6, feat7), dim=1)
+ bottle = self.bottleneck(out)
+ return bottle
diff --git a/lib/uvr5_pack/lib_v5/layers_537238KB.py b/lib/uvr5_pack/lib_v5/layers_537238KB.py
new file mode 100644
index 000000000..a38b7bb3a
--- /dev/null
+++ b/lib/uvr5_pack/lib_v5/layers_537238KB.py
@@ -0,0 +1,126 @@
+import torch
+from torch import nn
+import torch.nn.functional as F
+
+from . import spec_utils
+
+
+class Conv2DBNActiv(nn.Module):
+ def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
+ super(Conv2DBNActiv, self).__init__()
+ self.conv = nn.Sequential(
+ nn.Conv2d(
+ nin,
+ nout,
+ kernel_size=ksize,
+ stride=stride,
+ padding=pad,
+ dilation=dilation,
+ bias=False,
+ ),
+ nn.BatchNorm2d(nout),
+ activ(),
+ )
+
+ def __call__(self, x):
+ return self.conv(x)
+
+
+class SeperableConv2DBNActiv(nn.Module):
+ def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
+ super(SeperableConv2DBNActiv, self).__init__()
+ self.conv = nn.Sequential(
+ nn.Conv2d(
+ nin,
+ nin,
+ kernel_size=ksize,
+ stride=stride,
+ padding=pad,
+ dilation=dilation,
+ groups=nin,
+ bias=False,
+ ),
+ nn.Conv2d(nin, nout, kernel_size=1, bias=False),
+ nn.BatchNorm2d(nout),
+ activ(),
+ )
+
+ def __call__(self, x):
+ return self.conv(x)
+
+
+class Encoder(nn.Module):
+ def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU):
+ super(Encoder, self).__init__()
+ self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
+ self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ)
+
+ def __call__(self, x):
+ skip = self.conv1(x)
+ h = self.conv2(skip)
+
+ return h, skip
+
+
+class Decoder(nn.Module):
+ def __init__(
+ self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False
+ ):
+ super(Decoder, self).__init__()
+ self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
+ self.dropout = nn.Dropout2d(0.1) if dropout else None
+
+ def __call__(self, x, skip=None):
+ x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)
+ if skip is not None:
+ skip = spec_utils.crop_center(skip, x)
+ x = torch.cat([x, skip], dim=1)
+ h = self.conv(x)
+
+ if self.dropout is not None:
+ h = self.dropout(h)
+
+ return h
+
+
+class ASPPModule(nn.Module):
+ def __init__(self, nin, nout, dilations=(4, 8, 16, 32, 64), activ=nn.ReLU):
+ super(ASPPModule, self).__init__()
+ self.conv1 = nn.Sequential(
+ nn.AdaptiveAvgPool2d((1, None)),
+ Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ),
+ )
+ self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ)
+ self.conv3 = SeperableConv2DBNActiv(
+ nin, nin, 3, 1, dilations[0], dilations[0], activ=activ
+ )
+ self.conv4 = SeperableConv2DBNActiv(
+ nin, nin, 3, 1, dilations[1], dilations[1], activ=activ
+ )
+ self.conv5 = SeperableConv2DBNActiv(
+ nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
+ )
+ self.conv6 = SeperableConv2DBNActiv(
+ nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
+ )
+ self.conv7 = SeperableConv2DBNActiv(
+ nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
+ )
+ self.bottleneck = nn.Sequential(
+ Conv2DBNActiv(nin * 7, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)
+ )
+
+ def forward(self, x):
+ _, _, h, w = x.size()
+ feat1 = F.interpolate(
+ self.conv1(x), size=(h, w), mode="bilinear", align_corners=True
+ )
+ feat2 = self.conv2(x)
+ feat3 = self.conv3(x)
+ feat4 = self.conv4(x)
+ feat5 = self.conv5(x)
+ feat6 = self.conv6(x)
+ feat7 = self.conv7(x)
+ out = torch.cat((feat1, feat2, feat3, feat4, feat5, feat6, feat7), dim=1)
+ bottle = self.bottleneck(out)
+ return bottle
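+
+# Rough shape walk for ASPPModule.forward, assuming input (N, nin, H, W):
+# feat1 average-pools the frequency axis and is resized back to (H, W);
+# feat2 is a 1x1 conv; feat3-feat7 are separable 3x3 convs dilated at rates
+# 4/8/16/32/64. Every branch keeps (N, nin, H, W), so the concatenation is
+# (N, nin * 7, H, W) and the 1x1 bottleneck projects it to (N, nout, H, W).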
diff --git a/lib/uvr5_pack/lib_v5/layers_new.py b/lib/uvr5_pack/lib_v5/layers_new.py
new file mode 100644
index 000000000..0c13e60b0
--- /dev/null
+++ b/lib/uvr5_pack/lib_v5/layers_new.py
@@ -0,0 +1,125 @@
+import torch
+from torch import nn
+import torch.nn.functional as F
+
+from . import spec_utils
+
+
+class Conv2DBNActiv(nn.Module):
+ def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
+ super(Conv2DBNActiv, self).__init__()
+ self.conv = nn.Sequential(
+ nn.Conv2d(
+ nin,
+ nout,
+ kernel_size=ksize,
+ stride=stride,
+ padding=pad,
+ dilation=dilation,
+ bias=False,
+ ),
+ nn.BatchNorm2d(nout),
+ activ(),
+ )
+
+ def __call__(self, x):
+ return self.conv(x)
+
+
+class Encoder(nn.Module):
+ def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU):
+ super(Encoder, self).__init__()
+ self.conv1 = Conv2DBNActiv(nin, nout, ksize, stride, pad, activ=activ)
+ self.conv2 = Conv2DBNActiv(nout, nout, ksize, 1, pad, activ=activ)
+
+ def __call__(self, x):
+ h = self.conv1(x)
+ h = self.conv2(h)
+
+ return h
+
+
+class Decoder(nn.Module):
+ def __init__(
+ self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False
+ ):
+ super(Decoder, self).__init__()
+ self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
+ # self.conv2 = Conv2DBNActiv(nout, nout, ksize, 1, pad, activ=activ)
+ self.dropout = nn.Dropout2d(0.1) if dropout else None
+
+ def __call__(self, x, skip=None):
+ x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)
+
+ if skip is not None:
+ skip = spec_utils.crop_center(skip, x)
+ x = torch.cat([x, skip], dim=1)
+
+ h = self.conv1(x)
+ # h = self.conv2(h)
+
+ if self.dropout is not None:
+ h = self.dropout(h)
+
+ return h
+
+
+class ASPPModule(nn.Module):
+ def __init__(self, nin, nout, dilations=(4, 8, 12), activ=nn.ReLU, dropout=False):
+ super(ASPPModule, self).__init__()
+ self.conv1 = nn.Sequential(
+ nn.AdaptiveAvgPool2d((1, None)),
+ Conv2DBNActiv(nin, nout, 1, 1, 0, activ=activ),
+ )
+ self.conv2 = Conv2DBNActiv(nin, nout, 1, 1, 0, activ=activ)
+ self.conv3 = Conv2DBNActiv(
+ nin, nout, 3, 1, dilations[0], dilations[0], activ=activ
+ )
+ self.conv4 = Conv2DBNActiv(
+ nin, nout, 3, 1, dilations[1], dilations[1], activ=activ
+ )
+ self.conv5 = Conv2DBNActiv(
+ nin, nout, 3, 1, dilations[2], dilations[2], activ=activ
+ )
+ self.bottleneck = Conv2DBNActiv(nout * 5, nout, 1, 1, 0, activ=activ)
+ self.dropout = nn.Dropout2d(0.1) if dropout else None
+
+ def forward(self, x):
+ _, _, h, w = x.size()
+ feat1 = F.interpolate(
+ self.conv1(x), size=(h, w), mode="bilinear", align_corners=True
+ )
+ feat2 = self.conv2(x)
+ feat3 = self.conv3(x)
+ feat4 = self.conv4(x)
+ feat5 = self.conv5(x)
+ out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1)
+ out = self.bottleneck(out)
+
+ if self.dropout is not None:
+ out = self.dropout(out)
+
+ return out
+
+
+class LSTMModule(nn.Module):
+ def __init__(self, nin_conv, nin_lstm, nout_lstm):
+ super(LSTMModule, self).__init__()
+ self.conv = Conv2DBNActiv(nin_conv, 1, 1, 1, 0)
+ self.lstm = nn.LSTM(
+ input_size=nin_lstm, hidden_size=nout_lstm // 2, bidirectional=True
+ )
+ self.dense = nn.Sequential(
+ nn.Linear(nout_lstm, nin_lstm), nn.BatchNorm1d(nin_lstm), nn.ReLU()
+ )
+
+ def forward(self, x):
+ N, _, nbins, nframes = x.size()
+ h = self.conv(x)[:, 0] # N, nbins, nframes
+ h = h.permute(2, 0, 1) # nframes, N, nbins
+ h, _ = self.lstm(h)
+ h = self.dense(h.reshape(-1, h.size()[-1])) # nframes * N, nbins
+ h = h.reshape(nframes, N, 1, nbins)
+ h = h.permute(1, 2, 3, 0)
+
+ return h
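+
+# Rough shape walk for LSTMModule.forward (nin_lstm must equal nbins for the
+# final reshape to hold):
+#   x: (N, nin_conv, nbins, nframes)
+#   conv + [:, 0]     -> (N, nbins, nframes)
+#   permute(2, 0, 1)  -> (nframes, N, nbins)       nn.LSTM is seq-first here
+#   BiLSTM            -> (nframes, N, nout_lstm)   2 * (nout_lstm // 2)
+#   dense + reshape   -> (N, 1, nbins, nframes), one extra decoder channel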
diff --git a/lib/uvr5_pack/lib_v5/model_param_init.py b/lib/uvr5_pack/lib_v5/model_param_init.py
new file mode 100644
index 000000000..b995c0bfb
--- /dev/null
+++ b/lib/uvr5_pack/lib_v5/model_param_init.py
@@ -0,0 +1,69 @@
+import json
+import os
+import pathlib
+
+default_param = {}
+default_param["bins"] = 768
+default_param["unstable_bins"] = 9 # training only
+default_param["reduction_bins"] = 762 # training only
+default_param["sr"] = 44100
+default_param["pre_filter_start"] = 757
+default_param["pre_filter_stop"] = 768
+default_param["band"] = {}
+
+
+default_param["band"][1] = {
+ "sr": 11025,
+ "hl": 128,
+ "n_fft": 960,
+ "crop_start": 0,
+ "crop_stop": 245,
+ "lpf_start": 61, # inference only
+ "res_type": "polyphase",
+}
+
+default_param["band"][2] = {
+ "sr": 44100,
+ "hl": 512,
+ "n_fft": 1536,
+ "crop_start": 24,
+ "crop_stop": 547,
+ "hpf_start": 81, # inference only
+ "res_type": "sinc_best",
+}
+
+
+def int_keys(d):
+ r = {}
+ for k, v in d:
+ if k.isdigit():
+ k = int(k)
+ r[k] = v
+ return r
+
+
+class ModelParameters(object):
+ def __init__(self, config_path=""):
+ if ".pth" == pathlib.Path(config_path).suffix:
+ import zipfile
+
+ with zipfile.ZipFile(config_path, "r") as zip:
+ self.param = json.loads(
+ zip.read("param.json"), object_pairs_hook=int_keys
+ )
+ elif ".json" == pathlib.Path(config_path).suffix:
+ with open(config_path, "r") as f:
+ self.param = json.loads(f.read(), object_pairs_hook=int_keys)
+ else:
+ self.param = default_param
+
+ for k in [
+ "mid_side",
+ "mid_side_b",
+ "mid_side_b2",
+ "stereo_w",
+ "stereo_n",
+ "reverse",
+ ]:
+            if k not in self.param:
+ self.param[k] = False
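+
+# Usage sketch (the path is illustrative; any file under modelparams/ works):
+#
+#   mp = ModelParameters("lib/uvr5_pack/lib_v5/modelparams/4band_v2.json")
+#   for d in mp.param["band"]:  # band keys come back as ints via int_keys
+#       bp = mp.param["band"][d]
+#       print(d, bp["sr"], bp["hl"], bp["n_fft"])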
diff --git a/lib/uvr5_pack/lib_v5/modelparams/1band_sr16000_hl512.json b/lib/uvr5_pack/lib_v5/modelparams/1band_sr16000_hl512.json
new file mode 100644
index 000000000..72cb44998
--- /dev/null
+++ b/lib/uvr5_pack/lib_v5/modelparams/1band_sr16000_hl512.json
@@ -0,0 +1,19 @@
+{
+ "bins": 1024,
+ "unstable_bins": 0,
+ "reduction_bins": 0,
+ "band": {
+ "1": {
+ "sr": 16000,
+ "hl": 512,
+ "n_fft": 2048,
+ "crop_start": 0,
+ "crop_stop": 1024,
+ "hpf_start": -1,
+ "res_type": "sinc_best"
+ }
+ },
+ "sr": 16000,
+ "pre_filter_start": 1023,
+ "pre_filter_stop": 1024
+}
\ No newline at end of file
diff --git a/lib/uvr5_pack/lib_v5/modelparams/1band_sr32000_hl512.json b/lib/uvr5_pack/lib_v5/modelparams/1band_sr32000_hl512.json
new file mode 100644
index 000000000..3c00ecf0a
--- /dev/null
+++ b/lib/uvr5_pack/lib_v5/modelparams/1band_sr32000_hl512.json
@@ -0,0 +1,19 @@
+{
+ "bins": 1024,
+ "unstable_bins": 0,
+ "reduction_bins": 0,
+ "band": {
+ "1": {
+ "sr": 32000,
+ "hl": 512,
+ "n_fft": 2048,
+ "crop_start": 0,
+ "crop_stop": 1024,
+ "hpf_start": -1,
+ "res_type": "kaiser_fast"
+ }
+ },
+ "sr": 32000,
+ "pre_filter_start": 1000,
+ "pre_filter_stop": 1021
+}
\ No newline at end of file
diff --git a/lib/uvr5_pack/lib_v5/modelparams/1band_sr33075_hl384.json b/lib/uvr5_pack/lib_v5/modelparams/1band_sr33075_hl384.json
new file mode 100644
index 000000000..55666ac9a
--- /dev/null
+++ b/lib/uvr5_pack/lib_v5/modelparams/1band_sr33075_hl384.json
@@ -0,0 +1,19 @@
+{
+ "bins": 1024,
+ "unstable_bins": 0,
+ "reduction_bins": 0,
+ "band": {
+ "1": {
+ "sr": 33075,
+ "hl": 384,
+ "n_fft": 2048,
+ "crop_start": 0,
+ "crop_stop": 1024,
+ "hpf_start": -1,
+ "res_type": "sinc_best"
+ }
+ },
+ "sr": 33075,
+ "pre_filter_start": 1000,
+ "pre_filter_stop": 1021
+}
\ No newline at end of file
diff --git a/lib/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl1024.json b/lib/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl1024.json
new file mode 100644
index 000000000..665abe20e
--- /dev/null
+++ b/lib/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl1024.json
@@ -0,0 +1,19 @@
+{
+ "bins": 1024,
+ "unstable_bins": 0,
+ "reduction_bins": 0,
+ "band": {
+ "1": {
+ "sr": 44100,
+ "hl": 1024,
+ "n_fft": 2048,
+ "crop_start": 0,
+ "crop_stop": 1024,
+ "hpf_start": -1,
+ "res_type": "sinc_best"
+ }
+ },
+ "sr": 44100,
+ "pre_filter_start": 1023,
+ "pre_filter_stop": 1024
+}
\ No newline at end of file
diff --git a/lib/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl256.json b/lib/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl256.json
new file mode 100644
index 000000000..0e8b16f89
--- /dev/null
+++ b/lib/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl256.json
@@ -0,0 +1,19 @@
+{
+ "bins": 256,
+ "unstable_bins": 0,
+ "reduction_bins": 0,
+ "band": {
+ "1": {
+ "sr": 44100,
+ "hl": 256,
+ "n_fft": 512,
+ "crop_start": 0,
+ "crop_stop": 256,
+ "hpf_start": -1,
+ "res_type": "sinc_best"
+ }
+ },
+ "sr": 44100,
+ "pre_filter_start": 256,
+ "pre_filter_stop": 256
+}
\ No newline at end of file
diff --git a/lib/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl512.json b/lib/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl512.json
new file mode 100644
index 000000000..3b38fcaf6
--- /dev/null
+++ b/lib/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl512.json
@@ -0,0 +1,19 @@
+{
+ "bins": 1024,
+ "unstable_bins": 0,
+ "reduction_bins": 0,
+ "band": {
+ "1": {
+ "sr": 44100,
+ "hl": 512,
+ "n_fft": 2048,
+ "crop_start": 0,
+ "crop_stop": 1024,
+ "hpf_start": -1,
+ "res_type": "sinc_best"
+ }
+ },
+ "sr": 44100,
+ "pre_filter_start": 1023,
+ "pre_filter_stop": 1024
+}
\ No newline at end of file
diff --git a/lib/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl512_cut.json b/lib/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl512_cut.json
new file mode 100644
index 000000000..630df3524
--- /dev/null
+++ b/lib/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl512_cut.json
@@ -0,0 +1,19 @@
+{
+ "bins": 1024,
+ "unstable_bins": 0,
+ "reduction_bins": 0,
+ "band": {
+ "1": {
+ "sr": 44100,
+ "hl": 512,
+ "n_fft": 2048,
+ "crop_start": 0,
+ "crop_stop": 700,
+ "hpf_start": -1,
+ "res_type": "sinc_best"
+ }
+ },
+ "sr": 44100,
+ "pre_filter_start": 1023,
+ "pre_filter_stop": 700
+}
\ No newline at end of file
diff --git a/lib/uvr5_pack/lib_v5/modelparams/2band_32000.json b/lib/uvr5_pack/lib_v5/modelparams/2band_32000.json
new file mode 100644
index 000000000..ab9cf1150
--- /dev/null
+++ b/lib/uvr5_pack/lib_v5/modelparams/2band_32000.json
@@ -0,0 +1,30 @@
+{
+ "bins": 768,
+ "unstable_bins": 7,
+ "reduction_bins": 705,
+ "band": {
+ "1": {
+ "sr": 6000,
+ "hl": 66,
+ "n_fft": 512,
+ "crop_start": 0,
+ "crop_stop": 240,
+ "lpf_start": 60,
+ "lpf_stop": 118,
+ "res_type": "sinc_fastest"
+ },
+ "2": {
+ "sr": 32000,
+ "hl": 352,
+ "n_fft": 1024,
+ "crop_start": 22,
+ "crop_stop": 505,
+ "hpf_start": 44,
+ "hpf_stop": 23,
+ "res_type": "sinc_medium"
+ }
+ },
+ "sr": 32000,
+ "pre_filter_start": 710,
+ "pre_filter_stop": 731
+}
diff --git a/lib/uvr5_pack/lib_v5/modelparams/2band_44100_lofi.json b/lib/uvr5_pack/lib_v5/modelparams/2band_44100_lofi.json
new file mode 100644
index 000000000..7faa216d7
--- /dev/null
+++ b/lib/uvr5_pack/lib_v5/modelparams/2band_44100_lofi.json
@@ -0,0 +1,30 @@
+{
+ "bins": 512,
+ "unstable_bins": 7,
+ "reduction_bins": 510,
+ "band": {
+ "1": {
+ "sr": 11025,
+ "hl": 160,
+ "n_fft": 768,
+ "crop_start": 0,
+ "crop_stop": 192,
+ "lpf_start": 41,
+ "lpf_stop": 139,
+ "res_type": "sinc_fastest"
+ },
+ "2": {
+ "sr": 44100,
+ "hl": 640,
+ "n_fft": 1024,
+ "crop_start": 10,
+ "crop_stop": 320,
+ "hpf_start": 47,
+ "hpf_stop": 15,
+ "res_type": "sinc_medium"
+ }
+ },
+ "sr": 44100,
+ "pre_filter_start": 510,
+ "pre_filter_stop": 512
+}
diff --git a/lib/uvr5_pack/lib_v5/modelparams/2band_48000.json b/lib/uvr5_pack/lib_v5/modelparams/2band_48000.json
new file mode 100644
index 000000000..7e7817505
--- /dev/null
+++ b/lib/uvr5_pack/lib_v5/modelparams/2band_48000.json
@@ -0,0 +1,30 @@
+{
+ "bins": 768,
+ "unstable_bins": 7,
+ "reduction_bins": 705,
+ "band": {
+ "1": {
+ "sr": 6000,
+ "hl": 66,
+ "n_fft": 512,
+ "crop_start": 0,
+ "crop_stop": 240,
+ "lpf_start": 60,
+ "lpf_stop": 240,
+ "res_type": "sinc_fastest"
+ },
+ "2": {
+ "sr": 48000,
+ "hl": 528,
+ "n_fft": 1536,
+ "crop_start": 22,
+ "crop_stop": 505,
+ "hpf_start": 82,
+ "hpf_stop": 22,
+ "res_type": "sinc_medium"
+ }
+ },
+ "sr": 48000,
+ "pre_filter_start": 710,
+ "pre_filter_stop": 731
+}
\ No newline at end of file
diff --git a/lib/uvr5_pack/lib_v5/modelparams/3band_44100.json b/lib/uvr5_pack/lib_v5/modelparams/3band_44100.json
new file mode 100644
index 000000000..d881d767f
--- /dev/null
+++ b/lib/uvr5_pack/lib_v5/modelparams/3band_44100.json
@@ -0,0 +1,42 @@
+{
+ "bins": 768,
+ "unstable_bins": 5,
+ "reduction_bins": 733,
+ "band": {
+ "1": {
+ "sr": 11025,
+ "hl": 128,
+ "n_fft": 768,
+ "crop_start": 0,
+ "crop_stop": 278,
+ "lpf_start": 28,
+ "lpf_stop": 140,
+ "res_type": "polyphase"
+ },
+ "2": {
+ "sr": 22050,
+ "hl": 256,
+ "n_fft": 768,
+ "crop_start": 14,
+ "crop_stop": 322,
+ "hpf_start": 70,
+ "hpf_stop": 14,
+ "lpf_start": 283,
+ "lpf_stop": 314,
+ "res_type": "polyphase"
+ },
+ "3": {
+ "sr": 44100,
+ "hl": 512,
+ "n_fft": 768,
+ "crop_start": 131,
+ "crop_stop": 313,
+ "hpf_start": 154,
+ "hpf_stop": 141,
+ "res_type": "sinc_medium"
+ }
+ },
+ "sr": 44100,
+ "pre_filter_start": 757,
+ "pre_filter_stop": 768
+}
diff --git a/lib/uvr5_pack/lib_v5/modelparams/3band_44100_mid.json b/lib/uvr5_pack/lib_v5/modelparams/3band_44100_mid.json
new file mode 100644
index 000000000..77ec19857
--- /dev/null
+++ b/lib/uvr5_pack/lib_v5/modelparams/3band_44100_mid.json
@@ -0,0 +1,43 @@
+{
+ "mid_side": true,
+ "bins": 768,
+ "unstable_bins": 5,
+ "reduction_bins": 733,
+ "band": {
+ "1": {
+ "sr": 11025,
+ "hl": 128,
+ "n_fft": 768,
+ "crop_start": 0,
+ "crop_stop": 278,
+ "lpf_start": 28,
+ "lpf_stop": 140,
+ "res_type": "polyphase"
+ },
+ "2": {
+ "sr": 22050,
+ "hl": 256,
+ "n_fft": 768,
+ "crop_start": 14,
+ "crop_stop": 322,
+ "hpf_start": 70,
+ "hpf_stop": 14,
+ "lpf_start": 283,
+ "lpf_stop": 314,
+ "res_type": "polyphase"
+ },
+ "3": {
+ "sr": 44100,
+ "hl": 512,
+ "n_fft": 768,
+ "crop_start": 131,
+ "crop_stop": 313,
+ "hpf_start": 154,
+ "hpf_stop": 141,
+ "res_type": "sinc_medium"
+ }
+ },
+ "sr": 44100,
+ "pre_filter_start": 757,
+ "pre_filter_stop": 768
+}
diff --git a/lib/uvr5_pack/lib_v5/modelparams/3band_44100_msb2.json b/lib/uvr5_pack/lib_v5/modelparams/3band_44100_msb2.json
new file mode 100644
index 000000000..85ee8a7d4
--- /dev/null
+++ b/lib/uvr5_pack/lib_v5/modelparams/3band_44100_msb2.json
@@ -0,0 +1,43 @@
+{
+ "mid_side_b2": true,
+ "bins": 640,
+ "unstable_bins": 7,
+ "reduction_bins": 565,
+ "band": {
+ "1": {
+ "sr": 11025,
+ "hl": 108,
+ "n_fft": 1024,
+ "crop_start": 0,
+ "crop_stop": 187,
+ "lpf_start": 92,
+ "lpf_stop": 186,
+ "res_type": "polyphase"
+ },
+ "2": {
+ "sr": 22050,
+ "hl": 216,
+ "n_fft": 768,
+ "crop_start": 0,
+ "crop_stop": 212,
+ "hpf_start": 68,
+ "hpf_stop": 34,
+ "lpf_start": 174,
+ "lpf_stop": 209,
+ "res_type": "polyphase"
+ },
+ "3": {
+ "sr": 44100,
+ "hl": 432,
+ "n_fft": 640,
+ "crop_start": 66,
+ "crop_stop": 307,
+ "hpf_start": 86,
+ "hpf_stop": 72,
+ "res_type": "kaiser_fast"
+ }
+ },
+ "sr": 44100,
+ "pre_filter_start": 639,
+ "pre_filter_stop": 640
+}
diff --git a/lib/uvr5_pack/lib_v5/modelparams/4band_44100.json b/lib/uvr5_pack/lib_v5/modelparams/4band_44100.json
new file mode 100644
index 000000000..df1237542
--- /dev/null
+++ b/lib/uvr5_pack/lib_v5/modelparams/4band_44100.json
@@ -0,0 +1,54 @@
+{
+ "bins": 768,
+ "unstable_bins": 7,
+ "reduction_bins": 668,
+ "band": {
+ "1": {
+ "sr": 11025,
+ "hl": 128,
+ "n_fft": 1024,
+ "crop_start": 0,
+ "crop_stop": 186,
+ "lpf_start": 37,
+ "lpf_stop": 73,
+ "res_type": "polyphase"
+ },
+ "2": {
+ "sr": 11025,
+ "hl": 128,
+ "n_fft": 512,
+ "crop_start": 4,
+ "crop_stop": 185,
+ "hpf_start": 36,
+ "hpf_stop": 18,
+ "lpf_start": 93,
+ "lpf_stop": 185,
+ "res_type": "polyphase"
+ },
+ "3": {
+ "sr": 22050,
+ "hl": 256,
+ "n_fft": 512,
+ "crop_start": 46,
+ "crop_stop": 186,
+ "hpf_start": 93,
+ "hpf_stop": 46,
+ "lpf_start": 164,
+ "lpf_stop": 186,
+ "res_type": "polyphase"
+ },
+ "4": {
+ "sr": 44100,
+ "hl": 512,
+ "n_fft": 768,
+ "crop_start": 121,
+ "crop_stop": 382,
+ "hpf_start": 138,
+ "hpf_stop": 123,
+ "res_type": "sinc_medium"
+ }
+ },
+ "sr": 44100,
+ "pre_filter_start": 740,
+ "pre_filter_stop": 768
+}
diff --git a/lib/uvr5_pack/lib_v5/modelparams/4band_44100_mid.json b/lib/uvr5_pack/lib_v5/modelparams/4band_44100_mid.json
new file mode 100644
index 000000000..e91b699eb
--- /dev/null
+++ b/lib/uvr5_pack/lib_v5/modelparams/4band_44100_mid.json
@@ -0,0 +1,55 @@
+{
+ "bins": 768,
+ "unstable_bins": 7,
+ "mid_side": true,
+ "reduction_bins": 668,
+ "band": {
+ "1": {
+ "sr": 11025,
+ "hl": 128,
+ "n_fft": 1024,
+ "crop_start": 0,
+ "crop_stop": 186,
+ "lpf_start": 37,
+ "lpf_stop": 73,
+ "res_type": "polyphase"
+ },
+ "2": {
+ "sr": 11025,
+ "hl": 128,
+ "n_fft": 512,
+ "crop_start": 4,
+ "crop_stop": 185,
+ "hpf_start": 36,
+ "hpf_stop": 18,
+ "lpf_start": 93,
+ "lpf_stop": 185,
+ "res_type": "polyphase"
+ },
+ "3": {
+ "sr": 22050,
+ "hl": 256,
+ "n_fft": 512,
+ "crop_start": 46,
+ "crop_stop": 186,
+ "hpf_start": 93,
+ "hpf_stop": 46,
+ "lpf_start": 164,
+ "lpf_stop": 186,
+ "res_type": "polyphase"
+ },
+ "4": {
+ "sr": 44100,
+ "hl": 512,
+ "n_fft": 768,
+ "crop_start": 121,
+ "crop_stop": 382,
+ "hpf_start": 138,
+ "hpf_stop": 123,
+ "res_type": "sinc_medium"
+ }
+ },
+ "sr": 44100,
+ "pre_filter_start": 740,
+ "pre_filter_stop": 768
+}
diff --git a/lib/uvr5_pack/lib_v5/modelparams/4band_44100_msb.json b/lib/uvr5_pack/lib_v5/modelparams/4band_44100_msb.json
new file mode 100644
index 000000000..f852f280e
--- /dev/null
+++ b/lib/uvr5_pack/lib_v5/modelparams/4band_44100_msb.json
@@ -0,0 +1,55 @@
+{
+ "mid_side_b": true,
+ "bins": 768,
+ "unstable_bins": 7,
+ "reduction_bins": 668,
+ "band": {
+ "1": {
+ "sr": 11025,
+ "hl": 128,
+ "n_fft": 1024,
+ "crop_start": 0,
+ "crop_stop": 186,
+ "lpf_start": 37,
+ "lpf_stop": 73,
+ "res_type": "polyphase"
+ },
+ "2": {
+ "sr": 11025,
+ "hl": 128,
+ "n_fft": 512,
+ "crop_start": 4,
+ "crop_stop": 185,
+ "hpf_start": 36,
+ "hpf_stop": 18,
+ "lpf_start": 93,
+ "lpf_stop": 185,
+ "res_type": "polyphase"
+ },
+ "3": {
+ "sr": 22050,
+ "hl": 256,
+ "n_fft": 512,
+ "crop_start": 46,
+ "crop_stop": 186,
+ "hpf_start": 93,
+ "hpf_stop": 46,
+ "lpf_start": 164,
+ "lpf_stop": 186,
+ "res_type": "polyphase"
+ },
+ "4": {
+ "sr": 44100,
+ "hl": 512,
+ "n_fft": 768,
+ "crop_start": 121,
+ "crop_stop": 382,
+ "hpf_start": 138,
+ "hpf_stop": 123,
+ "res_type": "sinc_medium"
+ }
+ },
+ "sr": 44100,
+ "pre_filter_start": 740,
+ "pre_filter_stop": 768
+}
\ No newline at end of file
diff --git a/lib/uvr5_pack/lib_v5/modelparams/4band_44100_msb2.json b/lib/uvr5_pack/lib_v5/modelparams/4band_44100_msb2.json
new file mode 100644
index 000000000..f852f280e
--- /dev/null
+++ b/lib/uvr5_pack/lib_v5/modelparams/4band_44100_msb2.json
@@ -0,0 +1,55 @@
+{
+ "mid_side_b": true,
+ "bins": 768,
+ "unstable_bins": 7,
+ "reduction_bins": 668,
+ "band": {
+ "1": {
+ "sr": 11025,
+ "hl": 128,
+ "n_fft": 1024,
+ "crop_start": 0,
+ "crop_stop": 186,
+ "lpf_start": 37,
+ "lpf_stop": 73,
+ "res_type": "polyphase"
+ },
+ "2": {
+ "sr": 11025,
+ "hl": 128,
+ "n_fft": 512,
+ "crop_start": 4,
+ "crop_stop": 185,
+ "hpf_start": 36,
+ "hpf_stop": 18,
+ "lpf_start": 93,
+ "lpf_stop": 185,
+ "res_type": "polyphase"
+ },
+ "3": {
+ "sr": 22050,
+ "hl": 256,
+ "n_fft": 512,
+ "crop_start": 46,
+ "crop_stop": 186,
+ "hpf_start": 93,
+ "hpf_stop": 46,
+ "lpf_start": 164,
+ "lpf_stop": 186,
+ "res_type": "polyphase"
+ },
+ "4": {
+ "sr": 44100,
+ "hl": 512,
+ "n_fft": 768,
+ "crop_start": 121,
+ "crop_stop": 382,
+ "hpf_start": 138,
+ "hpf_stop": 123,
+ "res_type": "sinc_medium"
+ }
+ },
+ "sr": 44100,
+ "pre_filter_start": 740,
+ "pre_filter_stop": 768
+}
\ No newline at end of file
diff --git a/lib/uvr5_pack/lib_v5/modelparams/4band_44100_reverse.json b/lib/uvr5_pack/lib_v5/modelparams/4band_44100_reverse.json
new file mode 100644
index 000000000..7a07d5541
--- /dev/null
+++ b/lib/uvr5_pack/lib_v5/modelparams/4band_44100_reverse.json
@@ -0,0 +1,55 @@
+{
+ "reverse": true,
+ "bins": 768,
+ "unstable_bins": 7,
+ "reduction_bins": 668,
+ "band": {
+ "1": {
+ "sr": 11025,
+ "hl": 128,
+ "n_fft": 1024,
+ "crop_start": 0,
+ "crop_stop": 186,
+ "lpf_start": 37,
+ "lpf_stop": 73,
+ "res_type": "polyphase"
+ },
+ "2": {
+ "sr": 11025,
+ "hl": 128,
+ "n_fft": 512,
+ "crop_start": 4,
+ "crop_stop": 185,
+ "hpf_start": 36,
+ "hpf_stop": 18,
+ "lpf_start": 93,
+ "lpf_stop": 185,
+ "res_type": "polyphase"
+ },
+ "3": {
+ "sr": 22050,
+ "hl": 256,
+ "n_fft": 512,
+ "crop_start": 46,
+ "crop_stop": 186,
+ "hpf_start": 93,
+ "hpf_stop": 46,
+ "lpf_start": 164,
+ "lpf_stop": 186,
+ "res_type": "polyphase"
+ },
+ "4": {
+ "sr": 44100,
+ "hl": 512,
+ "n_fft": 768,
+ "crop_start": 121,
+ "crop_stop": 382,
+ "hpf_start": 138,
+ "hpf_stop": 123,
+ "res_type": "sinc_medium"
+ }
+ },
+ "sr": 44100,
+ "pre_filter_start": 740,
+ "pre_filter_stop": 768
+}
\ No newline at end of file
diff --git a/lib/uvr5_pack/lib_v5/modelparams/4band_44100_sw.json b/lib/uvr5_pack/lib_v5/modelparams/4band_44100_sw.json
new file mode 100644
index 000000000..ba0cf3421
--- /dev/null
+++ b/lib/uvr5_pack/lib_v5/modelparams/4band_44100_sw.json
@@ -0,0 +1,55 @@
+{
+ "stereo_w": true,
+ "bins": 768,
+ "unstable_bins": 7,
+ "reduction_bins": 668,
+ "band": {
+ "1": {
+ "sr": 11025,
+ "hl": 128,
+ "n_fft": 1024,
+ "crop_start": 0,
+ "crop_stop": 186,
+ "lpf_start": 37,
+ "lpf_stop": 73,
+ "res_type": "polyphase"
+ },
+ "2": {
+ "sr": 11025,
+ "hl": 128,
+ "n_fft": 512,
+ "crop_start": 4,
+ "crop_stop": 185,
+ "hpf_start": 36,
+ "hpf_stop": 18,
+ "lpf_start": 93,
+ "lpf_stop": 185,
+ "res_type": "polyphase"
+ },
+ "3": {
+ "sr": 22050,
+ "hl": 256,
+ "n_fft": 512,
+ "crop_start": 46,
+ "crop_stop": 186,
+ "hpf_start": 93,
+ "hpf_stop": 46,
+ "lpf_start": 164,
+ "lpf_stop": 186,
+ "res_type": "polyphase"
+ },
+ "4": {
+ "sr": 44100,
+ "hl": 512,
+ "n_fft": 768,
+ "crop_start": 121,
+ "crop_stop": 382,
+ "hpf_start": 138,
+ "hpf_stop": 123,
+ "res_type": "sinc_medium"
+ }
+ },
+ "sr": 44100,
+ "pre_filter_start": 740,
+ "pre_filter_stop": 768
+}
\ No newline at end of file
diff --git a/lib/uvr5_pack/lib_v5/modelparams/4band_v2.json b/lib/uvr5_pack/lib_v5/modelparams/4band_v2.json
new file mode 100644
index 000000000..33281a0cf
--- /dev/null
+++ b/lib/uvr5_pack/lib_v5/modelparams/4band_v2.json
@@ -0,0 +1,54 @@
+{
+ "bins": 672,
+ "unstable_bins": 8,
+ "reduction_bins": 637,
+ "band": {
+ "1": {
+ "sr": 7350,
+ "hl": 80,
+ "n_fft": 640,
+ "crop_start": 0,
+ "crop_stop": 85,
+ "lpf_start": 25,
+ "lpf_stop": 53,
+ "res_type": "polyphase"
+ },
+ "2": {
+ "sr": 7350,
+ "hl": 80,
+ "n_fft": 320,
+ "crop_start": 4,
+ "crop_stop": 87,
+ "hpf_start": 25,
+ "hpf_stop": 12,
+ "lpf_start": 31,
+ "lpf_stop": 62,
+ "res_type": "polyphase"
+ },
+ "3": {
+ "sr": 14700,
+ "hl": 160,
+ "n_fft": 512,
+ "crop_start": 17,
+ "crop_stop": 216,
+ "hpf_start": 48,
+ "hpf_stop": 24,
+ "lpf_start": 139,
+ "lpf_stop": 210,
+ "res_type": "polyphase"
+ },
+ "4": {
+ "sr": 44100,
+ "hl": 480,
+ "n_fft": 960,
+ "crop_start": 78,
+ "crop_stop": 383,
+ "hpf_start": 130,
+ "hpf_stop": 86,
+ "res_type": "kaiser_fast"
+ }
+ },
+ "sr": 44100,
+ "pre_filter_start": 668,
+ "pre_filter_stop": 672
+}
\ No newline at end of file
diff --git a/lib/uvr5_pack/lib_v5/modelparams/4band_v2_sn.json b/lib/uvr5_pack/lib_v5/modelparams/4band_v2_sn.json
new file mode 100644
index 000000000..2e5c770fe
--- /dev/null
+++ b/lib/uvr5_pack/lib_v5/modelparams/4band_v2_sn.json
@@ -0,0 +1,55 @@
+{
+ "bins": 672,
+ "unstable_bins": 8,
+ "reduction_bins": 637,
+ "band": {
+ "1": {
+ "sr": 7350,
+ "hl": 80,
+ "n_fft": 640,
+ "crop_start": 0,
+ "crop_stop": 85,
+ "lpf_start": 25,
+ "lpf_stop": 53,
+ "res_type": "polyphase"
+ },
+ "2": {
+ "sr": 7350,
+ "hl": 80,
+ "n_fft": 320,
+ "crop_start": 4,
+ "crop_stop": 87,
+ "hpf_start": 25,
+ "hpf_stop": 12,
+ "lpf_start": 31,
+ "lpf_stop": 62,
+ "res_type": "polyphase"
+ },
+ "3": {
+ "sr": 14700,
+ "hl": 160,
+ "n_fft": 512,
+ "crop_start": 17,
+ "crop_stop": 216,
+ "hpf_start": 48,
+ "hpf_stop": 24,
+ "lpf_start": 139,
+ "lpf_stop": 210,
+ "res_type": "polyphase"
+ },
+ "4": {
+ "sr": 44100,
+ "hl": 480,
+ "n_fft": 960,
+ "crop_start": 78,
+ "crop_stop": 383,
+ "hpf_start": 130,
+ "hpf_stop": 86,
+ "convert_channels": "stereo_n",
+ "res_type": "kaiser_fast"
+ }
+ },
+ "sr": 44100,
+ "pre_filter_start": 668,
+ "pre_filter_stop": 672
+}
\ No newline at end of file
diff --git a/lib/uvr5_pack/lib_v5/modelparams/4band_v3.json b/lib/uvr5_pack/lib_v5/modelparams/4band_v3.json
new file mode 100644
index 000000000..edb908b88
--- /dev/null
+++ b/lib/uvr5_pack/lib_v5/modelparams/4band_v3.json
@@ -0,0 +1,54 @@
+{
+ "bins": 672,
+ "unstable_bins": 8,
+ "reduction_bins": 530,
+ "band": {
+ "1": {
+ "sr": 7350,
+ "hl": 80,
+ "n_fft": 640,
+ "crop_start": 0,
+ "crop_stop": 85,
+ "lpf_start": 25,
+ "lpf_stop": 53,
+ "res_type": "polyphase"
+ },
+ "2": {
+ "sr": 7350,
+ "hl": 80,
+ "n_fft": 320,
+ "crop_start": 4,
+ "crop_stop": 87,
+ "hpf_start": 25,
+ "hpf_stop": 12,
+ "lpf_start": 31,
+ "lpf_stop": 62,
+ "res_type": "polyphase"
+ },
+ "3": {
+ "sr": 14700,
+ "hl": 160,
+ "n_fft": 512,
+ "crop_start": 17,
+ "crop_stop": 216,
+ "hpf_start": 48,
+ "hpf_stop": 24,
+ "lpf_start": 139,
+ "lpf_stop": 210,
+ "res_type": "polyphase"
+ },
+ "4": {
+ "sr": 44100,
+ "hl": 480,
+ "n_fft": 960,
+ "crop_start": 78,
+ "crop_stop": 383,
+ "hpf_start": 130,
+ "hpf_stop": 86,
+ "res_type": "kaiser_fast"
+ }
+ },
+ "sr": 44100,
+ "pre_filter_start": 668,
+ "pre_filter_stop": 672
+}
\ No newline at end of file
diff --git a/lib/uvr5_pack/lib_v5/modelparams/ensemble.json b/lib/uvr5_pack/lib_v5/modelparams/ensemble.json
new file mode 100644
index 000000000..ee69beb46
--- /dev/null
+++ b/lib/uvr5_pack/lib_v5/modelparams/ensemble.json
@@ -0,0 +1,43 @@
+{
+ "mid_side_b2": true,
+ "bins": 1280,
+ "unstable_bins": 7,
+ "reduction_bins": 565,
+ "band": {
+ "1": {
+ "sr": 11025,
+ "hl": 108,
+ "n_fft": 2048,
+ "crop_start": 0,
+ "crop_stop": 374,
+ "lpf_start": 92,
+ "lpf_stop": 186,
+ "res_type": "polyphase"
+ },
+ "2": {
+ "sr": 22050,
+ "hl": 216,
+ "n_fft": 1536,
+ "crop_start": 0,
+ "crop_stop": 424,
+ "hpf_start": 68,
+ "hpf_stop": 34,
+ "lpf_start": 348,
+ "lpf_stop": 418,
+ "res_type": "polyphase"
+ },
+ "3": {
+ "sr": 44100,
+ "hl": 432,
+ "n_fft": 1280,
+ "crop_start": 132,
+ "crop_stop": 614,
+ "hpf_start": 172,
+ "hpf_stop": 144,
+ "res_type": "polyphase"
+ }
+ },
+ "sr": 44100,
+ "pre_filter_start": 1280,
+ "pre_filter_stop": 1280
+}
\ No newline at end of file
diff --git a/lib/uvr5_pack/lib_v5/nets.py b/lib/uvr5_pack/lib_v5/nets.py
new file mode 100644
index 000000000..db4c5e339
--- /dev/null
+++ b/lib/uvr5_pack/lib_v5/nets.py
@@ -0,0 +1,123 @@
+import torch
+from torch import nn
+import torch.nn.functional as F
+
+from . import layers
+from . import spec_utils
+
+
+class BaseASPPNet(nn.Module):
+ def __init__(self, nin, ch, dilations=(4, 8, 16)):
+ super(BaseASPPNet, self).__init__()
+ self.enc1 = layers.Encoder(nin, ch, 3, 2, 1)
+ self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1)
+ self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1)
+ self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1)
+
+ self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations)
+
+ self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1)
+ self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1)
+ self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1)
+ self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1)
+
+ def __call__(self, x):
+ h, e1 = self.enc1(x)
+ h, e2 = self.enc2(h)
+ h, e3 = self.enc3(h)
+ h, e4 = self.enc4(h)
+
+ h = self.aspp(h)
+
+ h = self.dec4(h, e4)
+ h = self.dec3(h, e3)
+ h = self.dec2(h, e2)
+ h = self.dec1(h, e1)
+
+ return h
+
+
+class CascadedASPPNet(nn.Module):
+ def __init__(self, n_fft):
+ super(CascadedASPPNet, self).__init__()
+ self.stg1_low_band_net = BaseASPPNet(2, 16)
+ self.stg1_high_band_net = BaseASPPNet(2, 16)
+
+ self.stg2_bridge = layers.Conv2DBNActiv(18, 8, 1, 1, 0)
+ self.stg2_full_band_net = BaseASPPNet(8, 16)
+
+ self.stg3_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0)
+ self.stg3_full_band_net = BaseASPPNet(16, 32)
+
+ self.out = nn.Conv2d(32, 2, 1, bias=False)
+ self.aux1_out = nn.Conv2d(16, 2, 1, bias=False)
+ self.aux2_out = nn.Conv2d(16, 2, 1, bias=False)
+
+ self.max_bin = n_fft // 2
+ self.output_bin = n_fft // 2 + 1
+
+ self.offset = 128
+
+ def forward(self, x, aggressiveness=None):
+ mix = x.detach()
+ x = x.clone()
+
+ x = x[:, :, : self.max_bin]
+
+ bandw = x.size()[2] // 2
+ aux1 = torch.cat(
+ [
+ self.stg1_low_band_net(x[:, :, :bandw]),
+ self.stg1_high_band_net(x[:, :, bandw:]),
+ ],
+ dim=2,
+ )
+
+ h = torch.cat([x, aux1], dim=1)
+ aux2 = self.stg2_full_band_net(self.stg2_bridge(h))
+
+ h = torch.cat([x, aux1, aux2], dim=1)
+ h = self.stg3_full_band_net(self.stg3_bridge(h))
+
+ mask = torch.sigmoid(self.out(h))
+ mask = F.pad(
+ input=mask,
+ pad=(0, 0, 0, self.output_bin - mask.size()[2]),
+ mode="replicate",
+ )
+
+ if self.training:
+ aux1 = torch.sigmoid(self.aux1_out(aux1))
+ aux1 = F.pad(
+ input=aux1,
+ pad=(0, 0, 0, self.output_bin - aux1.size()[2]),
+ mode="replicate",
+ )
+ aux2 = torch.sigmoid(self.aux2_out(aux2))
+ aux2 = F.pad(
+ input=aux2,
+ pad=(0, 0, 0, self.output_bin - aux2.size()[2]),
+ mode="replicate",
+ )
+ return mask * mix, aux1 * mix, aux2 * mix
+ else:
+ if aggressiveness:
+ mask[:, :, : aggressiveness["split_bin"]] = torch.pow(
+ mask[:, :, : aggressiveness["split_bin"]],
+ 1 + aggressiveness["value"] / 3,
+ )
+ mask[:, :, aggressiveness["split_bin"] :] = torch.pow(
+ mask[:, :, aggressiveness["split_bin"] :],
+ 1 + aggressiveness["value"],
+ )
+
+ return mask * mix
+
+ def predict(self, x_mag, aggressiveness=None):
+ h = self.forward(x_mag, aggressiveness)
+
+ if self.offset > 0:
+ h = h[:, :, :, self.offset : -self.offset]
+ assert h.size()[3] > 0
+
+ return h
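+
+# Inference notes (shared by all CascadedASPPNet variants in this package):
+# the sigmoid mask lies in (0, 1), so raising it to a power > 1 pushes small
+# values toward zero; aggressiveness["value"] therefore suppresses bleed
+# harder, with a gentler exponent below split_bin (the low-frequency bins).
+# predict() drops `offset` frames from each time border, where the padded
+# STFT context makes the mask least reliable.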
diff --git a/lib/uvr5_pack/lib_v5/nets_123812KB.py b/lib/uvr5_pack/lib_v5/nets_123812KB.py
new file mode 100644
index 000000000..becbfae85
--- /dev/null
+++ b/lib/uvr5_pack/lib_v5/nets_123812KB.py
@@ -0,0 +1,122 @@
+import torch
+from torch import nn
+import torch.nn.functional as F
+
+from . import layers_123821KB as layers
+
+
+class BaseASPPNet(nn.Module):
+ def __init__(self, nin, ch, dilations=(4, 8, 16)):
+ super(BaseASPPNet, self).__init__()
+ self.enc1 = layers.Encoder(nin, ch, 3, 2, 1)
+ self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1)
+ self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1)
+ self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1)
+
+ self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations)
+
+ self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1)
+ self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1)
+ self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1)
+ self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1)
+
+ def __call__(self, x):
+ h, e1 = self.enc1(x)
+ h, e2 = self.enc2(h)
+ h, e3 = self.enc3(h)
+ h, e4 = self.enc4(h)
+
+ h = self.aspp(h)
+
+ h = self.dec4(h, e4)
+ h = self.dec3(h, e3)
+ h = self.dec2(h, e2)
+ h = self.dec1(h, e1)
+
+ return h
+
+
+class CascadedASPPNet(nn.Module):
+ def __init__(self, n_fft):
+ super(CascadedASPPNet, self).__init__()
+ self.stg1_low_band_net = BaseASPPNet(2, 32)
+ self.stg1_high_band_net = BaseASPPNet(2, 32)
+
+ self.stg2_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0)
+ self.stg2_full_band_net = BaseASPPNet(16, 32)
+
+ self.stg3_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0)
+ self.stg3_full_band_net = BaseASPPNet(32, 64)
+
+ self.out = nn.Conv2d(64, 2, 1, bias=False)
+ self.aux1_out = nn.Conv2d(32, 2, 1, bias=False)
+ self.aux2_out = nn.Conv2d(32, 2, 1, bias=False)
+
+ self.max_bin = n_fft // 2
+ self.output_bin = n_fft // 2 + 1
+
+ self.offset = 128
+
+ def forward(self, x, aggressiveness=None):
+ mix = x.detach()
+ x = x.clone()
+
+ x = x[:, :, : self.max_bin]
+
+ bandw = x.size()[2] // 2
+ aux1 = torch.cat(
+ [
+ self.stg1_low_band_net(x[:, :, :bandw]),
+ self.stg1_high_band_net(x[:, :, bandw:]),
+ ],
+ dim=2,
+ )
+
+ h = torch.cat([x, aux1], dim=1)
+ aux2 = self.stg2_full_band_net(self.stg2_bridge(h))
+
+ h = torch.cat([x, aux1, aux2], dim=1)
+ h = self.stg3_full_band_net(self.stg3_bridge(h))
+
+ mask = torch.sigmoid(self.out(h))
+ mask = F.pad(
+ input=mask,
+ pad=(0, 0, 0, self.output_bin - mask.size()[2]),
+ mode="replicate",
+ )
+
+ if self.training:
+ aux1 = torch.sigmoid(self.aux1_out(aux1))
+ aux1 = F.pad(
+ input=aux1,
+ pad=(0, 0, 0, self.output_bin - aux1.size()[2]),
+ mode="replicate",
+ )
+ aux2 = torch.sigmoid(self.aux2_out(aux2))
+ aux2 = F.pad(
+ input=aux2,
+ pad=(0, 0, 0, self.output_bin - aux2.size()[2]),
+ mode="replicate",
+ )
+ return mask * mix, aux1 * mix, aux2 * mix
+ else:
+ if aggressiveness:
+ mask[:, :, : aggressiveness["split_bin"]] = torch.pow(
+ mask[:, :, : aggressiveness["split_bin"]],
+ 1 + aggressiveness["value"] / 3,
+ )
+ mask[:, :, aggressiveness["split_bin"] :] = torch.pow(
+ mask[:, :, aggressiveness["split_bin"] :],
+ 1 + aggressiveness["value"],
+ )
+
+ return mask * mix
+
+ def predict(self, x_mag, aggressiveness=None):
+ h = self.forward(x_mag, aggressiveness)
+
+ if self.offset > 0:
+ h = h[:, :, :, self.offset : -self.offset]
+ assert h.size()[3] > 0
+
+ return h
diff --git a/lib/uvr5_pack/lib_v5/nets_123821KB.py b/lib/uvr5_pack/lib_v5/nets_123821KB.py
new file mode 100644
index 000000000..becbfae85
--- /dev/null
+++ b/lib/uvr5_pack/lib_v5/nets_123821KB.py
@@ -0,0 +1,122 @@
+import torch
+from torch import nn
+import torch.nn.functional as F
+
+from . import layers_123821KB as layers
+
+
+class BaseASPPNet(nn.Module):
+ def __init__(self, nin, ch, dilations=(4, 8, 16)):
+ super(BaseASPPNet, self).__init__()
+ self.enc1 = layers.Encoder(nin, ch, 3, 2, 1)
+ self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1)
+ self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1)
+ self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1)
+
+ self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations)
+
+ self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1)
+ self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1)
+ self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1)
+ self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1)
+
+ def __call__(self, x):
+ h, e1 = self.enc1(x)
+ h, e2 = self.enc2(h)
+ h, e3 = self.enc3(h)
+ h, e4 = self.enc4(h)
+
+ h = self.aspp(h)
+
+ h = self.dec4(h, e4)
+ h = self.dec3(h, e3)
+ h = self.dec2(h, e2)
+ h = self.dec1(h, e1)
+
+ return h
+
+
+class CascadedASPPNet(nn.Module):
+ def __init__(self, n_fft):
+ super(CascadedASPPNet, self).__init__()
+ self.stg1_low_band_net = BaseASPPNet(2, 32)
+ self.stg1_high_band_net = BaseASPPNet(2, 32)
+
+ self.stg2_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0)
+ self.stg2_full_band_net = BaseASPPNet(16, 32)
+
+ self.stg3_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0)
+ self.stg3_full_band_net = BaseASPPNet(32, 64)
+
+ self.out = nn.Conv2d(64, 2, 1, bias=False)
+ self.aux1_out = nn.Conv2d(32, 2, 1, bias=False)
+ self.aux2_out = nn.Conv2d(32, 2, 1, bias=False)
+
+ self.max_bin = n_fft // 2
+ self.output_bin = n_fft // 2 + 1
+
+ self.offset = 128
+
+ def forward(self, x, aggressiveness=None):
+ mix = x.detach()
+ x = x.clone()
+
+ x = x[:, :, : self.max_bin]
+
+ bandw = x.size()[2] // 2
+ aux1 = torch.cat(
+ [
+ self.stg1_low_band_net(x[:, :, :bandw]),
+ self.stg1_high_band_net(x[:, :, bandw:]),
+ ],
+ dim=2,
+ )
+
+ h = torch.cat([x, aux1], dim=1)
+ aux2 = self.stg2_full_band_net(self.stg2_bridge(h))
+
+ h = torch.cat([x, aux1, aux2], dim=1)
+ h = self.stg3_full_band_net(self.stg3_bridge(h))
+
+ mask = torch.sigmoid(self.out(h))
+ mask = F.pad(
+ input=mask,
+ pad=(0, 0, 0, self.output_bin - mask.size()[2]),
+ mode="replicate",
+ )
+
+ if self.training:
+ aux1 = torch.sigmoid(self.aux1_out(aux1))
+ aux1 = F.pad(
+ input=aux1,
+ pad=(0, 0, 0, self.output_bin - aux1.size()[2]),
+ mode="replicate",
+ )
+ aux2 = torch.sigmoid(self.aux2_out(aux2))
+ aux2 = F.pad(
+ input=aux2,
+ pad=(0, 0, 0, self.output_bin - aux2.size()[2]),
+ mode="replicate",
+ )
+ return mask * mix, aux1 * mix, aux2 * mix
+ else:
+ if aggressiveness:
+ mask[:, :, : aggressiveness["split_bin"]] = torch.pow(
+ mask[:, :, : aggressiveness["split_bin"]],
+ 1 + aggressiveness["value"] / 3,
+ )
+ mask[:, :, aggressiveness["split_bin"] :] = torch.pow(
+ mask[:, :, aggressiveness["split_bin"] :],
+ 1 + aggressiveness["value"],
+ )
+
+ return mask * mix
+
+ def predict(self, x_mag, aggressiveness=None):
+ h = self.forward(x_mag, aggressiveness)
+
+ if self.offset > 0:
+ h = h[:, :, :, self.offset : -self.offset]
+ assert h.size()[3] > 0
+
+ return h
diff --git a/lib/uvr5_pack/lib_v5/nets_33966KB.py b/lib/uvr5_pack/lib_v5/nets_33966KB.py
new file mode 100644
index 000000000..b8986f968
--- /dev/null
+++ b/lib/uvr5_pack/lib_v5/nets_33966KB.py
@@ -0,0 +1,122 @@
+import torch
+from torch import nn
+import torch.nn.functional as F
+
+from . import layers_33966KB as layers
+
+
+class BaseASPPNet(nn.Module):
+ def __init__(self, nin, ch, dilations=(4, 8, 16, 32)):
+ super(BaseASPPNet, self).__init__()
+ self.enc1 = layers.Encoder(nin, ch, 3, 2, 1)
+ self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1)
+ self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1)
+ self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1)
+
+ self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations)
+
+ self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1)
+ self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1)
+ self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1)
+ self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1)
+
+ def __call__(self, x):
+ h, e1 = self.enc1(x)
+ h, e2 = self.enc2(h)
+ h, e3 = self.enc3(h)
+ h, e4 = self.enc4(h)
+
+ h = self.aspp(h)
+
+ h = self.dec4(h, e4)
+ h = self.dec3(h, e3)
+ h = self.dec2(h, e2)
+ h = self.dec1(h, e1)
+
+ return h
+
+
+class CascadedASPPNet(nn.Module):
+ def __init__(self, n_fft):
+ super(CascadedASPPNet, self).__init__()
+ self.stg1_low_band_net = BaseASPPNet(2, 16)
+ self.stg1_high_band_net = BaseASPPNet(2, 16)
+
+ self.stg2_bridge = layers.Conv2DBNActiv(18, 8, 1, 1, 0)
+ self.stg2_full_band_net = BaseASPPNet(8, 16)
+
+ self.stg3_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0)
+ self.stg3_full_band_net = BaseASPPNet(16, 32)
+
+ self.out = nn.Conv2d(32, 2, 1, bias=False)
+ self.aux1_out = nn.Conv2d(16, 2, 1, bias=False)
+ self.aux2_out = nn.Conv2d(16, 2, 1, bias=False)
+
+ self.max_bin = n_fft // 2
+ self.output_bin = n_fft // 2 + 1
+
+ self.offset = 128
+
+ def forward(self, x, aggressiveness=None):
+ mix = x.detach()
+ x = x.clone()
+
+ x = x[:, :, : self.max_bin]
+
+ bandw = x.size()[2] // 2
+ aux1 = torch.cat(
+ [
+ self.stg1_low_band_net(x[:, :, :bandw]),
+ self.stg1_high_band_net(x[:, :, bandw:]),
+ ],
+ dim=2,
+ )
+
+ h = torch.cat([x, aux1], dim=1)
+ aux2 = self.stg2_full_band_net(self.stg2_bridge(h))
+
+ h = torch.cat([x, aux1, aux2], dim=1)
+ h = self.stg3_full_band_net(self.stg3_bridge(h))
+
+ mask = torch.sigmoid(self.out(h))
+ mask = F.pad(
+ input=mask,
+ pad=(0, 0, 0, self.output_bin - mask.size()[2]),
+ mode="replicate",
+ )
+
+ if self.training:
+ aux1 = torch.sigmoid(self.aux1_out(aux1))
+ aux1 = F.pad(
+ input=aux1,
+ pad=(0, 0, 0, self.output_bin - aux1.size()[2]),
+ mode="replicate",
+ )
+ aux2 = torch.sigmoid(self.aux2_out(aux2))
+ aux2 = F.pad(
+ input=aux2,
+ pad=(0, 0, 0, self.output_bin - aux2.size()[2]),
+ mode="replicate",
+ )
+ return mask * mix, aux1 * mix, aux2 * mix
+ else:
+ if aggressiveness:
+ mask[:, :, : aggressiveness["split_bin"]] = torch.pow(
+ mask[:, :, : aggressiveness["split_bin"]],
+ 1 + aggressiveness["value"] / 3,
+ )
+ mask[:, :, aggressiveness["split_bin"] :] = torch.pow(
+ mask[:, :, aggressiveness["split_bin"] :],
+ 1 + aggressiveness["value"],
+ )
+
+ return mask * mix
+
+ def predict(self, x_mag, aggressiveness=None):
+ h = self.forward(x_mag, aggressiveness)
+
+ if self.offset > 0:
+ h = h[:, :, :, self.offset : -self.offset]
+ assert h.size()[3] > 0
+
+ return h
diff --git a/lib/uvr5_pack/lib_v5/nets_537227KB.py b/lib/uvr5_pack/lib_v5/nets_537227KB.py
new file mode 100644
index 000000000..a1bb530e0
--- /dev/null
+++ b/lib/uvr5_pack/lib_v5/nets_537227KB.py
@@ -0,0 +1,123 @@
+import torch
+import numpy as np
+from torch import nn
+import torch.nn.functional as F
+
+from . import layers_537238KB as layers
+
+
+class BaseASPPNet(nn.Module):
+ def __init__(self, nin, ch, dilations=(4, 8, 16)):
+ super(BaseASPPNet, self).__init__()
+ self.enc1 = layers.Encoder(nin, ch, 3, 2, 1)
+ self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1)
+ self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1)
+ self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1)
+
+ self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations)
+
+ self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1)
+ self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1)
+ self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1)
+ self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1)
+
+ def __call__(self, x):
+ h, e1 = self.enc1(x)
+ h, e2 = self.enc2(h)
+ h, e3 = self.enc3(h)
+ h, e4 = self.enc4(h)
+
+ h = self.aspp(h)
+
+ h = self.dec4(h, e4)
+ h = self.dec3(h, e3)
+ h = self.dec2(h, e2)
+ h = self.dec1(h, e1)
+
+ return h
+
+
+class CascadedASPPNet(nn.Module):
+ def __init__(self, n_fft):
+ super(CascadedASPPNet, self).__init__()
+ self.stg1_low_band_net = BaseASPPNet(2, 64)
+ self.stg1_high_band_net = BaseASPPNet(2, 64)
+
+ self.stg2_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0)
+ self.stg2_full_band_net = BaseASPPNet(32, 64)
+
+ self.stg3_bridge = layers.Conv2DBNActiv(130, 64, 1, 1, 0)
+ self.stg3_full_band_net = BaseASPPNet(64, 128)
+
+ self.out = nn.Conv2d(128, 2, 1, bias=False)
+ self.aux1_out = nn.Conv2d(64, 2, 1, bias=False)
+ self.aux2_out = nn.Conv2d(64, 2, 1, bias=False)
+
+ self.max_bin = n_fft // 2
+ self.output_bin = n_fft // 2 + 1
+
+ self.offset = 128
+
+ def forward(self, x, aggressiveness=None):
+ mix = x.detach()
+ x = x.clone()
+
+ x = x[:, :, : self.max_bin]
+
+ bandw = x.size()[2] // 2
+ aux1 = torch.cat(
+ [
+ self.stg1_low_band_net(x[:, :, :bandw]),
+ self.stg1_high_band_net(x[:, :, bandw:]),
+ ],
+ dim=2,
+ )
+
+ h = torch.cat([x, aux1], dim=1)
+ aux2 = self.stg2_full_band_net(self.stg2_bridge(h))
+
+ h = torch.cat([x, aux1, aux2], dim=1)
+ h = self.stg3_full_band_net(self.stg3_bridge(h))
+
+ mask = torch.sigmoid(self.out(h))
+ mask = F.pad(
+ input=mask,
+ pad=(0, 0, 0, self.output_bin - mask.size()[2]),
+ mode="replicate",
+ )
+
+ if self.training:
+ aux1 = torch.sigmoid(self.aux1_out(aux1))
+ aux1 = F.pad(
+ input=aux1,
+ pad=(0, 0, 0, self.output_bin - aux1.size()[2]),
+ mode="replicate",
+ )
+ aux2 = torch.sigmoid(self.aux2_out(aux2))
+ aux2 = F.pad(
+ input=aux2,
+ pad=(0, 0, 0, self.output_bin - aux2.size()[2]),
+ mode="replicate",
+ )
+ return mask * mix, aux1 * mix, aux2 * mix
+ else:
+ if aggressiveness:
+ mask[:, :, : aggressiveness["split_bin"]] = torch.pow(
+ mask[:, :, : aggressiveness["split_bin"]],
+ 1 + aggressiveness["value"] / 3,
+ )
+ mask[:, :, aggressiveness["split_bin"] :] = torch.pow(
+ mask[:, :, aggressiveness["split_bin"] :],
+ 1 + aggressiveness["value"],
+ )
+
+ return mask * mix
+
+ def predict(self, x_mag, aggressiveness=None):
+ h = self.forward(x_mag, aggressiveness)
+
+ if self.offset > 0:
+ h = h[:, :, :, self.offset : -self.offset]
+ assert h.size()[3] > 0
+
+ return h
diff --git a/lib/uvr5_pack/lib_v5/nets_537238KB.py b/lib/uvr5_pack/lib_v5/nets_537238KB.py
new file mode 100644
index 000000000..a1bb530e0
--- /dev/null
+++ b/lib/uvr5_pack/lib_v5/nets_537238KB.py
@@ -0,0 +1,123 @@
+import torch
+import numpy as np
+from torch import nn
+import torch.nn.functional as F
+
+from . import layers_537238KB as layers
+
+
+class BaseASPPNet(nn.Module):
+ def __init__(self, nin, ch, dilations=(4, 8, 16)):
+ super(BaseASPPNet, self).__init__()
+ self.enc1 = layers.Encoder(nin, ch, 3, 2, 1)
+ self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1)
+ self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1)
+ self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1)
+
+ self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations)
+
+ self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1)
+ self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1)
+ self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1)
+ self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1)
+
+ def __call__(self, x):
+ h, e1 = self.enc1(x)
+ h, e2 = self.enc2(h)
+ h, e3 = self.enc3(h)
+ h, e4 = self.enc4(h)
+
+ h = self.aspp(h)
+
+ h = self.dec4(h, e4)
+ h = self.dec3(h, e3)
+ h = self.dec2(h, e2)
+ h = self.dec1(h, e1)
+
+ return h
+
+
+class CascadedASPPNet(nn.Module):
+ def __init__(self, n_fft):
+ super(CascadedASPPNet, self).__init__()
+ self.stg1_low_band_net = BaseASPPNet(2, 64)
+ self.stg1_high_band_net = BaseASPPNet(2, 64)
+
+ self.stg2_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0)
+ self.stg2_full_band_net = BaseASPPNet(32, 64)
+
+ self.stg3_bridge = layers.Conv2DBNActiv(130, 64, 1, 1, 0)
+ self.stg3_full_band_net = BaseASPPNet(64, 128)
+
+ self.out = nn.Conv2d(128, 2, 1, bias=False)
+ self.aux1_out = nn.Conv2d(64, 2, 1, bias=False)
+ self.aux2_out = nn.Conv2d(64, 2, 1, bias=False)
+
+ self.max_bin = n_fft // 2
+ self.output_bin = n_fft // 2 + 1
+
+ self.offset = 128
+
+ def forward(self, x, aggressiveness=None):
+ mix = x.detach()
+ x = x.clone()
+
+ x = x[:, :, : self.max_bin]
+
+ bandw = x.size()[2] // 2
+ aux1 = torch.cat(
+ [
+ self.stg1_low_band_net(x[:, :, :bandw]),
+ self.stg1_high_band_net(x[:, :, bandw:]),
+ ],
+ dim=2,
+ )
+
+ h = torch.cat([x, aux1], dim=1)
+ aux2 = self.stg2_full_band_net(self.stg2_bridge(h))
+
+ h = torch.cat([x, aux1, aux2], dim=1)
+ h = self.stg3_full_band_net(self.stg3_bridge(h))
+
+ mask = torch.sigmoid(self.out(h))
+ mask = F.pad(
+ input=mask,
+ pad=(0, 0, 0, self.output_bin - mask.size()[2]),
+ mode="replicate",
+ )
+
+ if self.training:
+ aux1 = torch.sigmoid(self.aux1_out(aux1))
+ aux1 = F.pad(
+ input=aux1,
+ pad=(0, 0, 0, self.output_bin - aux1.size()[2]),
+ mode="replicate",
+ )
+ aux2 = torch.sigmoid(self.aux2_out(aux2))
+ aux2 = F.pad(
+ input=aux2,
+ pad=(0, 0, 0, self.output_bin - aux2.size()[2]),
+ mode="replicate",
+ )
+ return mask * mix, aux1 * mix, aux2 * mix
+ else:
+ if aggressiveness:
+ mask[:, :, : aggressiveness["split_bin"]] = torch.pow(
+ mask[:, :, : aggressiveness["split_bin"]],
+ 1 + aggressiveness["value"] / 3,
+ )
+ mask[:, :, aggressiveness["split_bin"] :] = torch.pow(
+ mask[:, :, aggressiveness["split_bin"] :],
+ 1 + aggressiveness["value"],
+ )
+
+ return mask * mix
+
+ def predict(self, x_mag, aggressiveness=None):
+ h = self.forward(x_mag, aggressiveness)
+
+ if self.offset > 0:
+ h = h[:, :, :, self.offset : -self.offset]
+ assert h.size()[3] > 0
+
+ return h
diff --git a/lib/uvr5_pack/lib_v5/nets_61968KB.py b/lib/uvr5_pack/lib_v5/nets_61968KB.py
new file mode 100644
index 000000000..becbfae85
--- /dev/null
+++ b/lib/uvr5_pack/lib_v5/nets_61968KB.py
@@ -0,0 +1,122 @@
+import torch
+from torch import nn
+import torch.nn.functional as F
+
+from . import layers_123821KB as layers
+
+
+class BaseASPPNet(nn.Module):
+ def __init__(self, nin, ch, dilations=(4, 8, 16)):
+ super(BaseASPPNet, self).__init__()
+ self.enc1 = layers.Encoder(nin, ch, 3, 2, 1)
+ self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1)
+ self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1)
+ self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1)
+
+ self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations)
+
+ self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1)
+ self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1)
+ self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1)
+ self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1)
+
+ def __call__(self, x):
+ h, e1 = self.enc1(x)
+ h, e2 = self.enc2(h)
+ h, e3 = self.enc3(h)
+ h, e4 = self.enc4(h)
+
+ h = self.aspp(h)
+
+ h = self.dec4(h, e4)
+ h = self.dec3(h, e3)
+ h = self.dec2(h, e2)
+ h = self.dec1(h, e1)
+
+ return h
+
+
+class CascadedASPPNet(nn.Module):
+ def __init__(self, n_fft):
+ super(CascadedASPPNet, self).__init__()
+ self.stg1_low_band_net = BaseASPPNet(2, 32)
+ self.stg1_high_band_net = BaseASPPNet(2, 32)
+
+ self.stg2_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0)
+ self.stg2_full_band_net = BaseASPPNet(16, 32)
+
+ self.stg3_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0)
+ self.stg3_full_band_net = BaseASPPNet(32, 64)
+
+ self.out = nn.Conv2d(64, 2, 1, bias=False)
+ self.aux1_out = nn.Conv2d(32, 2, 1, bias=False)
+ self.aux2_out = nn.Conv2d(32, 2, 1, bias=False)
+
+ self.max_bin = n_fft // 2
+ self.output_bin = n_fft // 2 + 1
+
+ self.offset = 128
+
+ def forward(self, x, aggressiveness=None):
+ mix = x.detach()
+ x = x.clone()
+
+ x = x[:, :, : self.max_bin]
+
+ bandw = x.size()[2] // 2
+ aux1 = torch.cat(
+ [
+ self.stg1_low_band_net(x[:, :, :bandw]),
+ self.stg1_high_band_net(x[:, :, bandw:]),
+ ],
+ dim=2,
+ )
+
+ h = torch.cat([x, aux1], dim=1)
+ aux2 = self.stg2_full_band_net(self.stg2_bridge(h))
+
+ h = torch.cat([x, aux1, aux2], dim=1)
+ h = self.stg3_full_band_net(self.stg3_bridge(h))
+
+ mask = torch.sigmoid(self.out(h))
+ mask = F.pad(
+ input=mask,
+ pad=(0, 0, 0, self.output_bin - mask.size()[2]),
+ mode="replicate",
+ )
+
+ if self.training:
+ aux1 = torch.sigmoid(self.aux1_out(aux1))
+ aux1 = F.pad(
+ input=aux1,
+ pad=(0, 0, 0, self.output_bin - aux1.size()[2]),
+ mode="replicate",
+ )
+ aux2 = torch.sigmoid(self.aux2_out(aux2))
+ aux2 = F.pad(
+ input=aux2,
+ pad=(0, 0, 0, self.output_bin - aux2.size()[2]),
+ mode="replicate",
+ )
+ return mask * mix, aux1 * mix, aux2 * mix
+ else:
+ if aggressiveness:
+ mask[:, :, : aggressiveness["split_bin"]] = torch.pow(
+ mask[:, :, : aggressiveness["split_bin"]],
+ 1 + aggressiveness["value"] / 3,
+ )
+ mask[:, :, aggressiveness["split_bin"] :] = torch.pow(
+ mask[:, :, aggressiveness["split_bin"] :],
+ 1 + aggressiveness["value"],
+ )
+
+ return mask * mix
+
+ def predict(self, x_mag, aggressiveness=None):
+ h = self.forward(x_mag, aggressiveness)
+
+ if self.offset > 0:
+ h = h[:, :, :, self.offset : -self.offset]
+ assert h.size()[3] > 0
+
+ return h
diff --git a/lib/uvr5_pack/lib_v5/nets_new.py b/lib/uvr5_pack/lib_v5/nets_new.py
new file mode 100644
index 000000000..bfaf72e48
--- /dev/null
+++ b/lib/uvr5_pack/lib_v5/nets_new.py
@@ -0,0 +1,132 @@
+import torch
+from torch import nn
+import torch.nn.functional as F
+from . import layers_new
+
+
+class BaseNet(nn.Module):
+ def __init__(
+ self, nin, nout, nin_lstm, nout_lstm, dilations=((4, 2), (8, 4), (12, 6))
+ ):
+ super(BaseNet, self).__init__()
+ self.enc1 = layers_new.Conv2DBNActiv(nin, nout, 3, 1, 1)
+ self.enc2 = layers_new.Encoder(nout, nout * 2, 3, 2, 1)
+ self.enc3 = layers_new.Encoder(nout * 2, nout * 4, 3, 2, 1)
+ self.enc4 = layers_new.Encoder(nout * 4, nout * 6, 3, 2, 1)
+ self.enc5 = layers_new.Encoder(nout * 6, nout * 8, 3, 2, 1)
+
+ self.aspp = layers_new.ASPPModule(nout * 8, nout * 8, dilations, dropout=True)
+
+ self.dec4 = layers_new.Decoder(nout * (6 + 8), nout * 6, 3, 1, 1)
+ self.dec3 = layers_new.Decoder(nout * (4 + 6), nout * 4, 3, 1, 1)
+ self.dec2 = layers_new.Decoder(nout * (2 + 4), nout * 2, 3, 1, 1)
+ self.lstm_dec2 = layers_new.LSTMModule(nout * 2, nin_lstm, nout_lstm)
+ self.dec1 = layers_new.Decoder(nout * (1 + 2) + 1, nout * 1, 3, 1, 1)
+
+ def __call__(self, x):
+ e1 = self.enc1(x)
+ e2 = self.enc2(e1)
+ e3 = self.enc3(e2)
+ e4 = self.enc4(e3)
+ e5 = self.enc5(e4)
+
+ h = self.aspp(e5)
+
+ h = self.dec4(h, e4)
+ h = self.dec3(h, e3)
+ h = self.dec2(h, e2)
+ h = torch.cat([h, self.lstm_dec2(h)], dim=1)
+ h = self.dec1(h, e1)
+
+ return h
+
+
+class CascadedNet(nn.Module):
+ def __init__(self, n_fft, nout=32, nout_lstm=128):
+ super(CascadedNet, self).__init__()
+
+ self.max_bin = n_fft // 2
+ self.output_bin = n_fft // 2 + 1
+ self.nin_lstm = self.max_bin // 2
+ self.offset = 64
+
+ self.stg1_low_band_net = nn.Sequential(
+ BaseNet(2, nout // 2, self.nin_lstm // 2, nout_lstm),
+ layers_new.Conv2DBNActiv(nout // 2, nout // 4, 1, 1, 0),
+ )
+
+ self.stg1_high_band_net = BaseNet(
+ 2, nout // 4, self.nin_lstm // 2, nout_lstm // 2
+ )
+
+ self.stg2_low_band_net = nn.Sequential(
+ BaseNet(nout // 4 + 2, nout, self.nin_lstm // 2, nout_lstm),
+ layers_new.Conv2DBNActiv(nout, nout // 2, 1, 1, 0),
+ )
+ self.stg2_high_band_net = BaseNet(
+ nout // 4 + 2, nout // 2, self.nin_lstm // 2, nout_lstm // 2
+ )
+
+ self.stg3_full_band_net = BaseNet(
+ 3 * nout // 4 + 2, nout, self.nin_lstm, nout_lstm
+ )
+
+ self.out = nn.Conv2d(nout, 2, 1, bias=False)
+ self.aux_out = nn.Conv2d(3 * nout // 4, 2, 1, bias=False)
+
+ def forward(self, x):
+ x = x[:, :, : self.max_bin]
+
+ bandw = x.size()[2] // 2
+ l1_in = x[:, :, :bandw]
+ h1_in = x[:, :, bandw:]
+ l1 = self.stg1_low_band_net(l1_in)
+ h1 = self.stg1_high_band_net(h1_in)
+ aux1 = torch.cat([l1, h1], dim=2)
+
+ l2_in = torch.cat([l1_in, l1], dim=1)
+ h2_in = torch.cat([h1_in, h1], dim=1)
+ l2 = self.stg2_low_band_net(l2_in)
+ h2 = self.stg2_high_band_net(h2_in)
+ aux2 = torch.cat([l2, h2], dim=2)
+
+ f3_in = torch.cat([x, aux1, aux2], dim=1)
+ f3 = self.stg3_full_band_net(f3_in)
+
+ mask = torch.sigmoid(self.out(f3))
+ mask = F.pad(
+ input=mask,
+ pad=(0, 0, 0, self.output_bin - mask.size()[2]),
+ mode="replicate",
+ )
+
+ if self.training:
+ aux = torch.cat([aux1, aux2], dim=1)
+ aux = torch.sigmoid(self.aux_out(aux))
+ aux = F.pad(
+ input=aux,
+ pad=(0, 0, 0, self.output_bin - aux.size()[2]),
+ mode="replicate",
+ )
+ return mask, aux
+ else:
+ return mask
+
+ def predict_mask(self, x):
+ mask = self.forward(x)
+
+ if self.offset > 0:
+ mask = mask[:, :, :, self.offset : -self.offset]
+ assert mask.size()[3] > 0
+
+ return mask
+
+ def predict(self, x, aggressiveness=None):
+ mask = self.forward(x)
+ pred_mag = x * mask
+
+ if self.offset > 0:
+ pred_mag = pred_mag[:, :, :, self.offset : -self.offset]
+ assert pred_mag.size()[3] > 0
+
+ return pred_mag
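+
+# CascadedNet in brief: stage 1 masks the low and high bands separately,
+# stage 2 reprocesses each band with the raw input concatenated to the
+# stage-1 features, and stage 3 sees the full spectrum plus both auxiliary
+# outputs. As in the older nets_* files, predict() multiplies the mask into
+# the magnitude input and trims `offset` border frames on each side.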
diff --git a/lib/uvr5_pack/lib_v5/spec_utils.py b/lib/uvr5_pack/lib_v5/spec_utils.py
new file mode 100644
index 000000000..a3fd46d33
--- /dev/null
+++ b/lib/uvr5_pack/lib_v5/spec_utils.py
@@ -0,0 +1,667 @@
+import os, librosa
+import numpy as np
+import soundfile as sf
+from tqdm import tqdm
+import json, math, hashlib
+
+
+def crop_center(h1, h2):
+ h1_shape = h1.size()
+ h2_shape = h2.size()
+
+ if h1_shape[3] == h2_shape[3]:
+ return h1
+ elif h1_shape[3] < h2_shape[3]:
+ raise ValueError("h1_shape[3] must be greater than h2_shape[3]")
+
+ # s_freq = (h2_shape[2] - h1_shape[2]) // 2
+ # e_freq = s_freq + h1_shape[2]
+ s_time = (h1_shape[3] - h2_shape[3]) // 2
+ e_time = s_time + h2_shape[3]
+ h1 = h1[:, :, :, s_time:e_time]
+
+ return h1
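+
+# crop_center trims the skip tensor h1 along time so it can be concatenated
+# with the upsampled decoder tensor h2; e.g. a (N, C, F, 104) skip cropped
+# against (N, C, F, 100) keeps frames [2:102].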
+
+
+def wave_to_spectrogram(
+ wave, hop_length, n_fft, mid_side=False, mid_side_b2=False, reverse=False
+):
+ if reverse:
+ wave_left = np.flip(np.asfortranarray(wave[0]))
+ wave_right = np.flip(np.asfortranarray(wave[1]))
+ elif mid_side:
+ wave_left = np.asfortranarray(np.add(wave[0], wave[1]) / 2)
+ wave_right = np.asfortranarray(np.subtract(wave[0], wave[1]))
+ elif mid_side_b2:
+ wave_left = np.asfortranarray(np.add(wave[1], wave[0] * 0.5))
+ wave_right = np.asfortranarray(np.subtract(wave[0], wave[1] * 0.5))
+ else:
+ wave_left = np.asfortranarray(wave[0])
+ wave_right = np.asfortranarray(wave[1])
+
+    spec_left = librosa.stft(wave_left, n_fft=n_fft, hop_length=hop_length)
+    spec_right = librosa.stft(wave_right, n_fft=n_fft, hop_length=hop_length)
+
+ spec = np.asfortranarray([spec_left, spec_right])
+
+ return spec
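+
+# Mid/side sketch: mid = (L + R) / 2 and side = L - R, which is invertible
+# as L = mid + side / 2 and R = mid - side / 2. For example L=0.8, R=0.2
+# gives mid=0.5, side=0.6, and 0.5 + 0.3 / 0.5 - 0.3 recover 0.8 / 0.2.
+# mid_side_b2 and reverse are alternative encodings selected by the model
+# parameter files.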
+
+
+def wave_to_spectrogram_mt(
+ wave, hop_length, n_fft, mid_side=False, mid_side_b2=False, reverse=False
+):
+ import threading
+
+ if reverse:
+ wave_left = np.flip(np.asfortranarray(wave[0]))
+ wave_right = np.flip(np.asfortranarray(wave[1]))
+ elif mid_side:
+ wave_left = np.asfortranarray(np.add(wave[0], wave[1]) / 2)
+ wave_right = np.asfortranarray(np.subtract(wave[0], wave[1]))
+ elif mid_side_b2:
+ wave_left = np.asfortranarray(np.add(wave[1], wave[0] * 0.5))
+ wave_right = np.asfortranarray(np.subtract(wave[0], wave[1] * 0.5))
+ else:
+ wave_left = np.asfortranarray(wave[0])
+ wave_right = np.asfortranarray(wave[1])
+
+ def run_thread(**kwargs):
+ global spec_left
+ spec_left = librosa.stft(**kwargs)
+
+ thread = threading.Thread(
+ target=run_thread,
+ kwargs={"y": wave_left, "n_fft": n_fft, "hop_length": hop_length},
+ )
+ thread.start()
+    spec_right = librosa.stft(wave_right, n_fft=n_fft, hop_length=hop_length)
+ thread.join()
+
+ spec = np.asfortranarray([spec_left, spec_right])
+
+ return spec
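+
+# The _mt variant runs the left-channel STFT in a worker thread while the
+# main thread handles the right channel; the two can overlap wherever NumPy
+# releases the GIL. The worker hands its result back through the
+# module-level `spec_left` global set inside run_thread.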
+
+
+def combine_spectrograms(specs, mp):
+ l = min([specs[i].shape[2] for i in specs])
+ spec_c = np.zeros(shape=(2, mp.param["bins"] + 1, l), dtype=np.complex64)
+ offset = 0
+ bands_n = len(mp.param["band"])
+
+ for d in range(1, bands_n + 1):
+ h = mp.param["band"][d]["crop_stop"] - mp.param["band"][d]["crop_start"]
+ spec_c[:, offset : offset + h, :l] = specs[d][
+ :, mp.param["band"][d]["crop_start"] : mp.param["band"][d]["crop_stop"], :l
+ ]
+ offset += h
+
+ if offset > mp.param["bins"]:
+        raise ValueError("Too many bins")
+
+    # lowpass filter
+ if (
+ mp.param["pre_filter_start"] > 0
+ ): # and mp.param['band'][bands_n]['res_type'] in ['scipy', 'polyphase']:
+ if bands_n == 1:
+ spec_c = fft_lp_filter(
+ spec_c, mp.param["pre_filter_start"], mp.param["pre_filter_stop"]
+ )
+ else:
+ gp = 1
+ for b in range(
+ mp.param["pre_filter_start"] + 1, mp.param["pre_filter_stop"]
+ ):
+ g = math.pow(
+ 10, -(b - mp.param["pre_filter_start"]) * (3.5 - gp) / 20.0
+ )
+ gp = g
+ spec_c[:, b, :] *= g
+
+ return np.asfortranarray(spec_c)
+
+
+def spectrogram_to_image(spec, mode="magnitude"):
+ if mode == "magnitude":
+ if np.iscomplexobj(spec):
+ y = np.abs(spec)
+ else:
+ y = spec
+ y = np.log10(y**2 + 1e-8)
+ elif mode == "phase":
+ if np.iscomplexobj(spec):
+ y = np.angle(spec)
+ else:
+ y = spec
+
+ y -= y.min()
+ y *= 255 / y.max()
+ img = np.uint8(y)
+
+ if y.ndim == 3:
+ img = img.transpose(1, 2, 0)
+ img = np.concatenate([np.max(img, axis=2, keepdims=True), img], axis=2)
+
+ return img
+
+
+def reduce_vocal_aggressively(X, y, softmask):
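+ # Where the residual (mix minus instrumental) is louder than the
+ # instrumental estimate, pull the instrumental magnitude down by
+ # softmask * residual magnitude; the original phase is kept.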
+ v = X - y
+ y_mag_tmp = np.abs(y)
+ v_mag_tmp = np.abs(v)
+
+ v_mask = v_mag_tmp > y_mag_tmp
+ y_mag = np.clip(y_mag_tmp - v_mag_tmp * v_mask * softmask, 0, np.inf)
+
+ return y_mag * np.exp(1.0j * np.angle(y))
+
+
+def mask_silence(mag, ref, thres=0.2, min_range=64, fade_size=32):
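+ # Where ref stays below thres for more than min_range frames, mix ref
+ # back into mag, cross-fading over fade_size frames at each boundary.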
+ if min_range < fade_size * 2:
+ raise ValueError("min_range must be >= fade_area * 2")
+
+ mag = mag.copy()
+
+ idx = np.where(ref.mean(axis=(0, 1)) < thres)[0]
+ starts = np.insert(idx[np.where(np.diff(idx) != 1)[0] + 1], 0, idx[0])
+ ends = np.append(idx[np.where(np.diff(idx) != 1)[0]], idx[-1])
+ uninformative = np.where(ends - starts > min_range)[0]
+ if len(uninformative) > 0:
+ starts = starts[uninformative]
+ ends = ends[uninformative]
+ old_e = None
+ for s, e in zip(starts, ends):
+ if old_e is not None and s - old_e < fade_size:
+ s = old_e - fade_size * 2
+
+ if s != 0:
+ weight = np.linspace(0, 1, fade_size)
+ mag[:, :, s : s + fade_size] += weight * ref[:, :, s : s + fade_size]
+ else:
+ s -= fade_size
+
+ if e != mag.shape[2]:
+ weight = np.linspace(1, 0, fade_size)
+ mag[:, :, e - fade_size : e] += weight * ref[:, :, e - fade_size : e]
+ else:
+ e += fade_size
+
+ mag[:, :, s + fade_size : e - fade_size] += ref[
+ :, :, s + fade_size : e - fade_size
+ ]
+ old_e = e
+
+ return mag
+
+
+def align_wave_head_and_tail(a, b):
+ l = min([a[0].size, b[0].size])
+
+ return a[:, :l], b[:, :l]
+
+
+def cache_or_load(mix_path, inst_path, mp):
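+ # Cache the combined spectrograms on disk, keyed by a hash of the model
+ # parameters, so repeated runs skip the load/resample/STFT pipeline.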
+ mix_basename = os.path.splitext(os.path.basename(mix_path))[0]
+ inst_basename = os.path.splitext(os.path.basename(inst_path))[0]
+
+ cache_dir = "mph{}".format(
+ hashlib.sha1(json.dumps(mp.param, sort_keys=True).encode("utf-8")).hexdigest()
+ )
+ mix_cache_dir = os.path.join("cache", cache_dir)
+ inst_cache_dir = os.path.join("cache", cache_dir)
+
+ os.makedirs(mix_cache_dir, exist_ok=True)
+ os.makedirs(inst_cache_dir, exist_ok=True)
+
+ mix_cache_path = os.path.join(mix_cache_dir, mix_basename + ".npy")
+ inst_cache_path = os.path.join(inst_cache_dir, inst_basename + ".npy")
+
+ if os.path.exists(mix_cache_path) and os.path.exists(inst_cache_path):
+ X_spec_m = np.load(mix_cache_path)
+ y_spec_m = np.load(inst_cache_path)
+ else:
+ X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {}
+
+ for d in range(len(mp.param["band"]), 0, -1):
+ bp = mp.param["band"][d]
+
+ if d == len(mp.param["band"]): # high-end band
+ X_wave[d], _ = librosa.load(
+ mix_path, bp["sr"], False, dtype=np.float32, res_type=bp["res_type"]
+ )
+ y_wave[d], _ = librosa.load(
+ inst_path,
+ bp["sr"],
+ False,
+ dtype=np.float32,
+ res_type=bp["res_type"],
+ )
+ else: # lower bands
+ X_wave[d] = librosa.resample(
+ X_wave[d + 1],
+ mp.param["band"][d + 1]["sr"],
+ bp["sr"],
+ res_type=bp["res_type"],
+ )
+ y_wave[d] = librosa.resample(
+ y_wave[d + 1],
+ mp.param["band"][d + 1]["sr"],
+ bp["sr"],
+ res_type=bp["res_type"],
+ )
+
+ X_wave[d], y_wave[d] = align_wave_head_and_tail(X_wave[d], y_wave[d])
+
+ X_spec_s[d] = wave_to_spectrogram(
+ X_wave[d],
+ bp["hl"],
+ bp["n_fft"],
+ mp.param["mid_side"],
+ mp.param["mid_side_b2"],
+ mp.param["reverse"],
+ )
+ y_spec_s[d] = wave_to_spectrogram(
+ y_wave[d],
+ bp["hl"],
+ bp["n_fft"],
+ mp.param["mid_side"],
+ mp.param["mid_side_b2"],
+ mp.param["reverse"],
+ )
+
+ del X_wave, y_wave
+
+ X_spec_m = combine_spectrograms(X_spec_s, mp)
+ y_spec_m = combine_spectrograms(y_spec_s, mp)
+
+ if X_spec_m.shape != y_spec_m.shape:
+ raise ValueError("The combined spectrograms are different: " + mix_path)
+
+ _, ext = os.path.splitext(mix_path)
+
+ np.save(mix_cache_path, X_spec_m)
+ np.save(inst_cache_path, y_spec_m)
+
+ return X_spec_m, y_spec_m
+
+
+def spectrogram_to_wave(spec, hop_length, mid_side, mid_side_b2, reverse):
+ spec_left = np.asfortranarray(spec[0])
+ spec_right = np.asfortranarray(spec[1])
+
+ wave_left = librosa.istft(spec_left, hop_length=hop_length)
+ wave_right = librosa.istft(spec_right, hop_length=hop_length)
+
+ if reverse:
+ return np.asfortranarray([np.flip(wave_left), np.flip(wave_right)])
+ elif mid_side:
+ return np.asfortranarray(
+ [np.add(wave_left, wave_right / 2), np.subtract(wave_left, wave_right / 2)]
+ )
+ elif mid_side_b2:
+ return np.asfortranarray(
+ [
+ np.add(wave_right / 1.25, 0.4 * wave_left),
+ np.subtract(wave_left / 1.25, 0.4 * wave_right),
+ ]
+ )
+ else:
+ return np.asfortranarray([wave_left, wave_right])
+
+
+def spectrogram_to_wave_mt(spec, hop_length, mid_side, reverse, mid_side_b2):
+ import threading
+
+ spec_left = np.asfortranarray(spec[0])
+ spec_right = np.asfortranarray(spec[1])
+
+ def run_thread(**kwargs):
+ global wave_left
+ wave_left = librosa.istft(**kwargs)
+
+ thread = threading.Thread(
+ target=run_thread, kwargs={"stft_matrix": spec_left, "hop_length": hop_length}
+ )
+ thread.start()
+ wave_right = librosa.istft(spec_right, hop_length=hop_length)
+ thread.join()
+
+ if reverse:
+ return np.asfortranarray([np.flip(wave_left), np.flip(wave_right)])
+ elif mid_side:
+ return np.asfortranarray(
+ [np.add(wave_left, wave_right / 2), np.subtract(wave_left, wave_right / 2)]
+ )
+ elif mid_side_b2:
+ return np.asfortranarray(
+ [
+ np.add(wave_right / 1.25, 0.4 * wave_left),
+ np.subtract(wave_left / 1.25, 0.4 * wave_right),
+ ]
+ )
+ else:
+ return np.asfortranarray([wave_left, wave_right])
+
+
+def cmb_spectrogram_to_wave(spec_m, mp, extra_bins_h=None, extra_bins=None):
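+ # Inverse of combine_spectrograms: re-expand each band, filter it at its
+ # crossover edges, iSTFT it, then resample and sum the bands back into
+ # one stereo waveform (returned as samples x channels).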
+ wave_band = {}
+ bands_n = len(mp.param["band"])
+ offset = 0
+
+ for d in range(1, bands_n + 1):
+ bp = mp.param["band"][d]
+ spec_s = np.ndarray(
+ shape=(2, bp["n_fft"] // 2 + 1, spec_m.shape[2]), dtype=complex
+ )
+ h = bp["crop_stop"] - bp["crop_start"]
+ spec_s[:, bp["crop_start"] : bp["crop_stop"], :] = spec_m[
+ :, offset : offset + h, :
+ ]
+
+ offset += h
+ if d == bands_n: # higher
+ if extra_bins_h: # restore the raw high band when --high_end_process is "bypass"
+ max_bin = bp["n_fft"] // 2
+ spec_s[:, max_bin - extra_bins_h : max_bin, :] = extra_bins[
+ :, :extra_bins_h, :
+ ]
+ if bp["hpf_start"] > 0:
+ spec_s = fft_hp_filter(spec_s, bp["hpf_start"], bp["hpf_stop"] - 1)
+ if bands_n == 1:
+ wave = spectrogram_to_wave(
+ spec_s,
+ bp["hl"],
+ mp.param["mid_side"],
+ mp.param["mid_side_b2"],
+ mp.param["reverse"],
+ )
+ else:
+ wave = np.add(
+ wave,
+ spectrogram_to_wave(
+ spec_s,
+ bp["hl"],
+ mp.param["mid_side"],
+ mp.param["mid_side_b2"],
+ mp.param["reverse"],
+ ),
+ )
+ else:
+ sr = mp.param["band"][d + 1]["sr"]
+ if d == 1: # lower
+ spec_s = fft_lp_filter(spec_s, bp["lpf_start"], bp["lpf_stop"])
+ wave = librosa.resample(
+ spectrogram_to_wave(
+ spec_s,
+ bp["hl"],
+ mp.param["mid_side"],
+ mp.param["mid_side_b2"],
+ mp.param["reverse"],
+ ),
+ bp["sr"],
+ sr,
+ res_type="sinc_fastest",
+ )
+ else: # mid
+ spec_s = fft_hp_filter(spec_s, bp["hpf_start"], bp["hpf_stop"] - 1)
+ spec_s = fft_lp_filter(spec_s, bp["lpf_start"], bp["lpf_stop"])
+ wave2 = np.add(
+ wave,
+ spectrogram_to_wave(
+ spec_s,
+ bp["hl"],
+ mp.param["mid_side"],
+ mp.param["mid_side_b2"],
+ mp.param["reverse"],
+ ),
+ )
+ # wave = librosa.core.resample(wave2, bp['sr'], sr, res_type="sinc_fastest")
+ wave = librosa.core.resample(wave2, bp["sr"], sr, res_type="scipy")
+
+ return wave.T
+
+
+def fft_lp_filter(spec, bin_start, bin_stop):
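+ # Frequency-domain low-pass: ramp the gain linearly from 1 toward 0
+ # across [bin_start, bin_stop), then mute every bin from bin_stop up.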
+ g = 1.0
+ for b in range(bin_start, bin_stop):
+ g -= 1 / (bin_stop - bin_start)
+ spec[:, b, :] = g * spec[:, b, :]
+
+ spec[:, bin_stop:, :] *= 0
+
+ return spec
+
+
+def fft_hp_filter(spec, bin_start, bin_stop):
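+ # High-pass mirror of fft_lp_filter: ramp the gain down while walking
+ # from bin_start toward bin_stop, then mute every bin at or below bin_stop.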
+ g = 1.0
+ for b in range(bin_start, bin_stop, -1):
+ g -= 1 / (bin_start - bin_stop)
+ spec[:, b, :] = g * spec[:, b, :]
+
+ spec[:, 0 : bin_stop + 1, :] *= 0
+
+ return spec
+
+
+def mirroring(a, spec_m, input_high_end, mp):
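+ # Patch the missing high band by mirroring the spectrum just below
+ # pre_filter_start, reusing the input's phase and keeping whichever
+ # estimate is quieter per bin.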
+ if "mirroring" == a:
+ mirror = np.flip(
+ np.abs(
+ spec_m[
+ :,
+ mp.param["pre_filter_start"]
+ - 10
+ - input_high_end.shape[1] : mp.param["pre_filter_start"]
+ - 10,
+ :,
+ ]
+ ),
+ 1,
+ )
+ mirror = mirror * np.exp(1.0j * np.angle(input_high_end))
+
+ return np.where(
+ np.abs(input_high_end) <= np.abs(mirror), input_high_end, mirror
+ )
+
+ if "mirroring2" == a:
+ mirror = np.flip(
+ np.abs(
+ spec_m[
+ :,
+ mp.param["pre_filter_start"]
+ - 10
+ - input_high_end.shape[1] : mp.param["pre_filter_start"]
+ - 10,
+ :,
+ ]
+ ),
+ 1,
+ )
+ mi = np.multiply(mirror, input_high_end * 1.7)
+
+ return np.where(np.abs(input_high_end) <= np.abs(mi), input_high_end, mi)
+
+
+def ensembling(a, specs):
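+ # Merge several spectrograms (trimmed to the shortest) by keeping, per
+ # bin, the estimate with the smallest ("min_mag") or largest ("max_mag")
+ # magnitude.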
+ for i in range(1, len(specs)):
+ if i == 1:
+ spec = specs[0]
+
+ ln = min([spec.shape[2], specs[i].shape[2]])
+ spec = spec[:, :, :ln]
+ specs[i] = specs[i][:, :, :ln]
+
+ if "min_mag" == a:
+ spec = np.where(np.abs(specs[i]) <= np.abs(spec), specs[i], spec)
+ if "max_mag" == a:
+ spec = np.where(np.abs(specs[i]) >= np.abs(spec), specs[i], spec)
+
+ return spec
+
+
+def stft(wave, nfft, hl):
+ wave_left = np.asfortranarray(wave[0])
+ wave_right = np.asfortranarray(wave[1])
+ spec_left = librosa.stft(wave_left, nfft, hop_length=hl)
+ spec_right = librosa.stft(wave_right, nfft, hop_length=hl)
+ spec = np.asfortranarray([spec_left, spec_right])
+
+ return spec
+
+
+def istft(spec, hl):
+ spec_left = np.asfortranarray(spec[0])
+ spec_right = np.asfortranarray(spec[1])
+
+ wave_left = librosa.istft(spec_left, hop_length=hl)
+ wave_right = librosa.istft(spec_right, hop_length=hl)
+ wave = np.asfortranarray([wave_left, wave_right])
+
+ return wave
+
+
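+# CLI for offline spectrogram arithmetic: "invert"/"invert_p" subtract an
+# instrumental from a mix to isolate vocals, "min_mag"/"max_mag" ensemble
+# several inputs, and "deep"/"align" are auxiliary modes.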
+if __name__ == "__main__":
+ import cv2
+ import sys
+ import time
+ import argparse
+ from model_param_init import ModelParameters
+
+ p = argparse.ArgumentParser()
+ p.add_argument(
+ "--algorithm",
+ "-a",
+ type=str,
+ choices=["invert", "invert_p", "min_mag", "max_mag", "deep", "align"],
+ default="min_mag",
+ )
+ p.add_argument(
+ "--model_params",
+ "-m",
+ type=str,
+ default=os.path.join("modelparams", "1band_sr44100_hl512.json"),
+ )
+ p.add_argument("--output_name", "-o", type=str, default="output")
+ p.add_argument("--vocals_only", "-v", action="store_true")
+ p.add_argument("input", nargs="+")
+ args = p.parse_args()
+
+ start_time = time.time()
+
+ if args.algorithm.startswith("invert") and len(args.input) != 2:
+ raise ValueError("There should be two input files.")
+
+ if not args.algorithm.startswith("invert") and len(args.input) < 2:
+ raise ValueError("There must be at least two input files.")
+
+ wave, specs = {}, {}
+ mp = ModelParameters(args.model_params)
+
+ for i in range(len(args.input)):
+ spec = {}
+
+ for d in range(len(mp.param["band"]), 0, -1):
+ bp = mp.param["band"][d]
+
+ if d == len(mp.param["band"]): # high-end band
+ wave[d], _ = librosa.load(
+ args.input[i],
+ bp["sr"],
+ False,
+ dtype=np.float32,
+ res_type=bp["res_type"],
+ )
+
+ if len(wave[d].shape) == 1: # mono to stereo
+ wave[d] = np.array([wave[d], wave[d]])
+ else: # lower bands
+ wave[d] = librosa.resample(
+ wave[d + 1],
+ mp.param["band"][d + 1]["sr"],
+ bp["sr"],
+ res_type=bp["res_type"],
+ )
+
+ spec[d] = wave_to_spectrogram(
+ wave[d],
+ bp["hl"],
+ bp["n_fft"],
+ mp.param["mid_side"],
+ mp.param["mid_side_b2"],
+ mp.param["reverse"],
+ )
+
+ specs[i] = combine_spectrograms(spec, mp)
+
+ del wave
+
+ if args.algorithm == "deep":
+ d_spec = np.where(np.abs(specs[0]) <= np.abs(specs[1]), specs[0], specs[1])
+ v_spec = d_spec - specs[1]
+ sf.write(
+ os.path.join("{}.wav".format(args.output_name)),
+ cmb_spectrogram_to_wave(v_spec, mp),
+ mp.param["sr"],
+ )
+
+ if args.algorithm.startswith("invert"):
+ ln = min([specs[0].shape[2], specs[1].shape[2]])
+ specs[0] = specs[0][:, :, :ln]
+ specs[1] = specs[1][:, :, :ln]
+
+ if "invert_p" == args.algorithm:
+ X_mag = np.abs(specs[0])
+ y_mag = np.abs(specs[1])
+ max_mag = np.where(X_mag >= y_mag, X_mag, y_mag)
+ v_spec = specs[1] - max_mag * np.exp(1.0j * np.angle(specs[0]))
+ else:
+ specs[1] = reduce_vocal_aggressively(specs[0], specs[1], 0.2)
+ v_spec = specs[0] - specs[1]
+
+ if not args.vocals_only:
+ X_mag = np.abs(specs[0])
+ y_mag = np.abs(specs[1])
+ v_mag = np.abs(v_spec)
+
+ X_image = spectrogram_to_image(X_mag)
+ y_image = spectrogram_to_image(y_mag)
+ v_image = spectrogram_to_image(v_mag)
+
+ cv2.imwrite("{}_X.png".format(args.output_name), X_image)
+ cv2.imwrite("{}_y.png".format(args.output_name), y_image)
+ cv2.imwrite("{}_v.png".format(args.output_name), v_image)
+
+ sf.write(
+ "{}_X.wav".format(args.output_name),
+ cmb_spectrogram_to_wave(specs[0], mp),
+ mp.param["sr"],
+ )
+ sf.write(
+ "{}_y.wav".format(args.output_name),
+ cmb_spectrogram_to_wave(specs[1], mp),
+ mp.param["sr"],
+ )
+
+ sf.write(
+ "{}_v.wav".format(args.output_name),
+ cmb_spectrogram_to_wave(v_spec, mp),
+ mp.param["sr"],
+ )
+ else:
+ if not args.algorithm == "deep":
+ sf.write(
+ os.path.join("ensembled", "{}.wav".format(args.output_name)),
+ cmb_spectrogram_to_wave(ensembling(args.algorithm, specs), mp),
+ mp.param["sr"],
+ )
+
+ if args.algorithm == "align":
+ trackalignment = [
+ {
+ "file1": '"{}"'.format(args.input[0]),
+ "file2": '"{}"'.format(args.input[1]),
+ }
+ ]
+
+ for i, e in tqdm(enumerate(trackalignment), desc="Performing Alignment..."):
+ os.system(f"python lib/align_tracks.py {e['file1']} {e['file2']}")
+
+ # print('Total time: {0:.{1}f}s'.format(time.time() - start_time, 1))
diff --git a/lib/uvr5_pack/name_params.json b/lib/uvr5_pack/name_params.json
new file mode 100644
index 000000000..950adcf51
--- /dev/null
+++ b/lib/uvr5_pack/name_params.json
@@ -0,0 +1,263 @@
+{
+ "equivalent" : [
+ {
+ "model_hash_name" : [
+ {
+ "hash_name": "47939caf0cfe52a0e81442b85b971dfd",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/4band_44100.json",
+ "param_name": "4band_44100"
+ },
+ {
+ "hash_name": "4e4ecb9764c50a8c414fee6e10395bbe",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/4band_v2.json",
+ "param_name": "4band_v2"
+ },
+ {
+ "hash_name": "ca106edd563e034bde0bdec4bb7a4b36",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/4band_v2.json",
+ "param_name": "4band_v2"
+ },
+ {
+ "hash_name": "e60a1e84803ce4efc0a6551206cc4b71",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/4band_44100.json",
+ "param_name": "4band_44100"
+ },
+ {
+ "hash_name": "a82f14e75892e55e994376edbf0c8435",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/4band_44100.json",
+ "param_name": "4band_44100"
+ },
+ {
+ "hash_name": "6dd9eaa6f0420af9f1d403aaafa4cc06",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/4band_v2_sn.json",
+ "param_name": "4band_v2_sn"
+ },
+ {
+ "hash_name": "08611fb99bd59eaa79ad27c58d137727",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/4band_v2_sn.json",
+ "param_name": "4band_v2_sn"
+ },
+ {
+ "hash_name": "5c7bbca45a187e81abbbd351606164e5",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/3band_44100_msb2.json",
+ "param_name": "3band_44100_msb2"
+ },
+ {
+ "hash_name": "d6b2cb685a058a091e5e7098192d3233",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/3band_44100_msb2.json",
+ "param_name": "3band_44100_msb2"
+ },
+ {
+ "hash_name": "c1b9f38170a7c90e96f027992eb7c62b",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/4band_44100.json",
+ "param_name": "4band_44100"
+ },
+ {
+ "hash_name": "c3448ec923fa0edf3d03a19e633faa53",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/4band_44100.json",
+ "param_name": "4band_44100"
+ },
+ {
+ "hash_name": "68aa2c8093d0080704b200d140f59e54",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/3band_44100.json",
+ "param_name": "3band_44100"
+ },
+ {
+ "hash_name": "fdc83be5b798e4bd29fe00fe6600e147",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/3band_44100_mid.json",
+ "param_name": "3band_44100_mid.json"
+ },
+ {
+ "hash_name": "2ce34bc92fd57f55db16b7a4def3d745",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/3band_44100_mid.json",
+ "param_name": "3band_44100_mid.json"
+ },
+ {
+ "hash_name": "52fdca89576f06cf4340b74a4730ee5f",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/4band_44100.json",
+ "param_name": "4band_44100.json"
+ },
+ {
+ "hash_name": "41191165b05d38fc77f072fa9e8e8a30",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/4band_44100.json",
+ "param_name": "4band_44100.json"
+ },
+ {
+ "hash_name": "89e83b511ad474592689e562d5b1f80e",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/2band_32000.json",
+ "param_name": "2band_32000.json"
+ },
+ {
+ "hash_name": "0b954da81d453b716b114d6d7c95177f",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/2band_32000.json",
+ "param_name": "2band_32000.json"
+ }
+
+ ],
+ "v4 Models": [
+ {
+ "hash_name": "6a00461c51c2920fd68937d4609ed6c8",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/1band_sr16000_hl512.json",
+ "param_name": "1band_sr16000_hl512"
+ },
+ {
+ "hash_name": "0ab504864d20f1bd378fe9c81ef37140",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/1band_sr32000_hl512.json",
+ "param_name": "1band_sr32000_hl512"
+ },
+ {
+ "hash_name": "7dd21065bf91c10f7fccb57d7d83b07f",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/1band_sr32000_hl512.json",
+ "param_name": "1band_sr32000_hl512"
+ },
+ {
+ "hash_name": "80ab74d65e515caa3622728d2de07d23",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/1band_sr32000_hl512.json",
+ "param_name": "1band_sr32000_hl512"
+ },
+ {
+ "hash_name": "edc115e7fc523245062200c00caa847f",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/1band_sr33075_hl384.json",
+ "param_name": "1band_sr33075_hl384"
+ },
+ {
+ "hash_name": "28063e9f6ab5b341c5f6d3c67f2045b7",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/1band_sr33075_hl384.json",
+ "param_name": "1band_sr33075_hl384"
+ },
+ {
+ "hash_name": "b58090534c52cbc3e9b5104bad666ef2",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl512.json",
+ "param_name": "1band_sr44100_hl512"
+ },
+ {
+ "hash_name": "0cdab9947f1b0928705f518f3c78ea8f",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl512.json",
+ "param_name": "1band_sr44100_hl512"
+ },
+ {
+ "hash_name": "ae702fed0238afb5346db8356fe25f13",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl1024.json",
+ "param_name": "1band_sr44100_hl1024"
+ }
+ ]
+ }
+ ],
+ "User Models" : [
+ {
+ "1 Band": [
+ {
+ "hash_name": "1band_sr16000_hl512",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/1band_sr16000_hl512.json",
+ "param_name": "1band_sr16000_hl512"
+ },
+ {
+ "hash_name": "1band_sr32000_hl512",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/1band_sr32000_hl512.json",
+ "param_name": "1band_sr16000_hl512"
+ },
+ {
+ "hash_name": "1band_sr33075_hl384",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/1band_sr33075_hl384.json",
+ "param_name": "1band_sr33075_hl384"
+ },
+ {
+ "hash_name": "1band_sr44100_hl256",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl256.json",
+ "param_name": "1band_sr44100_hl256"
+ },
+ {
+ "hash_name": "1band_sr44100_hl512",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl512.json",
+ "param_name": "1band_sr44100_hl512"
+ },
+ {
+ "hash_name": "1band_sr44100_hl1024",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl1024.json",
+ "param_name": "1band_sr44100_hl1024"
+ }
+ ],
+ "2 Band": [
+ {
+ "hash_name": "2band_44100_lofi",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/2band_44100_lofi.json",
+ "param_name": "2band_44100_lofi"
+ },
+ {
+ "hash_name": "2band_32000",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/2band_32000.json",
+ "param_name": "2band_32000"
+ },
+ {
+ "hash_name": "2band_48000",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/2band_48000.json",
+ "param_name": "2band_48000"
+ }
+ ],
+ "3 Band": [
+ {
+ "hash_name": "3band_44100",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/3band_44100.json",
+ "param_name": "3band_44100"
+ },
+ {
+ "hash_name": "3band_44100_mid",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/3band_44100_mid.json",
+ "param_name": "3band_44100_mid"
+ },
+ {
+ "hash_name": "3band_44100_msb2",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/3band_44100_msb2.json",
+ "param_name": "3band_44100_msb2"
+ }
+ ],
+ "4 Band": [
+ {
+ "hash_name": "4band_44100",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/4band_44100.json",
+ "param_name": "4band_44100"
+ },
+ {
+ "hash_name": "4band_44100_mid",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/4band_44100_mid.json",
+ "param_name": "4band_44100_mid"
+ },
+ {
+ "hash_name": "4band_44100_msb",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/4band_44100_msb.json",
+ "param_name": "4band_44100_msb"
+ },
+ {
+ "hash_name": "4band_44100_msb2",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/4band_44100_msb2.json",
+ "param_name": "4band_44100_msb2"
+ },
+ {
+ "hash_name": "4band_44100_reverse",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/4band_44100_reverse.json",
+ "param_name": "4band_44100_reverse"
+ },
+ {
+ "hash_name": "4band_44100_sw",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/4band_44100_sw.json",
+ "param_name": "4band_44100_sw"
+ },
+ {
+ "hash_name": "4band_v2",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/4band_v2.json",
+ "param_name": "4band_v2"
+ },
+ {
+ "hash_name": "4band_v2_sn",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/4band_v2_sn.json",
+ "param_name": "4band_v2_sn"
+ },
+ {
+ "hash_name": "tmodelparam",
+ "model_params": "lib/uvr5_pack/lib_v5/modelparams/tmodelparam.json",
+ "param_name": "User Model Param Set"
+ }
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/lib/uvr5_pack/utils.py b/lib/uvr5_pack/utils.py
new file mode 100644
index 000000000..0fafe8793
--- /dev/null
+++ b/lib/uvr5_pack/utils.py
@@ -0,0 +1,120 @@
+import torch
+import numpy as np
+from tqdm import tqdm
+import json
+
+
+def load_data(file_name: str = "./lib/uvr5_pack/name_params.json") -> dict:
+ with open(file_name, "r") as f:
+ data = json.load(f)
+
+ return data
+
+
+def make_padding(width, cropsize, offset):
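+ # Pad the time axis so it divides evenly into hops of roi_size frames;
+ # each window is cropsize frames wide, leaving `offset` context frames
+ # on both sides of every prediction.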
+ left = offset
+ roi_size = cropsize - left * 2
+ if roi_size == 0:
+ roi_size = cropsize
+ right = roi_size - (width % roi_size) + left
+
+ return left, right, roi_size
+
+
+def inference(X_spec, device, model, aggressiveness, data):
+ """
+ data : dict of config values (uses "window_size" and "tta")
+ """
+
+ def _execute(
+ X_mag_pad, roi_size, n_window, device, model, aggressiveness, is_half=True
+ ):
+ model.eval()
+ with torch.no_grad():
+ preds = []
+
+ for i in tqdm(range(n_window)):
+ start = i * roi_size
+ X_mag_window = X_mag_pad[
+ None, :, :, start : start + data["window_size"]
+ ]
+ X_mag_window = torch.from_numpy(X_mag_window)
+ if is_half:
+ X_mag_window = X_mag_window.half()
+ X_mag_window = X_mag_window.to(device)
+
+ pred = model.predict(X_mag_window, aggressiveness)
+
+ pred = pred.detach().cpu().numpy()
+ preds.append(pred[0])
+
+ pred = np.concatenate(preds, axis=2)
+ return pred
+
+ def preprocess(X_spec):
+ X_mag = np.abs(X_spec)
+ X_phase = np.angle(X_spec)
+
+ return X_mag, X_phase
+
+ X_mag, X_phase = preprocess(X_spec)
+
+ coef = X_mag.max()
+ X_mag_pre = X_mag / coef
+
+ n_frame = X_mag_pre.shape[2]
+ pad_l, pad_r, roi_size = make_padding(n_frame, data["window_size"], model.offset)
+ n_window = int(np.ceil(n_frame / roi_size))
+
+ X_mag_pad = np.pad(X_mag_pre, ((0, 0), (0, 0), (pad_l, pad_r)), mode="constant")
+
+ if list(model.state_dict().values())[0].dtype == torch.float16:
+ is_half = True
+ else:
+ is_half = False
+ pred = _execute(
+ X_mag_pad, roi_size, n_window, device, model, aggressiveness, is_half
+ )
+ pred = pred[:, :, :n_frame]
+
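+ # Test-time augmentation: run a second pass shifted by half a window and
+ # average the two, smoothing seams at window boundaries.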
+ if data["tta"]:
+ pad_l += roi_size // 2
+ pad_r += roi_size // 2
+ n_window += 1
+
+ X_mag_pad = np.pad(X_mag_pre, ((0, 0), (0, 0), (pad_l, pad_r)), mode="constant")
+
+ pred_tta = _execute(
+ X_mag_pad, roi_size, n_window, device, model, aggressiveness, is_half
+ )
+ pred_tta = pred_tta[:, :, roi_size // 2 :]
+ pred_tta = pred_tta[:, :, :n_frame]
+
+ return (pred + pred_tta) * 0.5 * coef, X_mag, np.exp(1.0j * X_phase)
+ else:
+ return pred * coef, X_mag, np.exp(1.0j * X_phase)
+
+
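+# Resolve which model-parameter JSON to use by matching either the
+# checkpoint's hash or a known name embedded in its file path against
+# name_params.json.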
+def _get_name_params(model_path, model_hash):
+ data = load_data()
+ flag = False
+ ModelName = model_path
+ param_name_auto = model_params_auto = None  # avoid UnboundLocalError when nothing matches
+ for type in list(data):
+ for model in list(data[type][0]):
+ for i in range(len(data[type][0][model])):
+ if str(data[type][0][model][i]["hash_name"]) == model_hash:
+ flag = True
+ elif str(data[type][0][model][i]["hash_name"]) in ModelName:
+ flag = True
+
+ if flag:
+ model_params_auto = data[type][0][model][i]["model_params"]
+ param_name_auto = data[type][0][model][i]["param_name"]
+ if type == "equivalent":
+ return param_name_auto, model_params_auto
+ else:
+ flag = False
+ return param_name_auto, model_params_auto
diff --git a/logs/mute/0_gt_wavs/mute32k.wav b/logs/mute/0_gt_wavs/mute32k.wav
new file mode 100644
index 000000000..b4b502920
Binary files /dev/null and b/logs/mute/0_gt_wavs/mute32k.wav differ
diff --git a/logs/mute/0_gt_wavs/mute40k.wav b/logs/mute/0_gt_wavs/mute40k.wav
new file mode 100644
index 000000000..fcf1281d4
Binary files /dev/null and b/logs/mute/0_gt_wavs/mute40k.wav differ
diff --git a/logs/mute/0_gt_wavs/mute48k.wav b/logs/mute/0_gt_wavs/mute48k.wav
new file mode 100644
index 000000000..72822a012
Binary files /dev/null and b/logs/mute/0_gt_wavs/mute48k.wav differ
diff --git a/logs/mute/1_16k_wavs/mute.wav b/logs/mute/1_16k_wavs/mute.wav
new file mode 100644
index 000000000..27a7d6385
Binary files /dev/null and b/logs/mute/1_16k_wavs/mute.wav differ
diff --git a/logs/mute/2a_f0/mute.wav.npy b/logs/mute/2a_f0/mute.wav.npy
new file mode 100644
index 000000000..a7ecfbf92
Binary files /dev/null and b/logs/mute/2a_f0/mute.wav.npy differ
diff --git a/logs/mute/2b-f0nsf/mute.wav.npy b/logs/mute/2b-f0nsf/mute.wav.npy
new file mode 100644
index 000000000..cf5c21bd4
Binary files /dev/null and b/logs/mute/2b-f0nsf/mute.wav.npy differ
diff --git a/logs/mute/3_feature256/mute.npy b/logs/mute/3_feature256/mute.npy
new file mode 100644
index 000000000..ffe35e784
Binary files /dev/null and b/logs/mute/3_feature256/mute.npy differ
diff --git a/logs/mute/3_feature768/mute.npy b/logs/mute/3_feature768/mute.npy
new file mode 100644
index 000000000..b14cfb83e
Binary files /dev/null and b/logs/mute/3_feature768/mute.npy differ
diff --git a/my_utils.py b/my_utils.py
new file mode 100644
index 000000000..219aba7c6
--- /dev/null
+++ b/my_utils.py
@@ -0,0 +1,99 @@
+import ffmpeg
+import numpy as np
+
+import os
+import sys
+
+import random
+
+
+platform_stft_mapping = {
+ 'linux': 'stftpitchshift',
+ 'darwin': 'stftpitchshift',
+ 'win32': 'stftpitchshift.exe',
+}
+
+stft = platform_stft_mapping.get(sys.platform)
+
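+# load_audio decodes any ffmpeg-readable file to mono float32 at the target
+# rate; with DoFormant it first pipes the file through the external
+# stftpitchshift tool, shifting formants by Quefrency/Timbre.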
+def load_audio(file, sr, DoFormant=False, Quefrency=1.0, Timbre=1.0):
+ converted = False
+ try:
+ file = (
+ file.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
+ )
+ file_formanted = file.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
+
+ if DoFormant:
+ numerator = round(random.uniform(1,4), 4)
+ if not file.endswith(".wav"):
+ if not os.path.isfile(f"{file_formanted}.wav"):
+ converted = True
+ converting = (
+ ffmpeg.input(file_formanted, threads = 0)
+ .output(f"{file_formanted}.wav")
+ .run(
+ cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True
+ )
+ )
+ file_formanted = f"{file_formanted}.wav" if not file_formanted.endswith(".wav") else file_formanted
+ print(f" · Formanting {file_formanted}...\n")
+
+ command = (
+ f'{stft} -i "{file_formanted}" -q "{Quefrency}" '
+ f'-t "{Timbre}" -o "{file_formanted}FORMANTED_{str(numerator)}.wav"'
+ )
+
+ os.system(command)
+
+ print(f" · Formanted {file_formanted}!\n")
+
+ out, _ = (
+ ffmpeg.input(f"{file_formanted}FORMANTED_{str(numerator)}.wav", threads=0)
+ .output("-", format="f32le", acodec="pcm_f32le", ac=1, ar=sr)
+ .run(
+ cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True
+ )
+ )
+
+ try: os.remove(f"{file_formanted}FORMANTED_{str(numerator)}.wav")
+ except Exception as e: pass; print(f"couldn't remove formanted type of file due to {e}")
+
+ else:
+ out, _ = (
+ ffmpeg.input(file, threads=0)
+ .output("-", format="f32le", acodec="pcm_f32le", ac=1, ar=sr)
+ .run(
+ cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True
+ )
+ )
+ except Exception as e:
+ raise RuntimeError(f"Failed to load audio: {e}")
+
+ if converted:
+ try:
+ os.remove(file_formanted)
+ except Exception as e:
+ print(f"Couldn't remove converted file due to {e}")
+ converted = False
+
+ return np.frombuffer(out, np.float32).flatten()
+
+
+def check_audio_duration(file):
+ try:
+ file = file.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
+
+ probe = ffmpeg.probe(file)
+
+ duration = float(probe['streams'][0]['duration'])
+
+ if duration < 0.76:
+ print(
+ f"\n------------\n"
+ f"Audio file, {file.split('/')[-1]}, under ~0.76s detected - file is too short. Target at least 1-2s for best results."
+ f"\n------------\n\n"
+ )
+ return False
+
+ return True
+ except Exception as e:
+ raise RuntimeError(f"Failed to check audio duration: {e}")
\ No newline at end of file
diff --git a/poetry.lock b/poetry.lock
new file mode 100644
index 000000000..f7aad0a17
--- /dev/null
+++ b/poetry.lock
@@ -0,0 +1,3881 @@
+# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand.
+
+[[package]]
+name = "absl-py"
+version = "1.4.0"
+description = "Abseil Python Common Libraries, see https://github.com/abseil/abseil-py."
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "absl-py-1.4.0.tar.gz", hash = "sha256:d2c244d01048ba476e7c080bd2c6df5e141d211de80223460d5b3b8a2a58433d"},
+ {file = "absl_py-1.4.0-py3-none-any.whl", hash = "sha256:0d3fe606adfa4f7db64792dd4c7aee4ee0c38ab75dfd353b7a83ed3e957fcb47"},
+]
+
+[[package]]
+name = "aiofiles"
+version = "23.1.0"
+description = "File support for asyncio."
+optional = false
+python-versions = ">=3.7,<4.0"
+files = [
+ {file = "aiofiles-23.1.0-py3-none-any.whl", hash = "sha256:9312414ae06472eb6f1d163f555e466a23aed1c8f60c30cccf7121dba2e53eb2"},
+ {file = "aiofiles-23.1.0.tar.gz", hash = "sha256:edd247df9a19e0db16534d4baaf536d6609a43e1de5401d7a4c1c148753a1635"},
+]
+
+[[package]]
+name = "aiohttp"
+version = "3.8.4"
+description = "Async http client/server framework (asyncio)"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "aiohttp-3.8.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5ce45967538fb747370308d3145aa68a074bdecb4f3a300869590f725ced69c1"},
+ {file = "aiohttp-3.8.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b744c33b6f14ca26b7544e8d8aadff6b765a80ad6164fb1a430bbadd593dfb1a"},
+ {file = "aiohttp-3.8.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1a45865451439eb320784918617ba54b7a377e3501fb70402ab84d38c2cd891b"},
+ {file = "aiohttp-3.8.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a86d42d7cba1cec432d47ab13b6637bee393a10f664c425ea7b305d1301ca1a3"},
+ {file = "aiohttp-3.8.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee3c36df21b5714d49fc4580247947aa64bcbe2939d1b77b4c8dcb8f6c9faecc"},
+ {file = "aiohttp-3.8.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:176a64b24c0935869d5bbc4c96e82f89f643bcdf08ec947701b9dbb3c956b7dd"},
+ {file = "aiohttp-3.8.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c844fd628851c0bc309f3c801b3a3d58ce430b2ce5b359cd918a5a76d0b20cb5"},
+ {file = "aiohttp-3.8.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5393fb786a9e23e4799fec788e7e735de18052f83682ce2dfcabaf1c00c2c08e"},
+ {file = "aiohttp-3.8.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e4b09863aae0dc965c3ef36500d891a3ff495a2ea9ae9171e4519963c12ceefd"},
+ {file = "aiohttp-3.8.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:adfbc22e87365a6e564c804c58fc44ff7727deea782d175c33602737b7feadb6"},
+ {file = "aiohttp-3.8.4-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:147ae376f14b55f4f3c2b118b95be50a369b89b38a971e80a17c3fd623f280c9"},
+ {file = "aiohttp-3.8.4-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:eafb3e874816ebe2a92f5e155f17260034c8c341dad1df25672fb710627c6949"},
+ {file = "aiohttp-3.8.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c6cc15d58053c76eacac5fa9152d7d84b8d67b3fde92709195cb984cfb3475ea"},
+ {file = "aiohttp-3.8.4-cp310-cp310-win32.whl", hash = "sha256:59f029a5f6e2d679296db7bee982bb3d20c088e52a2977e3175faf31d6fb75d1"},
+ {file = "aiohttp-3.8.4-cp310-cp310-win_amd64.whl", hash = "sha256:fe7ba4a51f33ab275515f66b0a236bcde4fb5561498fe8f898d4e549b2e4509f"},
+ {file = "aiohttp-3.8.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3d8ef1a630519a26d6760bc695842579cb09e373c5f227a21b67dc3eb16cfea4"},
+ {file = "aiohttp-3.8.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5b3f2e06a512e94722886c0827bee9807c86a9f698fac6b3aee841fab49bbfb4"},
+ {file = "aiohttp-3.8.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3a80464982d41b1fbfe3154e440ba4904b71c1a53e9cd584098cd41efdb188ef"},
+ {file = "aiohttp-3.8.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b631e26df63e52f7cce0cce6507b7a7f1bc9b0c501fcde69742130b32e8782f"},
+ {file = "aiohttp-3.8.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f43255086fe25e36fd5ed8f2ee47477408a73ef00e804cb2b5cba4bf2ac7f5e"},
+ {file = "aiohttp-3.8.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4d347a172f866cd1d93126d9b239fcbe682acb39b48ee0873c73c933dd23bd0f"},
+ {file = "aiohttp-3.8.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a3fec6a4cb5551721cdd70473eb009d90935b4063acc5f40905d40ecfea23e05"},
+ {file = "aiohttp-3.8.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:80a37fe8f7c1e6ce8f2d9c411676e4bc633a8462844e38f46156d07a7d401654"},
+ {file = "aiohttp-3.8.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d1e6a862b76f34395a985b3cd39a0d949ca80a70b6ebdea37d3ab39ceea6698a"},
+ {file = "aiohttp-3.8.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:cd468460eefef601ece4428d3cf4562459157c0f6523db89365202c31b6daebb"},
+ {file = "aiohttp-3.8.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:618c901dd3aad4ace71dfa0f5e82e88b46ef57e3239fc7027773cb6d4ed53531"},
+ {file = "aiohttp-3.8.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:652b1bff4f15f6287550b4670546a2947f2a4575b6c6dff7760eafb22eacbf0b"},
+ {file = "aiohttp-3.8.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80575ba9377c5171407a06d0196b2310b679dc752d02a1fcaa2bc20b235dbf24"},
+ {file = "aiohttp-3.8.4-cp311-cp311-win32.whl", hash = "sha256:bbcf1a76cf6f6dacf2c7f4d2ebd411438c275faa1dc0c68e46eb84eebd05dd7d"},
+ {file = "aiohttp-3.8.4-cp311-cp311-win_amd64.whl", hash = "sha256:6e74dd54f7239fcffe07913ff8b964e28b712f09846e20de78676ce2a3dc0bfc"},
+ {file = "aiohttp-3.8.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:880e15bb6dad90549b43f796b391cfffd7af373f4646784795e20d92606b7a51"},
+ {file = "aiohttp-3.8.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb96fa6b56bb536c42d6a4a87dfca570ff8e52de2d63cabebfd6fb67049c34b6"},
+ {file = "aiohttp-3.8.4-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4a6cadebe132e90cefa77e45f2d2f1a4b2ce5c6b1bfc1656c1ddafcfe4ba8131"},
+ {file = "aiohttp-3.8.4-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f352b62b45dff37b55ddd7b9c0c8672c4dd2eb9c0f9c11d395075a84e2c40f75"},
+ {file = "aiohttp-3.8.4-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ab43061a0c81198d88f39aaf90dae9a7744620978f7ef3e3708339b8ed2ef01"},
+ {file = "aiohttp-3.8.4-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c9cb1565a7ad52e096a6988e2ee0397f72fe056dadf75d17fa6b5aebaea05622"},
+ {file = "aiohttp-3.8.4-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:1b3ea7edd2d24538959c1c1abf97c744d879d4e541d38305f9bd7d9b10c9ec41"},
+ {file = "aiohttp-3.8.4-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:7c7837fe8037e96b6dd5cfcf47263c1620a9d332a87ec06a6ca4564e56bd0f36"},
+ {file = "aiohttp-3.8.4-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:3b90467ebc3d9fa5b0f9b6489dfb2c304a1db7b9946fa92aa76a831b9d587e99"},
+ {file = "aiohttp-3.8.4-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:cab9401de3ea52b4b4c6971db5fb5c999bd4260898af972bf23de1c6b5dd9d71"},
+ {file = "aiohttp-3.8.4-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:d1f9282c5f2b5e241034a009779e7b2a1aa045f667ff521e7948ea9b56e0c5ff"},
+ {file = "aiohttp-3.8.4-cp36-cp36m-win32.whl", hash = "sha256:5e14f25765a578a0a634d5f0cd1e2c3f53964553a00347998dfdf96b8137f777"},
+ {file = "aiohttp-3.8.4-cp36-cp36m-win_amd64.whl", hash = "sha256:4c745b109057e7e5f1848c689ee4fb3a016c8d4d92da52b312f8a509f83aa05e"},
+ {file = "aiohttp-3.8.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:aede4df4eeb926c8fa70de46c340a1bc2c6079e1c40ccf7b0eae1313ffd33519"},
+ {file = "aiohttp-3.8.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ddaae3f3d32fc2cb4c53fab020b69a05c8ab1f02e0e59665c6f7a0d3a5be54f"},
+ {file = "aiohttp-3.8.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c4eb3b82ca349cf6fadcdc7abcc8b3a50ab74a62e9113ab7a8ebc268aad35bb9"},
+ {file = "aiohttp-3.8.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9bcb89336efa095ea21b30f9e686763f2be4478f1b0a616969551982c4ee4c3b"},
+ {file = "aiohttp-3.8.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c08e8ed6fa3d477e501ec9db169bfac8140e830aa372d77e4a43084d8dd91ab"},
+ {file = "aiohttp-3.8.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c6cd05ea06daca6ad6a4ca3ba7fe7dc5b5de063ff4daec6170ec0f9979f6c332"},
+ {file = "aiohttp-3.8.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b7a00a9ed8d6e725b55ef98b1b35c88013245f35f68b1b12c5cd4100dddac333"},
+ {file = "aiohttp-3.8.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:de04b491d0e5007ee1b63a309956eaed959a49f5bb4e84b26c8f5d49de140fa9"},
+ {file = "aiohttp-3.8.4-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:40653609b3bf50611356e6b6554e3a331f6879fa7116f3959b20e3528783e699"},
+ {file = "aiohttp-3.8.4-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:dbf3a08a06b3f433013c143ebd72c15cac33d2914b8ea4bea7ac2c23578815d6"},
+ {file = "aiohttp-3.8.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:854f422ac44af92bfe172d8e73229c270dc09b96535e8a548f99c84f82dde241"},
+ {file = "aiohttp-3.8.4-cp37-cp37m-win32.whl", hash = "sha256:aeb29c84bb53a84b1a81c6c09d24cf33bb8432cc5c39979021cc0f98c1292a1a"},
+ {file = "aiohttp-3.8.4-cp37-cp37m-win_amd64.whl", hash = "sha256:db3fc6120bce9f446d13b1b834ea5b15341ca9ff3f335e4a951a6ead31105480"},
+ {file = "aiohttp-3.8.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:fabb87dd8850ef0f7fe2b366d44b77d7e6fa2ea87861ab3844da99291e81e60f"},
+ {file = "aiohttp-3.8.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:91f6d540163f90bbaef9387e65f18f73ffd7c79f5225ac3d3f61df7b0d01ad15"},
+ {file = "aiohttp-3.8.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d265f09a75a79a788237d7f9054f929ced2e69eb0bb79de3798c468d8a90f945"},
+ {file = "aiohttp-3.8.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3d89efa095ca7d442a6d0cbc755f9e08190ba40069b235c9886a8763b03785da"},
+ {file = "aiohttp-3.8.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4dac314662f4e2aa5009977b652d9b8db7121b46c38f2073bfeed9f4049732cd"},
+ {file = "aiohttp-3.8.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fe11310ae1e4cd560035598c3f29d86cef39a83d244c7466f95c27ae04850f10"},
+ {file = "aiohttp-3.8.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ddb2a2026c3f6a68c3998a6c47ab6795e4127315d2e35a09997da21865757f8"},
+ {file = "aiohttp-3.8.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e75b89ac3bd27d2d043b234aa7b734c38ba1b0e43f07787130a0ecac1e12228a"},
+ {file = "aiohttp-3.8.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6e601588f2b502c93c30cd5a45bfc665faaf37bbe835b7cfd461753068232074"},
+ {file = "aiohttp-3.8.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a5d794d1ae64e7753e405ba58e08fcfa73e3fad93ef9b7e31112ef3c9a0efb52"},
+ {file = "aiohttp-3.8.4-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:a1f4689c9a1462f3df0a1f7e797791cd6b124ddbee2b570d34e7f38ade0e2c71"},
+ {file = "aiohttp-3.8.4-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:3032dcb1c35bc330134a5b8a5d4f68c1a87252dfc6e1262c65a7e30e62298275"},
+ {file = "aiohttp-3.8.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8189c56eb0ddbb95bfadb8f60ea1b22fcfa659396ea36f6adcc521213cd7b44d"},
+ {file = "aiohttp-3.8.4-cp38-cp38-win32.whl", hash = "sha256:33587f26dcee66efb2fff3c177547bd0449ab7edf1b73a7f5dea1e38609a0c54"},
+ {file = "aiohttp-3.8.4-cp38-cp38-win_amd64.whl", hash = "sha256:e595432ac259af2d4630008bf638873d69346372d38255774c0e286951e8b79f"},
+ {file = "aiohttp-3.8.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5a7bdf9e57126dc345b683c3632e8ba317c31d2a41acd5800c10640387d193ed"},
+ {file = "aiohttp-3.8.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:22f6eab15b6db242499a16de87939a342f5a950ad0abaf1532038e2ce7d31567"},
+ {file = "aiohttp-3.8.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7235604476a76ef249bd64cb8274ed24ccf6995c4a8b51a237005ee7a57e8643"},
+ {file = "aiohttp-3.8.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea9eb976ffdd79d0e893869cfe179a8f60f152d42cb64622fca418cd9b18dc2a"},
+ {file = "aiohttp-3.8.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:92c0cea74a2a81c4c76b62ea1cac163ecb20fb3ba3a75c909b9fa71b4ad493cf"},
+ {file = "aiohttp-3.8.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:493f5bc2f8307286b7799c6d899d388bbaa7dfa6c4caf4f97ef7521b9cb13719"},
+ {file = "aiohttp-3.8.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a63f03189a6fa7c900226e3ef5ba4d3bd047e18f445e69adbd65af433add5a2"},
+ {file = "aiohttp-3.8.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10c8cefcff98fd9168cdd86c4da8b84baaa90bf2da2269c6161984e6737bf23e"},
+ {file = "aiohttp-3.8.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bca5f24726e2919de94f047739d0a4fc01372801a3672708260546aa2601bf57"},
+ {file = "aiohttp-3.8.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:03baa76b730e4e15a45f81dfe29a8d910314143414e528737f8589ec60cf7391"},
+ {file = "aiohttp-3.8.4-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:8c29c77cc57e40f84acef9bfb904373a4e89a4e8b74e71aa8075c021ec9078c2"},
+ {file = "aiohttp-3.8.4-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:03543dcf98a6619254b409be2d22b51f21ec66272be4ebda7b04e6412e4b2e14"},
+ {file = "aiohttp-3.8.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:17b79c2963db82086229012cff93ea55196ed31f6493bb1ccd2c62f1724324e4"},
+ {file = "aiohttp-3.8.4-cp39-cp39-win32.whl", hash = "sha256:34ce9f93a4a68d1272d26030655dd1b58ff727b3ed2a33d80ec433561b03d67a"},
+ {file = "aiohttp-3.8.4-cp39-cp39-win_amd64.whl", hash = "sha256:41a86a69bb63bb2fc3dc9ad5ea9f10f1c9c8e282b471931be0268ddd09430b04"},
+ {file = "aiohttp-3.8.4.tar.gz", hash = "sha256:bf2e1a9162c1e441bf805a1fd166e249d574ca04e03b34f97e2928769e91ab5c"},
+]
+
+[package.dependencies]
+aiosignal = ">=1.1.2"
+async-timeout = ">=4.0.0a3,<5.0"
+attrs = ">=17.3.0"
+charset-normalizer = ">=2.0,<4.0"
+frozenlist = ">=1.1.1"
+multidict = ">=4.5,<7.0"
+yarl = ">=1.0,<2.0"
+
+[package.extras]
+speedups = ["Brotli", "aiodns", "cchardet"]
+
+[[package]]
+name = "aiosignal"
+version = "1.3.1"
+description = "aiosignal: a list of registered asynchronous callbacks"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"},
+ {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"},
+]
+
+[package.dependencies]
+frozenlist = ">=1.1.0"
+
+[[package]]
+name = "altair"
+version = "4.2.2"
+description = "Altair: A declarative statistical visualization library for Python."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "altair-4.2.2-py3-none-any.whl", hash = "sha256:8b45ebeaf8557f2d760c5c77b79f02ae12aee7c46c27c06014febab6f849bc87"},
+ {file = "altair-4.2.2.tar.gz", hash = "sha256:39399a267c49b30d102c10411e67ab26374156a84b1aeb9fcd15140429ba49c5"},
+]
+
+[package.dependencies]
+entrypoints = "*"
+jinja2 = "*"
+jsonschema = ">=3.0"
+numpy = "*"
+pandas = ">=0.18"
+toolz = "*"
+
+[package.extras]
+dev = ["black", "docutils", "flake8", "ipython", "m2r", "mistune (<2.0.0)", "pytest", "recommonmark", "sphinx", "vega-datasets"]
+
+[[package]]
+name = "antlr4-python3-runtime"
+version = "4.8"
+description = "ANTLR 4.8 runtime for Python 3.7"
+optional = false
+python-versions = "*"
+files = [
+ {file = "antlr4-python3-runtime-4.8.tar.gz", hash = "sha256:15793f5d0512a372b4e7d2284058ad32ce7dd27126b105fb0b2245130445db33"},
+]
+
+[[package]]
+name = "anyio"
+version = "3.6.2"
+description = "High level compatibility layer for multiple asynchronous event loop implementations"
+optional = false
+python-versions = ">=3.6.2"
+files = [
+ {file = "anyio-3.6.2-py3-none-any.whl", hash = "sha256:fbbe32bd270d2a2ef3ed1c5d45041250284e31fc0a4df4a5a6071842051a51e3"},
+ {file = "anyio-3.6.2.tar.gz", hash = "sha256:25ea0d673ae30af41a0c442f81cf3b38c7e79fdc7b60335a4c14e05eb0947421"},
+]
+
+[package.dependencies]
+idna = ">=2.8"
+sniffio = ">=1.1"
+
+[package.extras]
+doc = ["packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"]
+test = ["contextlib2", "coverage[toml] (>=4.5)", "hypothesis (>=4.0)", "mock (>=4)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (<0.15)", "uvloop (>=0.15)"]
+trio = ["trio (>=0.16,<0.22)"]
+
+[[package]]
+name = "async-timeout"
+version = "4.0.2"
+description = "Timeout context manager for asyncio programs"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "async-timeout-4.0.2.tar.gz", hash = "sha256:2163e1640ddb52b7a8c80d0a67a08587e5d245cc9c553a74a847056bc2976b15"},
+ {file = "async_timeout-4.0.2-py3-none-any.whl", hash = "sha256:8ca1e4fcf50d07413d66d1a5e416e42cfdf5851c981d679a09851a6853383b3c"},
+]
+
+[[package]]
+name = "attrs"
+version = "22.2.0"
+description = "Classes Without Boilerplate"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "attrs-22.2.0-py3-none-any.whl", hash = "sha256:29e95c7f6778868dbd49170f98f8818f78f3dc5e0e37c0b1f474e3561b240836"},
+ {file = "attrs-22.2.0.tar.gz", hash = "sha256:c9227bfc2f01993c03f68db37d1d15c9690188323c067c641f1a35ca58185f99"},
+]
+
+[package.extras]
+cov = ["attrs[tests]", "coverage-enable-subprocess", "coverage[toml] (>=5.3)"]
+dev = ["attrs[docs,tests]"]
+docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope.interface"]
+tests = ["attrs[tests-no-zope]", "zope.interface"]
+tests-no-zope = ["cloudpickle", "cloudpickle", "hypothesis", "hypothesis", "mypy (>=0.971,<0.990)", "mypy (>=0.971,<0.990)", "pympler", "pympler", "pytest (>=4.3.0)", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-mypy-plugins", "pytest-xdist[psutil]", "pytest-xdist[psutil]"]
+
+[[package]]
+name = "audioread"
+version = "3.0.0"
+description = "multi-library, cross-platform audio decoding"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "audioread-3.0.0.tar.gz", hash = "sha256:121995bd207eb1fda3d566beb851d3534275925bc35a4fb6da0cb11de0f7251a"},
+]
+
+[[package]]
+name = "bitarray"
+version = "2.7.3"
+description = "efficient arrays of booleans -- C extension"
+optional = false
+python-versions = "*"
+files = [
+ {file = "bitarray-2.7.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:979d42e0b2c3113526f9716a461e08671788a23ce7e3b5cd090ce3e6a6762641"},
+ {file = "bitarray-2.7.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:860edf8533223d82bd6201894bcaf540f828f49075f363390eecf04b12fb94cb"},
+ {file = "bitarray-2.7.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:78378d8dacbe1f4f263347f42ec0a41cc2097cd671c6ac30a65a838284a5e141"},
+ {file = "bitarray-2.7.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:888df211aafe5fad41c0792a686d95c8ba37345d5037f437aa3c09608f9c3b56"},
+ {file = "bitarray-2.7.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fb3f003dee96dbf24a6df71443557f249b17b20083c189995302b14eb01530bf"},
+ {file = "bitarray-2.7.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c531532c21bc1063e65957a1a85a2d13601ec21801f70821c89d9339b16ebc78"},
+ {file = "bitarray-2.7.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b8fd92c8026e4ba6874e94f538890e35bef2a3a18ea54e3663c578b7916ade1"},
+ {file = "bitarray-2.7.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6d19c34a2121eccfeb642d4ad71163bd3342a8f3a99e6724fe824bdfbc0a5b65"},
+ {file = "bitarray-2.7.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:102db74ee82ec5774aba01481e73eedaebd27ba167344a81d3b42e6fbf9ffb77"},
+ {file = "bitarray-2.7.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7f6540b45b2230442f7a0614745131e0a6f28251f5d33ac19d0ed61d80db7153"},
+ {file = "bitarray-2.7.3-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:99c9345c417a9cff98f9f6e59b0350dcc10c2e0e1ea66acf7946de1cd60541fa"},
+ {file = "bitarray-2.7.3-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:a1d439c98e65ab8e5fbcc2b242a16e7a3f076974bff78185ff42ba2d4c220032"},
+ {file = "bitarray-2.7.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:87897ec0e4876c9f2c1ae313519de0ed2ad8041a4d2210a083f9b4a239add2e3"},
+ {file = "bitarray-2.7.3-cp310-cp310-win32.whl", hash = "sha256:cb46c3a4002c8322dd0e1b4b53f8a647dcb0f199f5c7a1fc03d3880c3eabbd2c"},
+ {file = "bitarray-2.7.3-cp310-cp310-win_amd64.whl", hash = "sha256:5df10eb9b794932b0cf806f412d1c6d04fb7655ca7ae5caf6354b9edc380a5f7"},
+ {file = "bitarray-2.7.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:27524bc92fdeb464a5057a4677a35f482cf30be2e920bd1d11c46de533cafda6"},
+ {file = "bitarray-2.7.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3cf37431de779b29e5c0d8e36868f77f6df53c3c19c20e8404137e257dc80040"},
+ {file = "bitarray-2.7.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8abd23f94cdcce971d932a5f0a066d40fbc61901fd087aa70d32cccd1793bd20"},
+ {file = "bitarray-2.7.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7659bdfe7716b14a39007e31e957fa64d7f0d9e40a1dbd024bd81b972d76bffb"},
+ {file = "bitarray-2.7.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:da1570f301abdfda68f4fdb40c4d3f09af4bb6e4550b4fa5395db0d142b680bc"},
+ {file = "bitarray-2.7.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8becbb9649fd29ee577f9f0405ce2fba5cf9fa2c290c9b044bc235c04473f213"},
+ {file = "bitarray-2.7.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72fd7f6f940bc42914c86700591ccfd1daeff0e414cefcbd7843117df2fac4e9"},
+ {file = "bitarray-2.7.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:23b7bada6d6b62cba08f4a1b8a95da2d8592aae1db3c167dcb52abcba0a7bef5"},
+ {file = "bitarray-2.7.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4b2d150a81a981537801ac7d4f4f5d082c48343612a21f4e2c4cd2e887973bd5"},
+ {file = "bitarray-2.7.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:1502660ab489b1f18c3493c766252cd5d24bc1cbf4bdf3594e0a30de142ed453"},
+ {file = "bitarray-2.7.3-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:91f43f6b6c9129a56d3e2dccb8b88ffce0e4f4893dd9d69d285676bdf5b9ca14"},
+ {file = "bitarray-2.7.3-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:a69c99274aee2ffdc7f1cfd34044ccb7155790d6f5217d677ea46a6ddead6dd2"},
+ {file = "bitarray-2.7.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d63f20299441e32171f08fc62f7ea7e401cc12a96f67a36ab2d76439ecfcb118"},
+ {file = "bitarray-2.7.3-cp311-cp311-win32.whl", hash = "sha256:0b84fd9dbf999cbca1090a7703aa1404cd01af4035c6ba3adf69d41280611fb6"},
+ {file = "bitarray-2.7.3-cp311-cp311-win_amd64.whl", hash = "sha256:76bbbb9ceebb9cbb2b14369b3681fecab226792b339f612e79f6575ca31fed45"},
+ {file = "bitarray-2.7.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50d5e2c026b3e3d145f64c457338ea99edcbdd302fdcbd96418251ac51a98a59"},
+ {file = "bitarray-2.7.3-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7d571056115bbdc18f199a9ee4c2a1b5884f5e63a3c05fe43d2fc7fc67320515"},
+ {file = "bitarray-2.7.3-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e2a0313657e6656efca2148cfc91c50fdafca6f811b6c7d0906e6ba57134e560"},
+ {file = "bitarray-2.7.3-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d3b5abb73c45d40d27f9795dac9d6eb1515729c13f93dd67df2be07be6549990"},
+ {file = "bitarray-2.7.3-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7776c070943f45cd8303543a6625cf82f2e000ef9c885d52d7828be099e52f42"},
+ {file = "bitarray-2.7.3-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:057f9c53a34e42deed6e8813a82b9c85924f4728be28e3b9b65144569ac5a387"},
+ {file = "bitarray-2.7.3-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:8591ad5768860ad186dc94fd58b2932604a7639b57eefbbff2b4865af3407691"},
+ {file = "bitarray-2.7.3-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:bd7f4b2df89bf4e298756c0be0be67fb84d6aa49bda60d46805d43f0e643abd5"},
+ {file = "bitarray-2.7.3-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:433f91c8ab8338662aaa86b0677e6c15c35f8f7b65d4c43d7d1647a8198bc0b0"},
+ {file = "bitarray-2.7.3-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:31e60d8341c3189aa156ca8cb2f6370b29d79cf132e3d091714b0a5a9097eb69"},
+ {file = "bitarray-2.7.3-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:ea33ed09157e032f0a7a2627ef87f156e9927697f59b55961439d34bf45af23a"},
+ {file = "bitarray-2.7.3-cp36-cp36m-win32.whl", hash = "sha256:302149aaff75939beb8af7f32ac9bf922480033a24fb54f4ebc0c9dc175247c4"},
+ {file = "bitarray-2.7.3-cp36-cp36m-win_amd64.whl", hash = "sha256:7a8995737fae8de03b31ed83acf4f4326a55b217022009d18be19ff87fc9010e"},
+ {file = "bitarray-2.7.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8b2f31a4cc28aef27355ab896e4b4cc2da2204b2b7adb674d8be7fefa0c93868"},
+ {file = "bitarray-2.7.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5df624ee8a4098c3b1149f4817f2a4a0121c4920e1c114af324bc52d6659e2b"},
+ {file = "bitarray-2.7.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cb1d60ed709989e34e7158d97fdb077a2f2dfc505998a84161a70f81a6101172"},
+ {file = "bitarray-2.7.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:748847e58c45a37f23db1f53a6dc16ae32aa80ee504653d79336830de1a79ed7"},
+ {file = "bitarray-2.7.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4b7fdb9772e087174f446655bbc497a1600b5758f279c6d44fcf344c13d5c8a"},
+ {file = "bitarray-2.7.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:86e9c48ffeddb0f943e87ab65e1e95dccc9b44ef3761af3bf9642973ab7646d2"},
+ {file = "bitarray-2.7.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:0d1f49cc51919d6fa0f7eebd073d2c620b80079aa537d084a7fafb46a35c7a4d"},
+ {file = "bitarray-2.7.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:b43d56c7c96f5a055f4051be426496db2a616840645d0ab3733d5ceacb2f701b"},
+ {file = "bitarray-2.7.3-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:01f8d02c3eae82c98d4259777cb2f042a0b3989d7dceeb37c643cb94b91d5a42"},
+ {file = "bitarray-2.7.3-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:d089b1d0b157c9a484f8f7475eecea813d0dc3818adc5bf352903da14fe88fc3"},
+ {file = "bitarray-2.7.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:1362e9fb78ca72aa52ec1f1fbd62872801302001b0156ed2a1e707850cd30ffd"},
+ {file = "bitarray-2.7.3-cp37-cp37m-win32.whl", hash = "sha256:2cdf5700537e5aa4ec9f4a0b498b8d5b03b9859d503e01ea17a6a134a838aa30"},
+ {file = "bitarray-2.7.3-cp37-cp37m-win_amd64.whl", hash = "sha256:1e1553933f4533040491f4e4499bcbbfcee42c4056f56d7e18010e779daab33d"},
+ {file = "bitarray-2.7.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:1048a29b3d72b1821a3ae9e8d64e71ed96c53a1a36b1da6db02091a424a8f795"},
+ {file = "bitarray-2.7.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:10dc358fe29d7a4c5be78ab2fb5aa50cb8066babd23e0b5589eb68e26afe58d8"},
+ {file = "bitarray-2.7.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8ab6770833976448a9a973bc0df63adedc4c30de4774cec5a9928fc496423ebb"},
+ {file = "bitarray-2.7.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4abe2f829f6f2d330bccf1bcde2192264ab9a15d6d00e507265f46dc66557014"},
+ {file = "bitarray-2.7.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:87851a82bdf849e3c40ff6d8af5f734634e17f52a8f7f7e74486c2f8ce717578"},
+ {file = "bitarray-2.7.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a5fc2512bdf5289a1412c936c65d17881d2b46edb0036c63a8d5605dc8d398a3"},
+ {file = "bitarray-2.7.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1289f408a8b5c87cdb4fd7975d4021c6e61209ccb956d0411e72bf43c7f78463"},
+ {file = "bitarray-2.7.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9ee181cc00aaba38d9812f4df4e7d828105b6dde3b068cd2c43f1d8f395e0046"},
+ {file = "bitarray-2.7.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:00e93f70cbcbeabd1e79accf1b6f5b2424cd40556e7877f618549523d0031c98"},
+ {file = "bitarray-2.7.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:3fb6a952796d16c3a309d866eef56a8f4e5591d112c22446e67d33ecb096b44b"},
+ {file = "bitarray-2.7.3-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:0fe747a134f7f5bc0877eee58090ae7e7f23628eeb459f681ade65719c3f246a"},
+ {file = "bitarray-2.7.3-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:2c1b2c91bf991b5c641faee78dd5a751dff6155ec51c7a6c7f922dc85431898e"},
+ {file = "bitarray-2.7.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:c3956ae54285ab30d802756144887e30e013f81c9f03e5ffff9daa46d8ca0154"},
+ {file = "bitarray-2.7.3-cp38-cp38-win32.whl", hash = "sha256:00a6fc4355bd4e6ead54d05187dc4ea39f0af439b336ae113f0194673ed730ae"},
+ {file = "bitarray-2.7.3-cp38-cp38-win_amd64.whl", hash = "sha256:305e6f7441c007f296644ba3899c0306ce9fd7a482dbbc06b6e7b7bd6e0ddabc"},
+ {file = "bitarray-2.7.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:fe80c23409efb41b86efb5e45f334420a9b5b7828f5b3d08b5ff28f03a024d9e"},
+ {file = "bitarray-2.7.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:16345146b61e93ca20679c83537ccf7245f78b17035f5b1a436fd2b75da04c5e"},
+ {file = "bitarray-2.7.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1af9b720a048c69e999094e2310138b7cfca5471a9d2c1dbe4b53dd10e516720"},
+ {file = "bitarray-2.7.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:088e6e9ea7f0eaf8b672679a68096dbc0a7a7b7a4ed567860f7362e1588370a6"},
+ {file = "bitarray-2.7.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:122cd70ee0de2cc9d94da8b8ebcb7dca12b9f4d3beefb94c11e110e1d87503bb"},
+ {file = "bitarray-2.7.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cb9a8ee23416bd0cfd457118978bc2f6f02c20b95336db486887f670bf92c2b7"},
+ {file = "bitarray-2.7.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a544f99c24b6f658907eb9edf290a9c54f4106738b2ab84cd19dc6013cc3abf"},
+ {file = "bitarray-2.7.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:980f6564218f853a9341fb045446539d4153338926ed2fb222e86dc9b2ae9b8f"},
+ {file = "bitarray-2.7.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f64abe9301b918d2c352e42198cea0196f3639bc1ad23a4a9d8ae97f66068901"},
+ {file = "bitarray-2.7.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:029c724bf38c6616b90b1c423b846b63f8d607ed5a23d270e3862696d88a5392"},
+ {file = "bitarray-2.7.3-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:16cb00911584a6e9ca0f42c305714898120dc6bfbbec90dacedeed4690331a47"},
+ {file = "bitarray-2.7.3-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:699b0134e87c0c4e3b224d879d218c4385a06e6b72df73b4c9c9d549155fb837"},
+ {file = "bitarray-2.7.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b508e1bba4ec68fd0ef28505e2dad2f56de7df710c8334c97036705a562cb908"},
+ {file = "bitarray-2.7.3-cp39-cp39-win32.whl", hash = "sha256:4b84230624d15868e407ba8b66df54fc69ee6a9e9cb6d51eb264b8f2614596f1"},
+ {file = "bitarray-2.7.3-cp39-cp39-win_amd64.whl", hash = "sha256:757a08bf0aed5a650a399f8c66bcba00c210bce34408b6d7b09b4837bee8f4da"},
+ {file = "bitarray-2.7.3.tar.gz", hash = "sha256:f71256a32609b036adad932e1228b66a6b4e2cae6be397e588ddc0babd9a78b9"},
+]
+
+[[package]]
+name = "cachetools"
+version = "5.3.0"
+description = "Extensible memoizing collections and decorators"
+optional = false
+python-versions = "~=3.7"
+files = [
+ {file = "cachetools-5.3.0-py3-none-any.whl", hash = "sha256:429e1a1e845c008ea6c85aa35d4b98b65d6a9763eeef3e37e92728a12d1de9d4"},
+ {file = "cachetools-5.3.0.tar.gz", hash = "sha256:13dfddc7b8df938c21a940dfa6557ce6e94a2f1cdfa58eb90c805721d58f2c14"},
+]
+
+[[package]]
+name = "certifi"
+version = "2022.12.7"
+description = "Python package for providing Mozilla's CA Bundle."
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "certifi-2022.12.7-py3-none-any.whl", hash = "sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18"},
+ {file = "certifi-2022.12.7.tar.gz", hash = "sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3"},
+]
+
+[[package]]
+name = "cffi"
+version = "1.15.1"
+description = "Foreign Function Interface for Python calling C code."
+optional = false
+python-versions = "*"
+files = [
+ {file = "cffi-1.15.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2"},
+ {file = "cffi-1.15.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2"},
+ {file = "cffi-1.15.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914"},
+ {file = "cffi-1.15.1-cp27-cp27m-win32.whl", hash = "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3"},
+ {file = "cffi-1.15.1-cp27-cp27m-win_amd64.whl", hash = "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e"},
+ {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162"},
+ {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b"},
+ {file = "cffi-1.15.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21"},
+ {file = "cffi-1.15.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185"},
+ {file = "cffi-1.15.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd"},
+ {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc"},
+ {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f"},
+ {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e"},
+ {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4"},
+ {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01"},
+ {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e"},
+ {file = "cffi-1.15.1-cp310-cp310-win32.whl", hash = "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2"},
+ {file = "cffi-1.15.1-cp310-cp310-win_amd64.whl", hash = "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d"},
+ {file = "cffi-1.15.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac"},
+ {file = "cffi-1.15.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83"},
+ {file = "cffi-1.15.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9"},
+ {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c"},
+ {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325"},
+ {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c"},
+ {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef"},
+ {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8"},
+ {file = "cffi-1.15.1-cp311-cp311-win32.whl", hash = "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d"},
+ {file = "cffi-1.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104"},
+ {file = "cffi-1.15.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7"},
+ {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6"},
+ {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d"},
+ {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a"},
+ {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405"},
+ {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e"},
+ {file = "cffi-1.15.1-cp36-cp36m-win32.whl", hash = "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf"},
+ {file = "cffi-1.15.1-cp36-cp36m-win_amd64.whl", hash = "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497"},
+ {file = "cffi-1.15.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375"},
+ {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e"},
+ {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82"},
+ {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b"},
+ {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c"},
+ {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426"},
+ {file = "cffi-1.15.1-cp37-cp37m-win32.whl", hash = "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9"},
+ {file = "cffi-1.15.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045"},
+ {file = "cffi-1.15.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3"},
+ {file = "cffi-1.15.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a"},
+ {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5"},
+ {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca"},
+ {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02"},
+ {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192"},
+ {file = "cffi-1.15.1-cp38-cp38-win32.whl", hash = "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314"},
+ {file = "cffi-1.15.1-cp38-cp38-win_amd64.whl", hash = "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5"},
+ {file = "cffi-1.15.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585"},
+ {file = "cffi-1.15.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0"},
+ {file = "cffi-1.15.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415"},
+ {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d"},
+ {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984"},
+ {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35"},
+ {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27"},
+ {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76"},
+ {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3"},
+ {file = "cffi-1.15.1-cp39-cp39-win32.whl", hash = "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee"},
+ {file = "cffi-1.15.1-cp39-cp39-win_amd64.whl", hash = "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c"},
+ {file = "cffi-1.15.1.tar.gz", hash = "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9"},
+]
+
+[package.dependencies]
+pycparser = "*"
+
+[[package]]
+name = "charset-normalizer"
+version = "3.1.0"
+description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
+optional = false
+python-versions = ">=3.7.0"
+files = [
+ {file = "charset-normalizer-3.1.0.tar.gz", hash = "sha256:34e0a2f9c370eb95597aae63bf85eb5e96826d81e3dcf88b8886012906f509b5"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e0ac8959c929593fee38da1c2b64ee9778733cdf03c482c9ff1d508b6b593b2b"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d7fc3fca01da18fbabe4625d64bb612b533533ed10045a2ac3dd194bfa656b60"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:04eefcee095f58eaabe6dc3cc2262f3bcd776d2c67005880894f447b3f2cb9c1"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20064ead0717cf9a73a6d1e779b23d149b53daf971169289ed2ed43a71e8d3b0"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1435ae15108b1cb6fffbcea2af3d468683b7afed0169ad718451f8db5d1aff6f"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c84132a54c750fda57729d1e2599bb598f5fa0344085dbde5003ba429a4798c0"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75f2568b4189dda1c567339b48cba4ac7384accb9c2a7ed655cd86b04055c795"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11d3bcb7be35e7b1bba2c23beedac81ee893ac9871d0ba79effc7fc01167db6c"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:891cf9b48776b5c61c700b55a598621fdb7b1e301a550365571e9624f270c203"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:5f008525e02908b20e04707a4f704cd286d94718f48bb33edddc7d7b584dddc1"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:b06f0d3bf045158d2fb8837c5785fe9ff9b8c93358be64461a1089f5da983137"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:49919f8400b5e49e961f320c735388ee686a62327e773fa5b3ce6721f7e785ce"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:22908891a380d50738e1f978667536f6c6b526a2064156203d418f4856d6e86a"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-win32.whl", hash = "sha256:12d1a39aa6b8c6f6248bb54550efcc1c38ce0d8096a146638fd4738e42284448"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:65ed923f84a6844de5fd29726b888e58c62820e0769b76565480e1fdc3d062f8"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9a3267620866c9d17b959a84dd0bd2d45719b817245e49371ead79ed4f710d19"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6734e606355834f13445b6adc38b53c0fd45f1a56a9ba06c2058f86893ae8017"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f8303414c7b03f794347ad062c0516cee0e15f7a612abd0ce1e25caf6ceb47df"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaf53a6cebad0eae578f062c7d462155eada9c172bd8c4d250b8c1d8eb7f916a"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3dc5b6a8ecfdc5748a7e429782598e4f17ef378e3e272eeb1340ea57c9109f41"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e1b25e3ad6c909f398df8921780d6a3d120d8c09466720226fc621605b6f92b1"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ca564606d2caafb0abe6d1b5311c2649e8071eb241b2d64e75a0d0065107e62"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b82fab78e0b1329e183a65260581de4375f619167478dddab510c6c6fb04d9b6"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:bd7163182133c0c7701b25e604cf1611c0d87712e56e88e7ee5d72deab3e76b5"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:11d117e6c63e8f495412d37e7dc2e2fff09c34b2d09dbe2bee3c6229577818be"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:cf6511efa4801b9b38dc5546d7547d5b5c6ef4b081c60b23e4d941d0eba9cbeb"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:abc1185d79f47c0a7aaf7e2412a0eb2c03b724581139193d2d82b3ad8cbb00ac"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cb7b2ab0188829593b9de646545175547a70d9a6e2b63bf2cd87a0a391599324"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-win32.whl", hash = "sha256:c36bcbc0d5174a80d6cccf43a0ecaca44e81d25be4b7f90f0ed7bcfbb5a00909"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:cca4def576f47a09a943666b8f829606bcb17e2bc2d5911a46c8f8da45f56755"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0c95f12b74681e9ae127728f7e5409cbbef9cd914d5896ef238cc779b8152373"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fca62a8301b605b954ad2e9c3666f9d97f63872aa4efcae5492baca2056b74ab"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac0aa6cd53ab9a31d397f8303f92c42f534693528fafbdb997c82bae6e477ad9"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c3af8e0f07399d3176b179f2e2634c3ce9c1301379a6b8c9c9aeecd481da494f"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a5fc78f9e3f501a1614a98f7c54d3969f3ad9bba8ba3d9b438c3bc5d047dd28"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:628c985afb2c7d27a4800bfb609e03985aaecb42f955049957814e0491d4006d"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:74db0052d985cf37fa111828d0dd230776ac99c740e1a758ad99094be4f1803d"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:1e8fcdd8f672a1c4fc8d0bd3a2b576b152d2a349782d1eb0f6b8e52e9954731d"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:04afa6387e2b282cf78ff3dbce20f0cc071c12dc8f685bd40960cc68644cfea6"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:dd5653e67b149503c68c4018bf07e42eeed6b4e956b24c00ccdf93ac79cdff84"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d2686f91611f9e17f4548dbf050e75b079bbc2a82be565832bc8ea9047b61c8c"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-win32.whl", hash = "sha256:4155b51ae05ed47199dc5b2a4e62abccb274cee6b01da5b895099b61b1982974"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:322102cdf1ab682ecc7d9b1c5eed4ec59657a65e1c146a0da342b78f4112db23"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e633940f28c1e913615fd624fcdd72fdba807bf53ea6925d6a588e84e1151531"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3a06f32c9634a8705f4ca9946d667609f52cf130d5548881401f1eb2c39b1e2c"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7381c66e0561c5757ffe616af869b916c8b4e42b367ab29fedc98481d1e74e14"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3573d376454d956553c356df45bb824262c397c6e26ce43e8203c4c540ee0acb"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e89df2958e5159b811af9ff0f92614dabf4ff617c03a4c1c6ff53bf1c399e0e1"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:78cacd03e79d009d95635e7d6ff12c21eb89b894c354bd2b2ed0b4763373693b"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de5695a6f1d8340b12a5d6d4484290ee74d61e467c39ff03b39e30df62cf83a0"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c60b9c202d00052183c9be85e5eaf18a4ada0a47d188a83c8f5c5b23252f649"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f645caaf0008bacf349875a974220f1f1da349c5dbe7c4ec93048cdc785a3326"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ea9f9c6034ea2d93d9147818f17c2a0860d41b71c38b9ce4d55f21b6f9165a11"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:80d1543d58bd3d6c271b66abf454d437a438dff01c3e62fdbcd68f2a11310d4b"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:73dc03a6a7e30b7edc5b01b601e53e7fc924b04e1835e8e407c12c037e81adbd"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6f5c2e7bc8a4bf7c426599765b1bd33217ec84023033672c1e9a8b35eaeaaaf8"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-win32.whl", hash = "sha256:12a2b561af122e3d94cdb97fe6fb2bb2b82cef0cdca131646fdb940a1eda04f0"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:3160a0fd9754aab7d47f95a6b63ab355388d890163eb03b2d2b87ab0a30cfa59"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:38e812a197bf8e71a59fe55b757a84c1f946d0ac114acafaafaf21667a7e169e"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6baf0baf0d5d265fa7944feb9f7451cc316bfe30e8df1a61b1bb08577c554f31"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8f25e17ab3039b05f762b0a55ae0b3632b2e073d9c8fc88e89aca31a6198e88f"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3747443b6a904001473370d7810aa19c3a180ccd52a7157aacc264a5ac79265e"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b116502087ce8a6b7a5f1814568ccbd0e9f6cfd99948aa59b0e241dc57cf739f"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d16fd5252f883eb074ca55cb622bc0bee49b979ae4e8639fff6ca3ff44f9f854"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21fa558996782fc226b529fdd2ed7866c2c6ec91cee82735c98a197fae39f706"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f6c7a8a57e9405cad7485f4c9d3172ae486cfef1344b5ddd8e5239582d7355e"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ac3775e3311661d4adace3697a52ac0bab17edd166087d493b52d4f4f553f9f0"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:10c93628d7497c81686e8e5e557aafa78f230cd9e77dd0c40032ef90c18f2230"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:6f4f4668e1831850ebcc2fd0b1cd11721947b6dc7c00bf1c6bd3c929ae14f2c7"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:0be65ccf618c1e7ac9b849c315cc2e8a8751d9cfdaa43027d4f6624bd587ab7e"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:53d0a3fa5f8af98a1e261de6a3943ca631c526635eb5817a87a59d9a57ebf48f"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-win32.whl", hash = "sha256:a04f86f41a8916fe45ac5024ec477f41f886b3c435da2d4e3d2709b22ab02af1"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:830d2948a5ec37c386d3170c483063798d7879037492540f10a475e3fd6f244b"},
+ {file = "charset_normalizer-3.1.0-py3-none-any.whl", hash = "sha256:3d9098b479e78c85080c98e1e35ff40b4a31d8953102bb0fd7d1b6f8a2111a3d"},
+]
+
+[[package]]
+name = "click"
+version = "8.1.3"
+description = "Composable command line interface toolkit"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "click-8.1.3-py3-none-any.whl", hash = "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"},
+ {file = "click-8.1.3.tar.gz", hash = "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e"},
+]
+
+[package.dependencies]
+colorama = {version = "*", markers = "platform_system == \"Windows\""}
+
+[[package]]
+name = "cmake"
+version = "3.26.1"
+description = "CMake is an open-source, cross-platform family of tools designed to build, test and package software"
+optional = false
+python-versions = "*"
+files = [
+ {file = "cmake-3.26.1-py2.py3-none-macosx_10_10_universal2.macosx_10_10_x86_64.macosx_11_0_arm64.macosx_11_0_universal2.whl", hash = "sha256:d8a7e0cc8677677a732aff3e3fd0ad64eeff43cac772614b03c436912247d0d8"},
+ {file = "cmake-3.26.1-py2.py3-none-manylinux2010_i686.manylinux_2_12_i686.whl", hash = "sha256:f2f721f5aebe304c281ee4b1d2dfbf7f4a52fca003834b2b4a3ba838aeded63c"},
+ {file = "cmake-3.26.1-py2.py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl", hash = "sha256:63a012b72836702eadfe4fba9642aeb17337f26861f4768e837053f40e98cb46"},
+ {file = "cmake-3.26.1-py2.py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2b72be88b7bfaa6ae59566cbb9d6a5553f19b2a8d14efa6ac0cf019a29860a1b"},
+ {file = "cmake-3.26.1-py2.py3-none-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:1278354f7210e22458aa9137d46a56da1f115a7b76ad2733f0bf6041fb40f1dc"},
+ {file = "cmake-3.26.1-py2.py3-none-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:de96a5522917fba0ab0da2d01d9dd9462fa80f365218bf27162d539c2335758f"},
+ {file = "cmake-3.26.1-py2.py3-none-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:449928ad7dfcd41e4dcff64c7d44f86557883c70577666a19e79e22d783bbbd0"},
+ {file = "cmake-3.26.1-py2.py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:19fa3e457afecf2803265f71652ef17c3f1d317173c330ba46767a0853d38fa0"},
+ {file = "cmake-3.26.1-py2.py3-none-musllinux_1_1_aarch64.whl", hash = "sha256:43360650d60d177d979e4ad0a5f31afa286e6d88f5350f7a38c29d94514900eb"},
+ {file = "cmake-3.26.1-py2.py3-none-musllinux_1_1_i686.whl", hash = "sha256:16aac10363bc926da5109a59ef8fe46ddcd7e3d421de61f871b35524eef2f1ae"},
+ {file = "cmake-3.26.1-py2.py3-none-musllinux_1_1_ppc64le.whl", hash = "sha256:e460ba5070be4dcac9613cb526a46db4e5fa19d8b909a8d8d5244c6cc3c777e1"},
+ {file = "cmake-3.26.1-py2.py3-none-musllinux_1_1_s390x.whl", hash = "sha256:fd2ecc0899f7939a014bd906df85e8681bd63ce457de3ab0b5d9e369fa3bdf79"},
+ {file = "cmake-3.26.1-py2.py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:22781a23e274ba9bf380b970649654851c1b4b9d83b65fec12ee2e2e03b6ffc4"},
+ {file = "cmake-3.26.1-py2.py3-none-win32.whl", hash = "sha256:7b4e81de30ac1fb2f1eb5287063e140b53f376fd9ed7e2060c1c7b5917bd5f83"},
+ {file = "cmake-3.26.1-py2.py3-none-win_amd64.whl", hash = "sha256:90845b6c87a25be07e9220f67dd7f6c891c6ec14d764d37335218d97f9ea4520"},
+ {file = "cmake-3.26.1-py2.py3-none-win_arm64.whl", hash = "sha256:43bd96327e2631183bb4829ba20cb810e20b4b0c68f852fcd7082fbb5359d57c"},
+ {file = "cmake-3.26.1.tar.gz", hash = "sha256:4e0eb3c03dcf2d459f78d96cc85f7482476aeb1ae5ada65150b1db35c0f70cc7"},
+]
+
+[package.extras]
+test = ["codecov (>=2.0.5)", "coverage (>=4.2)", "flake8 (>=3.0.4)", "path.py (>=11.5.0)", "pytest (>=3.0.3)", "pytest-cov (>=2.4.0)", "pytest-runner (>=2.9)", "pytest-virtualenv (>=1.7.0)", "scikit-build (>=0.10.0)", "setuptools (>=28.0.0)", "virtualenv (>=15.0.3)", "wheel"]
+
+[[package]]
+name = "colorama"
+version = "0.4.6"
+description = "Cross-platform colored terminal text."
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
+files = [
+ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
+ {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
+]
+
+[[package]]
+name = "contourpy"
+version = "1.0.7"
+description = "Python library for calculating contours of 2D quadrilateral grids"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "contourpy-1.0.7-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:95c3acddf921944f241b6773b767f1cbce71d03307270e2d769fd584d5d1092d"},
+ {file = "contourpy-1.0.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:fc1464c97579da9f3ab16763c32e5c5d5bb5fa1ec7ce509a4ca6108b61b84fab"},
+ {file = "contourpy-1.0.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8acf74b5d383414401926c1598ed77825cd530ac7b463ebc2e4f46638f56cce6"},
+ {file = "contourpy-1.0.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c71fdd8f1c0f84ffd58fca37d00ca4ebaa9e502fb49825484da075ac0b0b803"},
+ {file = "contourpy-1.0.7-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f99e9486bf1bb979d95d5cffed40689cb595abb2b841f2991fc894b3452290e8"},
+ {file = "contourpy-1.0.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87f4d8941a9564cda3f7fa6a6cd9b32ec575830780677932abdec7bcb61717b0"},
+ {file = "contourpy-1.0.7-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9e20e5a1908e18aaa60d9077a6d8753090e3f85ca25da6e25d30dc0a9e84c2c6"},
+ {file = "contourpy-1.0.7-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:a877ada905f7d69b2a31796c4b66e31a8068b37aa9b78832d41c82fc3e056ddd"},
+ {file = "contourpy-1.0.7-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6381fa66866b0ea35e15d197fc06ac3840a9b2643a6475c8fff267db8b9f1e69"},
+ {file = "contourpy-1.0.7-cp310-cp310-win32.whl", hash = "sha256:3c184ad2433635f216645fdf0493011a4667e8d46b34082f5a3de702b6ec42e3"},
+ {file = "contourpy-1.0.7-cp310-cp310-win_amd64.whl", hash = "sha256:3caea6365b13119626ee996711ab63e0c9d7496f65641f4459c60a009a1f3e80"},
+ {file = "contourpy-1.0.7-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ed33433fc3820263a6368e532f19ddb4c5990855e4886088ad84fd7c4e561c71"},
+ {file = "contourpy-1.0.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:38e2e577f0f092b8e6774459317c05a69935a1755ecfb621c0a98f0e3c09c9a5"},
+ {file = "contourpy-1.0.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ae90d5a8590e5310c32a7630b4b8618cef7563cebf649011da80874d0aa8f414"},
+ {file = "contourpy-1.0.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:130230b7e49825c98edf0b428b7aa1125503d91732735ef897786fe5452b1ec2"},
+ {file = "contourpy-1.0.7-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:58569c491e7f7e874f11519ef46737cea1d6eda1b514e4eb5ac7dab6aa864d02"},
+ {file = "contourpy-1.0.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:54d43960d809c4c12508a60b66cb936e7ed57d51fb5e30b513934a4a23874fae"},
+ {file = "contourpy-1.0.7-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:152fd8f730c31fd67fe0ffebe1df38ab6a669403da93df218801a893645c6ccc"},
+ {file = "contourpy-1.0.7-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:9056c5310eb1daa33fc234ef39ebfb8c8e2533f088bbf0bc7350f70a29bde1ac"},
+ {file = "contourpy-1.0.7-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a9d7587d2fdc820cc9177139b56795c39fb8560f540bba9ceea215f1f66e1566"},
+ {file = "contourpy-1.0.7-cp311-cp311-win32.whl", hash = "sha256:4ee3ee247f795a69e53cd91d927146fb16c4e803c7ac86c84104940c7d2cabf0"},
+ {file = "contourpy-1.0.7-cp311-cp311-win_amd64.whl", hash = "sha256:5caeacc68642e5f19d707471890f037a13007feba8427eb7f2a60811a1fc1350"},
+ {file = "contourpy-1.0.7-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:fd7dc0e6812b799a34f6d12fcb1000539098c249c8da54f3566c6a6461d0dbad"},
+ {file = "contourpy-1.0.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0f9d350b639db6c2c233d92c7f213d94d2e444d8e8fc5ca44c9706cf72193772"},
+ {file = "contourpy-1.0.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e96a08b62bb8de960d3a6afbc5ed8421bf1a2d9c85cc4ea73f4bc81b4910500f"},
+ {file = "contourpy-1.0.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:031154ed61f7328ad7f97662e48660a150ef84ee1bc8876b6472af88bf5a9b98"},
+ {file = "contourpy-1.0.7-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e9ebb4425fc1b658e13bace354c48a933b842d53c458f02c86f371cecbedecc"},
+ {file = "contourpy-1.0.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efb8f6d08ca7998cf59eaf50c9d60717f29a1a0a09caa46460d33b2924839dbd"},
+ {file = "contourpy-1.0.7-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6c180d89a28787e4b73b07e9b0e2dac7741261dbdca95f2b489c4f8f887dd810"},
+ {file = "contourpy-1.0.7-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:b8d587cc39057d0afd4166083d289bdeff221ac6d3ee5046aef2d480dc4b503c"},
+ {file = "contourpy-1.0.7-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:769eef00437edf115e24d87f8926955f00f7704bede656ce605097584f9966dc"},
+ {file = "contourpy-1.0.7-cp38-cp38-win32.whl", hash = "sha256:62398c80ef57589bdbe1eb8537127321c1abcfdf8c5f14f479dbbe27d0322e66"},
+ {file = "contourpy-1.0.7-cp38-cp38-win_amd64.whl", hash = "sha256:57119b0116e3f408acbdccf9eb6ef19d7fe7baf0d1e9aaa5381489bc1aa56556"},
+ {file = "contourpy-1.0.7-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:30676ca45084ee61e9c3da589042c24a57592e375d4b138bd84d8709893a1ba4"},
+ {file = "contourpy-1.0.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3e927b3868bd1e12acee7cc8f3747d815b4ab3e445a28d2e5373a7f4a6e76ba1"},
+ {file = "contourpy-1.0.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:366a0cf0fc079af5204801786ad7a1c007714ee3909e364dbac1729f5b0849e5"},
+ {file = "contourpy-1.0.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89ba9bb365446a22411f0673abf6ee1fea3b2cf47b37533b970904880ceb72f3"},
+ {file = "contourpy-1.0.7-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:71b0bf0c30d432278793d2141362ac853859e87de0a7dee24a1cea35231f0d50"},
+ {file = "contourpy-1.0.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e7281244c99fd7c6f27c1c6bfafba878517b0b62925a09b586d88ce750a016d2"},
+ {file = "contourpy-1.0.7-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b6d0f9e1d39dbfb3977f9dd79f156c86eb03e57a7face96f199e02b18e58d32a"},
+ {file = "contourpy-1.0.7-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7f6979d20ee5693a1057ab53e043adffa1e7418d734c1532e2d9e915b08d8ec2"},
+ {file = "contourpy-1.0.7-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5dd34c1ae752515318224cba7fc62b53130c45ac6a1040c8b7c1a223c46e8967"},
+ {file = "contourpy-1.0.7-cp39-cp39-win32.whl", hash = "sha256:c5210e5d5117e9aec8c47d9156d1d3835570dd909a899171b9535cb4a3f32693"},
+ {file = "contourpy-1.0.7-cp39-cp39-win_amd64.whl", hash = "sha256:60835badb5ed5f4e194a6f21c09283dd6e007664a86101431bf870d9e86266c4"},
+ {file = "contourpy-1.0.7-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ce41676b3d0dd16dbcfabcc1dc46090aaf4688fd6e819ef343dbda5a57ef0161"},
+ {file = "contourpy-1.0.7-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5a011cf354107b47c58ea932d13b04d93c6d1d69b8b6dce885e642531f847566"},
+ {file = "contourpy-1.0.7-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:31a55dccc8426e71817e3fe09b37d6d48ae40aae4ecbc8c7ad59d6893569c436"},
+ {file = "contourpy-1.0.7-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:69f8ff4db108815addd900a74df665e135dbbd6547a8a69333a68e1f6e368ac2"},
+ {file = "contourpy-1.0.7-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:efe99298ba37e37787f6a2ea868265465410822f7bea163edcc1bd3903354ea9"},
+ {file = "contourpy-1.0.7-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a1e97b86f73715e8670ef45292d7cc033548266f07d54e2183ecb3c87598888f"},
+ {file = "contourpy-1.0.7-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc331c13902d0f50845099434cd936d49d7a2ca76cb654b39691974cb1e4812d"},
+ {file = "contourpy-1.0.7-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:24847601071f740837aefb730e01bd169fbcaa610209779a78db7ebb6e6a7051"},
+ {file = "contourpy-1.0.7-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:abf298af1e7ad44eeb93501e40eb5a67abbf93b5d90e468d01fc0c4451971afa"},
+ {file = "contourpy-1.0.7-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:64757f6460fc55d7e16ed4f1de193f362104285c667c112b50a804d482777edd"},
+ {file = "contourpy-1.0.7.tar.gz", hash = "sha256:d8165a088d31798b59e91117d1f5fc3df8168d8b48c4acc10fc0df0d0bdbcc5e"},
+]
+
+[package.dependencies]
+numpy = ">=1.16"
+
+[package.extras]
+bokeh = ["bokeh", "chromedriver", "selenium"]
+docs = ["furo", "sphinx-copybutton"]
+mypy = ["contourpy[bokeh]", "docutils-stubs", "mypy (==0.991)", "types-Pillow"]
+test = ["Pillow", "matplotlib", "pytest"]
+test-no-images = ["pytest"]
+
+[[package]]
+name = "cycler"
+version = "0.11.0"
+description = "Composable style cycles"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "cycler-0.11.0-py3-none-any.whl", hash = "sha256:3a27e95f763a428a739d2add979fa7494c912a32c17c4c38c4d5f082cad165a3"},
+ {file = "cycler-0.11.0.tar.gz", hash = "sha256:9c87405839a19696e837b3b818fed3f5f69f16f1eec1a1ad77e043dcea9c772f"},
+]
+
+[[package]]
+name = "cython"
+version = "0.29.34"
+description = "The Cython compiler for writing C extensions for the Python language."
+optional = false
+python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*"
+files = [
+ {file = "Cython-0.29.34-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:742544024ddb74314e2d597accdb747ed76bd126e61fcf49940a5b5be0a8f381"},
+ {file = "Cython-0.29.34-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:03daae07f8cbf797506446adae512c3dd86e7f27a62a541fa1ee254baf43e32c"},
+ {file = "Cython-0.29.34-cp27-cp27mu-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5a8de3e793a576e40ca9b4f5518610cd416273c7dc5e254115656b6e4ec70663"},
+ {file = "Cython-0.29.34-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:60969d38e6a456a67e7ef8ae20668eff54e32ba439d4068ccf2854a44275a30f"},
+ {file = "Cython-0.29.34-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:21b88200620d80cfe193d199b259cdad2b9af56f916f0f7f474b5a3631ca0caa"},
+ {file = "Cython-0.29.34-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:308c8f1e58bf5e6e8a1c4dcf8abbd2d13d0f9b1e582f4d9ae8b89857342d8bb5"},
+ {file = "Cython-0.29.34-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_24_i686.whl", hash = "sha256:d8f822fb6ecd5d88c42136561f82960612421154fc5bf23c57103a367bb91356"},
+ {file = "Cython-0.29.34-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:56866323f1660cecb4d5ff3a1fba92a56b91b7cfae0a8253777aa4bdb3bdf9a8"},
+ {file = "Cython-0.29.34-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:e971db8aeb12e7c0697cefafe65eefcc33ff1224ae3d8c7f83346cbc42c6c270"},
+ {file = "Cython-0.29.34-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:e4401270b0dc464c23671e2e9d52a60985f988318febaf51b047190e855bbe7d"},
+ {file = "Cython-0.29.34-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_24_i686.whl", hash = "sha256:dce0a36d163c05ae8b21200059511217d79b47baf2b7b0f926e8367bd7a3cc24"},
+ {file = "Cython-0.29.34-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:dbd79221869ee9a6ccc4953b2c8838bb6ae08ab4d50ea4b60d7894f03739417b"},
+ {file = "Cython-0.29.34-cp35-cp35m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a0f4229df10bc4545ebbeaaf96ebb706011d8b333e54ed202beb03f2bee0a50e"},
+ {file = "Cython-0.29.34-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:fd1ea21f1cebf33ae288caa0f3e9b5563a709f4df8925d53bad99be693fc0d9b"},
+ {file = "Cython-0.29.34-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:d7ef5f68f4c5baa93349ea54a352f8716d18bee9a37f3e93eff38a5d4e9b7262"},
+ {file = "Cython-0.29.34-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:459994d1de0f99bb18fad9f2325f760c4b392b1324aef37bcc1cd94922dfce41"},
+ {file = "Cython-0.29.34-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_24_i686.whl", hash = "sha256:1d6c809e2f9ce5950bbc52a1d2352ef3d4fc56186b64cb0d50c8c5a3c1d17661"},
+ {file = "Cython-0.29.34-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f674ceb5f722d364395f180fbac273072fc1a266aab924acc9cfd5afc645aae1"},
+ {file = "Cython-0.29.34-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:9489de5b2044dcdfd9d6ca8242a02d560137b3c41b1f5ae1c4f6707d66d6e44d"},
+ {file = "Cython-0.29.34-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:5c121dc185040f4333bfded68963b4529698e1b6d994da56be32c97a90c896b6"},
+ {file = "Cython-0.29.34-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:b6149f7cc5b31bccb158c5b968e5a8d374fdc629792e7b928a9b66e08b03fca5"},
+ {file = "Cython-0.29.34-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:0ab3cbf3d62b0354631a45dc93cfcdf79098663b1c65a6033af4a452b52217a7"},
+ {file = "Cython-0.29.34-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_24_i686.whl", hash = "sha256:4a2723447d1334484681d5aede34184f2da66317891f94b80e693a2f96a8f1a7"},
+ {file = "Cython-0.29.34-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e40cf86aadc29ecd1cb6de67b0d9488705865deea4fc185c7ad56d7a6fc78703"},
+ {file = "Cython-0.29.34-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:8c3cd8bb8e880a3346f5685601004d96e0a2221e73edcaeea57ea848618b4ac6"},
+ {file = "Cython-0.29.34-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0e9032cd650b0cb1d2c2ef2623f5714c14d14c28d7647d589c3eeed0baf7428e"},
+ {file = "Cython-0.29.34-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:bdb3285660e3068438791ace7dd7b1efd6b442a10b5c8d7a4f0c9d184d08c8ed"},
+ {file = "Cython-0.29.34-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:a8ad755f9364e720f10a36734a1c7a5ced5c679446718b589259261438a517c9"},
+ {file = "Cython-0.29.34-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_24_i686.whl", hash = "sha256:7595d29eaee95633dd8060f50f0e54b27472d01587659557ebcfe39da3ea946b"},
+ {file = "Cython-0.29.34-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e6ef7879668214d80ea3914c17e7d4e1ebf4242e0dd4dabe95ca5ccbe75589a5"},
+ {file = "Cython-0.29.34-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:ccb223b5f0fd95d8d27561efc0c14502c0945f1a32274835831efa5d5baddfc1"},
+ {file = "Cython-0.29.34-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:11b1b278b8edef215caaa5250ad65a10023bfa0b5a93c776552248fc6f60098d"},
+ {file = "Cython-0.29.34-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:5718319a01489688fdd22ddebb8e2fcbbd60be5f30de4336ea7063c3ae29fbe5"},
+ {file = "Cython-0.29.34-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:cfb2302ef617d647ee590a4c0a00ba3c2da05f301dcefe7721125565d2e51351"},
+ {file = "Cython-0.29.34-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_24_i686.whl", hash = "sha256:67b850cf46b861bc27226d31e1d87c0e69869a02f8d3cc5d5bef549764029879"},
+ {file = "Cython-0.29.34-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0963266dad685812c1dbb758fcd4de78290e3adc7db271c8664dcde27380b13e"},
+ {file = "Cython-0.29.34-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:7879992487d9060a61393eeefe00d299210256928dce44d887b6be313d342bac"},
+ {file = "Cython-0.29.34-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:44733366f1604b0c327613b6918469284878d2f5084297d10d26072fc6948d51"},
+ {file = "Cython-0.29.34-py2.py3-none-any.whl", hash = "sha256:be4f6b7be75a201c290c8611c0978549c60353890204573078e865423dbe3c83"},
+ {file = "Cython-0.29.34.tar.gz", hash = "sha256:1909688f5d7b521a60c396d20bba9e47a1b2d2784bfb085401e1e1e7d29a29a8"},
+]
+
+[[package]]
+name = "decorator"
+version = "5.1.1"
+description = "Decorators for Humans"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186"},
+ {file = "decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330"},
+]
+
+[[package]]
+name = "entrypoints"
+version = "0.4"
+description = "Discover and load entry points from installed packages."
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "entrypoints-0.4-py3-none-any.whl", hash = "sha256:f174b5ff827504fd3cd97cc3f8649f3693f51538c7e4bdf3ef002c8429d42f9f"},
+ {file = "entrypoints-0.4.tar.gz", hash = "sha256:b706eddaa9218a19ebcd67b56818f05bb27589b1ca9e8d797b74affad4ccacd4"},
+]
+
+[[package]]
+name = "fairseq"
+version = "0.12.2"
+description = "Facebook AI Research Sequence-to-Sequence Toolkit"
+optional = false
+python-versions = "*"
+files = [
+ {file = "fairseq-0.12.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:fe65b07c5121b7cda0c7a17166994a6b0059259ce37881b6daa117b8c209b662"},
+ {file = "fairseq-0.12.2-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:0543905012e39f00bd8c3f3781d9f49e76ab309801eb2eb7de250f5984df0de3"},
+ {file = "fairseq-0.12.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c4877d65346797fc580a3a7e6e2364d2331a0026ef099c22eb8311441e49c2c6"},
+ {file = "fairseq-0.12.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:26454f334ca705c67f898846dff34e14c148fcdaf53b4f52d64209773b509347"},
+ {file = "fairseq-0.12.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3b8c8b6dc368d2fd23a06ff613a2af05959eee275fe90846d7cffef4a43c522a"},
+ {file = "fairseq-0.12.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:08fa308c760f995cdc13d9c385e2b9d923a78b48275d8b4d78f3a854c71a8f29"},
+ {file = "fairseq-0.12.2.tar.gz", hash = "sha256:34f1b18426bf3844714534162f065ab733e049597476daa35fffb4d06a92b524"},
+]
+
+[package.dependencies]
+bitarray = "*"
+cffi = "*"
+cython = "*"
+hydra-core = ">=1.0.7,<1.1"
+numpy = {version = "*", markers = "python_version >= \"3.7\""}
+omegaconf = "<2.1"
+regex = "*"
+sacrebleu = ">=1.4.12"
+torch = "*"
+torchaudio = ">=0.8.0"
+tqdm = "*"
+
+[[package]]
+name = "faiss-cpu"
+version = "1.7.3"
+description = "A library for efficient similarity search and clustering of dense vectors."
+optional = false
+python-versions = "*"
+files = [
+ {file = "faiss-cpu-1.7.3.tar.gz", hash = "sha256:cb71fe3f2934732d157d9d8cfb6ed2dd4020a0065571c84842ff6a3f0beab310"},
+ {file = "faiss_cpu-1.7.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:343f025e0846239d987d0c719772387ad685b74e5ef62b2e5616cabef9062729"},
+ {file = "faiss_cpu-1.7.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8b7b1cf693d7c24b5a633ff024717bd715fec501af4854357da0805b4899bcec"},
+ {file = "faiss_cpu-1.7.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c37e5fc0a266839844798a53dd42dd6afbee0c5905611f3f278297053fccbd7"},
+ {file = "faiss_cpu-1.7.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0628f7b0c6263ef4431995bb4f5f39833f999e96e6663935cbf0a1f2243dc4ac"},
+ {file = "faiss_cpu-1.7.3-cp310-cp310-win_amd64.whl", hash = "sha256:e22d1887c617156a673665c913ee82a30bfc1a3bc939ba8500b61328bce5a625"},
+ {file = "faiss_cpu-1.7.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6d411449a5f3c3abfcafadaac3190ab1ab206023fc9110da86649506dcbe8a27"},
+ {file = "faiss_cpu-1.7.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a10ea8622908f9f9ca4003e66da809dfad4af5c7d9fb7f582722d703bbc6c8bd"},
+ {file = "faiss_cpu-1.7.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7c5ced43ae058a62f63b12194ec9aa4c34066b0ea813ecbd936c65b7d52848c8"},
+ {file = "faiss_cpu-1.7.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3df6371012248dea8e9509949e2d2c6d73dea7c1bdaa4ba4563eb1c3cd8021a6"},
+ {file = "faiss_cpu-1.7.3-cp311-cp311-win_amd64.whl", hash = "sha256:8b6ff7854c3f46104718c6b34e81cd48c156d970dd87703c5122ca90217bb8dc"},
+ {file = "faiss_cpu-1.7.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:ab6314a8fbcce11dc3ecb6f48dda8c4ec274ed11c1f336f599f480bf0561442c"},
+ {file = "faiss_cpu-1.7.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:877c0bbf4c4a1806d88e091aba4c91ff3fa35c3ede5663b7fafc5b39247a369e"},
+ {file = "faiss_cpu-1.7.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6f199be10d30ecc6ed65350931006eca01b7bb8faa27d63069318eea0f6a0c1"},
+ {file = "faiss_cpu-1.7.3-cp37-cp37m-win_amd64.whl", hash = "sha256:1ca2b7cdbfdcc6a2e8fa75a09594916b50ec8260913ca48334dc3ce797179b5f"},
+ {file = "faiss_cpu-1.7.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7b3f91856c19cfb8464178bab7e8ea94a391f6947b556be6754f9fc10b3c25fb"},
+ {file = "faiss_cpu-1.7.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7a238a0ef4d36c614d6f60e1ea308288b3920091638a3687f708de6071d007c1"},
+ {file = "faiss_cpu-1.7.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:af53bee502c629eaaaf8b5ec648484a726be0fd2768ad4ef2bd4b829384b2682"},
+ {file = "faiss_cpu-1.7.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:441d1c305595d925138f2cde63dabe8c10ee05fc8ad66bf750e278a7e8c409bd"},
+ {file = "faiss_cpu-1.7.3-cp38-cp38-win_amd64.whl", hash = "sha256:2766cc14b9004c1aae3b3943e693c3a9566eb1a25168b681981f9048276fe1e7"},
+ {file = "faiss_cpu-1.7.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:20ef191bb6164c8e794b11d20427568a75d15980b6d66732071e9aa57ea06e2d"},
+ {file = "faiss_cpu-1.7.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c57c293c4682066955626c2a2956be9a3b92594f69ed1a33abd72260a6911b69"},
+ {file = "faiss_cpu-1.7.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd128170446ff3c3e28d89e813d32cd04f17fa3025794778a01a0d81524275dc"},
+ {file = "faiss_cpu-1.7.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a14d832b5361ce9af21977eb1dcdebe23b9edcc12aad40316df7ca1bd86bc6b5"},
+ {file = "faiss_cpu-1.7.3-cp39-cp39-win_amd64.whl", hash = "sha256:52df8895c5e59d1c9eda368a63790381a6f7fceddb22bed08f9c90a706d8a148"},
+]
+
+[[package]]
+name = "fastapi"
+version = "0.95.2"
+description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "fastapi-0.95.2-py3-none-any.whl", hash = "sha256:d374dbc4ef2ad9b803899bd3360d34c534adc574546e25314ab72c0c4411749f"},
+ {file = "fastapi-0.95.2.tar.gz", hash = "sha256:4d9d3e8c71c73f11874bcf5e33626258d143252e329a01002f767306c64fb982"},
+]
+
+[package.dependencies]
+pydantic = ">=1.6.2,<1.7 || >1.7,<1.7.1 || >1.7.1,<1.7.2 || >1.7.2,<1.7.3 || >1.7.3,<1.8 || >1.8,<1.8.1 || >1.8.1,<2.0.0"
+starlette = ">=0.27.0,<0.28.0"
+
+[package.extras]
+all = ["email-validator (>=1.1.1)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=2.11.2)", "orjson (>=3.2.1)", "python-multipart (>=0.0.5)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"]
+dev = ["pre-commit (>=2.17.0,<3.0.0)", "ruff (==0.0.138)", "uvicorn[standard] (>=0.12.0,<0.21.0)"]
+doc = ["mdx-include (>=1.4.1,<2.0.0)", "mkdocs (>=1.1.2,<2.0.0)", "mkdocs-markdownextradata-plugin (>=0.1.7,<0.3.0)", "mkdocs-material (>=8.1.4,<9.0.0)", "pyyaml (>=5.3.1,<7.0.0)", "typer-cli (>=0.0.13,<0.0.14)", "typer[all] (>=0.6.1,<0.8.0)"]
+test = ["anyio[trio] (>=3.2.1,<4.0.0)", "black (==23.1.0)", "coverage[toml] (>=6.5.0,<8.0)", "databases[sqlite] (>=0.3.2,<0.7.0)", "email-validator (>=1.1.1,<2.0.0)", "flask (>=1.1.2,<3.0.0)", "httpx (>=0.23.0,<0.24.0)", "isort (>=5.0.6,<6.0.0)", "mypy (==0.982)", "orjson (>=3.2.1,<4.0.0)", "passlib[bcrypt] (>=1.7.2,<2.0.0)", "peewee (>=3.13.3,<4.0.0)", "pytest (>=7.1.3,<8.0.0)", "python-jose[cryptography] (>=3.3.0,<4.0.0)", "python-multipart (>=0.0.5,<0.0.7)", "pyyaml (>=5.3.1,<7.0.0)", "ruff (==0.0.138)", "sqlalchemy (>=1.3.18,<1.4.43)", "types-orjson (==3.6.2)", "types-ujson (==5.7.0.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0,<6.0.0)"]
+
+[[package]]
+name = "ffmpeg-python"
+version = "0.2.0"
+description = "Python bindings for FFmpeg - with complex filtering support"
+optional = false
+python-versions = "*"
+files = [
+ {file = "ffmpeg-python-0.2.0.tar.gz", hash = "sha256:65225db34627c578ef0e11c8b1eb528bb35e024752f6f10b78c011f6f64c4127"},
+ {file = "ffmpeg_python-0.2.0-py3-none-any.whl", hash = "sha256:ac441a0404e053f8b6a1113a77c0f452f1cfc62f6344a769475ffdc0f56c23c5"},
+]
+
+[package.dependencies]
+future = "*"
+
+[package.extras]
+dev = ["Sphinx (==2.1.0)", "future (==0.17.1)", "numpy (==1.16.4)", "pytest (==4.6.1)", "pytest-mock (==1.10.4)", "tox (==3.12.1)"]
+
+[[package]]
+name = "ffmpy"
+version = "0.3.0"
+description = "A simple Python wrapper for ffmpeg"
+optional = false
+python-versions = "*"
+files = [
+ {file = "ffmpy-0.3.0.tar.gz", hash = "sha256:757591581eee25b4a50ac9ffb9b58035a2794533db47e0512f53fb2d7b6f9adc"},
+]
+
+[[package]]
+name = "filelock"
+version = "3.10.7"
+description = "A platform independent file lock."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "filelock-3.10.7-py3-none-any.whl", hash = "sha256:bde48477b15fde2c7e5a0713cbe72721cb5a5ad32ee0b8f419907960b9d75536"},
+ {file = "filelock-3.10.7.tar.gz", hash = "sha256:892be14aa8efc01673b5ed6589dbccb95f9a8596f0507e232626155495c18105"},
+]
+
+[package.extras]
+docs = ["furo (>=2022.12.7)", "sphinx (>=6.1.3)", "sphinx-autodoc-typehints (>=1.22,!=1.23.4)"]
+testing = ["covdefaults (>=2.3)", "coverage (>=7.2.2)", "diff-cover (>=7.5)", "pytest (>=7.2.2)", "pytest-cov (>=4)", "pytest-mock (>=3.10)", "pytest-timeout (>=2.1)"]
+
+[[package]]
+name = "fonttools"
+version = "4.39.3"
+description = "Tools to manipulate font files"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "fonttools-4.39.3-py3-none-any.whl", hash = "sha256:64c0c05c337f826183637570ac5ab49ee220eec66cf50248e8df527edfa95aeb"},
+ {file = "fonttools-4.39.3.zip", hash = "sha256:9234b9f57b74e31b192c3fc32ef1a40750a8fbc1cd9837a7b7bfc4ca4a5c51d7"},
+]
+
+[package.extras]
+all = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "fs (>=2.2.0,<3)", "lxml (>=4.0,<5)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres", "scipy", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=15.0.0)", "xattr", "zopfli (>=0.1.4)"]
+graphite = ["lz4 (>=1.7.4.2)"]
+interpolatable = ["munkres", "scipy"]
+lxml = ["lxml (>=4.0,<5)"]
+pathops = ["skia-pathops (>=0.5.0)"]
+plot = ["matplotlib"]
+repacker = ["uharfbuzz (>=0.23.0)"]
+symfont = ["sympy"]
+type1 = ["xattr"]
+ufo = ["fs (>=2.2.0,<3)"]
+unicode = ["unicodedata2 (>=15.0.0)"]
+woff = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "zopfli (>=0.1.4)"]
+
+[[package]]
+name = "frozenlist"
+version = "1.3.3"
+description = "A list-like structure which implements collections.abc.MutableSequence"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "frozenlist-1.3.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ff8bf625fe85e119553b5383ba0fb6aa3d0ec2ae980295aaefa552374926b3f4"},
+ {file = "frozenlist-1.3.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:dfbac4c2dfcc082fcf8d942d1e49b6aa0766c19d3358bd86e2000bf0fa4a9cf0"},
+ {file = "frozenlist-1.3.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b1c63e8d377d039ac769cd0926558bb7068a1f7abb0f003e3717ee003ad85530"},
+ {file = "frozenlist-1.3.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7fdfc24dcfce5b48109867c13b4cb15e4660e7bd7661741a391f821f23dfdca7"},
+ {file = "frozenlist-1.3.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2c926450857408e42f0bbc295e84395722ce74bae69a3b2aa2a65fe22cb14b99"},
+ {file = "frozenlist-1.3.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1841e200fdafc3d51f974d9d377c079a0694a8f06de2e67b48150328d66d5483"},
+ {file = "frozenlist-1.3.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f470c92737afa7d4c3aacc001e335062d582053d4dbe73cda126f2d7031068dd"},
+ {file = "frozenlist-1.3.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:783263a4eaad7c49983fe4b2e7b53fa9770c136c270d2d4bbb6d2192bf4d9caf"},
+ {file = "frozenlist-1.3.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:924620eef691990dfb56dc4709f280f40baee568c794b5c1885800c3ecc69816"},
+ {file = "frozenlist-1.3.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:ae4dc05c465a08a866b7a1baf360747078b362e6a6dbeb0c57f234db0ef88ae0"},
+ {file = "frozenlist-1.3.3-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:bed331fe18f58d844d39ceb398b77d6ac0b010d571cba8267c2e7165806b00ce"},
+ {file = "frozenlist-1.3.3-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:02c9ac843e3390826a265e331105efeab489ffaf4dd86384595ee8ce6d35ae7f"},
+ {file = "frozenlist-1.3.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9545a33965d0d377b0bc823dcabf26980e77f1b6a7caa368a365a9497fb09420"},
+ {file = "frozenlist-1.3.3-cp310-cp310-win32.whl", hash = "sha256:d5cd3ab21acbdb414bb6c31958d7b06b85eeb40f66463c264a9b343a4e238642"},
+ {file = "frozenlist-1.3.3-cp310-cp310-win_amd64.whl", hash = "sha256:b756072364347cb6aa5b60f9bc18e94b2f79632de3b0190253ad770c5df17db1"},
+ {file = "frozenlist-1.3.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:b4395e2f8d83fbe0c627b2b696acce67868793d7d9750e90e39592b3626691b7"},
+ {file = "frozenlist-1.3.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:14143ae966a6229350021384870458e4777d1eae4c28d1a7aa47f24d030e6678"},
+ {file = "frozenlist-1.3.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5d8860749e813a6f65bad8285a0520607c9500caa23fea6ee407e63debcdbef6"},
+ {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23d16d9f477bb55b6154654e0e74557040575d9d19fe78a161bd33d7d76808e8"},
+ {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eb82dbba47a8318e75f679690190c10a5e1f447fbf9df41cbc4c3afd726d88cb"},
+ {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9309869032abb23d196cb4e4db574232abe8b8be1339026f489eeb34a4acfd91"},
+ {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a97b4fe50b5890d36300820abd305694cb865ddb7885049587a5678215782a6b"},
+ {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c188512b43542b1e91cadc3c6c915a82a5eb95929134faf7fd109f14f9892ce4"},
+ {file = "frozenlist-1.3.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:303e04d422e9b911a09ad499b0368dc551e8c3cd15293c99160c7f1f07b59a48"},
+ {file = "frozenlist-1.3.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:0771aed7f596c7d73444c847a1c16288937ef988dc04fb9f7be4b2aa91db609d"},
+ {file = "frozenlist-1.3.3-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:66080ec69883597e4d026f2f71a231a1ee9887835902dbe6b6467d5a89216cf6"},
+ {file = "frozenlist-1.3.3-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:41fe21dc74ad3a779c3d73a2786bdf622ea81234bdd4faf90b8b03cad0c2c0b4"},
+ {file = "frozenlist-1.3.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f20380df709d91525e4bee04746ba612a4df0972c1b8f8e1e8af997e678c7b81"},
+ {file = "frozenlist-1.3.3-cp311-cp311-win32.whl", hash = "sha256:f30f1928162e189091cf4d9da2eac617bfe78ef907a761614ff577ef4edfb3c8"},
+ {file = "frozenlist-1.3.3-cp311-cp311-win_amd64.whl", hash = "sha256:a6394d7dadd3cfe3f4b3b186e54d5d8504d44f2d58dcc89d693698e8b7132b32"},
+ {file = "frozenlist-1.3.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8df3de3a9ab8325f94f646609a66cbeeede263910c5c0de0101079ad541af332"},
+ {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0693c609e9742c66ba4870bcee1ad5ff35462d5ffec18710b4ac89337ff16e27"},
+ {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cd4210baef299717db0a600d7a3cac81d46ef0e007f88c9335db79f8979c0d3d"},
+ {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:394c9c242113bfb4b9aa36e2b80a05ffa163a30691c7b5a29eba82e937895d5e"},
+ {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6327eb8e419f7d9c38f333cde41b9ae348bec26d840927332f17e887a8dcb70d"},
+ {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e24900aa13212e75e5b366cb9065e78bbf3893d4baab6052d1aca10d46d944c"},
+ {file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:3843f84a6c465a36559161e6c59dce2f2ac10943040c2fd021cfb70d58c4ad56"},
+ {file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:84610c1502b2461255b4c9b7d5e9c48052601a8957cd0aea6ec7a7a1e1fb9420"},
+ {file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:c21b9aa40e08e4f63a2f92ff3748e6b6c84d717d033c7b3438dd3123ee18f70e"},
+ {file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:efce6ae830831ab6a22b9b4091d411698145cb9b8fc869e1397ccf4b4b6455cb"},
+ {file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:40de71985e9042ca00b7953c4f41eabc3dc514a2d1ff534027f091bc74416401"},
+ {file = "frozenlist-1.3.3-cp37-cp37m-win32.whl", hash = "sha256:180c00c66bde6146a860cbb81b54ee0df350d2daf13ca85b275123bbf85de18a"},
+ {file = "frozenlist-1.3.3-cp37-cp37m-win_amd64.whl", hash = "sha256:9bbbcedd75acdfecf2159663b87f1bb5cfc80e7cd99f7ddd9d66eb98b14a8411"},
+ {file = "frozenlist-1.3.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:034a5c08d36649591be1cbb10e09da9f531034acfe29275fc5454a3b101ce41a"},
+ {file = "frozenlist-1.3.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ba64dc2b3b7b158c6660d49cdb1d872d1d0bf4e42043ad8d5006099479a194e5"},
+ {file = "frozenlist-1.3.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:47df36a9fe24054b950bbc2db630d508cca3aa27ed0566c0baf661225e52c18e"},
+ {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:008a054b75d77c995ea26629ab3a0c0d7281341f2fa7e1e85fa6153ae29ae99c"},
+ {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:841ea19b43d438a80b4de62ac6ab21cfe6827bb8a9dc62b896acc88eaf9cecba"},
+ {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e235688f42b36be2b6b06fc37ac2126a73b75fb8d6bc66dd632aa35286238703"},
+ {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca713d4af15bae6e5d79b15c10c8522859a9a89d3b361a50b817c98c2fb402a2"},
+ {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ac5995f2b408017b0be26d4a1d7c61bce106ff3d9e3324374d66b5964325448"},
+ {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a4ae8135b11652b08a8baf07631d3ebfe65a4c87909dbef5fa0cdde440444ee4"},
+ {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:4ea42116ceb6bb16dbb7d526e242cb6747b08b7710d9782aa3d6732bd8d27649"},
+ {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:810860bb4bdce7557bc0febb84bbd88198b9dbc2022d8eebe5b3590b2ad6c842"},
+ {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:ee78feb9d293c323b59a6f2dd441b63339a30edf35abcb51187d2fc26e696d13"},
+ {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0af2e7c87d35b38732e810befb9d797a99279cbb85374d42ea61c1e9d23094b3"},
+ {file = "frozenlist-1.3.3-cp38-cp38-win32.whl", hash = "sha256:899c5e1928eec13fd6f6d8dc51be23f0d09c5281e40d9cf4273d188d9feeaf9b"},
+ {file = "frozenlist-1.3.3-cp38-cp38-win_amd64.whl", hash = "sha256:7f44e24fa70f6fbc74aeec3e971f60a14dde85da364aa87f15d1be94ae75aeef"},
+ {file = "frozenlist-1.3.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:2b07ae0c1edaa0a36339ec6cce700f51b14a3fc6545fdd32930d2c83917332cf"},
+ {file = "frozenlist-1.3.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ebb86518203e12e96af765ee89034a1dbb0c3c65052d1b0c19bbbd6af8a145e1"},
+ {file = "frozenlist-1.3.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5cf820485f1b4c91e0417ea0afd41ce5cf5965011b3c22c400f6d144296ccbc0"},
+ {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c11e43016b9024240212d2a65043b70ed8dfd3b52678a1271972702d990ac6d"},
+ {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8fa3c6e3305aa1146b59a09b32b2e04074945ffcfb2f0931836d103a2c38f936"},
+ {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:352bd4c8c72d508778cf05ab491f6ef36149f4d0cb3c56b1b4302852255d05d5"},
+ {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:65a5e4d3aa679610ac6e3569e865425b23b372277f89b5ef06cf2cdaf1ebf22b"},
+ {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1e2c1185858d7e10ff045c496bbf90ae752c28b365fef2c09cf0fa309291669"},
+ {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f163d2fd041c630fed01bc48d28c3ed4a3b003c00acd396900e11ee5316b56bb"},
+ {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:05cdb16d09a0832eedf770cb7bd1fe57d8cf4eaf5aced29c4e41e3f20b30a784"},
+ {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:8bae29d60768bfa8fb92244b74502b18fae55a80eac13c88eb0b496d4268fd2d"},
+ {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:eedab4c310c0299961ac285591acd53dc6723a1ebd90a57207c71f6e0c2153ab"},
+ {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3bbdf44855ed8f0fbcd102ef05ec3012d6a4fd7c7562403f76ce6a52aeffb2b1"},
+ {file = "frozenlist-1.3.3-cp39-cp39-win32.whl", hash = "sha256:efa568b885bca461f7c7b9e032655c0c143d305bf01c30caf6db2854a4532b38"},
+ {file = "frozenlist-1.3.3-cp39-cp39-win_amd64.whl", hash = "sha256:cfe33efc9cb900a4c46f91a5ceba26d6df370ffddd9ca386eb1d4f0ad97b9ea9"},
+ {file = "frozenlist-1.3.3.tar.gz", hash = "sha256:58bcc55721e8a90b88332d6cd441261ebb22342e238296bb330968952fbb3a6a"},
+]
+
+[[package]]
+name = "fsspec"
+version = "2023.3.0"
+description = "File-system specification"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "fsspec-2023.3.0-py3-none-any.whl", hash = "sha256:bf57215e19dbfa4fe7edae53040cc1deef825e3b1605cca9a8d2c2fadd2328a0"},
+ {file = "fsspec-2023.3.0.tar.gz", hash = "sha256:24e635549a590d74c6c18274ddd3ffab4753341753e923408b1904eaabafe04d"},
+]
+
+[package.extras]
+abfs = ["adlfs"]
+adl = ["adlfs"]
+arrow = ["pyarrow (>=1)"]
+dask = ["dask", "distributed"]
+dropbox = ["dropbox", "dropboxdrivefs", "requests"]
+fuse = ["fusepy"]
+gcs = ["gcsfs"]
+git = ["pygit2"]
+github = ["requests"]
+gs = ["gcsfs"]
+gui = ["panel"]
+hdfs = ["pyarrow (>=1)"]
+http = ["aiohttp (!=4.0.0a0,!=4.0.0a1)", "requests"]
+libarchive = ["libarchive-c"]
+oci = ["ocifs"]
+s3 = ["s3fs"]
+sftp = ["paramiko"]
+smb = ["smbprotocol"]
+ssh = ["paramiko"]
+tqdm = ["tqdm"]
+
+[[package]]
+name = "functorch"
+version = "2.0.0"
+description = "JAX-like composable function transforms for PyTorch"
+optional = false
+python-versions = "*"
+files = [
+ {file = "functorch-2.0.0-py2.py3-none-any.whl", hash = "sha256:ca21ace6b9048e2ec6d132fa0fd18c776eb165ca1c91ef7e3584fdc668eaa4ea"},
+]
+
+[package.dependencies]
+torch = ">=2.0,<2.1"
+
+[package.extras]
+aot = ["networkx"]
+
+[[package]]
+name = "future"
+version = "0.18.3"
+description = "Clean single-source support for Python 3 and 2"
+optional = false
+python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*"
+files = [
+ {file = "future-0.18.3.tar.gz", hash = "sha256:34a17436ed1e96697a86f9de3d15a3b0be01d8bc8de9c1dffd59fb8234ed5307"},
+]
+
+[[package]]
+name = "google-auth"
+version = "2.17.1"
+description = "Google Authentication Library"
+optional = false
+python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*"
+files = [
+ {file = "google-auth-2.17.1.tar.gz", hash = "sha256:8f379b46bad381ad2a0b989dfb0c13ad28d3c2a79f27348213f8946a1d15d55a"},
+ {file = "google_auth-2.17.1-py2.py3-none-any.whl", hash = "sha256:357ff22a75b4c0f6093470f21816a825d2adee398177569824e37b6c10069e19"},
+]
+
+[package.dependencies]
+cachetools = ">=2.0.0,<6.0"
+pyasn1-modules = ">=0.2.1"
+rsa = {version = ">=3.1.4,<5", markers = "python_version >= \"3.6\""}
+six = ">=1.9.0"
+
+[package.extras]
+aiohttp = ["aiohttp (>=3.6.2,<4.0.0dev)", "requests (>=2.20.0,<3.0.0dev)"]
+enterprise-cert = ["cryptography (==36.0.2)", "pyopenssl (==22.0.0)"]
+pyopenssl = ["cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"]
+reauth = ["pyu2f (>=0.1.5)"]
+requests = ["requests (>=2.20.0,<3.0.0dev)"]
+
+[[package]]
+name = "google-auth-oauthlib"
+version = "1.0.0"
+description = "Google Authentication Library"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "google-auth-oauthlib-1.0.0.tar.gz", hash = "sha256:e375064964820b47221a7e1b7ee1fd77051b6323c3f9e3e19785f78ab67ecfc5"},
+ {file = "google_auth_oauthlib-1.0.0-py2.py3-none-any.whl", hash = "sha256:95880ca704928c300f48194d1770cf5b1462835b6e49db61445a520f793fd5fb"},
+]
+
+[package.dependencies]
+google-auth = ">=2.15.0"
+requests-oauthlib = ">=0.7.0"
+
+[package.extras]
+tool = ["click (>=6.0.0)"]
+
+[[package]]
+name = "gradio"
+version = "3.34.0"
+description = "Python library for easily interacting with trained machine learning models"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "gradio-3.34.0-py3-none-any.whl", hash = "sha256:1cd8b25b598d983561d64f0a039af819382f1376c676aa9f84972c46b6875741"},
+ {file = "gradio-3.34.0.tar.gz", hash = "sha256:fd7fa7257ffc749f9dc7c297eba554eaa1e5acd1a5f9c973250b2080932d6a41"},
+]
+
+[package.dependencies]
+aiofiles = "*"
+aiohttp = "*"
+altair = ">=4.2.0"
+fastapi = "*"
+ffmpy = "*"
+gradio-client = ">=0.2.6"
+httpx = "*"
+huggingface-hub = ">=0.14.0"
+jinja2 = "*"
+markdown-it-py = {version = ">=2.0.0", extras = ["linkify"]}
+markupsafe = "*"
+matplotlib = "*"
+mdit-py-plugins = "<=0.3.3"
+numpy = "*"
+orjson = "*"
+pandas = "*"
+pillow = "*"
+pydantic = "*"
+pydub = "*"
+pygments = ">=2.12.0"
+python-multipart = "*"
+pyyaml = "*"
+requests = "*"
+semantic-version = "*"
+typing-extensions = "*"
+uvicorn = ">=0.14.0"
+websockets = ">=10.0"
+
+[[package]]
+name = "gradio-client"
+version = "0.2.7"
+description = "Python library for easily interacting with trained machine learning models"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "gradio_client-0.2.7-py3-none-any.whl", hash = "sha256:4a7ec6bb1341c626051f1ed24d50cb960ff1a4cd1a5db031dd4caaf1ee7d2d0a"},
+ {file = "gradio_client-0.2.7.tar.gz", hash = "sha256:c83008df8a1dd3f81a290c0a24c03d0ab70317741991b60f713620ed39ad8f12"},
+]
+
+[package.dependencies]
+fsspec = "*"
+httpx = "*"
+huggingface-hub = ">=0.13.0"
+packaging = "*"
+requests = "*"
+typing-extensions = "*"
+websockets = "*"
+
+[[package]]
+name = "grpcio"
+version = "1.53.0"
+description = "HTTP/2-based RPC framework"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "grpcio-1.53.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:752d2949b40e12e6ad3ed8cc552a65b54d226504f6b1fb67cab2ccee502cc06f"},
+ {file = "grpcio-1.53.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:8a48fd3a7222be226bb86b7b413ad248f17f3101a524018cdc4562eeae1eb2a3"},
+ {file = "grpcio-1.53.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:f3e837d29f0e1b9d6e7b29d569e2e9b0da61889e41879832ea15569c251c303a"},
+ {file = "grpcio-1.53.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aef7d30242409c3aa5839b501e877e453a2c8d3759ca8230dd5a21cda029f046"},
+ {file = "grpcio-1.53.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e6f90698b5d1c5dd7b3236cd1fa959d7b80e17923f918d5be020b65f1c78b173"},
+ {file = "grpcio-1.53.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:a96c3c7f564b263c5d7c0e49a337166c8611e89c4c919f66dba7b9a84abad137"},
+ {file = "grpcio-1.53.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ee81349411648d1abc94095c68cd25e3c2812e4e0367f9a9355be1e804a5135c"},
+ {file = "grpcio-1.53.0-cp310-cp310-win32.whl", hash = "sha256:fdc6191587de410a184550d4143e2b24a14df495c86ca15e59508710681690ac"},
+ {file = "grpcio-1.53.0-cp310-cp310-win_amd64.whl", hash = "sha256:658ffe1e39171be00490db5bd3b966f79634ac4215a1eb9a85c6cd6783bf7f6e"},
+ {file = "grpcio-1.53.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:1b172e6d497191940c4b8d75b53de82dc252e15b61de2951d577ec5b43316b29"},
+ {file = "grpcio-1.53.0-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:82434ba3a5935e47908bc861ce1ebc43c2edfc1001d235d6e31e5d3ed55815f7"},
+ {file = "grpcio-1.53.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:1c734a2d4843e4e14ececf5600c3c4750990ec319e1299db7e4f0d02c25c1467"},
+ {file = "grpcio-1.53.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b6a2ead3de3b2d53119d473aa2f224030257ef33af1e4ddabd4afee1dea5f04c"},
+ {file = "grpcio-1.53.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a34d6e905f071f9b945cabbcc776e2055de1fdb59cd13683d9aa0a8f265b5bf9"},
+ {file = "grpcio-1.53.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eaf8e3b97caaf9415227a3c6ca5aa8d800fecadd526538d2bf8f11af783f1550"},
+ {file = "grpcio-1.53.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:da95778d37be8e4e9afca771a83424f892296f5dfb2a100eda2571a1d8bbc0dc"},
+ {file = "grpcio-1.53.0-cp311-cp311-win32.whl", hash = "sha256:e4f513d63df6336fd84b74b701f17d1bb3b64e9d78a6ed5b5e8a198bbbe8bbfa"},
+ {file = "grpcio-1.53.0-cp311-cp311-win_amd64.whl", hash = "sha256:ddb2511fbbb440ed9e5c9a4b9b870f2ed649b7715859fd6f2ebc585ee85c0364"},
+ {file = "grpcio-1.53.0-cp37-cp37m-linux_armv7l.whl", hash = "sha256:2a912397eb8d23c177d6d64e3c8bc46b8a1c7680b090d9f13a640b104aaec77c"},
+ {file = "grpcio-1.53.0-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:55930c56b8f5b347d6c8c609cc341949a97e176c90f5cbb01d148d778f3bbd23"},
+ {file = "grpcio-1.53.0-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:6601d812105583948ab9c6e403a7e2dba6e387cc678c010e74f2d6d589d1d1b3"},
+ {file = "grpcio-1.53.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c705e0c21acb0e8478a00e7e773ad0ecdb34bd0e4adc282d3d2f51ba3961aac7"},
+ {file = "grpcio-1.53.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba074af9ca268ad7b05d3fc2b920b5fb3c083da94ab63637aaf67f4f71ecb755"},
+ {file = "grpcio-1.53.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:14817de09317dd7d3fbc8272864288320739973ef0f4b56bf2c0032349da8cdf"},
+ {file = "grpcio-1.53.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:c7ad9fbedb93f331c2e9054e202e95cf825b885811f1bcbbdfdc301e451442db"},
+ {file = "grpcio-1.53.0-cp37-cp37m-win_amd64.whl", hash = "sha256:dad5b302a4c21c604d88a5d441973f320134e6ff6a84ecef9c1139e5ffd466f6"},
+ {file = "grpcio-1.53.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:fa8eaac75d3107e3f5465f2c9e3bbd13db21790c6e45b7de1756eba16b050aca"},
+ {file = "grpcio-1.53.0-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:104a2210edd3776c38448b4f76c2f16e527adafbde171fc72a8a32976c20abc7"},
+ {file = "grpcio-1.53.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:dbc1ba968639c1d23476f75c356e549e7bbf2d8d6688717dcab5290e88e8482b"},
+ {file = "grpcio-1.53.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:95952d3fe795b06af29bb8ec7bbf3342cdd867fc17b77cc25e6733d23fa6c519"},
+ {file = "grpcio-1.53.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f144a790f14c51b8a8e591eb5af40507ffee45ea6b818c2482f0457fec2e1a2e"},
+ {file = "grpcio-1.53.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:0698c094688a2dd4c7c2f2c0e3e142cac439a64d1cef6904c97f6cde38ba422f"},
+ {file = "grpcio-1.53.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6b6d60b0958be711bab047e9f4df5dbbc40367955f8651232bfdcdd21450b9ab"},
+ {file = "grpcio-1.53.0-cp38-cp38-win32.whl", hash = "sha256:1948539ce78805d4e6256ab0e048ec793956d54787dc9d6777df71c1d19c7f81"},
+ {file = "grpcio-1.53.0-cp38-cp38-win_amd64.whl", hash = "sha256:df9ba1183b3f649210788cf80c239041dddcb375d6142d8bccafcfdf549522cd"},
+ {file = "grpcio-1.53.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:19caa5b7282a89b799e63776ff602bb39604f7ca98db6df27e2de06756ae86c3"},
+ {file = "grpcio-1.53.0-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:b5bd026ac928c96cc23149e6ef79183125542062eb6d1ccec34c0a37e02255e7"},
+ {file = "grpcio-1.53.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:7dc8584ca6c015ad82e186e82f4c0fe977394588f66b8ecfc4ec873285314619"},
+ {file = "grpcio-1.53.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2eddaae8af625e45b5c8500dcca1043264d751a6872cde2eda5022df8a336959"},
+ {file = "grpcio-1.53.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c5fb6f3d7824696c1c9f2ad36ddb080ba5a86f2d929ef712d511b4d9972d3d27"},
+ {file = "grpcio-1.53.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:8270d1dc2c98ab57e6dbf36fa187db8df4c036f04a398e5d5e25b4e01a766d70"},
+ {file = "grpcio-1.53.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:976a7f24eb213e8429cab78d5e120500dfcdeb01041f1f5a77b17b9101902615"},
+ {file = "grpcio-1.53.0-cp39-cp39-win32.whl", hash = "sha256:9c84a481451e7174f3a764a44150f93b041ab51045aa33d7b5b68b6979114e48"},
+ {file = "grpcio-1.53.0-cp39-cp39-win_amd64.whl", hash = "sha256:6beb84f83360ff29a3654f43f251ec11b809dcb5524b698d711550243debd289"},
+ {file = "grpcio-1.53.0.tar.gz", hash = "sha256:a4952899b4931a6ba12951f9a141ef3e74ff8a6ec9aa2dc602afa40f63595e33"},
+]
+
+[package.extras]
+protobuf = ["grpcio-tools (>=1.53.0)"]
+
+[[package]]
+name = "h11"
+version = "0.14.0"
+description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"},
+ {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"},
+]
+
+[[package]]
+name = "httpcore"
+version = "0.16.3"
+description = "A minimal low-level HTTP client."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "httpcore-0.16.3-py3-none-any.whl", hash = "sha256:da1fb708784a938aa084bde4feb8317056c55037247c787bd7e19eb2c2949dc0"},
+ {file = "httpcore-0.16.3.tar.gz", hash = "sha256:c5d6f04e2fc530f39e0c077e6a30caa53f1451096120f1f38b954afd0b17c0cb"},
+]
+
+[package.dependencies]
+anyio = ">=3.0,<5.0"
+certifi = "*"
+h11 = ">=0.13,<0.15"
+sniffio = "==1.*"
+
+[package.extras]
+http2 = ["h2 (>=3,<5)"]
+socks = ["socksio (==1.*)"]
+
+[[package]]
+name = "httpx"
+version = "0.23.3"
+description = "The next generation HTTP client."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "httpx-0.23.3-py3-none-any.whl", hash = "sha256:a211fcce9b1254ea24f0cd6af9869b3d29aba40154e947d2a07bb499b3e310d6"},
+ {file = "httpx-0.23.3.tar.gz", hash = "sha256:9818458eb565bb54898ccb9b8b251a28785dd4a55afbc23d0eb410754fe7d0f9"},
+]
+
+[package.dependencies]
+certifi = "*"
+httpcore = ">=0.15.0,<0.17.0"
+rfc3986 = {version = ">=1.3,<2", extras = ["idna2008"]}
+sniffio = "*"
+
+[package.extras]
+brotli = ["brotli", "brotlicffi"]
+cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<13)"]
+http2 = ["h2 (>=3,<5)"]
+socks = ["socksio (==1.*)"]
+
+[[package]]
+name = "huggingface-hub"
+version = "0.15.1"
+description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub"
+optional = false
+python-versions = ">=3.7.0"
+files = [
+ {file = "huggingface_hub-0.15.1-py3-none-any.whl", hash = "sha256:05b0fb0abbf1f625dfee864648ac3049fe225ac4371c7bafaca0c2d3a2f83445"},
+ {file = "huggingface_hub-0.15.1.tar.gz", hash = "sha256:a61b7d1a7769fe10119e730277c72ab99d95c48d86a3d6da3e9f3d0f632a4081"},
+]
+
+[package.dependencies]
+filelock = "*"
+fsspec = "*"
+packaging = ">=20.9"
+pyyaml = ">=5.1"
+requests = "*"
+tqdm = ">=4.42.1"
+typing-extensions = ">=3.7.4.3"
+
+[package.extras]
+all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "numpy", "pytest", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)"]
+cli = ["InquirerPy (==0.3.4)"]
+dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "numpy", "pytest", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)"]
+fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"]
+quality = ["black (>=23.1,<24.0)", "mypy (==0.982)", "ruff (>=0.0.241)"]
+tensorflow = ["graphviz", "pydot", "tensorflow"]
+testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "gradio", "jedi", "numpy", "pytest", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"]
+torch = ["torch"]
+typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
+
+[[package]]
+name = "hydra-core"
+version = "1.0.7"
+description = "A framework for elegantly configuring complex applications"
+optional = false
+python-versions = "*"
+files = [
+ {file = "hydra-core-1.0.7.tar.gz", hash = "sha256:58cc3f7531995b6d8de162ca21f936e17bdaebd4d1e8614d63c32e17c2e41e45"},
+ {file = "hydra_core-1.0.7-py3-none-any.whl", hash = "sha256:e800c6deb8309395508094851fa93bc13408f2285261eb97e626d37193b58a9f"},
+]
+
+[package.dependencies]
+antlr4-python3-runtime = "4.8"
+importlib-resources = {version = "*", markers = "python_version < \"3.9\""}
+omegaconf = ">=2.0.5,<2.1"
+
+[[package]]
+name = "idna"
+version = "3.4"
+description = "Internationalized Domain Names in Applications (IDNA)"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"},
+ {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"},
+]
+
+[[package]]
+name = "importlib-metadata"
+version = "6.1.0"
+description = "Read metadata from Python packages"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "importlib_metadata-6.1.0-py3-none-any.whl", hash = "sha256:ff80f3b5394912eb1b108fcfd444dc78b7f1f3e16b16188054bd01cb9cb86f09"},
+ {file = "importlib_metadata-6.1.0.tar.gz", hash = "sha256:43ce9281e097583d758c2c708c4376371261a02c34682491a8e98352365aad20"},
+]
+
+[package.dependencies]
+zipp = ">=0.5"
+
+[package.extras]
+docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
+perf = ["ipython"]
+testing = ["flake8 (<5)", "flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)"]
+
+[[package]]
+name = "importlib-resources"
+version = "5.12.0"
+description = "Read resources from Python packages"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "importlib_resources-5.12.0-py3-none-any.whl", hash = "sha256:7b1deeebbf351c7578e09bf2f63fa2ce8b5ffec296e0d349139d43cca061a81a"},
+ {file = "importlib_resources-5.12.0.tar.gz", hash = "sha256:4be82589bf5c1d7999aedf2a45159d10cb3ca4f19b2271f8792bc8e6da7b22f6"},
+]
+
+[package.dependencies]
+zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""}
+
+[package.extras]
+docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
+testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"]
+
+[[package]]
+name = "jinja2"
+version = "3.1.2"
+description = "A very fast and expressive template engine."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"},
+ {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"},
+]
+
+[package.dependencies]
+MarkupSafe = ">=2.0"
+
+[package.extras]
+i18n = ["Babel (>=2.7)"]
+
+[[package]]
+name = "joblib"
+version = "1.2.0"
+description = "Lightweight pipelining with Python functions"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "joblib-1.2.0-py3-none-any.whl", hash = "sha256:091138ed78f800342968c523bdde947e7a305b8594b910a0fea2ab83c3c6d385"},
+ {file = "joblib-1.2.0.tar.gz", hash = "sha256:e1cee4a79e4af22881164f218d4311f60074197fb707e082e803b61f6d137018"},
+]
+
+[[package]]
+name = "json5"
+version = "0.9.11"
+description = "A Python implementation of the JSON5 data format."
+optional = false
+python-versions = "*"
+files = [
+ {file = "json5-0.9.11-py2.py3-none-any.whl", hash = "sha256:1aa54b80b5e507dfe31d12b7743a642e2ffa6f70bf73b8e3d7d1d5fba83d99bd"},
+ {file = "json5-0.9.11.tar.gz", hash = "sha256:4f1e196acc55b83985a51318489f345963c7ba84aa37607e49073066c562e99b"},
+]
+
+[package.extras]
+dev = ["hypothesis"]
+
+[[package]]
+name = "jsonschema"
+version = "4.17.3"
+description = "An implementation of JSON Schema validation for Python"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "jsonschema-4.17.3-py3-none-any.whl", hash = "sha256:a870ad254da1a8ca84b6a2905cac29d265f805acc57af304784962a2aa6508f6"},
+ {file = "jsonschema-4.17.3.tar.gz", hash = "sha256:0f864437ab8b6076ba6707453ef8f98a6a0d512a80e93f8abdb676f737ecb60d"},
+]
+
+[package.dependencies]
+attrs = ">=17.4.0"
+importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""}
+pkgutil-resolve-name = {version = ">=1.3.10", markers = "python_version < \"3.9\""}
+pyrsistent = ">=0.14.0,<0.17.0 || >0.17.0,<0.17.1 || >0.17.1,<0.17.2 || >0.17.2"
+
+[package.extras]
+format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"]
+format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"]
+
+[[package]]
+name = "kiwisolver"
+version = "1.4.4"
+description = "A fast implementation of the Cassowary constraint solver"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "kiwisolver-1.4.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2f5e60fabb7343a836360c4f0919b8cd0d6dbf08ad2ca6b9cf90bf0c76a3c4f6"},
+ {file = "kiwisolver-1.4.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:10ee06759482c78bdb864f4109886dff7b8a56529bc1609d4f1112b93fe6423c"},
+ {file = "kiwisolver-1.4.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c79ebe8f3676a4c6630fd3f777f3cfecf9289666c84e775a67d1d358578dc2e3"},
+ {file = "kiwisolver-1.4.4-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:abbe9fa13da955feb8202e215c4018f4bb57469b1b78c7a4c5c7b93001699938"},
+ {file = "kiwisolver-1.4.4-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:7577c1987baa3adc4b3c62c33bd1118c3ef5c8ddef36f0f2c950ae0b199e100d"},
+ {file = "kiwisolver-1.4.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8ad8285b01b0d4695102546b342b493b3ccc6781fc28c8c6a1bb63e95d22f09"},
+ {file = "kiwisolver-1.4.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8ed58b8acf29798b036d347791141767ccf65eee7f26bde03a71c944449e53de"},
+ {file = "kiwisolver-1.4.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a68b62a02953b9841730db7797422f983935aeefceb1679f0fc85cbfbd311c32"},
+ {file = "kiwisolver-1.4.4-cp310-cp310-win32.whl", hash = "sha256:e92a513161077b53447160b9bd8f522edfbed4bd9759e4c18ab05d7ef7e49408"},
+ {file = "kiwisolver-1.4.4-cp310-cp310-win_amd64.whl", hash = "sha256:3fe20f63c9ecee44560d0e7f116b3a747a5d7203376abeea292ab3152334d004"},
+ {file = "kiwisolver-1.4.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e0ea21f66820452a3f5d1655f8704a60d66ba1191359b96541eaf457710a5fc6"},
+ {file = "kiwisolver-1.4.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bc9db8a3efb3e403e4ecc6cd9489ea2bac94244f80c78e27c31dcc00d2790ac2"},
+ {file = "kiwisolver-1.4.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d5b61785a9ce44e5a4b880272baa7cf6c8f48a5180c3e81c59553ba0cb0821ca"},
+ {file = "kiwisolver-1.4.4-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c2dbb44c3f7e6c4d3487b31037b1bdbf424d97687c1747ce4ff2895795c9bf69"},
+ {file = "kiwisolver-1.4.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6295ecd49304dcf3bfbfa45d9a081c96509e95f4b9d0eb7ee4ec0530c4a96514"},
+ {file = "kiwisolver-1.4.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4bd472dbe5e136f96a4b18f295d159d7f26fd399136f5b17b08c4e5f498cd494"},
+ {file = "kiwisolver-1.4.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bf7d9fce9bcc4752ca4a1b80aabd38f6d19009ea5cbda0e0856983cf6d0023f5"},
+ {file = "kiwisolver-1.4.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78d6601aed50c74e0ef02f4204da1816147a6d3fbdc8b3872d263338a9052c51"},
+ {file = "kiwisolver-1.4.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:877272cf6b4b7e94c9614f9b10140e198d2186363728ed0f701c6eee1baec1da"},
+ {file = "kiwisolver-1.4.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:db608a6757adabb32f1cfe6066e39b3706d8c3aa69bbc353a5b61edad36a5cb4"},
+ {file = "kiwisolver-1.4.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:5853eb494c71e267912275e5586fe281444eb5e722de4e131cddf9d442615626"},
+ {file = "kiwisolver-1.4.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:f0a1dbdb5ecbef0d34eb77e56fcb3e95bbd7e50835d9782a45df81cc46949750"},
+ {file = "kiwisolver-1.4.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:283dffbf061a4ec60391d51e6155e372a1f7a4f5b15d59c8505339454f8989e4"},
+ {file = "kiwisolver-1.4.4-cp311-cp311-win32.whl", hash = "sha256:d06adcfa62a4431d404c31216f0f8ac97397d799cd53800e9d3efc2fbb3cf14e"},
+ {file = "kiwisolver-1.4.4-cp311-cp311-win_amd64.whl", hash = "sha256:e7da3fec7408813a7cebc9e4ec55afed2d0fd65c4754bc376bf03498d4e92686"},
+ {file = "kiwisolver-1.4.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:62ac9cc684da4cf1778d07a89bf5f81b35834cb96ca523d3a7fb32509380cbf6"},
+ {file = "kiwisolver-1.4.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41dae968a94b1ef1897cb322b39360a0812661dba7c682aa45098eb8e193dbdf"},
+ {file = "kiwisolver-1.4.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:02f79693ec433cb4b5f51694e8477ae83b3205768a6fb48ffba60549080e295b"},
+ {file = "kiwisolver-1.4.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d0611a0a2a518464c05ddd5a3a1a0e856ccc10e67079bb17f265ad19ab3c7597"},
+ {file = "kiwisolver-1.4.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:db5283d90da4174865d520e7366801a93777201e91e79bacbac6e6927cbceede"},
+ {file = "kiwisolver-1.4.4-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:1041feb4cda8708ce73bb4dcb9ce1ccf49d553bf87c3954bdfa46f0c3f77252c"},
+ {file = "kiwisolver-1.4.4-cp37-cp37m-win32.whl", hash = "sha256:a553dadda40fef6bfa1456dc4be49b113aa92c2a9a9e8711e955618cd69622e3"},
+ {file = "kiwisolver-1.4.4-cp37-cp37m-win_amd64.whl", hash = "sha256:03baab2d6b4a54ddbb43bba1a3a2d1627e82d205c5cf8f4c924dc49284b87166"},
+ {file = "kiwisolver-1.4.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:841293b17ad704d70c578f1f0013c890e219952169ce8a24ebc063eecf775454"},
+ {file = "kiwisolver-1.4.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f4f270de01dd3e129a72efad823da90cc4d6aafb64c410c9033aba70db9f1ff0"},
+ {file = "kiwisolver-1.4.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f9f39e2f049db33a908319cf46624a569b36983c7c78318e9726a4cb8923b26c"},
+ {file = "kiwisolver-1.4.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c97528e64cb9ebeff9701e7938653a9951922f2a38bd847787d4a8e498cc83ae"},
+ {file = "kiwisolver-1.4.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d1573129aa0fd901076e2bfb4275a35f5b7aa60fbfb984499d661ec950320b0"},
+ {file = "kiwisolver-1.4.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ad881edc7ccb9d65b0224f4e4d05a1e85cf62d73aab798943df6d48ab0cd79a1"},
+ {file = "kiwisolver-1.4.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b428ef021242344340460fa4c9185d0b1f66fbdbfecc6c63eff4b7c29fad429d"},
+ {file = "kiwisolver-1.4.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:2e407cb4bd5a13984a6c2c0fe1845e4e41e96f183e5e5cd4d77a857d9693494c"},
+ {file = "kiwisolver-1.4.4-cp38-cp38-win32.whl", hash = "sha256:75facbe9606748f43428fc91a43edb46c7ff68889b91fa31f53b58894503a191"},
+ {file = "kiwisolver-1.4.4-cp38-cp38-win_amd64.whl", hash = "sha256:5bce61af018b0cb2055e0e72e7d65290d822d3feee430b7b8203d8a855e78766"},
+ {file = "kiwisolver-1.4.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8c808594c88a025d4e322d5bb549282c93c8e1ba71b790f539567932722d7bd8"},
+ {file = "kiwisolver-1.4.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f0a71d85ecdd570ded8ac3d1c0f480842f49a40beb423bb8014539a9f32a5897"},
+ {file = "kiwisolver-1.4.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b533558eae785e33e8c148a8d9921692a9fe5aa516efbdff8606e7d87b9d5824"},
+ {file = "kiwisolver-1.4.4-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:efda5fc8cc1c61e4f639b8067d118e742b812c930f708e6667a5ce0d13499e29"},
+ {file = "kiwisolver-1.4.4-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:7c43e1e1206cd421cd92e6b3280d4385d41d7166b3ed577ac20444b6995a445f"},
+ {file = "kiwisolver-1.4.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bc8d3bd6c72b2dd9decf16ce70e20abcb3274ba01b4e1c96031e0c4067d1e7cd"},
+ {file = "kiwisolver-1.4.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4ea39b0ccc4f5d803e3337dd46bcce60b702be4d86fd0b3d7531ef10fd99a1ac"},
+ {file = "kiwisolver-1.4.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:968f44fdbf6dd757d12920d63b566eeb4d5b395fd2d00d29d7ef00a00582aac9"},
+ {file = "kiwisolver-1.4.4-cp39-cp39-win32.whl", hash = "sha256:da7e547706e69e45d95e116e6939488d62174e033b763ab1496b4c29b76fabea"},
+ {file = "kiwisolver-1.4.4-cp39-cp39-win_amd64.whl", hash = "sha256:ba59c92039ec0a66103b1d5fe588fa546373587a7d68f5c96f743c3396afc04b"},
+ {file = "kiwisolver-1.4.4-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:91672bacaa030f92fc2f43b620d7b337fd9a5af28b0d6ed3f77afc43c4a64b5a"},
+ {file = "kiwisolver-1.4.4-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:787518a6789009c159453da4d6b683f468ef7a65bbde796bcea803ccf191058d"},
+ {file = "kiwisolver-1.4.4-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da152d8cdcab0e56e4f45eb08b9aea6455845ec83172092f09b0e077ece2cf7a"},
+ {file = "kiwisolver-1.4.4-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:ecb1fa0db7bf4cff9dac752abb19505a233c7f16684c5826d1f11ebd9472b871"},
+ {file = "kiwisolver-1.4.4-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:28bc5b299f48150b5f822ce68624e445040595a4ac3d59251703779836eceff9"},
+ {file = "kiwisolver-1.4.4-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:81e38381b782cc7e1e46c4e14cd997ee6040768101aefc8fa3c24a4cc58e98f8"},
+ {file = "kiwisolver-1.4.4-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:2a66fdfb34e05b705620dd567f5a03f239a088d5a3f321e7b6ac3239d22aa286"},
+ {file = "kiwisolver-1.4.4-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:872b8ca05c40d309ed13eb2e582cab0c5a05e81e987ab9c521bf05ad1d5cf5cb"},
+ {file = "kiwisolver-1.4.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:70e7c2e7b750585569564e2e5ca9845acfaa5da56ac46df68414f29fea97be9f"},
+ {file = "kiwisolver-1.4.4-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9f85003f5dfa867e86d53fac6f7e6f30c045673fa27b603c397753bebadc3008"},
+ {file = "kiwisolver-1.4.4-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e307eb9bd99801f82789b44bb45e9f541961831c7311521b13a6c85afc09767"},
+ {file = "kiwisolver-1.4.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1792d939ec70abe76f5054d3f36ed5656021dcad1322d1cc996d4e54165cef9"},
+ {file = "kiwisolver-1.4.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6cb459eea32a4e2cf18ba5fcece2dbdf496384413bc1bae15583f19e567f3b2"},
+ {file = "kiwisolver-1.4.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:36dafec3d6d6088d34e2de6b85f9d8e2324eb734162fba59d2ba9ed7a2043d5b"},
+ {file = "kiwisolver-1.4.4.tar.gz", hash = "sha256:d41997519fcba4a1e46eb4a2fe31bc12f0ff957b2b81bac28db24744f333e955"},
+]
+
+[[package]]
+name = "librosa"
+version = "0.9.2"
+description = "Python module for audio and music processing"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "librosa-0.9.2-py3-none-any.whl", hash = "sha256:322a813e6d37af9fbc369e6a637dcf5fdc5c6925ce806a0d27c68de61a81350f"},
+ {file = "librosa-0.9.2.tar.gz", hash = "sha256:5b576b5efdce428e90bc988bdd5a953d12a727e5f931f30d74c53b63abbe3c89"},
+]
+
+[package.dependencies]
+audioread = ">=2.1.9"
+decorator = ">=4.0.10"
+joblib = ">=0.14"
+numba = ">=0.45.1"
+numpy = ">=1.17.0"
+packaging = ">=20.0"
+pooch = ">=1.0"
+resampy = ">=0.2.2"
+scikit-learn = ">=0.19.1"
+scipy = ">=1.2.0"
+soundfile = ">=0.10.2"
+
+[package.extras]
+display = ["matplotlib (>=3.3.0)"]
+docs = ["ipython (>=7.0)", "matplotlib (>=3.3.0)", "mir-eval (>=0.5)", "numba (<0.50)", "numpydoc", "presets", "sphinx (!=1.3.1)", "sphinx-gallery (>=0.7)", "sphinx-multiversion (>=0.2.3)", "sphinx-rtd-theme (==1.*)", "sphinxcontrib-svg2pdfconverter"]
+tests = ["contextlib2", "matplotlib (>=3.3.0)", "pytest", "pytest-cov", "pytest-mpl", "samplerate", "soxr"]
+
+[[package]]
+name = "linkify-it-py"
+version = "2.0.0"
+description = "Links recognition library with FULL unicode support."
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "linkify-it-py-2.0.0.tar.gz", hash = "sha256:476464480906bed8b2fa3813bf55566282e55214ad7e41b7d1c2b564666caf2f"},
+ {file = "linkify_it_py-2.0.0-py3-none-any.whl", hash = "sha256:1bff43823e24e507a099e328fc54696124423dd6320c75a9da45b4b754b748ad"},
+]
+
+[package.dependencies]
+uc-micro-py = "*"
+
+[package.extras]
+benchmark = ["pytest", "pytest-benchmark"]
+dev = ["black", "flake8", "isort", "pre-commit"]
+doc = ["myst-parser", "sphinx", "sphinx-book-theme"]
+test = ["coverage", "pytest", "pytest-cov"]
+
+[[package]]
+name = "lit"
+version = "16.0.0"
+description = "A Software Testing Tool"
+optional = false
+python-versions = "*"
+files = [
+ {file = "lit-16.0.0.tar.gz", hash = "sha256:3c4ac372122a1de4a88deb277b956f91b7209420a0bef683b1ab2d2b16dabe11"},
+]
+
+[[package]]
+name = "llvmlite"
+version = "0.39.0"
+description = "lightweight wrapper around basic LLVM functionality"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "llvmlite-0.39.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:589f08a1b1920e6004735819ce9aafdd85d030d4a231c1e7adaca9360724b1ed"},
+ {file = "llvmlite-0.39.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:44a9a5cbe76db8ba01a5f6fa21649d91aa8a2634cc6f3a60291797e42e67d79e"},
+ {file = "llvmlite-0.39.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:74d89f2ec4734d3e200fb90ea0b3ca5e9be40f3b3e50eb368ca9002ed5b3e4f8"},
+ {file = "llvmlite-0.39.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8b4cb4f433b48792f02ec4ab619b86b145689302a3088a3f3853f50df6c2559d"},
+ {file = "llvmlite-0.39.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:35db4122182cc5112912a3ec94a3c18eab9a990bd588bfda8445087c1b748563"},
+ {file = "llvmlite-0.39.0-cp310-cp310-win32.whl", hash = "sha256:c00bf7a8dc56b4b3618c65b67e75046410f751512871d9e23919cf1feb1007b2"},
+ {file = "llvmlite-0.39.0-cp310-cp310-win_amd64.whl", hash = "sha256:72bd2e5db9790344ec39cef77098486635853829ecb0e66e6fa516488ff6dd9e"},
+ {file = "llvmlite-0.39.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:53c7c608baffdcdc2213926f4e3600036d4048aed08d6209b9f76a5439e529d6"},
+ {file = "llvmlite-0.39.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c3bbd23e42593f85a842614d8ddb2b2943630e4c4c8418ea0d8cf1dce9f2fa7a"},
+ {file = "llvmlite-0.39.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d733eb9c02bb8b01373228a1339901b1e50be4581105239c6052b9573ddb9298"},
+ {file = "llvmlite-0.39.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f575fcb9bebe5bcbe20373c56ad3ebf63bae0e27d3c22c1a4dc27fa4666d0324"},
+ {file = "llvmlite-0.39.0-cp37-cp37m-win32.whl", hash = "sha256:5ca4ea962da6ec3b007bedab17065781803d71159b03435f24ce6845cf3d1c66"},
+ {file = "llvmlite-0.39.0-cp37-cp37m-win_amd64.whl", hash = "sha256:8e461608135859ac40e39211d9c63a1ce35176513f6b8be87efb554d4af3a388"},
+ {file = "llvmlite-0.39.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:62a11b8e9e5fc4783d94da45d94c5a047ce6ccc4c112ae5f764109e9405fcc2c"},
+ {file = "llvmlite-0.39.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9272b7e344d12b36dafeb6911054eff32d2a9be7256a2866f0c09d08f945e17f"},
+ {file = "llvmlite-0.39.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3df59a7c2b60764fb9eeaf9c442d757eca1f3e87298d4f88849203667528581e"},
+ {file = "llvmlite-0.39.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4cfd6688efd0f551168dd8626f386464aef25663268a2400c0f6a089b97a73dc"},
+ {file = "llvmlite-0.39.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c7292b394956749e51ae3d51a2085932a0e3261108b35eda61d702c1b977102c"},
+ {file = "llvmlite-0.39.0-cp38-cp38-win32.whl", hash = "sha256:f8e9463a7d0152994b6f7d630012297bb160db237ad9ca8e75c8dceef7a747cf"},
+ {file = "llvmlite-0.39.0-cp38-cp38-win_amd64.whl", hash = "sha256:8d8149fdaab40ae48ea4ec816ae2ae5d36d664795e1b1dfb911fc2c62bc73184"},
+ {file = "llvmlite-0.39.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0929e3c26bcafb53545c77bcf7020b943dcefcf8d7d3010f414384458f805cc1"},
+ {file = "llvmlite-0.39.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:56ea23c6bbcd25a7c050a26b6effe836a575a33183744cbc28fb21358b3801f8"},
+ {file = "llvmlite-0.39.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f82d605c5d6c8df96fe19bc3a61c934580e24cafa694cbf79cb227cdc0e426a"},
+ {file = "llvmlite-0.39.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7f7a7278ba6d75533be46abc3d9e242030ab017f0016dd081b55f821cc03be9"},
+ {file = "llvmlite-0.39.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56ccfe736a12aef2e39450a22e4c027eee4f488c5786c81d0b18ff8a6cf52531"},
+ {file = "llvmlite-0.39.0-cp39-cp39-win32.whl", hash = "sha256:0706abf522dc510ddc818f5c9e1cdae521a1416d3c399bbfc4827813379f0164"},
+ {file = "llvmlite-0.39.0-cp39-cp39-win_amd64.whl", hash = "sha256:d4a8199263859b97f174035e39297e770617d3497fac44fe738f74ce9c51d22b"},
+ {file = "llvmlite-0.39.0.tar.gz", hash = "sha256:01098be54f1aa25e391cebba8ea71cd1533f8cd1f50e34c7dd7540c2560a93af"},
+]
+
+[[package]]
+name = "lxml"
+version = "4.9.2"
+description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API."
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, != 3.4.*"
+files = [
+ {file = "lxml-4.9.2-cp27-cp27m-macosx_10_15_x86_64.whl", hash = "sha256:76cf573e5a365e790396a5cc2b909812633409306c6531a6877c59061e42c4f2"},
+ {file = "lxml-4.9.2-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b1f42b6921d0e81b1bcb5e395bc091a70f41c4d4e55ba99c6da2b31626c44892"},
+ {file = "lxml-4.9.2-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:9f102706d0ca011de571de32c3247c6476b55bb6bc65a20f682f000b07a4852a"},
+ {file = "lxml-4.9.2-cp27-cp27m-win32.whl", hash = "sha256:8d0b4612b66ff5d62d03bcaa043bb018f74dfea51184e53f067e6fdcba4bd8de"},
+ {file = "lxml-4.9.2-cp27-cp27m-win_amd64.whl", hash = "sha256:4c8f293f14abc8fd3e8e01c5bd86e6ed0b6ef71936ded5bf10fe7a5efefbaca3"},
+ {file = "lxml-4.9.2-cp27-cp27mu-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2899456259589aa38bfb018c364d6ae7b53c5c22d8e27d0ec7609c2a1ff78b50"},
+ {file = "lxml-4.9.2-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6749649eecd6a9871cae297bffa4ee76f90b4504a2a2ab528d9ebe912b101975"},
+ {file = "lxml-4.9.2-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:a08cff61517ee26cb56f1e949cca38caabe9ea9fbb4b1e10a805dc39844b7d5c"},
+ {file = "lxml-4.9.2-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:85cabf64adec449132e55616e7ca3e1000ab449d1d0f9d7f83146ed5bdcb6d8a"},
+ {file = "lxml-4.9.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:8340225bd5e7a701c0fa98284c849c9b9fc9238abf53a0ebd90900f25d39a4e4"},
+ {file = "lxml-4.9.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:1ab8f1f932e8f82355e75dda5413a57612c6ea448069d4fb2e217e9a4bed13d4"},
+ {file = "lxml-4.9.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:699a9af7dffaf67deeae27b2112aa06b41c370d5e7633e0ee0aea2e0b6c211f7"},
+ {file = "lxml-4.9.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b9cc34af337a97d470040f99ba4282f6e6bac88407d021688a5d585e44a23184"},
+ {file = "lxml-4.9.2-cp310-cp310-win32.whl", hash = "sha256:d02a5399126a53492415d4906ab0ad0375a5456cc05c3fc0fc4ca11771745cda"},
+ {file = "lxml-4.9.2-cp310-cp310-win_amd64.whl", hash = "sha256:a38486985ca49cfa574a507e7a2215c0c780fd1778bb6290c21193b7211702ab"},
+ {file = "lxml-4.9.2-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:c83203addf554215463b59f6399835201999b5e48019dc17f182ed5ad87205c9"},
+ {file = "lxml-4.9.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:2a87fa548561d2f4643c99cd13131acb607ddabb70682dcf1dff5f71f781a4bf"},
+ {file = "lxml-4.9.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:d6b430a9938a5a5d85fc107d852262ddcd48602c120e3dbb02137c83d212b380"},
+ {file = "lxml-4.9.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:3efea981d956a6f7173b4659849f55081867cf897e719f57383698af6f618a92"},
+ {file = "lxml-4.9.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:df0623dcf9668ad0445e0558a21211d4e9a149ea8f5666917c8eeec515f0a6d1"},
+ {file = "lxml-4.9.2-cp311-cp311-win32.whl", hash = "sha256:da248f93f0418a9e9d94b0080d7ebc407a9a5e6d0b57bb30db9b5cc28de1ad33"},
+ {file = "lxml-4.9.2-cp311-cp311-win_amd64.whl", hash = "sha256:3818b8e2c4b5148567e1b09ce739006acfaa44ce3156f8cbbc11062994b8e8dd"},
+ {file = "lxml-4.9.2-cp35-cp35m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ca989b91cf3a3ba28930a9fc1e9aeafc2a395448641df1f387a2d394638943b0"},
+ {file = "lxml-4.9.2-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:822068f85e12a6e292803e112ab876bc03ed1f03dddb80154c395f891ca6b31e"},
+ {file = "lxml-4.9.2-cp35-cp35m-win32.whl", hash = "sha256:be7292c55101e22f2a3d4d8913944cbea71eea90792bf914add27454a13905df"},
+ {file = "lxml-4.9.2-cp35-cp35m-win_amd64.whl", hash = "sha256:998c7c41910666d2976928c38ea96a70d1aa43be6fe502f21a651e17483a43c5"},
+ {file = "lxml-4.9.2-cp36-cp36m-macosx_10_15_x86_64.whl", hash = "sha256:b26a29f0b7fc6f0897f043ca366142d2b609dc60756ee6e4e90b5f762c6adc53"},
+ {file = "lxml-4.9.2-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:ab323679b8b3030000f2be63e22cdeea5b47ee0abd2d6a1dc0c8103ddaa56cd7"},
+ {file = "lxml-4.9.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:689bb688a1db722485e4610a503e3e9210dcc20c520b45ac8f7533c837be76fe"},
+ {file = "lxml-4.9.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:f49e52d174375a7def9915c9f06ec4e569d235ad428f70751765f48d5926678c"},
+ {file = "lxml-4.9.2-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:36c3c175d34652a35475a73762b545f4527aec044910a651d2bf50de9c3352b1"},
+ {file = "lxml-4.9.2-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a35f8b7fa99f90dd2f5dc5a9fa12332642f087a7641289ca6c40d6e1a2637d8e"},
+ {file = "lxml-4.9.2-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:58bfa3aa19ca4c0f28c5dde0ff56c520fbac6f0daf4fac66ed4c8d2fb7f22e74"},
+ {file = "lxml-4.9.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:bc718cd47b765e790eecb74d044cc8d37d58562f6c314ee9484df26276d36a38"},
+ {file = "lxml-4.9.2-cp36-cp36m-win32.whl", hash = "sha256:d5bf6545cd27aaa8a13033ce56354ed9e25ab0e4ac3b5392b763d8d04b08e0c5"},
+ {file = "lxml-4.9.2-cp36-cp36m-win_amd64.whl", hash = "sha256:3ab9fa9d6dc2a7f29d7affdf3edebf6ece6fb28a6d80b14c3b2fb9d39b9322c3"},
+ {file = "lxml-4.9.2-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:05ca3f6abf5cf78fe053da9b1166e062ade3fa5d4f92b4ed688127ea7d7b1d03"},
+ {file = "lxml-4.9.2-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:a5da296eb617d18e497bcf0a5c528f5d3b18dadb3619fbdadf4ed2356ef8d941"},
+ {file = "lxml-4.9.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:04876580c050a8c5341d706dd464ff04fd597095cc8c023252566a8826505726"},
+ {file = "lxml-4.9.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:c9ec3eaf616d67db0764b3bb983962b4f385a1f08304fd30c7283954e6a7869b"},
+ {file = "lxml-4.9.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2a29ba94d065945944016b6b74e538bdb1751a1db6ffb80c9d3c2e40d6fa9894"},
+ {file = "lxml-4.9.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a82d05da00a58b8e4c0008edbc8a4b6ec5a4bc1e2ee0fb6ed157cf634ed7fa45"},
+ {file = "lxml-4.9.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:223f4232855ade399bd409331e6ca70fb5578efef22cf4069a6090acc0f53c0e"},
+ {file = "lxml-4.9.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d17bc7c2ccf49c478c5bdd447594e82692c74222698cfc9b5daae7ae7e90743b"},
+ {file = "lxml-4.9.2-cp37-cp37m-win32.whl", hash = "sha256:b64d891da92e232c36976c80ed7ebb383e3f148489796d8d31a5b6a677825efe"},
+ {file = "lxml-4.9.2-cp37-cp37m-win_amd64.whl", hash = "sha256:a0a336d6d3e8b234a3aae3c674873d8f0e720b76bc1d9416866c41cd9500ffb9"},
+ {file = "lxml-4.9.2-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:da4dd7c9c50c059aba52b3524f84d7de956f7fef88f0bafcf4ad7dde94a064e8"},
+ {file = "lxml-4.9.2-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:821b7f59b99551c69c85a6039c65b75f5683bdc63270fec660f75da67469ca24"},
+ {file = "lxml-4.9.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:e5168986b90a8d1f2f9dc1b841467c74221bd752537b99761a93d2d981e04889"},
+ {file = "lxml-4.9.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:8e20cb5a47247e383cf4ff523205060991021233ebd6f924bca927fcf25cf86f"},
+ {file = "lxml-4.9.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:13598ecfbd2e86ea7ae45ec28a2a54fb87ee9b9fdb0f6d343297d8e548392c03"},
+ {file = "lxml-4.9.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:880bbbcbe2fca64e2f4d8e04db47bcdf504936fa2b33933efd945e1b429bea8c"},
+ {file = "lxml-4.9.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:7d2278d59425777cfcb19735018d897ca8303abe67cc735f9f97177ceff8027f"},
+ {file = "lxml-4.9.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5344a43228767f53a9df6e5b253f8cdca7dfc7b7aeae52551958192f56d98457"},
+ {file = "lxml-4.9.2-cp38-cp38-win32.whl", hash = "sha256:925073b2fe14ab9b87e73f9a5fde6ce6392da430f3004d8b72cc86f746f5163b"},
+ {file = "lxml-4.9.2-cp38-cp38-win_amd64.whl", hash = "sha256:9b22c5c66f67ae00c0199f6055705bc3eb3fcb08d03d2ec4059a2b1b25ed48d7"},
+ {file = "lxml-4.9.2-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:5f50a1c177e2fa3ee0667a5ab79fdc6b23086bc8b589d90b93b4bd17eb0e64d1"},
+ {file = "lxml-4.9.2-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:090c6543d3696cbe15b4ac6e175e576bcc3f1ccfbba970061b7300b0c15a2140"},
+ {file = "lxml-4.9.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:63da2ccc0857c311d764e7d3d90f429c252e83b52d1f8f1d1fe55be26827d1f4"},
+ {file = "lxml-4.9.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:5b4545b8a40478183ac06c073e81a5ce4cf01bf1734962577cf2bb569a5b3bbf"},
+ {file = "lxml-4.9.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2e430cd2824f05f2d4f687701144556646bae8f249fd60aa1e4c768ba7018947"},
+ {file = "lxml-4.9.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6804daeb7ef69e7b36f76caddb85cccd63d0c56dedb47555d2fc969e2af6a1a5"},
+ {file = "lxml-4.9.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a6e441a86553c310258aca15d1c05903aaf4965b23f3bc2d55f200804e005ee5"},
+ {file = "lxml-4.9.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ca34efc80a29351897e18888c71c6aca4a359247c87e0b1c7ada14f0ab0c0fb2"},
+ {file = "lxml-4.9.2-cp39-cp39-win32.whl", hash = "sha256:6b418afe5df18233fc6b6093deb82a32895b6bb0b1155c2cdb05203f583053f1"},
+ {file = "lxml-4.9.2-cp39-cp39-win_amd64.whl", hash = "sha256:f1496ea22ca2c830cbcbd473de8f114a320da308438ae65abad6bab7867fe38f"},
+ {file = "lxml-4.9.2-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:b264171e3143d842ded311b7dccd46ff9ef34247129ff5bf5066123c55c2431c"},
+ {file = "lxml-4.9.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:0dc313ef231edf866912e9d8f5a042ddab56c752619e92dfd3a2c277e6a7299a"},
+ {file = "lxml-4.9.2-pp38-pypy38_pp73-macosx_10_15_x86_64.whl", hash = "sha256:16efd54337136e8cd72fb9485c368d91d77a47ee2d42b057564aae201257d419"},
+ {file = "lxml-4.9.2-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:0f2b1e0d79180f344ff9f321327b005ca043a50ece8713de61d1cb383fb8ac05"},
+ {file = "lxml-4.9.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:7b770ed79542ed52c519119473898198761d78beb24b107acf3ad65deae61f1f"},
+ {file = "lxml-4.9.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:efa29c2fe6b4fdd32e8ef81c1528506895eca86e1d8c4657fda04c9b3786ddf9"},
+ {file = "lxml-4.9.2-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7e91ee82f4199af8c43d8158024cbdff3d931df350252288f0d4ce656df7f3b5"},
+ {file = "lxml-4.9.2-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:b23e19989c355ca854276178a0463951a653309fb8e57ce674497f2d9f208746"},
+ {file = "lxml-4.9.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:01d36c05f4afb8f7c20fd9ed5badca32a2029b93b1750f571ccc0b142531caf7"},
+ {file = "lxml-4.9.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7b515674acfdcadb0eb5d00d8a709868173acece5cb0be3dd165950cbfdf5409"},
+ {file = "lxml-4.9.2.tar.gz", hash = "sha256:2455cfaeb7ac70338b3257f41e21f0724f4b5b0c0e7702da67ee6c3640835b67"},
+]
+
+[package.extras]
+cssselect = ["cssselect (>=0.7)"]
+html5 = ["html5lib"]
+htmlsoup = ["BeautifulSoup4"]
+source = ["Cython (>=0.29.7)"]
+
+[[package]]
+name = "markdown"
+version = "3.4.3"
+description = "Python implementation of John Gruber's Markdown."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "Markdown-3.4.3-py3-none-any.whl", hash = "sha256:065fd4df22da73a625f14890dd77eb8040edcbd68794bcd35943be14490608b2"},
+ {file = "Markdown-3.4.3.tar.gz", hash = "sha256:8bf101198e004dc93e84a12a7395e31aac6a9c9942848ae1d99b9d72cf9b3520"},
+]
+
+[package.dependencies]
+importlib-metadata = {version = ">=4.4", markers = "python_version < \"3.10\""}
+
+[package.extras]
+testing = ["coverage", "pyyaml"]
+
+[[package]]
+name = "markdown-it-py"
+version = "2.2.0"
+description = "Python port of markdown-it. Markdown parsing, done right!"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "markdown-it-py-2.2.0.tar.gz", hash = "sha256:7c9a5e412688bc771c67432cbfebcdd686c93ce6484913dccf06cb5a0bea35a1"},
+ {file = "markdown_it_py-2.2.0-py3-none-any.whl", hash = "sha256:5a35f8d1870171d9acc47b99612dc146129b631baf04970128b568f190d0cc30"},
+]
+
+[package.dependencies]
+linkify-it-py = {version = ">=1,<3", optional = true, markers = "extra == \"linkify\""}
+mdurl = ">=0.1,<1.0"
+
+[package.extras]
+benchmarking = ["psutil", "pytest", "pytest-benchmark"]
+code-style = ["pre-commit (>=3.0,<4.0)"]
+compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"]
+linkify = ["linkify-it-py (>=1,<3)"]
+plugins = ["mdit-py-plugins"]
+profiling = ["gprof2dot"]
+rtd = ["attrs", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"]
+testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"]
+
+[[package]]
+name = "markupsafe"
+version = "2.1.2"
+description = "Safely add untrusted strings to HTML/XML markup."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "MarkupSafe-2.1.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:665a36ae6f8f20a4676b53224e33d456a6f5a72657d9c83c2aa00765072f31f7"},
+ {file = "MarkupSafe-2.1.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:340bea174e9761308703ae988e982005aedf427de816d1afe98147668cc03036"},
+ {file = "MarkupSafe-2.1.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22152d00bf4a9c7c83960521fc558f55a1adbc0631fbb00a9471e097b19d72e1"},
+ {file = "MarkupSafe-2.1.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28057e985dace2f478e042eaa15606c7efccb700797660629da387eb289b9323"},
+ {file = "MarkupSafe-2.1.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca244fa73f50a800cf8c3ebf7fd93149ec37f5cb9596aa8873ae2c1d23498601"},
+ {file = "MarkupSafe-2.1.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d9d971ec1e79906046aa3ca266de79eac42f1dbf3612a05dc9368125952bd1a1"},
+ {file = "MarkupSafe-2.1.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7e007132af78ea9df29495dbf7b5824cb71648d7133cf7848a2a5dd00d36f9ff"},
+ {file = "MarkupSafe-2.1.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7313ce6a199651c4ed9d7e4cfb4aa56fe923b1adf9af3b420ee14e6d9a73df65"},
+ {file = "MarkupSafe-2.1.2-cp310-cp310-win32.whl", hash = "sha256:c4a549890a45f57f1ebf99c067a4ad0cb423a05544accaf2b065246827ed9603"},
+ {file = "MarkupSafe-2.1.2-cp310-cp310-win_amd64.whl", hash = "sha256:835fb5e38fd89328e9c81067fd642b3593c33e1e17e2fdbf77f5676abb14a156"},
+ {file = "MarkupSafe-2.1.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2ec4f2d48ae59bbb9d1f9d7efb9236ab81429a764dedca114f5fdabbc3788013"},
+ {file = "MarkupSafe-2.1.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:608e7073dfa9e38a85d38474c082d4281f4ce276ac0010224eaba11e929dd53a"},
+ {file = "MarkupSafe-2.1.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:65608c35bfb8a76763f37036547f7adfd09270fbdbf96608be2bead319728fcd"},
+ {file = "MarkupSafe-2.1.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2bfb563d0211ce16b63c7cb9395d2c682a23187f54c3d79bfec33e6705473c6"},
+ {file = "MarkupSafe-2.1.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:da25303d91526aac3672ee6d49a2f3db2d9502a4a60b55519feb1a4c7714e07d"},
+ {file = "MarkupSafe-2.1.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:9cad97ab29dfc3f0249b483412c85c8ef4766d96cdf9dcf5a1e3caa3f3661cf1"},
+ {file = "MarkupSafe-2.1.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:085fd3201e7b12809f9e6e9bc1e5c96a368c8523fad5afb02afe3c051ae4afcc"},
+ {file = "MarkupSafe-2.1.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1bea30e9bf331f3fef67e0a3877b2288593c98a21ccb2cf29b74c581a4eb3af0"},
+ {file = "MarkupSafe-2.1.2-cp311-cp311-win32.whl", hash = "sha256:7df70907e00c970c60b9ef2938d894a9381f38e6b9db73c5be35e59d92e06625"},
+ {file = "MarkupSafe-2.1.2-cp311-cp311-win_amd64.whl", hash = "sha256:e55e40ff0cc8cc5c07996915ad367fa47da6b3fc091fdadca7f5403239c5fec3"},
+ {file = "MarkupSafe-2.1.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a6e40afa7f45939ca356f348c8e23048e02cb109ced1eb8420961b2f40fb373a"},
+ {file = "MarkupSafe-2.1.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf877ab4ed6e302ec1d04952ca358b381a882fbd9d1b07cccbfd61783561f98a"},
+ {file = "MarkupSafe-2.1.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63ba06c9941e46fa389d389644e2d8225e0e3e5ebcc4ff1ea8506dce646f8c8a"},
+ {file = "MarkupSafe-2.1.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f1cd098434e83e656abf198f103a8207a8187c0fc110306691a2e94a78d0abb2"},
+ {file = "MarkupSafe-2.1.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:55f44b440d491028addb3b88f72207d71eeebfb7b5dbf0643f7c023ae1fba619"},
+ {file = "MarkupSafe-2.1.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:a6f2fcca746e8d5910e18782f976489939d54a91f9411c32051b4aab2bd7c513"},
+ {file = "MarkupSafe-2.1.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0b462104ba25f1ac006fdab8b6a01ebbfbce9ed37fd37fd4acd70c67c973e460"},
+ {file = "MarkupSafe-2.1.2-cp37-cp37m-win32.whl", hash = "sha256:7668b52e102d0ed87cb082380a7e2e1e78737ddecdde129acadb0eccc5423859"},
+ {file = "MarkupSafe-2.1.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6d6607f98fcf17e534162f0709aaad3ab7a96032723d8ac8750ffe17ae5a0666"},
+ {file = "MarkupSafe-2.1.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:a806db027852538d2ad7555b203300173dd1b77ba116de92da9afbc3a3be3eed"},
+ {file = "MarkupSafe-2.1.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a4abaec6ca3ad8660690236d11bfe28dfd707778e2442b45addd2f086d6ef094"},
+ {file = "MarkupSafe-2.1.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f03a532d7dee1bed20bc4884194a16160a2de9ffc6354b3878ec9682bb623c54"},
+ {file = "MarkupSafe-2.1.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4cf06cdc1dda95223e9d2d3c58d3b178aa5dacb35ee7e3bbac10e4e1faacb419"},
+ {file = "MarkupSafe-2.1.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:22731d79ed2eb25059ae3df1dfc9cb1546691cc41f4e3130fe6bfbc3ecbbecfa"},
+ {file = "MarkupSafe-2.1.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f8ffb705ffcf5ddd0e80b65ddf7bed7ee4f5a441ea7d3419e861a12eaf41af58"},
+ {file = "MarkupSafe-2.1.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:8db032bf0ce9022a8e41a22598eefc802314e81b879ae093f36ce9ddf39ab1ba"},
+ {file = "MarkupSafe-2.1.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2298c859cfc5463f1b64bd55cb3e602528db6fa0f3cfd568d3605c50678f8f03"},
+ {file = "MarkupSafe-2.1.2-cp38-cp38-win32.whl", hash = "sha256:50c42830a633fa0cf9e7d27664637532791bfc31c731a87b202d2d8ac40c3ea2"},
+ {file = "MarkupSafe-2.1.2-cp38-cp38-win_amd64.whl", hash = "sha256:bb06feb762bade6bf3c8b844462274db0c76acc95c52abe8dbed28ae3d44a147"},
+ {file = "MarkupSafe-2.1.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:99625a92da8229df6d44335e6fcc558a5037dd0a760e11d84be2260e6f37002f"},
+ {file = "MarkupSafe-2.1.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8bca7e26c1dd751236cfb0c6c72d4ad61d986e9a41bbf76cb445f69488b2a2bd"},
+ {file = "MarkupSafe-2.1.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40627dcf047dadb22cd25ea7ecfe9cbf3bbbad0482ee5920b582f3809c97654f"},
+ {file = "MarkupSafe-2.1.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40dfd3fefbef579ee058f139733ac336312663c6706d1163b82b3003fb1925c4"},
+ {file = "MarkupSafe-2.1.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:090376d812fb6ac5f171e5938e82e7f2d7adc2b629101cec0db8b267815c85e2"},
+ {file = "MarkupSafe-2.1.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2e7821bffe00aa6bd07a23913b7f4e01328c3d5cc0b40b36c0bd81d362faeb65"},
+ {file = "MarkupSafe-2.1.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:c0a33bc9f02c2b17c3ea382f91b4db0e6cde90b63b296422a939886a7a80de1c"},
+ {file = "MarkupSafe-2.1.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b8526c6d437855442cdd3d87eede9c425c4445ea011ca38d937db299382e6fa3"},
+ {file = "MarkupSafe-2.1.2-cp39-cp39-win32.whl", hash = "sha256:137678c63c977754abe9086a3ec011e8fd985ab90631145dfb9294ad09c102a7"},
+ {file = "MarkupSafe-2.1.2-cp39-cp39-win_amd64.whl", hash = "sha256:0576fe974b40a400449768941d5d0858cc624e3249dfd1e0c33674e5c7ca7aed"},
+ {file = "MarkupSafe-2.1.2.tar.gz", hash = "sha256:abcabc8c2b26036d62d4c746381a6f7cf60aafcc653198ad678306986b09450d"},
+]
+
+[[package]]
+name = "matplotlib"
+version = "3.7.1"
+description = "Python plotting package"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "matplotlib-3.7.1-cp310-cp310-macosx_10_12_universal2.whl", hash = "sha256:95cbc13c1fc6844ab8812a525bbc237fa1470863ff3dace7352e910519e194b1"},
+ {file = "matplotlib-3.7.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:08308bae9e91aca1ec6fd6dda66237eef9f6294ddb17f0d0b3c863169bf82353"},
+ {file = "matplotlib-3.7.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:544764ba51900da4639c0f983b323d288f94f65f4024dc40ecb1542d74dc0500"},
+ {file = "matplotlib-3.7.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:56d94989191de3fcc4e002f93f7f1be5da476385dde410ddafbb70686acf00ea"},
+ {file = "matplotlib-3.7.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e99bc9e65901bb9a7ce5e7bb24af03675cbd7c70b30ac670aa263240635999a4"},
+ {file = "matplotlib-3.7.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb7d248c34a341cd4c31a06fd34d64306624c8cd8d0def7abb08792a5abfd556"},
+ {file = "matplotlib-3.7.1-cp310-cp310-win32.whl", hash = "sha256:ce463ce590f3825b52e9fe5c19a3c6a69fd7675a39d589e8b5fbe772272b3a24"},
+ {file = "matplotlib-3.7.1-cp310-cp310-win_amd64.whl", hash = "sha256:3d7bc90727351fb841e4d8ae620d2d86d8ed92b50473cd2b42ce9186104ecbba"},
+ {file = "matplotlib-3.7.1-cp311-cp311-macosx_10_12_universal2.whl", hash = "sha256:770a205966d641627fd5cf9d3cb4b6280a716522cd36b8b284a8eb1581310f61"},
+ {file = "matplotlib-3.7.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:f67bfdb83a8232cb7a92b869f9355d677bce24485c460b19d01970b64b2ed476"},
+ {file = "matplotlib-3.7.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2bf092f9210e105f414a043b92af583c98f50050559616930d884387d0772aba"},
+ {file = "matplotlib-3.7.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89768d84187f31717349c6bfadc0e0d8c321e8eb34522acec8a67b1236a66332"},
+ {file = "matplotlib-3.7.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:83111e6388dec67822e2534e13b243cc644c7494a4bb60584edbff91585a83c6"},
+ {file = "matplotlib-3.7.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a867bf73a7eb808ef2afbca03bcdb785dae09595fbe550e1bab0cd023eba3de0"},
+ {file = "matplotlib-3.7.1-cp311-cp311-win32.whl", hash = "sha256:fbdeeb58c0cf0595efe89c05c224e0a502d1aa6a8696e68a73c3efc6bc354304"},
+ {file = "matplotlib-3.7.1-cp311-cp311-win_amd64.whl", hash = "sha256:c0bd19c72ae53e6ab979f0ac6a3fafceb02d2ecafa023c5cca47acd934d10be7"},
+ {file = "matplotlib-3.7.1-cp38-cp38-macosx_10_12_universal2.whl", hash = "sha256:6eb88d87cb2c49af00d3bbc33a003f89fd9f78d318848da029383bfc08ecfbfb"},
+ {file = "matplotlib-3.7.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:cf0e4f727534b7b1457898c4f4ae838af1ef87c359b76dcd5330fa31893a3ac7"},
+ {file = "matplotlib-3.7.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:46a561d23b91f30bccfd25429c3c706afe7d73a5cc64ef2dfaf2b2ac47c1a5dc"},
+ {file = "matplotlib-3.7.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:8704726d33e9aa8a6d5215044b8d00804561971163563e6e6591f9dcf64340cc"},
+ {file = "matplotlib-3.7.1-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4cf327e98ecf08fcbb82685acaf1939d3338548620ab8dfa02828706402c34de"},
+ {file = "matplotlib-3.7.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:617f14ae9d53292ece33f45cba8503494ee199a75b44de7717964f70637a36aa"},
+ {file = "matplotlib-3.7.1-cp38-cp38-win32.whl", hash = "sha256:7c9a4b2da6fac77bcc41b1ea95fadb314e92508bf5493ceff058e727e7ecf5b0"},
+ {file = "matplotlib-3.7.1-cp38-cp38-win_amd64.whl", hash = "sha256:14645aad967684e92fc349493fa10c08a6da514b3d03a5931a1bac26e6792bd1"},
+ {file = "matplotlib-3.7.1-cp39-cp39-macosx_10_12_universal2.whl", hash = "sha256:81a6b377ea444336538638d31fdb39af6be1a043ca5e343fe18d0f17e098770b"},
+ {file = "matplotlib-3.7.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:28506a03bd7f3fe59cd3cd4ceb2a8d8a2b1db41afede01f66c42561b9be7b4b7"},
+ {file = "matplotlib-3.7.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8c587963b85ce41e0a8af53b9b2de8dddbf5ece4c34553f7bd9d066148dc719c"},
+ {file = "matplotlib-3.7.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8bf26ade3ff0f27668989d98c8435ce9327d24cffb7f07d24ef609e33d582439"},
+ {file = "matplotlib-3.7.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:def58098f96a05f90af7e92fd127d21a287068202aa43b2a93476170ebd99e87"},
+ {file = "matplotlib-3.7.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f883a22a56a84dba3b588696a2b8a1ab0d2c3d41be53264115c71b0a942d8fdb"},
+ {file = "matplotlib-3.7.1-cp39-cp39-win32.whl", hash = "sha256:4f99e1b234c30c1e9714610eb0c6d2f11809c9c78c984a613ae539ea2ad2eb4b"},
+ {file = "matplotlib-3.7.1-cp39-cp39-win_amd64.whl", hash = "sha256:3ba2af245e36990facf67fde840a760128ddd71210b2ab6406e640188d69d136"},
+ {file = "matplotlib-3.7.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3032884084f541163f295db8a6536e0abb0db464008fadca6c98aaf84ccf4717"},
+ {file = "matplotlib-3.7.1-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3a2cb34336110e0ed8bb4f650e817eed61fa064acbefeb3591f1b33e3a84fd96"},
+ {file = "matplotlib-3.7.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b867e2f952ed592237a1828f027d332d8ee219ad722345b79a001f49df0936eb"},
+ {file = "matplotlib-3.7.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:57bfb8c8ea253be947ccb2bc2d1bb3862c2bccc662ad1b4626e1f5e004557042"},
+ {file = "matplotlib-3.7.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:438196cdf5dc8d39b50a45cb6e3f6274edbcf2254f85fa9b895bf85851c3a613"},
+ {file = "matplotlib-3.7.1-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:21e9cff1a58d42e74d01153360de92b326708fb205250150018a52c70f43c290"},
+ {file = "matplotlib-3.7.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75d4725d70b7c03e082bbb8a34639ede17f333d7247f56caceb3801cb6ff703d"},
+ {file = "matplotlib-3.7.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:97cc368a7268141afb5690760921765ed34867ffb9655dd325ed207af85c7529"},
+ {file = "matplotlib-3.7.1.tar.gz", hash = "sha256:7b73305f25eab4541bd7ee0b96d87e53ae9c9f1823be5659b806cd85786fe882"},
+]
+
+[package.dependencies]
+contourpy = ">=1.0.1"
+cycler = ">=0.10"
+fonttools = ">=4.22.0"
+importlib-resources = {version = ">=3.2.0", markers = "python_version < \"3.10\""}
+kiwisolver = ">=1.0.1"
+numpy = ">=1.20"
+packaging = ">=20.0"
+pillow = ">=6.2.0"
+pyparsing = ">=2.3.1"
+python-dateutil = ">=2.7"
+
+[[package]]
+name = "matplotlib-inline"
+version = "0.1.6"
+description = "Inline Matplotlib backend for Jupyter"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "matplotlib-inline-0.1.6.tar.gz", hash = "sha256:f887e5f10ba98e8d2b150ddcf4702c1e5f8b3a20005eb0f74bfdbd360ee6f304"},
+ {file = "matplotlib_inline-0.1.6-py3-none-any.whl", hash = "sha256:f1f41aab5328aa5aaea9b16d083b128102f8712542f819fe7e6a420ff581b311"},
+]
+
+[package.dependencies]
+traitlets = "*"
+
+[[package]]
+name = "mdit-py-plugins"
+version = "0.3.3"
+description = "Collection of plugins for markdown-it-py"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "mdit-py-plugins-0.3.3.tar.gz", hash = "sha256:5cfd7e7ac582a594e23ba6546a2f406e94e42eb33ae596d0734781261c251260"},
+ {file = "mdit_py_plugins-0.3.3-py3-none-any.whl", hash = "sha256:36d08a29def19ec43acdcd8ba471d3ebab132e7879d442760d963f19913e04b9"},
+]
+
+[package.dependencies]
+markdown-it-py = ">=1.0.0,<3.0.0"
+
+[package.extras]
+code-style = ["pre-commit"]
+rtd = ["attrs", "myst-parser (>=0.16.1,<0.17.0)", "sphinx-book-theme (>=0.1.0,<0.2.0)"]
+testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"]
+
+[[package]]
+name = "mdurl"
+version = "0.1.2"
+description = "Markdown URL utilities"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"},
+ {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"},
+]
+
+[[package]]
+name = "mpmath"
+version = "1.3.0"
+description = "Python library for arbitrary-precision floating-point arithmetic"
+optional = false
+python-versions = "*"
+files = [
+ {file = "mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c"},
+ {file = "mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f"},
+]
+
+[package.extras]
+develop = ["codecov", "pycodestyle", "pytest (>=4.6)", "pytest-cov", "wheel"]
+docs = ["sphinx"]
+gmpy = ["gmpy2 (>=2.1.0a4)"]
+tests = ["pytest (>=4.6)"]
+
+[[package]]
+name = "multidict"
+version = "6.0.4"
+description = "multidict implementation"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "multidict-6.0.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0b1a97283e0c85772d613878028fec909f003993e1007eafa715b24b377cb9b8"},
+ {file = "multidict-6.0.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:eeb6dcc05e911516ae3d1f207d4b0520d07f54484c49dfc294d6e7d63b734171"},
+ {file = "multidict-6.0.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d6d635d5209b82a3492508cf5b365f3446afb65ae7ebd755e70e18f287b0adf7"},
+ {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c048099e4c9e9d615545e2001d3d8a4380bd403e1a0578734e0d31703d1b0c0b"},
+ {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ea20853c6dbbb53ed34cb4d080382169b6f4554d394015f1bef35e881bf83547"},
+ {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:16d232d4e5396c2efbbf4f6d4df89bfa905eb0d4dc5b3549d872ab898451f569"},
+ {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36c63aaa167f6c6b04ef2c85704e93af16c11d20de1d133e39de6a0e84582a93"},
+ {file = "multidict-6.0.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:64bdf1086b6043bf519869678f5f2757f473dee970d7abf6da91ec00acb9cb98"},
+ {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:43644e38f42e3af682690876cff722d301ac585c5b9e1eacc013b7a3f7b696a0"},
+ {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7582a1d1030e15422262de9f58711774e02fa80df0d1578995c76214f6954988"},
+ {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:ddff9c4e225a63a5afab9dd15590432c22e8057e1a9a13d28ed128ecf047bbdc"},
+ {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:ee2a1ece51b9b9e7752e742cfb661d2a29e7bcdba2d27e66e28a99f1890e4fa0"},
+ {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a2e4369eb3d47d2034032a26c7a80fcb21a2cb22e1173d761a162f11e562caa5"},
+ {file = "multidict-6.0.4-cp310-cp310-win32.whl", hash = "sha256:574b7eae1ab267e5f8285f0fe881f17efe4b98c39a40858247720935b893bba8"},
+ {file = "multidict-6.0.4-cp310-cp310-win_amd64.whl", hash = "sha256:4dcbb0906e38440fa3e325df2359ac6cb043df8e58c965bb45f4e406ecb162cc"},
+ {file = "multidict-6.0.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0dfad7a5a1e39c53ed00d2dd0c2e36aed4650936dc18fd9a1826a5ae1cad6f03"},
+ {file = "multidict-6.0.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:64da238a09d6039e3bd39bb3aee9c21a5e34f28bfa5aa22518581f910ff94af3"},
+ {file = "multidict-6.0.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ff959bee35038c4624250473988b24f846cbeb2c6639de3602c073f10410ceba"},
+ {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:01a3a55bd90018c9c080fbb0b9f4891db37d148a0a18722b42f94694f8b6d4c9"},
+ {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c5cb09abb18c1ea940fb99360ea0396f34d46566f157122c92dfa069d3e0e982"},
+ {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:666daae833559deb2d609afa4490b85830ab0dfca811a98b70a205621a6109fe"},
+ {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11bdf3f5e1518b24530b8241529d2050014c884cf18b6fc69c0c2b30ca248710"},
+ {file = "multidict-6.0.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d18748f2d30f94f498e852c67d61261c643b349b9d2a581131725595c45ec6c"},
+ {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:458f37be2d9e4c95e2d8866a851663cbc76e865b78395090786f6cd9b3bbf4f4"},
+ {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:b1a2eeedcead3a41694130495593a559a668f382eee0727352b9a41e1c45759a"},
+ {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7d6ae9d593ef8641544d6263c7fa6408cc90370c8cb2bbb65f8d43e5b0351d9c"},
+ {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:5979b5632c3e3534e42ca6ff856bb24b2e3071b37861c2c727ce220d80eee9ed"},
+ {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:dcfe792765fab89c365123c81046ad4103fcabbc4f56d1c1997e6715e8015461"},
+ {file = "multidict-6.0.4-cp311-cp311-win32.whl", hash = "sha256:3601a3cece3819534b11d4efc1eb76047488fddd0c85a3948099d5da4d504636"},
+ {file = "multidict-6.0.4-cp311-cp311-win_amd64.whl", hash = "sha256:81a4f0b34bd92df3da93315c6a59034df95866014ac08535fc819f043bfd51f0"},
+ {file = "multidict-6.0.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:67040058f37a2a51ed8ea8f6b0e6ee5bd78ca67f169ce6122f3e2ec80dfe9b78"},
+ {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:853888594621e6604c978ce2a0444a1e6e70c8d253ab65ba11657659dcc9100f"},
+ {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:39ff62e7d0f26c248b15e364517a72932a611a9b75f35b45be078d81bdb86603"},
+ {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:af048912e045a2dc732847d33821a9d84ba553f5c5f028adbd364dd4765092ac"},
+ {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1e8b901e607795ec06c9e42530788c45ac21ef3aaa11dbd0c69de543bfb79a9"},
+ {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62501642008a8b9871ddfccbf83e4222cf8ac0d5aeedf73da36153ef2ec222d2"},
+ {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:99b76c052e9f1bc0721f7541e5e8c05db3941eb9ebe7b8553c625ef88d6eefde"},
+ {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:509eac6cf09c794aa27bcacfd4d62c885cce62bef7b2c3e8b2e49d365b5003fe"},
+ {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:21a12c4eb6ddc9952c415f24eef97e3e55ba3af61f67c7bc388dcdec1404a067"},
+ {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:5cad9430ab3e2e4fa4a2ef4450f548768400a2ac635841bc2a56a2052cdbeb87"},
+ {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ab55edc2e84460694295f401215f4a58597f8f7c9466faec545093045476327d"},
+ {file = "multidict-6.0.4-cp37-cp37m-win32.whl", hash = "sha256:5a4dcf02b908c3b8b17a45fb0f15b695bf117a67b76b7ad18b73cf8e92608775"},
+ {file = "multidict-6.0.4-cp37-cp37m-win_amd64.whl", hash = "sha256:6ed5f161328b7df384d71b07317f4d8656434e34591f20552c7bcef27b0ab88e"},
+ {file = "multidict-6.0.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5fc1b16f586f049820c5c5b17bb4ee7583092fa0d1c4e28b5239181ff9532e0c"},
+ {file = "multidict-6.0.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1502e24330eb681bdaa3eb70d6358e818e8e8f908a22a1851dfd4e15bc2f8161"},
+ {file = "multidict-6.0.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b692f419760c0e65d060959df05f2a531945af31fda0c8a3b3195d4efd06de11"},
+ {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45e1ecb0379bfaab5eef059f50115b54571acfbe422a14f668fc8c27ba410e7e"},
+ {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ddd3915998d93fbcd2566ddf9cf62cdb35c9e093075f862935573d265cf8f65d"},
+ {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:59d43b61c59d82f2effb39a93c48b845efe23a3852d201ed2d24ba830d0b4cf2"},
+ {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc8e1d0c705233c5dd0c5e6460fbad7827d5d36f310a0fadfd45cc3029762258"},
+ {file = "multidict-6.0.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6aa0418fcc838522256761b3415822626f866758ee0bc6632c9486b179d0b52"},
+ {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6748717bb10339c4760c1e63da040f5f29f5ed6e59d76daee30305894069a660"},
+ {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:4d1a3d7ef5e96b1c9e92f973e43aa5e5b96c659c9bc3124acbbd81b0b9c8a951"},
+ {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4372381634485bec7e46718edc71528024fcdc6f835baefe517b34a33c731d60"},
+ {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:fc35cb4676846ef752816d5be2193a1e8367b4c1397b74a565a9d0389c433a1d"},
+ {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:4b9d9e4e2b37daddb5c23ea33a3417901fa7c7b3dee2d855f63ee67a0b21e5b1"},
+ {file = "multidict-6.0.4-cp38-cp38-win32.whl", hash = "sha256:e41b7e2b59679edfa309e8db64fdf22399eec4b0b24694e1b2104fb789207779"},
+ {file = "multidict-6.0.4-cp38-cp38-win_amd64.whl", hash = "sha256:d6c254ba6e45d8e72739281ebc46ea5eb5f101234f3ce171f0e9f5cc86991480"},
+ {file = "multidict-6.0.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:16ab77bbeb596e14212e7bab8429f24c1579234a3a462105cda4a66904998664"},
+ {file = "multidict-6.0.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc779e9e6f7fda81b3f9aa58e3a6091d49ad528b11ed19f6621408806204ad35"},
+ {file = "multidict-6.0.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4ceef517eca3e03c1cceb22030a3e39cb399ac86bff4e426d4fc6ae49052cc60"},
+ {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:281af09f488903fde97923c7744bb001a9b23b039a909460d0f14edc7bf59706"},
+ {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:52f2dffc8acaba9a2f27174c41c9e57f60b907bb9f096b36b1a1f3be71c6284d"},
+ {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b41156839806aecb3641f3208c0dafd3ac7775b9c4c422d82ee2a45c34ba81ca"},
+ {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5e3fc56f88cc98ef8139255cf8cd63eb2c586531e43310ff859d6bb3a6b51f1"},
+ {file = "multidict-6.0.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8316a77808c501004802f9beebde51c9f857054a0c871bd6da8280e718444449"},
+ {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f70b98cd94886b49d91170ef23ec5c0e8ebb6f242d734ed7ed677b24d50c82cf"},
+ {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bf6774e60d67a9efe02b3616fee22441d86fab4c6d335f9d2051d19d90a40063"},
+ {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:e69924bfcdda39b722ef4d9aa762b2dd38e4632b3641b1d9a57ca9cd18f2f83a"},
+ {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:6b181d8c23da913d4ff585afd1155a0e1194c0b50c54fcfe286f70cdaf2b7176"},
+ {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:52509b5be062d9eafc8170e53026fbc54cf3b32759a23d07fd935fb04fc22d95"},
+ {file = "multidict-6.0.4-cp39-cp39-win32.whl", hash = "sha256:27c523fbfbdfd19c6867af7346332b62b586eed663887392cff78d614f9ec313"},
+ {file = "multidict-6.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:33029f5734336aa0d4c0384525da0387ef89148dc7191aae00ca5fb23d7aafc2"},
+ {file = "multidict-6.0.4.tar.gz", hash = "sha256:3666906492efb76453c0e7b97f2cf459b0682e7402c0489a95484965dbc1da49"},
+]
+
+[[package]]
+name = "networkx"
+version = "3.1"
+description = "Python package for creating and manipulating graphs and networks"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "networkx-3.1-py3-none-any.whl", hash = "sha256:4f33f68cb2afcf86f28a45f43efc27a9386b535d567d2127f8f61d51dec58d36"},
+ {file = "networkx-3.1.tar.gz", hash = "sha256:de346335408f84de0eada6ff9fafafff9bcda11f0a0dfaa931133debb146ab61"},
+]
+
+[package.extras]
+default = ["matplotlib (>=3.4)", "numpy (>=1.20)", "pandas (>=1.3)", "scipy (>=1.8)"]
+developer = ["mypy (>=1.1)", "pre-commit (>=3.2)"]
+doc = ["nb2plots (>=0.6)", "numpydoc (>=1.5)", "pillow (>=9.4)", "pydata-sphinx-theme (>=0.13)", "sphinx (>=6.1)", "sphinx-gallery (>=0.12)", "texext (>=0.6.7)"]
+extra = ["lxml (>=4.6)", "pydot (>=1.4.2)", "pygraphviz (>=1.10)", "sympy (>=1.10)"]
+test = ["codecov (>=2.1)", "pytest (>=7.2)", "pytest-cov (>=4.0)"]
+
+[[package]]
+name = "numba"
+version = "0.56.4"
+description = "compiling Python code using LLVM"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "numba-0.56.4-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:9f62672145f8669ec08762895fe85f4cf0ead08ce3164667f2b94b2f62ab23c3"},
+ {file = "numba-0.56.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c602d015478b7958408d788ba00a50272649c5186ea8baa6cf71d4a1c761bba1"},
+ {file = "numba-0.56.4-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:85dbaed7a05ff96492b69a8900c5ba605551afb9b27774f7f10511095451137c"},
+ {file = "numba-0.56.4-cp310-cp310-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:f4cfc3a19d1e26448032049c79fc60331b104f694cf570a9e94f4e2c9d0932bb"},
+ {file = "numba-0.56.4-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4e08e203b163ace08bad500b0c16f6092b1eb34fd1fce4feaf31a67a3a5ecf3b"},
+ {file = "numba-0.56.4-cp310-cp310-win32.whl", hash = "sha256:0611e6d3eebe4cb903f1a836ffdb2bda8d18482bcd0a0dcc56e79e2aa3fefef5"},
+ {file = "numba-0.56.4-cp310-cp310-win_amd64.whl", hash = "sha256:fbfb45e7b297749029cb28694abf437a78695a100e7c2033983d69f0ba2698d4"},
+ {file = "numba-0.56.4-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:3cb1a07a082a61df80a468f232e452d818f5ae254b40c26390054e4e868556e0"},
+ {file = "numba-0.56.4-cp37-cp37m-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d69ad934e13c15684e7887100a8f5f0f61d7a8e57e0fd29d9993210089a5b531"},
+ {file = "numba-0.56.4-cp37-cp37m-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:dbcc847bac2d225265d054993a7f910fda66e73d6662fe7156452cac0325b073"},
+ {file = "numba-0.56.4-cp37-cp37m-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8a95ca9cc77ea4571081f6594e08bd272b66060634b8324e99cd1843020364f9"},
+ {file = "numba-0.56.4-cp37-cp37m-win32.whl", hash = "sha256:fcdf84ba3ed8124eb7234adfbb8792f311991cbf8aed1cad4b1b1a7ee08380c1"},
+ {file = "numba-0.56.4-cp37-cp37m-win_amd64.whl", hash = "sha256:42f9e1be942b215df7e6cc9948cf9c15bb8170acc8286c063a9e57994ef82fd1"},
+ {file = "numba-0.56.4-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:553da2ce74e8862e18a72a209ed3b6d2924403bdd0fb341fa891c6455545ba7c"},
+ {file = "numba-0.56.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4373da9757049db7c90591e9ec55a2e97b2b36ba7ae3bf9c956a513374077470"},
+ {file = "numba-0.56.4-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3a993349b90569518739009d8f4b523dfedd7e0049e6838c0e17435c3e70dcc4"},
+ {file = "numba-0.56.4-cp38-cp38-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:720886b852a2d62619ae3900fe71f1852c62db4f287d0c275a60219e1643fc04"},
+ {file = "numba-0.56.4-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:e64d338b504c9394a4a34942df4627e1e6cb07396ee3b49fe7b8d6420aa5104f"},
+ {file = "numba-0.56.4-cp38-cp38-win32.whl", hash = "sha256:03fe94cd31e96185cce2fae005334a8cc712fc2ba7756e52dff8c9400718173f"},
+ {file = "numba-0.56.4-cp38-cp38-win_amd64.whl", hash = "sha256:91f021145a8081f881996818474ef737800bcc613ffb1e618a655725a0f9e246"},
+ {file = "numba-0.56.4-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:d0ae9270a7a5cc0ede63cd234b4ff1ce166c7a749b91dbbf45e0000c56d3eade"},
+ {file = "numba-0.56.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c75e8a5f810ce80a0cfad6e74ee94f9fde9b40c81312949bf356b7304ef20740"},
+ {file = "numba-0.56.4-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:a12ef323c0f2101529d455cfde7f4135eaa147bad17afe10b48634f796d96abd"},
+ {file = "numba-0.56.4-cp39-cp39-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:03634579d10a6129181129de293dd6b5eaabee86881369d24d63f8fe352dd6cb"},
+ {file = "numba-0.56.4-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0240f9026b015e336069329839208ebd70ec34ae5bfbf402e4fcc8e06197528e"},
+ {file = "numba-0.56.4-cp39-cp39-win32.whl", hash = "sha256:14dbbabf6ffcd96ee2ac827389afa59a70ffa9f089576500434c34abf9b054a4"},
+ {file = "numba-0.56.4-cp39-cp39-win_amd64.whl", hash = "sha256:0da583c532cd72feefd8e551435747e0e0fbb3c0530357e6845fcc11e38d6aea"},
+ {file = "numba-0.56.4.tar.gz", hash = "sha256:32d9fef412c81483d7efe0ceb6cf4d3310fde8b624a9cecca00f790573ac96ee"},
+]
+
+[package.dependencies]
+importlib-metadata = {version = "*", markers = "python_version < \"3.9\""}
+llvmlite = "==0.39.*"
+numpy = ">=1.18,<1.24"
+setuptools = "*"
+
+[[package]]
+name = "numpy"
+version = "1.23.5"
+description = "NumPy is the fundamental package for array computing with Python."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "numpy-1.23.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9c88793f78fca17da0145455f0d7826bcb9f37da4764af27ac945488116efe63"},
+ {file = "numpy-1.23.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e9f4c4e51567b616be64e05d517c79a8a22f3606499941d97bb76f2ca59f982d"},
+ {file = "numpy-1.23.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7903ba8ab592b82014713c491f6c5d3a1cde5b4a3bf116404e08f5b52f6daf43"},
+ {file = "numpy-1.23.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e05b1c973a9f858c74367553e236f287e749465f773328c8ef31abe18f691e1"},
+ {file = "numpy-1.23.5-cp310-cp310-win32.whl", hash = "sha256:522e26bbf6377e4d76403826ed689c295b0b238f46c28a7251ab94716da0b280"},
+ {file = "numpy-1.23.5-cp310-cp310-win_amd64.whl", hash = "sha256:dbee87b469018961d1ad79b1a5d50c0ae850000b639bcb1b694e9981083243b6"},
+ {file = "numpy-1.23.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ce571367b6dfe60af04e04a1834ca2dc5f46004ac1cc756fb95319f64c095a96"},
+ {file = "numpy-1.23.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:56e454c7833e94ec9769fa0f86e6ff8e42ee38ce0ce1fa4cbb747ea7e06d56aa"},
+ {file = "numpy-1.23.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5039f55555e1eab31124a5768898c9e22c25a65c1e0037f4d7c495a45778c9f2"},
+ {file = "numpy-1.23.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58f545efd1108e647604a1b5aa809591ccd2540f468a880bedb97247e72db387"},
+ {file = "numpy-1.23.5-cp311-cp311-win32.whl", hash = "sha256:b2a9ab7c279c91974f756c84c365a669a887efa287365a8e2c418f8b3ba73fb0"},
+ {file = "numpy-1.23.5-cp311-cp311-win_amd64.whl", hash = "sha256:0cbe9848fad08baf71de1a39e12d1b6310f1d5b2d0ea4de051058e6e1076852d"},
+ {file = "numpy-1.23.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f063b69b090c9d918f9df0a12116029e274daf0181df392839661c4c7ec9018a"},
+ {file = "numpy-1.23.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0aaee12d8883552fadfc41e96b4c82ee7d794949e2a7c3b3a7201e968c7ecab9"},
+ {file = "numpy-1.23.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:92c8c1e89a1f5028a4c6d9e3ccbe311b6ba53694811269b992c0b224269e2398"},
+ {file = "numpy-1.23.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d208a0f8729f3fb790ed18a003f3a57895b989b40ea4dce4717e9cf4af62c6bb"},
+ {file = "numpy-1.23.5-cp38-cp38-win32.whl", hash = "sha256:06005a2ef6014e9956c09ba07654f9837d9e26696a0470e42beedadb78c11b07"},
+ {file = "numpy-1.23.5-cp38-cp38-win_amd64.whl", hash = "sha256:ca51fcfcc5f9354c45f400059e88bc09215fb71a48d3768fb80e357f3b457e1e"},
+ {file = "numpy-1.23.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8969bfd28e85c81f3f94eb4a66bc2cf1dbdc5c18efc320af34bffc54d6b1e38f"},
+ {file = "numpy-1.23.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a7ac231a08bb37f852849bbb387a20a57574a97cfc7b6cabb488a4fc8be176de"},
+ {file = "numpy-1.23.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf837dc63ba5c06dc8797c398db1e223a466c7ece27a1f7b5232ba3466aafe3d"},
+ {file = "numpy-1.23.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33161613d2269025873025b33e879825ec7b1d831317e68f4f2f0f84ed14c719"},
+ {file = "numpy-1.23.5-cp39-cp39-win32.whl", hash = "sha256:af1da88f6bc3d2338ebbf0e22fe487821ea4d8e89053e25fa59d1d79786e7481"},
+ {file = "numpy-1.23.5-cp39-cp39-win_amd64.whl", hash = "sha256:09b7847f7e83ca37c6e627682f145856de331049013853f344f37b0c9690e3df"},
+ {file = "numpy-1.23.5-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:abdde9f795cf292fb9651ed48185503a2ff29be87770c3b8e2a14b0cd7aa16f8"},
+ {file = "numpy-1.23.5-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9a909a8bae284d46bbfdefbdd4a262ba19d3bc9921b1e76126b1d21c3c34135"},
+ {file = "numpy-1.23.5-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:01dd17cbb340bf0fc23981e52e1d18a9d4050792e8fb8363cecbf066a84b827d"},
+ {file = "numpy-1.23.5.tar.gz", hash = "sha256:1b1766d6f397c18153d40015ddfc79ddb715cabadc04d2d228d4e5a8bc4ded1a"},
+]
+
+[[package]]
+name = "nvidia-cublas-cu11"
+version = "11.10.3.66"
+description = "CUBLAS native runtime libraries"
+optional = false
+python-versions = ">=3"
+files = [
+ {file = "nvidia_cublas_cu11-11.10.3.66-py3-none-manylinux1_x86_64.whl", hash = "sha256:d32e4d75f94ddfb93ea0a5dda08389bcc65d8916a25cb9f37ac89edaeed3bded"},
+ {file = "nvidia_cublas_cu11-11.10.3.66-py3-none-win_amd64.whl", hash = "sha256:8ac17ba6ade3ed56ab898a036f9ae0756f1e81052a317bf98f8c6d18dc3ae49e"},
+]
+
+[package.dependencies]
+setuptools = "*"
+wheel = "*"
+
+[[package]]
+name = "nvidia-cuda-cupti-cu11"
+version = "11.7.101"
+description = "CUDA profiling tools runtime libs."
+optional = false
+python-versions = ">=3"
+files = [
+ {file = "nvidia_cuda_cupti_cu11-11.7.101-py3-none-manylinux1_x86_64.whl", hash = "sha256:e0cfd9854e1f2edaa36ca20d21cd0bdd5dcfca4e3b9e130a082e05b33b6c5895"},
+ {file = "nvidia_cuda_cupti_cu11-11.7.101-py3-none-win_amd64.whl", hash = "sha256:7cc5b8f91ae5e1389c3c0ad8866b3b016a175e827ea8f162a672990a402ab2b0"},
+]
+
+[package.dependencies]
+setuptools = "*"
+wheel = "*"
+
+[[package]]
+name = "nvidia-cuda-nvrtc-cu11"
+version = "11.7.99"
+description = "NVRTC native runtime libraries"
+optional = false
+python-versions = ">=3"
+files = [
+ {file = "nvidia_cuda_nvrtc_cu11-11.7.99-2-py3-none-manylinux1_x86_64.whl", hash = "sha256:9f1562822ea264b7e34ed5930567e89242d266448e936b85bc97a3370feabb03"},
+ {file = "nvidia_cuda_nvrtc_cu11-11.7.99-py3-none-manylinux1_x86_64.whl", hash = "sha256:f7d9610d9b7c331fa0da2d1b2858a4a8315e6d49765091d28711c8946e7425e7"},
+ {file = "nvidia_cuda_nvrtc_cu11-11.7.99-py3-none-win_amd64.whl", hash = "sha256:f2effeb1309bdd1b3854fc9b17eaf997808f8b25968ce0c7070945c4265d64a3"},
+]
+
+[package.dependencies]
+setuptools = "*"
+wheel = "*"
+
+[[package]]
+name = "nvidia-cuda-runtime-cu11"
+version = "11.7.99"
+description = "CUDA Runtime native Libraries"
+optional = false
+python-versions = ">=3"
+files = [
+ {file = "nvidia_cuda_runtime_cu11-11.7.99-py3-none-manylinux1_x86_64.whl", hash = "sha256:cc768314ae58d2641f07eac350f40f99dcb35719c4faff4bc458a7cd2b119e31"},
+ {file = "nvidia_cuda_runtime_cu11-11.7.99-py3-none-win_amd64.whl", hash = "sha256:bc77fa59a7679310df9d5c70ab13c4e34c64ae2124dd1efd7e5474b71be125c7"},
+]
+
+[package.dependencies]
+setuptools = "*"
+wheel = "*"
+
+[[package]]
+name = "nvidia-cudnn-cu11"
+version = "8.5.0.96"
+description = "cuDNN runtime libraries"
+optional = false
+python-versions = ">=3"
+files = [
+ {file = "nvidia_cudnn_cu11-8.5.0.96-2-py3-none-manylinux1_x86_64.whl", hash = "sha256:402f40adfc6f418f9dae9ab402e773cfed9beae52333f6d86ae3107a1b9527e7"},
+ {file = "nvidia_cudnn_cu11-8.5.0.96-py3-none-manylinux1_x86_64.whl", hash = "sha256:71f8111eb830879ff2836db3cccf03bbd735df9b0d17cd93761732ac50a8a108"},
+]
+
+[package.dependencies]
+setuptools = "*"
+wheel = "*"
+
+[[package]]
+name = "nvidia-cufft-cu11"
+version = "10.9.0.58"
+description = "CUFFT native runtime libraries"
+optional = false
+python-versions = ">=3"
+files = [
+ {file = "nvidia_cufft_cu11-10.9.0.58-py3-none-manylinux1_x86_64.whl", hash = "sha256:222f9da70c80384632fd6035e4c3f16762d64ea7a843829cb278f98b3cb7dd81"},
+ {file = "nvidia_cufft_cu11-10.9.0.58-py3-none-win_amd64.whl", hash = "sha256:c4d316f17c745ec9c728e30409612eaf77a8404c3733cdf6c9c1569634d1ca03"},
+]
+
+[[package]]
+name = "nvidia-curand-cu11"
+version = "10.2.10.91"
+description = "CURAND native runtime libraries"
+optional = false
+python-versions = ">=3"
+files = [
+ {file = "nvidia_curand_cu11-10.2.10.91-py3-none-manylinux1_x86_64.whl", hash = "sha256:eecb269c970fa599a2660c9232fa46aaccbf90d9170b96c462e13bcb4d129e2c"},
+ {file = "nvidia_curand_cu11-10.2.10.91-py3-none-win_amd64.whl", hash = "sha256:f742052af0e1e75523bde18895a9ed016ecf1e5aa0ecddfcc3658fd11a1ff417"},
+]
+
+[package.dependencies]
+setuptools = "*"
+wheel = "*"
+
+[[package]]
+name = "nvidia-cusolver-cu11"
+version = "11.4.0.1"
+description = "CUDA solver native runtime libraries"
+optional = false
+python-versions = ">=3"
+files = [
+ {file = "nvidia_cusolver_cu11-11.4.0.1-2-py3-none-manylinux1_x86_64.whl", hash = "sha256:72fa7261d755ed55c0074960df5904b65e2326f7adce364cbe4945063c1be412"},
+ {file = "nvidia_cusolver_cu11-11.4.0.1-py3-none-manylinux1_x86_64.whl", hash = "sha256:700b781bfefd57d161443aff9ace1878584b93e0b2cfef3d6e9296d96febbf99"},
+ {file = "nvidia_cusolver_cu11-11.4.0.1-py3-none-win_amd64.whl", hash = "sha256:00f70b256add65f8c1eb3b6a65308795a93e7740f6df9e273eccbba770d370c4"},
+]
+
+[package.dependencies]
+setuptools = "*"
+wheel = "*"
+
+[[package]]
+name = "nvidia-cusparse-cu11"
+version = "11.7.4.91"
+description = "CUSPARSE native runtime libraries"
+optional = false
+python-versions = ">=3"
+files = [
+ {file = "nvidia_cusparse_cu11-11.7.4.91-py3-none-manylinux1_x86_64.whl", hash = "sha256:a3389de714db63321aa11fbec3919271f415ef19fda58aed7f2ede488c32733d"},
+ {file = "nvidia_cusparse_cu11-11.7.4.91-py3-none-win_amd64.whl", hash = "sha256:304a01599534f5186a8ed1c3756879282c72c118bc77dd890dc1ff868cad25b9"},
+]
+
+[package.dependencies]
+setuptools = "*"
+wheel = "*"
+
+[[package]]
+name = "nvidia-nccl-cu11"
+version = "2.14.3"
+description = "NVIDIA Collective Communication Library (NCCL) Runtime"
+optional = false
+python-versions = ">=3"
+files = [
+ {file = "nvidia_nccl_cu11-2.14.3-py3-none-manylinux1_x86_64.whl", hash = "sha256:5e5534257d1284b8e825bc3a182c6f06acd6eb405e9f89d49340e98cd8f136eb"},
+]
+
+[[package]]
+name = "nvidia-nvtx-cu11"
+version = "11.7.91"
+description = "NVIDIA Tools Extension"
+optional = false
+python-versions = ">=3"
+files = [
+ {file = "nvidia_nvtx_cu11-11.7.91-py3-none-manylinux1_x86_64.whl", hash = "sha256:b22c64eee426a62fc00952b507d6d29cf62b4c9df7a480fcc417e540e05fd5ac"},
+ {file = "nvidia_nvtx_cu11-11.7.91-py3-none-win_amd64.whl", hash = "sha256:dfd7fcb2a91742513027d63a26b757f38dd8b07fecac282c4d132a9d373ff064"},
+]
+
+[package.dependencies]
+setuptools = "*"
+wheel = "*"
+
+[[package]]
+name = "oauthlib"
+version = "3.2.2"
+description = "A generic, spec-compliant, thorough implementation of the OAuth request-signing logic"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "oauthlib-3.2.2-py3-none-any.whl", hash = "sha256:8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca"},
+ {file = "oauthlib-3.2.2.tar.gz", hash = "sha256:9859c40929662bec5d64f34d01c99e093149682a3f38915dc0655d5a633dd918"},
+]
+
+[package.extras]
+rsa = ["cryptography (>=3.0.0)"]
+signals = ["blinker (>=1.4.0)"]
+signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"]
+
+[[package]]
+name = "omegaconf"
+version = "2.0.6"
+description = "A flexible configuration library"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "omegaconf-2.0.6-py3-none-any.whl", hash = "sha256:9e349fd76819b95b47aa628edea1ff83fed5b25108608abdd6c7fdca188e302a"},
+ {file = "omegaconf-2.0.6.tar.gz", hash = "sha256:92ca535a788d21651bf4c2eaf5c1ca4c7a8003b2dab4a87cbb09109784268806"},
+]
+
+[package.dependencies]
+PyYAML = ">=5.1"
+typing-extensions = "*"
+
+[[package]]
+name = "orjson"
+version = "3.8.9"
+description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "orjson-3.8.9-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:5d029843eae7b6cbd6468b63517b8b61471afed6572162171d8b6471b6dbf41f"},
+ {file = "orjson-3.8.9-cp310-cp310-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:405933c05490efb209d0f940d8ef1403d2932a97e47010a26d2694e9dd49f84d"},
+ {file = "orjson-3.8.9-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:183de66eff4d41c330a3006f210ab0bce7affe398da6f6eda9579b67245a34ff"},
+ {file = "orjson-3.8.9-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bb4081fe340ed1df42dddfd055e1d50479cb0ccb976d13e6b5e8667a07fec6f4"},
+ {file = "orjson-3.8.9-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d11593a2e736055dd7b9587dbf89cd1cbe4a42a70e70f186e51aee7e1b38902e"},
+ {file = "orjson-3.8.9-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e20649359e28f34d01b2570e4650a076f439a959bae3a8bbe7f5923ad80f54e8"},
+ {file = "orjson-3.8.9-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:c02ece4f36a160c83efe74adfba5f189c7c7702361f02b809ab73744923ee139"},
+ {file = "orjson-3.8.9-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:f0e19801836cf1b30f333d475b05d79051b8ae8639a8e2422fb5f64e82676ae7"},
+ {file = "orjson-3.8.9-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d4850fe5650cead3c0f8822192e381cee9d4c3b8162eb082c86c927124572dc6"},
+ {file = "orjson-3.8.9-cp310-none-win_amd64.whl", hash = "sha256:5fd4193f260d9d30112b5e379d0870b54dc88040807c93cbe8d67bfea148ba5a"},
+ {file = "orjson-3.8.9-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:70eae063ad8d7405dc63873760567b600fc10728ba0da24a69d49c1a5d318d6d"},
+ {file = "orjson-3.8.9-cp311-cp311-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:251653437632583d02203e6b118b72b99c04425175853f35340f4bac7034a36e"},
+ {file = "orjson-3.8.9-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ea833751f017ba321c277e7425b51c0b1a18a2c60f8c9c0f4c6c4d7e16cbd6c"},
+ {file = "orjson-3.8.9-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8563c2cdeb923b82a5cc5bfc76c28c786777428263ee39292d928e9687165fb4"},
+ {file = "orjson-3.8.9-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6f33e9ea45b4c9457eedca0c40f38cf5732c91b0fb68f091ac59e6ea68e03eb2"},
+ {file = "orjson-3.8.9-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:855dee152daecb7de7b4cd7069d7854e11aa291687bffe8433156af0a224417e"},
+ {file = "orjson-3.8.9-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:74fa9e02589339defc9d3662de9e7eef51d8f9f3a7f6304b43b18b39d7bbf10f"},
+ {file = "orjson-3.8.9-cp311-none-win_amd64.whl", hash = "sha256:6c5b10ba1e62df8f96cbc37f6d5ae9acb3f6475926dea8b1b6a1a60f201a64f7"},
+ {file = "orjson-3.8.9-cp37-cp37m-macosx_10_7_x86_64.whl", hash = "sha256:a651123d01bc399fcd866e56acc2d76512e62aae3673652b13b470ea69faf1f4"},
+ {file = "orjson-3.8.9-cp37-cp37m-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:73019b6d2cc998c99556020c6bd8f8bc28420c69583186ca290c66a27916a3b7"},
+ {file = "orjson-3.8.9-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f5c3daa8b02786ad5f0e14ae16a59bbb4e02cbae3a41989a25188e5a6c962ff"},
+ {file = "orjson-3.8.9-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:598598b7f81f8fda7c3e09c88165f844152b7be223bc4ea929ec8ad59b00ea17"},
+ {file = "orjson-3.8.9-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:090b10bdb06baae6d5cd3550d772ecbabd833bfceed7592ff167c0a82f5b4c20"},
+ {file = "orjson-3.8.9-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd46f688ddf9c2ea10367446fe9bf3ceba0f7490c15b4f96420491c7f00bb283"},
+ {file = "orjson-3.8.9-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:b8ed8d780e9fab01bc404a70d755a8b2b34ea6c0b6604b65de135daaaadaf9a9"},
+ {file = "orjson-3.8.9-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:8a32c9fb742868a34346f3c52e12d893a9d27f8e0c0bf3c480db7e6903d8be28"},
+ {file = "orjson-3.8.9-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:2ba366009b98ac8899e935eff6fef7672d3ea43d3ce9deb3ee33452134b6cc3a"},
+ {file = "orjson-3.8.9-cp37-none-win_amd64.whl", hash = "sha256:236b9313425cb2570626c64dd5cb6caff13882d1717d491da542cff228b96e97"},
+ {file = "orjson-3.8.9-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:e8efc7e9ec35336f7cc98b6692536b1262046ff1d2a545295a4d89b8a2495903"},
+ {file = "orjson-3.8.9-cp38-cp38-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:8c7eba3610ae69f4aba4032ecb61b0a6fbd1e4537283d1553eb8c1cb136e9118"},
+ {file = "orjson-3.8.9-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7742649e4c357d4e7ad483a35ff5f55d519e895de56772cc486913614ee7d23b"},
+ {file = "orjson-3.8.9-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b6566fb8daa538c7848fd6822e2409a7e1c41dae8e65e6536598d505f641a318"},
+ {file = "orjson-3.8.9-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0ce8a2a667221e2e5160021e26b09e9c13eeedafb5cda1981340c8c0c0bc8f9d"},
+ {file = "orjson-3.8.9-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c0399631b88fa4868956badef2561fba07dffcaf050bf53959ee50d26edf6f6"},
+ {file = "orjson-3.8.9-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:189ccb16ed140a824d133fa1c55175cf0d2207edaade54f1db0456a526cb5fd8"},
+ {file = "orjson-3.8.9-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b707fa4481e1af19b3052ec9352c688bad3f539d7bdd8aa4a451f6dd7e4bae73"},
+ {file = "orjson-3.8.9-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:c3d988eb562da1dda7d49e9abd8a64b3cabc632b4299d177fb9e0c0ca9f06b8c"},
+ {file = "orjson-3.8.9-cp38-none-win_amd64.whl", hash = "sha256:b30240eb6b22daab604f1595f6aacf92bcdac0d29e2d7ad507dfac68d2b39182"},
+ {file = "orjson-3.8.9-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:81869a6de00bc676d10056fa8bb28cbe805b1cf498a45c14cb7b1765eee33fcb"},
+ {file = "orjson-3.8.9-cp39-cp39-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:a25a5a215b19d414de8d416a3c5414f29165843a06f704cc0345ded9eac34ac1"},
+ {file = "orjson-3.8.9-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dec0f2bea52e30ea98ce095f1f42da04535791f9a31b2aab2499caa88307bc49"},
+ {file = "orjson-3.8.9-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7b91d88fe96b698b28bb1b95b1fce226f72757ab3ab7d8d97551e23bc629c84f"},
+ {file = "orjson-3.8.9-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7629841ccdcccd3c43ebc6a4165abe9844909fcedb2041994c0153470f610801"},
+ {file = "orjson-3.8.9-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d875b304e19f4b2758d233bbf2b9d627c66fac50b3150b8d31a35ba6cda3db67"},
+ {file = "orjson-3.8.9-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:723ec880c5290fe4de330febb8030e57c1978fbd624fc5b9399969e7d7d74984"},
+ {file = "orjson-3.8.9-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b11f8a71c82d19fce11ce487efeec2ca0dc3bcf5b4564445fecfc68d9c268744"},
+ {file = "orjson-3.8.9-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b2079bf86dec62731c1b90fdfea3211f993f0c894d9261e0ce9b68ed9c9dfbec"},
+ {file = "orjson-3.8.9-cp39-none-win_amd64.whl", hash = "sha256:97d94322a2eaab767ba8d52f6bf9d0ec0f35313fe36287be6e6085dd65d55d37"},
+ {file = "orjson-3.8.9.tar.gz", hash = "sha256:c40bece58c11cb09aff17424d21b41f6f767d2b1252b2f745ec3ff29cce6a240"},
+]
+
+[[package]]
+name = "packaging"
+version = "23.0"
+description = "Core utilities for Python packages"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "packaging-23.0-py3-none-any.whl", hash = "sha256:714ac14496c3e68c99c29b00845f7a2b85f3bb6f1078fd9f72fd20f0570002b2"},
+ {file = "packaging-23.0.tar.gz", hash = "sha256:b6ad297f8907de0fa2fe1ccbd26fdaf387f5f47c7275fedf8cce89f99446cf97"},
+]
+
+[[package]]
+name = "pandas"
+version = "2.0.0"
+description = "Powerful data structures for data analysis, time series, and statistics"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pandas-2.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bbb2c5e94d6aa4e632646a3bacd05c2a871c3aa3e85c9bec9be99cb1267279f2"},
+ {file = "pandas-2.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b5337c87c4e963f97becb1217965b6b75c6fe5f54c4cf09b9a5ac52fc0bd03d3"},
+ {file = "pandas-2.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ded51f7e3dd9b4f8b87f2ceb7bd1a8df2491f7ee72f7074c6927a512607199e"},
+ {file = "pandas-2.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:52c858de9e9fc422d25e67e1592a6e6135d7bcf9a19fcaf4d0831a0be496bf21"},
+ {file = "pandas-2.0.0-cp310-cp310-win32.whl", hash = "sha256:2d1d138848dd71b37e3cbe7cd952ff84e2ab04d8988972166e18567dcc811245"},
+ {file = "pandas-2.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:d08e41d96bc4de6f500afe80936c68fce6099d5a434e2af7c7fd8e7c72a3265d"},
+ {file = "pandas-2.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:24472cfc7ced511ac90608728b88312be56edc8f19b9ed885a7d2e47ffaf69c0"},
+ {file = "pandas-2.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4ffb14f50c74ee541610668137830bb93e9dfa319b1bef2cedf2814cd5ac9c70"},
+ {file = "pandas-2.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c24c7d12d033a372a9daf9ff2c80f8b0af6f98d14664dbb0a4f6a029094928a7"},
+ {file = "pandas-2.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8318de0f886e4dcb8f9f36e45a3d6a6c3d1cfdc508354da85e739090f0222991"},
+ {file = "pandas-2.0.0-cp311-cp311-win32.whl", hash = "sha256:57c34b79c13249505e850d0377b722961b99140f81dafbe6f19ef10239f6284a"},
+ {file = "pandas-2.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:8f987ec26e96a8490909bc5d98c514147236e49830cba7df8690f6087c12bbae"},
+ {file = "pandas-2.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b3ba8f5dd470d8bfbc4259829589f4a32881151c49e36384d9eb982b35a12020"},
+ {file = "pandas-2.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:fcd471c9d9f60926ab2f15c6c29164112f458acb42280365fbefa542d0c2fc74"},
+ {file = "pandas-2.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9253edfd015520ce77a9343eb7097429479c039cd3ebe81d7810ea11b4b24695"},
+ {file = "pandas-2.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:977326039bd1ded620001a1889e2ed4798460a6bc5a24fbaebb5f07a41c32a55"},
+ {file = "pandas-2.0.0-cp38-cp38-win32.whl", hash = "sha256:78425ca12314b23356c28b16765639db10ebb7d8983f705d6759ff7fe41357fa"},
+ {file = "pandas-2.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:d93b7fcfd9f3328072b250d6d001dcfeec5d3bb66c1b9c8941e109a46c0c01a8"},
+ {file = "pandas-2.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:425705cee8be54db2504e8dd2a730684790b15e5904b750c367611ede49098ab"},
+ {file = "pandas-2.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a4f789b7c012a608c08cda4ff0872fd979cb18907a37982abe884e6f529b8793"},
+ {file = "pandas-2.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3bb9d840bf15656805f6a3d87eea9dcb7efdf1314a82adcf7f00b820427c5570"},
+ {file = "pandas-2.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0778ab54c8f399d83d98ffb674d11ec716449956bc6f6821891ab835848687f2"},
+ {file = "pandas-2.0.0-cp39-cp39-win32.whl", hash = "sha256:70db5c278bbec0306d32bf78751ff56b9594c05a5098386f6c8a563659124f91"},
+ {file = "pandas-2.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:4f3320bb55f34af4193020158ef8118ee0fb9aec7cc47d2084dbfdd868a0a24f"},
+ {file = "pandas-2.0.0.tar.gz", hash = "sha256:cda9789e61b44463c1c4fe17ef755de77bcd13b09ba31c940d20f193d63a5dc8"},
+]
+
+[package.dependencies]
+numpy = [
+ {version = ">=1.20.3", markers = "python_version < \"3.10\""},
+ {version = ">=1.21.0", markers = "python_version >= \"3.10\""},
+ {version = ">=1.23.2", markers = "python_version >= \"3.11\""},
+]
+python-dateutil = ">=2.8.2"
+pytz = ">=2020.1"
+tzdata = ">=2022.1"
+
+[package.extras]
+all = ["PyQt5 (>=5.15.1)", "SQLAlchemy (>=1.4.16)", "beautifulsoup4 (>=4.9.3)", "bottleneck (>=1.3.2)", "brotlipy (>=0.7.0)", "fastparquet (>=0.6.3)", "fsspec (>=2021.07.0)", "gcsfs (>=2021.07.0)", "html5lib (>=1.1)", "hypothesis (>=6.34.2)", "jinja2 (>=3.0.0)", "lxml (>=4.6.3)", "matplotlib (>=3.6.1)", "numba (>=0.53.1)", "numexpr (>=2.7.3)", "odfpy (>=1.4.1)", "openpyxl (>=3.0.7)", "pandas-gbq (>=0.15.0)", "psycopg2 (>=2.8.6)", "pyarrow (>=7.0.0)", "pymysql (>=1.0.2)", "pyreadstat (>=1.1.2)", "pytest (>=7.0.0)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)", "python-snappy (>=0.6.0)", "pyxlsb (>=1.0.8)", "qtpy (>=2.2.0)", "s3fs (>=2021.08.0)", "scipy (>=1.7.1)", "tables (>=3.6.1)", "tabulate (>=0.8.9)", "xarray (>=0.21.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=1.4.3)", "zstandard (>=0.15.2)"]
+aws = ["s3fs (>=2021.08.0)"]
+clipboard = ["PyQt5 (>=5.15.1)", "qtpy (>=2.2.0)"]
+compression = ["brotlipy (>=0.7.0)", "python-snappy (>=0.6.0)", "zstandard (>=0.15.2)"]
+computation = ["scipy (>=1.7.1)", "xarray (>=0.21.0)"]
+excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.0.7)", "pyxlsb (>=1.0.8)", "xlrd (>=2.0.1)", "xlsxwriter (>=1.4.3)"]
+feather = ["pyarrow (>=7.0.0)"]
+fss = ["fsspec (>=2021.07.0)"]
+gcp = ["gcsfs (>=2021.07.0)", "pandas-gbq (>=0.15.0)"]
+hdf5 = ["tables (>=3.6.1)"]
+html = ["beautifulsoup4 (>=4.9.3)", "html5lib (>=1.1)", "lxml (>=4.6.3)"]
+mysql = ["SQLAlchemy (>=1.4.16)", "pymysql (>=1.0.2)"]
+output-formatting = ["jinja2 (>=3.0.0)", "tabulate (>=0.8.9)"]
+parquet = ["pyarrow (>=7.0.0)"]
+performance = ["bottleneck (>=1.3.2)", "numba (>=0.53.1)", "numexpr (>=2.7.1)"]
+plot = ["matplotlib (>=3.6.1)"]
+postgresql = ["SQLAlchemy (>=1.4.16)", "psycopg2 (>=2.8.6)"]
+spss = ["pyreadstat (>=1.1.2)"]
+sql-other = ["SQLAlchemy (>=1.4.16)"]
+test = ["hypothesis (>=6.34.2)", "pytest (>=7.0.0)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)"]
+xml = ["lxml (>=4.6.3)"]
+
+[[package]]
+name = "pillow"
+version = "9.3.0"
+description = "Python Imaging Library (Fork)"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "Pillow-9.3.0-1-cp37-cp37m-win32.whl", hash = "sha256:e6ea6b856a74d560d9326c0f5895ef8050126acfdc7ca08ad703eb0081e82b74"},
+ {file = "Pillow-9.3.0-1-cp37-cp37m-win_amd64.whl", hash = "sha256:32a44128c4bdca7f31de5be641187367fe2a450ad83b833ef78910397db491aa"},
+ {file = "Pillow-9.3.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:0b7257127d646ff8676ec8a15520013a698d1fdc48bc2a79ba4e53df792526f2"},
+ {file = "Pillow-9.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b90f7616ea170e92820775ed47e136208e04c967271c9ef615b6fbd08d9af0e3"},
+ {file = "Pillow-9.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68943d632f1f9e3dce98908e873b3a090f6cba1cbb1b892a9e8d97c938871fbe"},
+ {file = "Pillow-9.3.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:be55f8457cd1eac957af0c3f5ece7bc3f033f89b114ef30f710882717670b2a8"},
+ {file = "Pillow-9.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d77adcd56a42d00cc1be30843d3426aa4e660cab4a61021dc84467123f7a00c"},
+ {file = "Pillow-9.3.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:829f97c8e258593b9daa80638aee3789b7df9da5cf1336035016d76f03b8860c"},
+ {file = "Pillow-9.3.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:801ec82e4188e935c7f5e22e006d01611d6b41661bba9fe45b60e7ac1a8f84de"},
+ {file = "Pillow-9.3.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:871b72c3643e516db4ecf20efe735deb27fe30ca17800e661d769faab45a18d7"},
+ {file = "Pillow-9.3.0-cp310-cp310-win32.whl", hash = "sha256:655a83b0058ba47c7c52e4e2df5ecf484c1b0b0349805896dd350cbc416bdd91"},
+ {file = "Pillow-9.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:9f47eabcd2ded7698106b05c2c338672d16a6f2a485e74481f524e2a23c2794b"},
+ {file = "Pillow-9.3.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:57751894f6618fd4308ed8e0c36c333e2f5469744c34729a27532b3db106ee20"},
+ {file = "Pillow-9.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7db8b751ad307d7cf238f02101e8e36a128a6cb199326e867d1398067381bff4"},
+ {file = "Pillow-9.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3033fbe1feb1b59394615a1cafaee85e49d01b51d54de0cbf6aa8e64182518a1"},
+ {file = "Pillow-9.3.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:22b012ea2d065fd163ca096f4e37e47cd8b59cf4b0fd47bfca6abb93df70b34c"},
+ {file = "Pillow-9.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b9a65733d103311331875c1dca05cb4606997fd33d6acfed695b1232ba1df193"},
+ {file = "Pillow-9.3.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:502526a2cbfa431d9fc2a079bdd9061a2397b842bb6bc4239bb176da00993812"},
+ {file = "Pillow-9.3.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:90fb88843d3902fe7c9586d439d1e8c05258f41da473952aa8b328d8b907498c"},
+ {file = "Pillow-9.3.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:89dca0ce00a2b49024df6325925555d406b14aa3efc2f752dbb5940c52c56b11"},
+ {file = "Pillow-9.3.0-cp311-cp311-win32.whl", hash = "sha256:3168434d303babf495d4ba58fc22d6604f6e2afb97adc6a423e917dab828939c"},
+ {file = "Pillow-9.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:18498994b29e1cf86d505edcb7edbe814d133d2232d256db8c7a8ceb34d18cef"},
+ {file = "Pillow-9.3.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:772a91fc0e03eaf922c63badeca75e91baa80fe2f5f87bdaed4280662aad25c9"},
+ {file = "Pillow-9.3.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:afa4107d1b306cdf8953edde0534562607fe8811b6c4d9a486298ad31de733b2"},
+ {file = "Pillow-9.3.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b4012d06c846dc2b80651b120e2cdd787b013deb39c09f407727ba90015c684f"},
+ {file = "Pillow-9.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77ec3e7be99629898c9a6d24a09de089fa5356ee408cdffffe62d67bb75fdd72"},
+ {file = "Pillow-9.3.0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:6c738585d7a9961d8c2821a1eb3dcb978d14e238be3d70f0a706f7fa9316946b"},
+ {file = "Pillow-9.3.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:828989c45c245518065a110434246c44a56a8b2b2f6347d1409c787e6e4651ee"},
+ {file = "Pillow-9.3.0-cp37-cp37m-win32.whl", hash = "sha256:82409ffe29d70fd733ff3c1025a602abb3e67405d41b9403b00b01debc4c9a29"},
+ {file = "Pillow-9.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:41e0051336807468be450d52b8edd12ac60bebaa97fe10c8b660f116e50b30e4"},
+ {file = "Pillow-9.3.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:b03ae6f1a1878233ac620c98f3459f79fd77c7e3c2b20d460284e1fb370557d4"},
+ {file = "Pillow-9.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4390e9ce199fc1951fcfa65795f239a8a4944117b5935a9317fb320e7767b40f"},
+ {file = "Pillow-9.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40e1ce476a7804b0fb74bcfa80b0a2206ea6a882938eaba917f7a0f004b42502"},
+ {file = "Pillow-9.3.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a0a06a052c5f37b4ed81c613a455a81f9a3a69429b4fd7bb913c3fa98abefc20"},
+ {file = "Pillow-9.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03150abd92771742d4a8cd6f2fa6246d847dcd2e332a18d0c15cc75bf6703040"},
+ {file = "Pillow-9.3.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:15c42fb9dea42465dfd902fb0ecf584b8848ceb28b41ee2b58f866411be33f07"},
+ {file = "Pillow-9.3.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:51e0e543a33ed92db9f5ef69a0356e0b1a7a6b6a71b80df99f1d181ae5875636"},
+ {file = "Pillow-9.3.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:3dd6caf940756101205dffc5367babf288a30043d35f80936f9bfb37f8355b32"},
+ {file = "Pillow-9.3.0-cp38-cp38-win32.whl", hash = "sha256:f1ff2ee69f10f13a9596480335f406dd1f70c3650349e2be67ca3139280cade0"},
+ {file = "Pillow-9.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:276a5ca930c913f714e372b2591a22c4bd3b81a418c0f6635ba832daec1cbcfc"},
+ {file = "Pillow-9.3.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:73bd195e43f3fadecfc50c682f5055ec32ee2c933243cafbfdec69ab1aa87cad"},
+ {file = "Pillow-9.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1c7c8ae3864846fc95f4611c78129301e203aaa2af813b703c55d10cc1628535"},
+ {file = "Pillow-9.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e0918e03aa0c72ea56edbb00d4d664294815aa11291a11504a377ea018330d3"},
+ {file = "Pillow-9.3.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b0915e734b33a474d76c28e07292f196cdf2a590a0d25bcc06e64e545f2d146c"},
+ {file = "Pillow-9.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af0372acb5d3598f36ec0914deed2a63f6bcdb7b606da04dc19a88d31bf0c05b"},
+ {file = "Pillow-9.3.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:ad58d27a5b0262c0c19b47d54c5802db9b34d38bbf886665b626aff83c74bacd"},
+ {file = "Pillow-9.3.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:97aabc5c50312afa5e0a2b07c17d4ac5e865b250986f8afe2b02d772567a380c"},
+ {file = "Pillow-9.3.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9aaa107275d8527e9d6e7670b64aabaaa36e5b6bd71a1015ddd21da0d4e06448"},
+ {file = "Pillow-9.3.0-cp39-cp39-win32.whl", hash = "sha256:bac18ab8d2d1e6b4ce25e3424f709aceef668347db8637c2296bcf41acb7cf48"},
+ {file = "Pillow-9.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:b472b5ea442148d1c3e2209f20f1e0bb0eb556538690fa70b5e1f79fa0ba8dc2"},
+ {file = "Pillow-9.3.0-pp37-pypy37_pp73-macosx_10_10_x86_64.whl", hash = "sha256:ab388aaa3f6ce52ac1cb8e122c4bd46657c15905904b3120a6248b5b8b0bc228"},
+ {file = "Pillow-9.3.0-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dbb8e7f2abee51cef77673be97760abff1674ed32847ce04b4af90f610144c7b"},
+ {file = "Pillow-9.3.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bca31dd6014cb8b0b2db1e46081b0ca7d936f856da3b39744aef499db5d84d02"},
+ {file = "Pillow-9.3.0-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c7025dce65566eb6e89f56c9509d4f628fddcedb131d9465cacd3d8bac337e7e"},
+ {file = "Pillow-9.3.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:ebf2029c1f464c59b8bdbe5143c79fa2045a581ac53679733d3a91d400ff9efb"},
+ {file = "Pillow-9.3.0-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b59430236b8e58840a0dfb4099a0e8717ffb779c952426a69ae435ca1f57210c"},
+ {file = "Pillow-9.3.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:12ce4932caf2ddf3e41d17fc9c02d67126935a44b86df6a206cf0d7161548627"},
+ {file = "Pillow-9.3.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ae5331c23ce118c53b172fa64a4c037eb83c9165aba3a7ba9ddd3ec9fa64a699"},
+ {file = "Pillow-9.3.0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:0b07fffc13f474264c336298d1b4ce01d9c5a011415b79d4ee5527bb69ae6f65"},
+ {file = "Pillow-9.3.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:073adb2ae23431d3b9bcbcff3fe698b62ed47211d0716b067385538a1b0f28b8"},
+ {file = "Pillow-9.3.0.tar.gz", hash = "sha256:c935a22a557a560108d780f9a0fc426dd7459940dc54faa49d83249c8d3e760f"},
+]
+
+[package.extras]
+docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-issues (>=3.0.1)", "sphinx-removed-in", "sphinxext-opengraph"]
+tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"]
+
+[[package]]
+name = "pkgutil-resolve-name"
+version = "1.3.10"
+description = "Resolve a name to an object."
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "pkgutil_resolve_name-1.3.10-py3-none-any.whl", hash = "sha256:ca27cc078d25c5ad71a9de0a7a330146c4e014c2462d9af19c6b828280649c5e"},
+ {file = "pkgutil_resolve_name-1.3.10.tar.gz", hash = "sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174"},
+]
+
+[[package]]
+name = "platformdirs"
+version = "3.2.0"
+description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "platformdirs-3.2.0-py3-none-any.whl", hash = "sha256:ebe11c0d7a805086e99506aa331612429a72ca7cd52a1f0d277dc4adc20cb10e"},
+ {file = "platformdirs-3.2.0.tar.gz", hash = "sha256:d5b638ca397f25f979350ff789db335903d7ea010ab28903f57b27e1b16c2b08"},
+]
+
+[package.extras]
+docs = ["furo (>=2022.12.7)", "proselint (>=0.13)", "sphinx (>=6.1.3)", "sphinx-autodoc-typehints (>=1.22,!=1.23.4)"]
+test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.2.2)", "pytest-cov (>=4)", "pytest-mock (>=3.10)"]
+
+[[package]]
+name = "pooch"
+version = "1.7.0"
+description = "\"Pooch manages your Python library's sample data files: it automatically downloads and stores them in a local directory, with support for versioning and corruption checks.\""
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "pooch-1.7.0-py3-none-any.whl", hash = "sha256:74258224fc33d58f53113cf955e8d51bf01386b91492927d0d1b6b341a765ad7"},
+ {file = "pooch-1.7.0.tar.gz", hash = "sha256:f174a1041b6447f0eef8860f76d17f60ed2f857dc0efa387a7f08228af05d998"},
+]
+
+[package.dependencies]
+packaging = ">=20.0"
+platformdirs = ">=2.5.0"
+requests = ">=2.19.0"
+
+[package.extras]
+progress = ["tqdm (>=4.41.0,<5.0.0)"]
+sftp = ["paramiko (>=2.7.0)"]
+xxhash = ["xxhash (>=1.4.3)"]
+
+[[package]]
+name = "portalocker"
+version = "2.7.0"
+description = "Wraps the portalocker recipe for easy usage"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "portalocker-2.7.0-py2.py3-none-any.whl", hash = "sha256:a07c5b4f3985c3cf4798369631fb7011adb498e2a46d8440efc75a8f29a0f983"},
+ {file = "portalocker-2.7.0.tar.gz", hash = "sha256:032e81d534a88ec1736d03f780ba073f047a06c478b06e2937486f334e955c51"},
+]
+
+[package.dependencies]
+pywin32 = {version = ">=226", markers = "platform_system == \"Windows\""}
+
+[package.extras]
+docs = ["sphinx (>=1.7.1)"]
+redis = ["redis"]
+tests = ["pytest (>=5.4.1)", "pytest-cov (>=2.8.1)", "pytest-mypy (>=0.8.0)", "pytest-timeout (>=2.1.0)", "redis", "sphinx (>=6.0.0)"]
+
+[[package]]
+name = "praat-parselmouth"
+version = "0.4.3"
+description = "Praat in Python, the Pythonic way"
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7"
+files = [
+ {file = "praat-parselmouth-0.4.3.tar.gz", hash = "sha256:93538d0ba06444b68d18b793efb436b0d645c62c0397c4977c1d27b679aee168"},
+ {file = "praat_parselmouth-0.4.3-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:124925f3e40a6d626d65789d449bdabe43078528efbee6f3a1df6e67db60c971"},
+ {file = "praat_parselmouth-0.4.3-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:0d3023d9b625c6b0a3cbe8a4f09cc23f666f9b9df40c59e33c4c9ca5b8ea1dac"},
+ {file = "praat_parselmouth-0.4.3-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:6841b9d9d2a614382cf186311610d663f0170ba20824296878eb98905b04899a"},
+ {file = "praat_parselmouth-0.4.3-cp27-cp27m-win32.whl", hash = "sha256:4fee56603cb57326457c6af779b89f96e7b2745114baa996659e1d52e5f245a3"},
+ {file = "praat_parselmouth-0.4.3-cp27-cp27m-win_amd64.whl", hash = "sha256:dc688749a0db4144936d3ed5180996500eb927bbf321192019ddee535fb97f3d"},
+ {file = "praat_parselmouth-0.4.3-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:c0ccf73de16c0f69162952b0d1865d4dbc929de0f9b88a9d7aea57f454de3cb8"},
+ {file = "praat_parselmouth-0.4.3-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:87fa2dd7f8b5dd5e3127af82e97b229ae2db8e1656525329224df4c0bffa024c"},
+ {file = "praat_parselmouth-0.4.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2bc111055efccf2bb25039a7891ec9ef106b13ddc5680293659ff0b4c5f4353f"},
+ {file = "praat_parselmouth-0.4.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cd38542210b1f381086b4a9424832b2330c42712e0fb7ea6c28c9200119c294b"},
+ {file = "praat_parselmouth-0.4.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a536b37411c52171500984c97bfd66dc000701a7dc0807e11061b85a653a600a"},
+ {file = "praat_parselmouth-0.4.3-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6ea1ab0632eff129516f147041aaf7874e50770561a2e9b9c81913b6de243f2a"},
+ {file = "praat_parselmouth-0.4.3-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:543ba3deb32502e93074b76b1cfb3f09e598e5d9f74a0345fa5b3928fedb5a51"},
+ {file = "praat_parselmouth-0.4.3-cp310-cp310-win32.whl", hash = "sha256:e0addf774a57d57a54df2b06de04ad0de34e81a3abfda03f744c732776c779ec"},
+ {file = "praat_parselmouth-0.4.3-cp310-cp310-win_amd64.whl", hash = "sha256:fc497357aeea2e3cbca2fb308d66b9de9739dc6b320ca2661ca6250f7a7489bd"},
+ {file = "praat_parselmouth-0.4.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:afac52cb7a72cda7fe2ec1d9573d8f402786abcb06bd7a22f2ca240f95e33263"},
+ {file = "praat_parselmouth-0.4.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b2261a79c2dc5387a7a678ec304ef8dd00ed93d9e028148bbb064fd0ac222a3a"},
+ {file = "praat_parselmouth-0.4.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:de31b458d3c1ca7ee45506871a38fdc3aec44526c065552adf8bec2876e816bd"},
+ {file = "praat_parselmouth-0.4.3-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:63ff24e045bed7c44f140fb7bab910d89fd3a45b7e8afe5b5e936aa2eea62904"},
+ {file = "praat_parselmouth-0.4.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a40c51c628235c54c8956306fc58fd14cd04127d85359134ef73ef35ff19d651"},
+ {file = "praat_parselmouth-0.4.3-cp311-cp311-win32.whl", hash = "sha256:f8ad9ee3be60d33f1ad593ec5f99466b1c266e00d29a5ec5787f969c618a7a9a"},
+ {file = "praat_parselmouth-0.4.3-cp311-cp311-win_amd64.whl", hash = "sha256:c32b1f3632e69ed67f501c635fff37ad72e1eae4ddd1c2c0827c4690c06ee990"},
+ {file = "praat_parselmouth-0.4.3-cp35-cp35m-macosx_10_9_x86_64.whl", hash = "sha256:1dcb6f55376f193c83d123953a55de471bcadd756af3b157c13d455b0c052999"},
+ {file = "praat_parselmouth-0.4.3-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:0970facd26b771f5799a396a0e54d12a69fbf8904a4f6ae0442f3831175e4508"},
+ {file = "praat_parselmouth-0.4.3-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:5c1104f41d9fef48cd44247738b9c8735e10a12ba0a1860e478e0bd69201813e"},
+ {file = "praat_parselmouth-0.4.3-cp35-cp35m-win32.whl", hash = "sha256:3d12469e301d9a25f29f6cb5427aa9a1276e7f2f1edf1a3caede69a84c46170f"},
+ {file = "praat_parselmouth-0.4.3-cp35-cp35m-win_amd64.whl", hash = "sha256:c4142faf664dd6c7f1773d04331b278d92e17064eaaef09132954f72a9041ea0"},
+ {file = "praat_parselmouth-0.4.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:5ea2079d519e8d42ed8d2de3c4f68803110060a8ae5d1c56df795c600aa1c3be"},
+ {file = "praat_parselmouth-0.4.3-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:2e88f00b740548cf3de5768b2d06e296e525164ea71ccc991920f41f2e277ad2"},
+ {file = "praat_parselmouth-0.4.3-cp36-cp36m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:2da226bccd52fd81223eb94a7ea43a1a7588e4384ea65ce0818329b73ef8df6d"},
+ {file = "praat_parselmouth-0.4.3-cp36-cp36m-win32.whl", hash = "sha256:0f3af0413992398ac613b0eefdfbcb8cad064c36a28b972300a2bb760523c109"},
+ {file = "praat_parselmouth-0.4.3-cp36-cp36m-win_amd64.whl", hash = "sha256:e0ed79941b6e37a440860511767eedd85ec003060870d10ff1f98773b2a268ae"},
+ {file = "praat_parselmouth-0.4.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:10f3113ad4f5f6df5fe81d4080ca3ad46de2fe0fdb8ebbcad1ba884b1cae3b9d"},
+ {file = "praat_parselmouth-0.4.3-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6e9338f7a1b304390014bb2eec619e5a306527a4df438e68439c92aa968627dc"},
+ {file = "praat_parselmouth-0.4.3-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:cb3798b2ca8163444662b6ae84a74b1add38b2c04e5af8d07bde55cf0335300a"},
+ {file = "praat_parselmouth-0.4.3-cp37-cp37m-win32.whl", hash = "sha256:d947f9d1fb092b91acca1259ce4dd62ff4f456338958fd1fd41ee65efc53ca2c"},
+ {file = "praat_parselmouth-0.4.3-cp37-cp37m-win_amd64.whl", hash = "sha256:2f3e026f590aeec8f68921359f56a42efa43076942f271244bee57fd22db8eef"},
+ {file = "praat_parselmouth-0.4.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:28844229dab2a9335629b4526188b9540d02208856f48b1a46776279c022f937"},
+ {file = "praat_parselmouth-0.4.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:410748af84eb8c2eb69e408e300694a45090ed7c4f31375c4ec75a8c18f87169"},
+ {file = "praat_parselmouth-0.4.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:30ff6f17babad25b9d6ab086465a54494eef9d1b4368b0722230c5282be2bf94"},
+ {file = "praat_parselmouth-0.4.3-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:ff7096bc3e87a8f719e66f5e16a90e2f6de445612abd234f86837d390b947421"},
+ {file = "praat_parselmouth-0.4.3-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f41d121c4d2322ff12808bb2c4490609f750f89064170e327dfd74fca13cc212"},
+ {file = "praat_parselmouth-0.4.3-cp38-cp38-win32.whl", hash = "sha256:9af9945db11fab0e1ed29ad20f7c97a3e7a8d016328ad6d7237a0d7819db075e"},
+ {file = "praat_parselmouth-0.4.3-cp38-cp38-win_amd64.whl", hash = "sha256:ae0c63c432e8216d7c70da44131f51c845fb81d48ac04eb5f39ebcfae34624be"},
+ {file = "praat_parselmouth-0.4.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8e25658af5a87ed502753de6924c51bf3400d4078e67a611b5874ab08b478fdb"},
+ {file = "praat_parselmouth-0.4.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7aa2ffd0c6e47feda35343a9d6722b2558f3677a4a51bf5ec864f27ab80e2f42"},
+ {file = "praat_parselmouth-0.4.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3b245d9457ab39f12142da160cda12c4c2a58d9b916e5bb33e6b3ac267882d46"},
+ {file = "praat_parselmouth-0.4.3-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:da9779a694941074bc5b199dd3cb41ad4af3306552f06af8dbfdea6ab0a87dec"},
+ {file = "praat_parselmouth-0.4.3-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:cfa141c04fd8a0477f91c168878112098a25cbac7ac4a20de627bc9293ee4667"},
+ {file = "praat_parselmouth-0.4.3-cp39-cp39-win32.whl", hash = "sha256:6941fe602802fd57ecbedecd612b41493b7d1c6bf722ac0cbf3f47f805fbbd43"},
+ {file = "praat_parselmouth-0.4.3-cp39-cp39-win_amd64.whl", hash = "sha256:5252496e0391754a642973837670c56ecd39c8e0a1f7ec6e6b60b0cd2cc9f51d"},
+ {file = "praat_parselmouth-0.4.3-pp27-pypy_73-macosx_10_9_x86_64.whl", hash = "sha256:fd7c143c6511807b67c92b3ab94733746c0ae3a7b4ba52d6763585c4d459061d"},
+ {file = "praat_parselmouth-0.4.3-pp27-pypy_73-manylinux2010_x86_64.whl", hash = "sha256:7ad0739ad6c102817c7d43b67b7270f78cb431eb72b6ecd9a17e354d1b379deb"},
+ {file = "praat_parselmouth-0.4.3-pp27-pypy_73-win32.whl", hash = "sha256:f5e98ec1f41efba90bedab358cff8e6a3c6473978e1f42b55d0977e580efe673"},
+ {file = "praat_parselmouth-0.4.3-pp36-pypy36_pp73-macosx_10_9_x86_64.whl", hash = "sha256:7b58c1c8fd967446f6d74775b5d9bceadfe35a928fa5f192d4d03d80cb005d92"},
+ {file = "praat_parselmouth-0.4.3-pp36-pypy36_pp73-manylinux2010_x86_64.whl", hash = "sha256:d217df07c770156fa284aff3e7a5c11eb43e37f0226730d729d6b45be8a7c4d7"},
+ {file = "praat_parselmouth-0.4.3-pp36-pypy36_pp73-win32.whl", hash = "sha256:29cb47438989f8155c3b3dca987afd48999dec71e4b79564aa7e922c3c5c1f9a"},
+ {file = "praat_parselmouth-0.4.3-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:5f772b4a097654883f4bba41efae419f9ebdd5e83ef7a857e547100d26663e2c"},
+ {file = "praat_parselmouth-0.4.3-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:bf9634a6986732dc43a88b3a16a0000cff903da1db6556b7959a6a4897f25570"},
+ {file = "praat_parselmouth-0.4.3-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:fab1bbb6a88f47cb5d0db07a4fd6d88b9294d2775a7556aeb459e96ac372e29f"},
+ {file = "praat_parselmouth-0.4.3-pp37-pypy37_pp73-win32.whl", hash = "sha256:261f03f95f25943da2cf746599e47acfcf79b7fc823c871571901d6c97bad948"},
+ {file = "praat_parselmouth-0.4.3-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:199b8df2659a1e6f30e9ae3064b0a28a661d834d2bccb56d22051c40cc348817"},
+ {file = "praat_parselmouth-0.4.3-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ef1f3f6bd08cc410d0d595f6a9c7dd72558e30ad3bd7949c94ea4e07a2de2605"},
+ {file = "praat_parselmouth-0.4.3-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:28a61b7a3cf95a53554dd3ebb4f48e991d4b913ae2d2fbc3868a4e864d69794f"},
+ {file = "praat_parselmouth-0.4.3-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:488833ee33690fa1a57a3c429d286e42e6882748f5c3d28dc50889abec12b8c2"},
+ {file = "praat_parselmouth-0.4.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:10f181e199c47fa90fe7cad065275f7f3ccda2de6febf86394cf96aa48531079"},
+ {file = "praat_parselmouth-0.4.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:52702bc5cdf59b2b4db87448fe9042307e5ebce6b67ee5ea55c2b8627ce803e0"},
+ {file = "praat_parselmouth-0.4.3-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7d4f5d7c701517986654365f0a41b8b4a610a2ddc0365da60e48c098774259b"},
+ {file = "praat_parselmouth-0.4.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4dc013608a536ad74efdc3242421cabfcb8cb2e9cd1259ec9de9aeaa141c2d14"},
+ {file = "praat_parselmouth-0.4.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:d593065ed1500d305d9cf3d20f5ac7e3671061c3c073ef6e94e97817a664d399"},
+]
+
+[package.dependencies]
+numpy = ">=1.7.0"
+
+[[package]]
+name = "protobuf"
+version = "3.20.3"
+description = "Protocol Buffers"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "protobuf-3.20.3-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:f4bd856d702e5b0d96a00ec6b307b0f51c1982c2bf9c0052cf9019e9a544ba99"},
+ {file = "protobuf-3.20.3-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:9aae4406ea63d825636cc11ffb34ad3379335803216ee3a856787bcf5ccc751e"},
+ {file = "protobuf-3.20.3-cp310-cp310-win32.whl", hash = "sha256:28545383d61f55b57cf4df63eebd9827754fd2dc25f80c5253f9184235db242c"},
+ {file = "protobuf-3.20.3-cp310-cp310-win_amd64.whl", hash = "sha256:67a3598f0a2dcbc58d02dd1928544e7d88f764b47d4a286202913f0b2801c2e7"},
+ {file = "protobuf-3.20.3-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:899dc660cd599d7352d6f10d83c95df430a38b410c1b66b407a6b29265d66469"},
+ {file = "protobuf-3.20.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e64857f395505ebf3d2569935506ae0dfc4a15cb80dc25261176c784662cdcc4"},
+ {file = "protobuf-3.20.3-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:d9e4432ff660d67d775c66ac42a67cf2453c27cb4d738fc22cb53b5d84c135d4"},
+ {file = "protobuf-3.20.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:74480f79a023f90dc6e18febbf7b8bac7508420f2006fabd512013c0c238f454"},
+ {file = "protobuf-3.20.3-cp37-cp37m-win32.whl", hash = "sha256:b6cc7ba72a8850621bfec987cb72623e703b7fe2b9127a161ce61e61558ad905"},
+ {file = "protobuf-3.20.3-cp37-cp37m-win_amd64.whl", hash = "sha256:8c0c984a1b8fef4086329ff8dd19ac77576b384079247c770f29cc8ce3afa06c"},
+ {file = "protobuf-3.20.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:de78575669dddf6099a8a0f46a27e82a1783c557ccc38ee620ed8cc96d3be7d7"},
+ {file = "protobuf-3.20.3-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:f4c42102bc82a51108e449cbb32b19b180022941c727bac0cfd50170341f16ee"},
+ {file = "protobuf-3.20.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:44246bab5dd4b7fbd3c0c80b6f16686808fab0e4aca819ade6e8d294a29c7050"},
+ {file = "protobuf-3.20.3-cp38-cp38-win32.whl", hash = "sha256:c02ce36ec760252242a33967d51c289fd0e1c0e6e5cc9397e2279177716add86"},
+ {file = "protobuf-3.20.3-cp38-cp38-win_amd64.whl", hash = "sha256:447d43819997825d4e71bf5769d869b968ce96848b6479397e29fc24c4a5dfe9"},
+ {file = "protobuf-3.20.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:398a9e0c3eaceb34ec1aee71894ca3299605fa8e761544934378bbc6c97de23b"},
+ {file = "protobuf-3.20.3-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:bf01b5720be110540be4286e791db73f84a2b721072a3711efff6c324cdf074b"},
+ {file = "protobuf-3.20.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:daa564862dd0d39c00f8086f88700fdbe8bc717e993a21e90711acfed02f2402"},
+ {file = "protobuf-3.20.3-cp39-cp39-win32.whl", hash = "sha256:819559cafa1a373b7096a482b504ae8a857c89593cf3a25af743ac9ecbd23480"},
+ {file = "protobuf-3.20.3-cp39-cp39-win_amd64.whl", hash = "sha256:03038ac1cfbc41aa21f6afcbcd357281d7521b4157926f30ebecc8d4ea59dcb7"},
+ {file = "protobuf-3.20.3-py2.py3-none-any.whl", hash = "sha256:a7ca6d488aa8ff7f329d4c545b2dbad8ac31464f1d8b1c87ad1346717731e4db"},
+ {file = "protobuf-3.20.3.tar.gz", hash = "sha256:2e3427429c9cffebf259491be0af70189607f365c2f41c7c3764af6f337105f2"},
+]
+
+[[package]]
+name = "pyasn1"
+version = "0.4.8"
+description = "ASN.1 types and codecs"
+optional = false
+python-versions = "*"
+files = [
+ {file = "pyasn1-0.4.8-py2.py3-none-any.whl", hash = "sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d"},
+ {file = "pyasn1-0.4.8.tar.gz", hash = "sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba"},
+]
+
+[[package]]
+name = "pyasn1-modules"
+version = "0.2.8"
+description = "A collection of ASN.1-based protocols modules."
+optional = false
+python-versions = "*"
+files = [
+ {file = "pyasn1-modules-0.2.8.tar.gz", hash = "sha256:905f84c712230b2c592c19470d3ca8d552de726050d1d1716282a1f6146be65e"},
+ {file = "pyasn1_modules-0.2.8-py2.py3-none-any.whl", hash = "sha256:a50b808ffeb97cb3601dd25981f6b016cbb3d31fbf57a8b8a87428e6158d0c74"},
+]
+
+[package.dependencies]
+pyasn1 = ">=0.4.6,<0.5.0"
+
+[[package]]
+name = "pycparser"
+version = "2.21"
+description = "C parser in Python"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+files = [
+ {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"},
+ {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"},
+]
+
+[[package]]
+name = "pydantic"
+version = "1.10.7"
+description = "Data validation and settings management using python type hints"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "pydantic-1.10.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e79e999e539872e903767c417c897e729e015872040e56b96e67968c3b918b2d"},
+ {file = "pydantic-1.10.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:01aea3a42c13f2602b7ecbbea484a98169fb568ebd9e247593ea05f01b884b2e"},
+ {file = "pydantic-1.10.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:516f1ed9bc2406a0467dd777afc636c7091d71f214d5e413d64fef45174cfc7a"},
+ {file = "pydantic-1.10.7-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae150a63564929c675d7f2303008d88426a0add46efd76c3fc797cd71cb1b46f"},
+ {file = "pydantic-1.10.7-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:ecbbc51391248116c0a055899e6c3e7ffbb11fb5e2a4cd6f2d0b93272118a209"},
+ {file = "pydantic-1.10.7-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f4a2b50e2b03d5776e7f21af73e2070e1b5c0d0df255a827e7c632962f8315af"},
+ {file = "pydantic-1.10.7-cp310-cp310-win_amd64.whl", hash = "sha256:a7cd2251439988b413cb0a985c4ed82b6c6aac382dbaff53ae03c4b23a70e80a"},
+ {file = "pydantic-1.10.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:68792151e174a4aa9e9fc1b4e653e65a354a2fa0fed169f7b3d09902ad2cb6f1"},
+ {file = "pydantic-1.10.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dfe2507b8ef209da71b6fb5f4e597b50c5a34b78d7e857c4f8f3115effaef5fe"},
+ {file = "pydantic-1.10.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10a86d8c8db68086f1e30a530f7d5f83eb0685e632e411dbbcf2d5c0150e8dcd"},
+ {file = "pydantic-1.10.7-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d75ae19d2a3dbb146b6f324031c24f8a3f52ff5d6a9f22f0683694b3afcb16fb"},
+ {file = "pydantic-1.10.7-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:464855a7ff7f2cc2cf537ecc421291b9132aa9c79aef44e917ad711b4a93163b"},
+ {file = "pydantic-1.10.7-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:193924c563fae6ddcb71d3f06fa153866423ac1b793a47936656e806b64e24ca"},
+ {file = "pydantic-1.10.7-cp311-cp311-win_amd64.whl", hash = "sha256:b4a849d10f211389502059c33332e91327bc154acc1845f375a99eca3afa802d"},
+ {file = "pydantic-1.10.7-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:cc1dde4e50a5fc1336ee0581c1612215bc64ed6d28d2c7c6f25d2fe3e7c3e918"},
+ {file = "pydantic-1.10.7-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0cfe895a504c060e5d36b287ee696e2fdad02d89e0d895f83037245218a87fe"},
+ {file = "pydantic-1.10.7-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:670bb4683ad1e48b0ecb06f0cfe2178dcf74ff27921cdf1606e527d2617a81ee"},
+ {file = "pydantic-1.10.7-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:950ce33857841f9a337ce07ddf46bc84e1c4946d2a3bba18f8280297157a3fd1"},
+ {file = "pydantic-1.10.7-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:c15582f9055fbc1bfe50266a19771bbbef33dd28c45e78afbe1996fd70966c2a"},
+ {file = "pydantic-1.10.7-cp37-cp37m-win_amd64.whl", hash = "sha256:82dffb306dd20bd5268fd6379bc4bfe75242a9c2b79fec58e1041fbbdb1f7914"},
+ {file = "pydantic-1.10.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8c7f51861d73e8b9ddcb9916ae7ac39fb52761d9ea0df41128e81e2ba42886cd"},
+ {file = "pydantic-1.10.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6434b49c0b03a51021ade5c4daa7d70c98f7a79e95b551201fff682fc1661245"},
+ {file = "pydantic-1.10.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64d34ab766fa056df49013bb6e79921a0265204c071984e75a09cbceacbbdd5d"},
+ {file = "pydantic-1.10.7-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:701daea9ffe9d26f97b52f1d157e0d4121644f0fcf80b443248434958fd03dc3"},
+ {file = "pydantic-1.10.7-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:cf135c46099ff3f919d2150a948ce94b9ce545598ef2c6c7bf55dca98a304b52"},
+ {file = "pydantic-1.10.7-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b0f85904f73161817b80781cc150f8b906d521fa11e3cdabae19a581c3606209"},
+ {file = "pydantic-1.10.7-cp38-cp38-win_amd64.whl", hash = "sha256:9f6f0fd68d73257ad6685419478c5aece46432f4bdd8d32c7345f1986496171e"},
+ {file = "pydantic-1.10.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c230c0d8a322276d6e7b88c3f7ce885f9ed16e0910354510e0bae84d54991143"},
+ {file = "pydantic-1.10.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:976cae77ba6a49d80f461fd8bba183ff7ba79f44aa5cfa82f1346b5626542f8e"},
+ {file = "pydantic-1.10.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d45fc99d64af9aaf7e308054a0067fdcd87ffe974f2442312372dfa66e1001d"},
+ {file = "pydantic-1.10.7-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d2a5ebb48958754d386195fe9e9c5106f11275867051bf017a8059410e9abf1f"},
+ {file = "pydantic-1.10.7-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:abfb7d4a7cd5cc4e1d1887c43503a7c5dd608eadf8bc615413fc498d3e4645cd"},
+ {file = "pydantic-1.10.7-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:80b1fab4deb08a8292d15e43a6edccdffa5377a36a4597bb545b93e79c5ff0a5"},
+ {file = "pydantic-1.10.7-cp39-cp39-win_amd64.whl", hash = "sha256:d71e69699498b020ea198468e2480a2f1e7433e32a3a99760058c6520e2bea7e"},
+ {file = "pydantic-1.10.7-py3-none-any.whl", hash = "sha256:0cd181f1d0b1d00e2b705f1bf1ac7799a2d938cce3376b8007df62b29be3c2c6"},
+ {file = "pydantic-1.10.7.tar.gz", hash = "sha256:cfc83c0678b6ba51b0532bea66860617c4cd4251ecf76e9846fa5a9f3454e97e"},
+]
+
+[package.dependencies]
+typing-extensions = ">=4.2.0"
+
+[package.extras]
+dotenv = ["python-dotenv (>=0.10.4)"]
+email = ["email-validator (>=1.0.3)"]
+
+[[package]]
+name = "pydub"
+version = "0.25.1"
+description = "Manipulate audio with an simple and easy high level interface"
+optional = false
+python-versions = "*"
+files = [
+ {file = "pydub-0.25.1-py2.py3-none-any.whl", hash = "sha256:65617e33033874b59d87db603aa1ed450633288aefead953b30bded59cb599a6"},
+ {file = "pydub-0.25.1.tar.gz", hash = "sha256:980a33ce9949cab2a569606b65674d748ecbca4f0796887fd6f46173a7b0d30f"},
+]
+
+[[package]]
+name = "pygments"
+version = "2.15.1"
+description = "Pygments is a syntax highlighting package written in Python."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "Pygments-2.15.1-py3-none-any.whl", hash = "sha256:db2db3deb4b4179f399a09054b023b6a586b76499d36965813c71aa8ed7b5fd1"},
+ {file = "Pygments-2.15.1.tar.gz", hash = "sha256:8ace4d3c1dd481894b2005f560ead0f9f19ee64fe983366be1a21e171d12775c"},
+]
+
+[package.extras]
+plugins = ["importlib-metadata"]
+
+[[package]]
+name = "pyparsing"
+version = "3.0.9"
+description = "pyparsing module - Classes and methods to define and execute parsing grammars"
+optional = false
+python-versions = ">=3.6.8"
+files = [
+ {file = "pyparsing-3.0.9-py3-none-any.whl", hash = "sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc"},
+ {file = "pyparsing-3.0.9.tar.gz", hash = "sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb"},
+]
+
+[package.extras]
+diagrams = ["jinja2", "railroad-diagrams"]
+
+[[package]]
+name = "pyrsistent"
+version = "0.19.3"
+description = "Persistent/Functional/Immutable data structures"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "pyrsistent-0.19.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:20460ac0ea439a3e79caa1dbd560344b64ed75e85d8703943e0b66c2a6150e4a"},
+ {file = "pyrsistent-0.19.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4c18264cb84b5e68e7085a43723f9e4c1fd1d935ab240ce02c0324a8e01ccb64"},
+ {file = "pyrsistent-0.19.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4b774f9288dda8d425adb6544e5903f1fb6c273ab3128a355c6b972b7df39dcf"},
+ {file = "pyrsistent-0.19.3-cp310-cp310-win32.whl", hash = "sha256:5a474fb80f5e0d6c9394d8db0fc19e90fa540b82ee52dba7d246a7791712f74a"},
+ {file = "pyrsistent-0.19.3-cp310-cp310-win_amd64.whl", hash = "sha256:49c32f216c17148695ca0e02a5c521e28a4ee6c5089f97e34fe24163113722da"},
+ {file = "pyrsistent-0.19.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f0774bf48631f3a20471dd7c5989657b639fd2d285b861237ea9e82c36a415a9"},
+ {file = "pyrsistent-0.19.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ab2204234c0ecd8b9368dbd6a53e83c3d4f3cab10ecaf6d0e772f456c442393"},
+ {file = "pyrsistent-0.19.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e42296a09e83028b3476f7073fcb69ffebac0e66dbbfd1bd847d61f74db30f19"},
+ {file = "pyrsistent-0.19.3-cp311-cp311-win32.whl", hash = "sha256:64220c429e42a7150f4bfd280f6f4bb2850f95956bde93c6fda1b70507af6ef3"},
+ {file = "pyrsistent-0.19.3-cp311-cp311-win_amd64.whl", hash = "sha256:016ad1afadf318eb7911baa24b049909f7f3bb2c5b1ed7b6a8f21db21ea3faa8"},
+ {file = "pyrsistent-0.19.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c4db1bd596fefd66b296a3d5d943c94f4fac5bcd13e99bffe2ba6a759d959a28"},
+ {file = "pyrsistent-0.19.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aeda827381f5e5d65cced3024126529ddc4289d944f75e090572c77ceb19adbf"},
+ {file = "pyrsistent-0.19.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:42ac0b2f44607eb92ae88609eda931a4f0dfa03038c44c772e07f43e738bcac9"},
+ {file = "pyrsistent-0.19.3-cp37-cp37m-win32.whl", hash = "sha256:e8f2b814a3dc6225964fa03d8582c6e0b6650d68a232df41e3cc1b66a5d2f8d1"},
+ {file = "pyrsistent-0.19.3-cp37-cp37m-win_amd64.whl", hash = "sha256:c9bb60a40a0ab9aba40a59f68214eed5a29c6274c83b2cc206a359c4a89fa41b"},
+ {file = "pyrsistent-0.19.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:a2471f3f8693101975b1ff85ffd19bb7ca7dd7c38f8a81701f67d6b4f97b87d8"},
+ {file = "pyrsistent-0.19.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc5d149f31706762c1f8bda2e8c4f8fead6e80312e3692619a75301d3dbb819a"},
+ {file = "pyrsistent-0.19.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3311cb4237a341aa52ab8448c27e3a9931e2ee09561ad150ba94e4cfd3fc888c"},
+ {file = "pyrsistent-0.19.3-cp38-cp38-win32.whl", hash = "sha256:f0e7c4b2f77593871e918be000b96c8107da48444d57005b6a6bc61fb4331b2c"},
+ {file = "pyrsistent-0.19.3-cp38-cp38-win_amd64.whl", hash = "sha256:c147257a92374fde8498491f53ffa8f4822cd70c0d85037e09028e478cababb7"},
+ {file = "pyrsistent-0.19.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b735e538f74ec31378f5a1e3886a26d2ca6351106b4dfde376a26fc32a044edc"},
+ {file = "pyrsistent-0.19.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99abb85579e2165bd8522f0c0138864da97847875ecbd45f3e7e2af569bfc6f2"},
+ {file = "pyrsistent-0.19.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3a8cb235fa6d3fd7aae6a4f1429bbb1fec1577d978098da1252f0489937786f3"},
+ {file = "pyrsistent-0.19.3-cp39-cp39-win32.whl", hash = "sha256:c74bed51f9b41c48366a286395c67f4e894374306b197e62810e0fdaf2364da2"},
+ {file = "pyrsistent-0.19.3-cp39-cp39-win_amd64.whl", hash = "sha256:878433581fc23e906d947a6814336eee031a00e6defba224234169ae3d3d6a98"},
+ {file = "pyrsistent-0.19.3-py3-none-any.whl", hash = "sha256:ccf0d6bd208f8111179f0c26fdf84ed7c3891982f2edaeae7422575f47e66b64"},
+ {file = "pyrsistent-0.19.3.tar.gz", hash = "sha256:1a2994773706bbb4995c31a97bc94f1418314923bd1048c6d964837040376440"},
+]
+
+[[package]]
+name = "python-dateutil"
+version = "2.8.2"
+description = "Extensions to the standard Python datetime module"
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
+files = [
+ {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"},
+ {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"},
+]
+
+[package.dependencies]
+six = ">=1.5"
+
+[[package]]
+name = "python-multipart"
+version = "0.0.6"
+description = "A streaming multipart parser for Python"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "python_multipart-0.0.6-py3-none-any.whl", hash = "sha256:ee698bab5ef148b0a760751c261902cd096e57e10558e11aca17646b74ee1c18"},
+ {file = "python_multipart-0.0.6.tar.gz", hash = "sha256:e9925a80bb668529f1b67c7fdb0a5dacdd7cbfc6fb0bff3ea443fe22bdd62132"},
+]
+
+[package.extras]
+dev = ["atomicwrites (==1.2.1)", "attrs (==19.2.0)", "coverage (==6.5.0)", "hatch", "invoke (==1.7.3)", "more-itertools (==4.3.0)", "pbr (==4.3.0)", "pluggy (==1.0.0)", "py (==1.11.0)", "pytest (==7.2.0)", "pytest-cov (==4.0.0)", "pytest-timeout (==2.1.0)", "pyyaml (==5.1)"]
+
+[[package]]
+name = "pytz"
+version = "2023.3"
+description = "World timezone definitions, modern and historical"
+optional = false
+python-versions = "*"
+files = [
+ {file = "pytz-2023.3-py2.py3-none-any.whl", hash = "sha256:a151b3abb88eda1d4e34a9814df37de2a80e301e68ba0fd856fb9b46bfbbbffb"},
+ {file = "pytz-2023.3.tar.gz", hash = "sha256:1d8ce29db189191fb55338ee6d0387d82ab59f3d00eac103412d64e0ebd0c588"},
+]
+
+[[package]]
+name = "pywin32"
+version = "306"
+description = "Python for Window Extensions"
+optional = false
+python-versions = "*"
+files = [
+ {file = "pywin32-306-cp310-cp310-win32.whl", hash = "sha256:06d3420a5155ba65f0b72f2699b5bacf3109f36acbe8923765c22938a69dfc8d"},
+ {file = "pywin32-306-cp310-cp310-win_amd64.whl", hash = "sha256:84f4471dbca1887ea3803d8848a1616429ac94a4a8d05f4bc9c5dcfd42ca99c8"},
+ {file = "pywin32-306-cp311-cp311-win32.whl", hash = "sha256:e65028133d15b64d2ed8f06dd9fbc268352478d4f9289e69c190ecd6818b6407"},
+ {file = "pywin32-306-cp311-cp311-win_amd64.whl", hash = "sha256:a7639f51c184c0272e93f244eb24dafca9b1855707d94c192d4a0b4c01e1100e"},
+ {file = "pywin32-306-cp311-cp311-win_arm64.whl", hash = "sha256:70dba0c913d19f942a2db25217d9a1b726c278f483a919f1abfed79c9cf64d3a"},
+ {file = "pywin32-306-cp312-cp312-win32.whl", hash = "sha256:383229d515657f4e3ed1343da8be101000562bf514591ff383ae940cad65458b"},
+ {file = "pywin32-306-cp312-cp312-win_amd64.whl", hash = "sha256:37257794c1ad39ee9be652da0462dc2e394c8159dfd913a8a4e8eb6fd346da0e"},
+ {file = "pywin32-306-cp312-cp312-win_arm64.whl", hash = "sha256:5821ec52f6d321aa59e2db7e0a35b997de60c201943557d108af9d4ae1ec7040"},
+ {file = "pywin32-306-cp37-cp37m-win32.whl", hash = "sha256:1c73ea9a0d2283d889001998059f5eaaba3b6238f767c9cf2833b13e6a685f65"},
+ {file = "pywin32-306-cp37-cp37m-win_amd64.whl", hash = "sha256:72c5f621542d7bdd4fdb716227be0dd3f8565c11b280be6315b06ace35487d36"},
+ {file = "pywin32-306-cp38-cp38-win32.whl", hash = "sha256:e4c092e2589b5cf0d365849e73e02c391c1349958c5ac3e9d5ccb9a28e017b3a"},
+ {file = "pywin32-306-cp38-cp38-win_amd64.whl", hash = "sha256:e8ac1ae3601bee6ca9f7cb4b5363bf1c0badb935ef243c4733ff9a393b1690c0"},
+ {file = "pywin32-306-cp39-cp39-win32.whl", hash = "sha256:e25fd5b485b55ac9c057f67d94bc203f3f6595078d1fb3b458c9c28b7153a802"},
+ {file = "pywin32-306-cp39-cp39-win_amd64.whl", hash = "sha256:39b61c15272833b5c329a2989999dcae836b1eed650252ab1b7bfbe1d59f30f4"},
+]
+
+[[package]]
+name = "pyworld"
+version = "0.3.2"
+description = "PyWorld is a Python wrapper for WORLD vocoder."
+optional = false
+python-versions = "*"
+files = [
+ {file = "pyworld-0.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:688730fa5394709a185061e5a58e7a614b4548d814eeecc1dc825f73af53a9aa"},
+ {file = "pyworld-0.3.2-cp36-cp36m-win32.whl", hash = "sha256:1e110e2f95d45b0765f4ba4e49b389f9b931c9c438cd69774dce20699cc6dc7d"},
+ {file = "pyworld-0.3.2-cp36-cp36m-win_amd64.whl", hash = "sha256:e858668185a177e9e30c0ff12de3e166b39124c14b424ba3be31418694dcb2b7"},
+ {file = "pyworld-0.3.2-cp37-cp37m-win32.whl", hash = "sha256:b5325e7a08f104a9bf533d54423546bd3ef05953b80b79a8ced34efbb892862b"},
+ {file = "pyworld-0.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:fddd503ac264810221d9460bfdc1454c5c1313214e1c58a4ddd9417699f99bc8"},
+ {file = "pyworld-0.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:502fbf577f4e56a497b3ad8c29434ec423eabc4674b93fa11046837d297c97be"},
+ {file = "pyworld-0.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:a8ea62219b9bce0e514ff05ee80cfbc1248b165d8d802f00b9b8754510701f3e"},
+ {file = "pyworld-0.3.2.tar.gz", hash = "sha256:668d09842c3cfa74b1f6edabdb0058a64c04f9cf17b93883e6da811e1204ad4d"},
+]
+
+[package.dependencies]
+cython = "*"
+numpy = "*"
+
+[package.extras]
+sdist = ["cython", "numpy"]
+test = ["nose"]
+
+[[package]]
+name = "pyyaml"
+version = "6.0"
+description = "YAML parser and emitter for Python"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "PyYAML-6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53"},
+ {file = "PyYAML-6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c"},
+ {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc"},
+ {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b"},
+ {file = "PyYAML-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5"},
+ {file = "PyYAML-6.0-cp310-cp310-win32.whl", hash = "sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513"},
+ {file = "PyYAML-6.0-cp310-cp310-win_amd64.whl", hash = "sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a"},
+ {file = "PyYAML-6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d4b0ba9512519522b118090257be113b9468d804b19d63c71dbcf4a48fa32358"},
+ {file = "PyYAML-6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:81957921f441d50af23654aa6c5e5eaf9b06aba7f0a19c18a538dc7ef291c5a1"},
+ {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:afa17f5bc4d1b10afd4466fd3a44dc0e245382deca5b3c353d8b757f9e3ecb8d"},
+ {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dbad0e9d368bb989f4515da330b88a057617d16b6a8245084f1b05400f24609f"},
+ {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:432557aa2c09802be39460360ddffd48156e30721f5e8d917f01d31694216782"},
+ {file = "PyYAML-6.0-cp311-cp311-win32.whl", hash = "sha256:bfaef573a63ba8923503d27530362590ff4f576c626d86a9fed95822a8255fd7"},
+ {file = "PyYAML-6.0-cp311-cp311-win_amd64.whl", hash = "sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf"},
+ {file = "PyYAML-6.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86"},
+ {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f"},
+ {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92"},
+ {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4"},
+ {file = "PyYAML-6.0-cp36-cp36m-win32.whl", hash = "sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293"},
+ {file = "PyYAML-6.0-cp36-cp36m-win_amd64.whl", hash = "sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57"},
+ {file = "PyYAML-6.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c"},
+ {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0"},
+ {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4"},
+ {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9"},
+ {file = "PyYAML-6.0-cp37-cp37m-win32.whl", hash = "sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737"},
+ {file = "PyYAML-6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d"},
+ {file = "PyYAML-6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b"},
+ {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba"},
+ {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34"},
+ {file = "PyYAML-6.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287"},
+ {file = "PyYAML-6.0-cp38-cp38-win32.whl", hash = "sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78"},
+ {file = "PyYAML-6.0-cp38-cp38-win_amd64.whl", hash = "sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07"},
+ {file = "PyYAML-6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b"},
+ {file = "PyYAML-6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174"},
+ {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803"},
+ {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3"},
+ {file = "PyYAML-6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0"},
+ {file = "PyYAML-6.0-cp39-cp39-win32.whl", hash = "sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb"},
+ {file = "PyYAML-6.0-cp39-cp39-win_amd64.whl", hash = "sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c"},
+ {file = "PyYAML-6.0.tar.gz", hash = "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2"},
+]
+
+[[package]]
+name = "regex"
+version = "2023.3.23"
+description = "Alternative regular expression module, to replace re."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "regex-2023.3.23-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:845a5e2d84389c4ddada1a9b95c055320070f18bb76512608374aca00d22eca8"},
+ {file = "regex-2023.3.23-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:87d9951f5a538dd1d016bdc0dcae59241d15fa94860964833a54d18197fcd134"},
+ {file = "regex-2023.3.23-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37ae17d3be44c0b3f782c28ae9edd8b47c1f1776d4cabe87edc0b98e1f12b021"},
+ {file = "regex-2023.3.23-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0b8eb1e3bca6b48dc721818a60ae83b8264d4089a4a41d62be6d05316ec38e15"},
+ {file = "regex-2023.3.23-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:df45fac182ebc3c494460c644e853515cc24f5ad9da05f8ffb91da891bfee879"},
+ {file = "regex-2023.3.23-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7006105b10b59971d3b248ad75acc3651c7e4cf54d81694df5a5130a3c3f7ea"},
+ {file = "regex-2023.3.23-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:93f3f1aa608380fe294aa4cb82e2afda07a7598e828d0341e124b8fd9327c715"},
+ {file = "regex-2023.3.23-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:787954f541ab95d8195d97b0b8cf1dc304424adb1e07365967e656b92b38a699"},
+ {file = "regex-2023.3.23-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:20abe0bdf03630fe92ccafc45a599bca8b3501f48d1de4f7d121153350a2f77d"},
+ {file = "regex-2023.3.23-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:11d00c31aeab9a6e0503bc77e73ed9f4527b3984279d997eb145d7c7be6268fd"},
+ {file = "regex-2023.3.23-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:d5bbe0e1511b844794a3be43d6c145001626ba9a6c1db8f84bdc724e91131d9d"},
+ {file = "regex-2023.3.23-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:ea3c0cb56eadbf4ab2277e7a095676370b3e46dbfc74d5c383bd87b0d6317910"},
+ {file = "regex-2023.3.23-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d895b4c863059a4934d3e874b90998df774644a41b349ebb330f85f11b4ef2c0"},
+ {file = "regex-2023.3.23-cp310-cp310-win32.whl", hash = "sha256:9d764514d19b4edcc75fd8cb1423448ef393e8b6cbd94f38cab983ab1b75855d"},
+ {file = "regex-2023.3.23-cp310-cp310-win_amd64.whl", hash = "sha256:11d1f2b7a0696dc0310de0efb51b1f4d813ad4401fe368e83c0c62f344429f98"},
+ {file = "regex-2023.3.23-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8a9c63cde0eaa345795c0fdeb19dc62d22e378c50b0bc67bf4667cd5b482d98b"},
+ {file = "regex-2023.3.23-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dd7200b4c27b68cf9c9646da01647141c6db09f48cc5b51bc588deaf8e98a797"},
+ {file = "regex-2023.3.23-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22720024b90a6ba673a725dcc62e10fb1111b889305d7c6b887ac7466b74bedb"},
+ {file = "regex-2023.3.23-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6b190a339090e6af25f4a5fd9e77591f6d911cc7b96ecbb2114890b061be0ac1"},
+ {file = "regex-2023.3.23-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e76b6fc0d8e9efa39100369a9b3379ce35e20f6c75365653cf58d282ad290f6f"},
+ {file = "regex-2023.3.23-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7868b8f218bf69a2a15402fde08b08712213a1f4b85a156d90473a6fb6b12b09"},
+ {file = "regex-2023.3.23-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2472428efc4127374f494e570e36b30bb5e6b37d9a754f7667f7073e43b0abdd"},
+ {file = "regex-2023.3.23-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c37df2a060cb476d94c047b18572ee2b37c31f831df126c0da3cd9227b39253d"},
+ {file = "regex-2023.3.23-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4479f9e2abc03362df4045b1332d4a2b7885b245a30d4f4b051c4083b97d95d8"},
+ {file = "regex-2023.3.23-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:e2396e0678167f2d0c197da942b0b3fb48fee2f0b5915a0feb84d11b6686afe6"},
+ {file = "regex-2023.3.23-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:75f288c60232a5339e0ff2fa05779a5e9c74e9fc085c81e931d4a264501e745b"},
+ {file = "regex-2023.3.23-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c869260aa62cee21c5eb171a466c0572b5e809213612ef8d495268cd2e34f20d"},
+ {file = "regex-2023.3.23-cp311-cp311-win32.whl", hash = "sha256:25f0532fd0c53e96bad84664171969de9673b4131f2297f1db850d3918d58858"},
+ {file = "regex-2023.3.23-cp311-cp311-win_amd64.whl", hash = "sha256:5ccfafd98473e007cebf7da10c1411035b7844f0f204015efd050601906dbb53"},
+ {file = "regex-2023.3.23-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6572ff287176c0fb96568adb292674b421fa762153ed074d94b1d939ed92c253"},
+ {file = "regex-2023.3.23-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a610e0adfcb0fc84ea25f6ea685e39e74cbcd9245a72a9a7aab85ff755a5ed27"},
+ {file = "regex-2023.3.23-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:086afe222d58b88b62847bdbd92079b4699350b4acab892f88a935db5707c790"},
+ {file = "regex-2023.3.23-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:79e29fd62fa2f597a6754b247356bda14b866131a22444d67f907d6d341e10f3"},
+ {file = "regex-2023.3.23-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c07ce8e9eee878a48ebeb32ee661b49504b85e164b05bebf25420705709fdd31"},
+ {file = "regex-2023.3.23-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86b036f401895e854de9fefe061518e78d506d8a919cc250dc3416bca03f6f9a"},
+ {file = "regex-2023.3.23-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:78ac8dd8e18800bb1f97aad0d73f68916592dddf233b99d2b5cabc562088503a"},
+ {file = "regex-2023.3.23-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:539dd010dc35af935b32f248099e38447bbffc10b59c2b542bceead2bed5c325"},
+ {file = "regex-2023.3.23-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:9bf4a5626f2a0ea006bf81e8963f498a57a47d58907eaa58f4b3e13be68759d8"},
+ {file = "regex-2023.3.23-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:cf86b4328c204c3f315074a61bc1c06f8a75a8e102359f18ce99fbcbbf1951f0"},
+ {file = "regex-2023.3.23-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:2848bf76673c83314068241c8d5b7fa9ad9bed866c979875a0e84039349e8fa7"},
+ {file = "regex-2023.3.23-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:c125a02d22c555e68f7433bac8449992fa1cead525399f14e47c2d98f2f0e467"},
+ {file = "regex-2023.3.23-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:cd1671e9d5ac05ce6aa86874dd8dfa048824d1dbe73060851b310c6c1a201a96"},
+ {file = "regex-2023.3.23-cp38-cp38-win32.whl", hash = "sha256:fffe57312a358be6ec6baeb43d253c36e5790e436b7bf5b7a38df360363e88e9"},
+ {file = "regex-2023.3.23-cp38-cp38-win_amd64.whl", hash = "sha256:dbb3f87e15d3dd76996d604af8678316ad2d7d20faa394e92d9394dfd621fd0c"},
+ {file = "regex-2023.3.23-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c88e8c226473b5549fe9616980ea7ca09289246cfbdf469241edf4741a620004"},
+ {file = "regex-2023.3.23-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6560776ec19c83f3645bbc5db64a7a5816c9d8fb7ed7201c5bcd269323d88072"},
+ {file = "regex-2023.3.23-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b1fc2632c01f42e06173d8dd9bb2e74ab9b0afa1d698058c867288d2c7a31f3"},
+ {file = "regex-2023.3.23-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fdf7ad455f1916b8ea5cdbc482d379f6daf93f3867b4232d14699867a5a13af7"},
+ {file = "regex-2023.3.23-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5fc33b27b1d800fc5b78d7f7d0f287e35079ecabe68e83d46930cf45690e1c8c"},
+ {file = "regex-2023.3.23-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c49552dc938e3588f63f8a78c86f3c9c75301e813bca0bef13bdb4b87ccf364"},
+ {file = "regex-2023.3.23-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e152461e9a0aedec7d37fc66ec0fa635eca984777d3d3c3e36f53bf3d3ceb16e"},
+ {file = "regex-2023.3.23-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:db034255e72d2995cf581b14bb3fc9c00bdbe6822b49fcd4eef79e1d5f232618"},
+ {file = "regex-2023.3.23-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:55ae114da21b7a790b90255ea52d2aa3a0d121a646deb2d3c6a3194e722fc762"},
+ {file = "regex-2023.3.23-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ef3f528fe1cc3d139508fe1b22523745aa77b9d6cb5b0bf277f48788ee0b993f"},
+ {file = "regex-2023.3.23-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:a81c9ec59ca2303acd1ccd7b9ac409f1e478e40e96f8f79b943be476c5fdb8bb"},
+ {file = "regex-2023.3.23-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:cde09c4fdd070772aa2596d97e942eb775a478b32459e042e1be71b739d08b77"},
+ {file = "regex-2023.3.23-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3cd9f5dd7b821f141d3a6ca0d5d9359b9221e4f051ca3139320adea9f1679691"},
+ {file = "regex-2023.3.23-cp39-cp39-win32.whl", hash = "sha256:7304863f3a652dab5e68e6fb1725d05ebab36ec0390676d1736e0571ebb713ef"},
+ {file = "regex-2023.3.23-cp39-cp39-win_amd64.whl", hash = "sha256:54c3fa855a3f7438149de3211738dd9b5f0c733f48b54ae05aa7fce83d48d858"},
+ {file = "regex-2023.3.23.tar.gz", hash = "sha256:dc80df325b43ffea5cdea2e3eaa97a44f3dd298262b1c7fe9dbb2a9522b956a7"},
+]
+
+[[package]]
+name = "requests"
+version = "2.31.0"
+description = "Python HTTP for Humans."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"},
+ {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"},
+]
+
+[package.dependencies]
+certifi = ">=2017.4.17"
+charset-normalizer = ">=2,<4"
+idna = ">=2.5,<4"
+urllib3 = ">=1.21.1,<3"
+
+[package.extras]
+socks = ["PySocks (>=1.5.6,!=1.5.7)"]
+use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
+
+[[package]]
+name = "requests-oauthlib"
+version = "1.3.1"
+description = "OAuthlib authentication support for Requests."
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+files = [
+ {file = "requests-oauthlib-1.3.1.tar.gz", hash = "sha256:75beac4a47881eeb94d5ea5d6ad31ef88856affe2332b9aafb52c6452ccf0d7a"},
+ {file = "requests_oauthlib-1.3.1-py2.py3-none-any.whl", hash = "sha256:2577c501a2fb8d05a304c09d090d6e47c306fef15809d102b327cf8364bddab5"},
+]
+
+[package.dependencies]
+oauthlib = ">=3.0.0"
+requests = ">=2.0.0"
+
+[package.extras]
+rsa = ["oauthlib[signedtoken] (>=3.0.0)"]
+
+[[package]]
+name = "resampy"
+version = "0.4.2"
+description = "Efficient signal resampling"
+optional = false
+python-versions = "*"
+files = [
+ {file = "resampy-0.4.2-py3-none-any.whl", hash = "sha256:4340b6c4e685a865621dfcf016e2a3dd49d865446b6025e30fe88567f22e052e"},
+ {file = "resampy-0.4.2.tar.gz", hash = "sha256:0a469e6ddb89956f4fd6c88728300e4bbd186fae569dd4fd17dae51a91cbaa15"},
+]
+
+[package.dependencies]
+numba = ">=0.53"
+numpy = ">=1.17"
+
+[package.extras]
+design = ["optuna (>=2.10.0)"]
+docs = ["numpydoc", "sphinx (!=1.3.1)"]
+tests = ["pytest (<8)", "pytest-cov", "scipy (>=1.0)"]
+
+[[package]]
+name = "rfc3986"
+version = "1.5.0"
+description = "Validating URI References per RFC 3986"
+optional = false
+python-versions = "*"
+files = [
+ {file = "rfc3986-1.5.0-py2.py3-none-any.whl", hash = "sha256:a86d6e1f5b1dc238b218b012df0aa79409667bb209e58da56d0b94704e712a97"},
+ {file = "rfc3986-1.5.0.tar.gz", hash = "sha256:270aaf10d87d0d4e095063c65bf3ddbc6ee3d0b226328ce21e036f946e421835"},
+]
+
+[package.dependencies]
+idna = {version = "*", optional = true, markers = "extra == \"idna2008\""}
+
+[package.extras]
+idna2008 = ["idna"]
+
+[[package]]
+name = "rsa"
+version = "4.9"
+description = "Pure-Python RSA implementation"
+optional = false
+python-versions = ">=3.6,<4"
+files = [
+ {file = "rsa-4.9-py3-none-any.whl", hash = "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7"},
+ {file = "rsa-4.9.tar.gz", hash = "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21"},
+]
+
+[package.dependencies]
+pyasn1 = ">=0.1.3"
+
+[[package]]
+name = "sacrebleu"
+version = "2.3.1"
+description = "Hassle-free computation of shareable, comparable, and reproducible BLEU, chrF, and TER scores"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "sacrebleu-2.3.1-py3-none-any.whl", hash = "sha256:352227b8ca9e04ed509266d1fee6c8cff0ea1417c429f8c684645ad2db8b02e7"},
+ {file = "sacrebleu-2.3.1.tar.gz", hash = "sha256:7969b294f15dae84d80fb2b76d30c83b245f49f4ecb1cac79acb553eb93cb537"},
+]
+
+[package.dependencies]
+colorama = "*"
+lxml = "*"
+numpy = ">=1.17"
+portalocker = "*"
+regex = "*"
+tabulate = ">=0.8.9"
+
+[package.extras]
+ja = ["ipadic (>=1.0,<2.0)", "mecab-python3 (==1.0.5)"]
+ko = ["mecab-ko (==1.0.0)", "mecab-ko-dic (>=1.0,<2.0)"]
+
+[[package]]
+name = "scikit-learn"
+version = "1.2.2"
+description = "A set of python modules for machine learning and data mining"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "scikit-learn-1.2.2.tar.gz", hash = "sha256:8429aea30ec24e7a8c7ed8a3fa6213adf3814a6efbea09e16e0a0c71e1a1a3d7"},
+ {file = "scikit_learn-1.2.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:99cc01184e347de485bf253d19fcb3b1a3fb0ee4cea5ee3c43ec0cc429b6d29f"},
+ {file = "scikit_learn-1.2.2-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:e6e574db9914afcb4e11ade84fab084536a895ca60aadea3041e85b8ac963edb"},
+ {file = "scikit_learn-1.2.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6fe83b676f407f00afa388dd1fdd49e5c6612e551ed84f3b1b182858f09e987d"},
+ {file = "scikit_learn-1.2.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e2642baa0ad1e8f8188917423dd73994bf25429f8893ddbe115be3ca3183584"},
+ {file = "scikit_learn-1.2.2-cp310-cp310-win_amd64.whl", hash = "sha256:ad66c3848c0a1ec13464b2a95d0a484fd5b02ce74268eaa7e0c697b904f31d6c"},
+ {file = "scikit_learn-1.2.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:dfeaf8be72117eb61a164ea6fc8afb6dfe08c6f90365bde2dc16456e4bc8e45f"},
+ {file = "scikit_learn-1.2.2-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:fe0aa1a7029ed3e1dcbf4a5bc675aa3b1bc468d9012ecf6c6f081251ca47f590"},
+ {file = "scikit_learn-1.2.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:065e9673e24e0dc5113e2dd2b4ca30c9d8aa2fa90f4c0597241c93b63130d233"},
+ {file = "scikit_learn-1.2.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf036ea7ef66115e0d49655f16febfa547886deba20149555a41d28f56fd6d3c"},
+ {file = "scikit_learn-1.2.2-cp311-cp311-win_amd64.whl", hash = "sha256:8b0670d4224a3c2d596fd572fb4fa673b2a0ccfb07152688ebd2ea0b8c61025c"},
+ {file = "scikit_learn-1.2.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9c710ff9f9936ba8a3b74a455ccf0dcf59b230caa1e9ba0223773c490cab1e51"},
+ {file = "scikit_learn-1.2.2-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:2dd3ffd3950e3d6c0c0ef9033a9b9b32d910c61bd06cb8206303fb4514b88a49"},
+ {file = "scikit_learn-1.2.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:44b47a305190c28dd8dd73fc9445f802b6ea716669cfc22ab1eb97b335d238b1"},
+ {file = "scikit_learn-1.2.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:953236889928d104c2ef14027539f5f2609a47ebf716b8cbe4437e85dce42744"},
+ {file = "scikit_learn-1.2.2-cp38-cp38-win_amd64.whl", hash = "sha256:7f69313884e8eb311460cc2f28676d5e400bd929841a2c8eb8742ae78ebf7c20"},
+ {file = "scikit_learn-1.2.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8156db41e1c39c69aa2d8599ab7577af53e9e5e7a57b0504e116cc73c39138dd"},
+ {file = "scikit_learn-1.2.2-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:fe175ee1dab589d2e1033657c5b6bec92a8a3b69103e3dd361b58014729975c3"},
+ {file = "scikit_learn-1.2.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7d5312d9674bed14f73773d2acf15a3272639b981e60b72c9b190a0cffed5bad"},
+ {file = "scikit_learn-1.2.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea061bf0283bf9a9f36ea3c5d3231ba2176221bbd430abd2603b1c3b2ed85c89"},
+ {file = "scikit_learn-1.2.2-cp39-cp39-win_amd64.whl", hash = "sha256:6477eed40dbce190f9f9e9d0d37e020815825b300121307942ec2110302b66a3"},
+]
+
+[package.dependencies]
+joblib = ">=1.1.1"
+numpy = ">=1.17.3"
+scipy = ">=1.3.2"
+threadpoolctl = ">=2.0.0"
+
+[package.extras]
+benchmark = ["matplotlib (>=3.1.3)", "memory-profiler (>=0.57.0)", "pandas (>=1.0.5)"]
+docs = ["Pillow (>=7.1.2)", "matplotlib (>=3.1.3)", "memory-profiler (>=0.57.0)", "numpydoc (>=1.2.0)", "pandas (>=1.0.5)", "plotly (>=5.10.0)", "pooch (>=1.6.0)", "scikit-image (>=0.16.2)", "seaborn (>=0.9.0)", "sphinx (>=4.0.1)", "sphinx-gallery (>=0.7.0)", "sphinx-prompt (>=1.3.0)", "sphinxext-opengraph (>=0.4.2)"]
+examples = ["matplotlib (>=3.1.3)", "pandas (>=1.0.5)", "plotly (>=5.10.0)", "pooch (>=1.6.0)", "scikit-image (>=0.16.2)", "seaborn (>=0.9.0)"]
+tests = ["black (>=22.3.0)", "flake8 (>=3.8.2)", "matplotlib (>=3.1.3)", "mypy (>=0.961)", "numpydoc (>=1.2.0)", "pandas (>=1.0.5)", "pooch (>=1.6.0)", "pyamg (>=4.0.0)", "pytest (>=5.3.1)", "pytest-cov (>=2.9.0)", "scikit-image (>=0.16.2)"]
+
+[[package]]
+name = "scipy"
+version = "1.9.3"
+description = "Fundamental algorithms for scientific computing in Python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "scipy-1.9.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1884b66a54887e21addf9c16fb588720a8309a57b2e258ae1c7986d4444d3bc0"},
+ {file = "scipy-1.9.3-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:83b89e9586c62e787f5012e8475fbb12185bafb996a03257e9675cd73d3736dd"},
+ {file = "scipy-1.9.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a72d885fa44247f92743fc20732ae55564ff2a519e8302fb7e18717c5355a8b"},
+ {file = "scipy-1.9.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d01e1dd7b15bd2449c8bfc6b7cc67d630700ed655654f0dfcf121600bad205c9"},
+ {file = "scipy-1.9.3-cp310-cp310-win_amd64.whl", hash = "sha256:68239b6aa6f9c593da8be1509a05cb7f9efe98b80f43a5861cd24c7557e98523"},
+ {file = "scipy-1.9.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b41bc822679ad1c9a5f023bc93f6d0543129ca0f37c1ce294dd9d386f0a21096"},
+ {file = "scipy-1.9.3-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:90453d2b93ea82a9f434e4e1cba043e779ff67b92f7a0e85d05d286a3625df3c"},
+ {file = "scipy-1.9.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83c06e62a390a9167da60bedd4575a14c1f58ca9dfde59830fc42e5197283dab"},
+ {file = "scipy-1.9.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:abaf921531b5aeaafced90157db505e10345e45038c39e5d9b6c7922d68085cb"},
+ {file = "scipy-1.9.3-cp311-cp311-win_amd64.whl", hash = "sha256:06d2e1b4c491dc7d8eacea139a1b0b295f74e1a1a0f704c375028f8320d16e31"},
+ {file = "scipy-1.9.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5a04cd7d0d3eff6ea4719371cbc44df31411862b9646db617c99718ff68d4840"},
+ {file = "scipy-1.9.3-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:545c83ffb518094d8c9d83cce216c0c32f8c04aaf28b92cc8283eda0685162d5"},
+ {file = "scipy-1.9.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d54222d7a3ba6022fdf5773931b5d7c56efe41ede7f7128c7b1637700409108"},
+ {file = "scipy-1.9.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cff3a5295234037e39500d35316a4c5794739433528310e117b8a9a0c76d20fc"},
+ {file = "scipy-1.9.3-cp38-cp38-win_amd64.whl", hash = "sha256:2318bef588acc7a574f5bfdff9c172d0b1bf2c8143d9582e05f878e580a3781e"},
+ {file = "scipy-1.9.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d644a64e174c16cb4b2e41dfea6af722053e83d066da7343f333a54dae9bc31c"},
+ {file = "scipy-1.9.3-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:da8245491d73ed0a994ed9c2e380fd058ce2fa8a18da204681f2fe1f57f98f95"},
+ {file = "scipy-1.9.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4db5b30849606a95dcf519763dd3ab6fe9bd91df49eba517359e450a7d80ce2e"},
+ {file = "scipy-1.9.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c68db6b290cbd4049012990d7fe71a2abd9ffbe82c0056ebe0f01df8be5436b0"},
+ {file = "scipy-1.9.3-cp39-cp39-win_amd64.whl", hash = "sha256:5b88e6d91ad9d59478fafe92a7c757d00c59e3bdc3331be8ada76a4f8d683f58"},
+ {file = "scipy-1.9.3.tar.gz", hash = "sha256:fbc5c05c85c1a02be77b1ff591087c83bc44579c6d2bd9fb798bb64ea5e1a027"},
+]
+
+[package.dependencies]
+numpy = ">=1.18.5,<1.26.0"
+
+[package.extras]
+dev = ["flake8", "mypy", "pycodestyle", "typing_extensions"]
+doc = ["matplotlib (>2)", "numpydoc", "pydata-sphinx-theme (==0.9.0)", "sphinx (!=4.1.0)", "sphinx-panels (>=0.5.2)", "sphinx-tabs"]
+test = ["asv", "gmpy2", "mpmath", "pytest", "pytest-cov", "pytest-xdist", "scikit-umfpack", "threadpoolctl"]
+
+[[package]]
+name = "semantic-version"
+version = "2.10.0"
+description = "A library implementing the 'SemVer' scheme."
+optional = false
+python-versions = ">=2.7"
+files = [
+ {file = "semantic_version-2.10.0-py2.py3-none-any.whl", hash = "sha256:de78a3b8e0feda74cabc54aab2da702113e33ac9d9eb9d2389bcf1f58b7d9177"},
+ {file = "semantic_version-2.10.0.tar.gz", hash = "sha256:bdabb6d336998cbb378d4b9db3a4b56a1e3235701dc05ea2690d9a997ed5041c"},
+]
+
+[package.extras]
+dev = ["Django (>=1.11)", "check-manifest", "colorama (<=0.4.1)", "coverage", "flake8", "nose2", "readme-renderer (<25.0)", "tox", "wheel", "zest.releaser[recommended]"]
+doc = ["Sphinx", "sphinx-rtd-theme"]
+
+[[package]]
+name = "setuptools"
+version = "67.7.2"
+description = "Easily download, build, install, upgrade, and uninstall Python packages"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "setuptools-67.7.2-py3-none-any.whl", hash = "sha256:23aaf86b85ca52ceb801d32703f12d77517b2556af839621c641fca11287952b"},
+ {file = "setuptools-67.7.2.tar.gz", hash = "sha256:f104fa03692a2602fa0fec6c6a9e63b6c8a968de13e17c026957dd1f53d80990"},
+]
+
+[package.extras]
+docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"]
+testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8 (<5)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"]
+testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"]
+
+[[package]]
+name = "six"
+version = "1.16.0"
+description = "Python 2 and 3 compatibility utilities"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*"
+files = [
+ {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"},
+ {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"},
+]
+
+[[package]]
+name = "sniffio"
+version = "1.3.0"
+description = "Sniff out which async library your code is running under"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "sniffio-1.3.0-py3-none-any.whl", hash = "sha256:eecefdce1e5bbfb7ad2eeaabf7c1eeb404d7757c379bd1f7e5cce9d8bf425384"},
+ {file = "sniffio-1.3.0.tar.gz", hash = "sha256:e60305c5e5d314f5389259b7f22aaa33d8f7dee49763119234af3755c55b9101"},
+]
+
+[[package]]
+name = "soundfile"
+version = "0.12.1"
+description = "An audio library based on libsndfile, CFFI and NumPy"
+optional = false
+python-versions = "*"
+files = [
+ {file = "soundfile-0.12.1-py2.py3-none-any.whl", hash = "sha256:828a79c2e75abab5359f780c81dccd4953c45a2c4cd4f05ba3e233ddf984b882"},
+ {file = "soundfile-0.12.1-py2.py3-none-macosx_10_9_x86_64.whl", hash = "sha256:d922be1563ce17a69582a352a86f28ed8c9f6a8bc951df63476ffc310c064bfa"},
+ {file = "soundfile-0.12.1-py2.py3-none-macosx_11_0_arm64.whl", hash = "sha256:bceaab5c4febb11ea0554566784bcf4bc2e3977b53946dda2b12804b4fe524a8"},
+ {file = "soundfile-0.12.1-py2.py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:2dc3685bed7187c072a46ab4ffddd38cef7de9ae5eb05c03df2ad569cf4dacbc"},
+ {file = "soundfile-0.12.1-py2.py3-none-manylinux_2_31_x86_64.whl", hash = "sha256:074247b771a181859d2bc1f98b5ebf6d5153d2c397b86ee9e29ba602a8dfe2a6"},
+ {file = "soundfile-0.12.1-py2.py3-none-win32.whl", hash = "sha256:59dfd88c79b48f441bbf6994142a19ab1de3b9bb7c12863402c2bc621e49091a"},
+ {file = "soundfile-0.12.1-py2.py3-none-win_amd64.whl", hash = "sha256:0d86924c00b62552b650ddd28af426e3ff2d4dc2e9047dae5b3d8452e0a49a77"},
+ {file = "soundfile-0.12.1.tar.gz", hash = "sha256:e8e1017b2cf1dda767aef19d2fd9ee5ebe07e050d430f77a0a7c66ba08b8cdae"},
+]
+
+[package.dependencies]
+cffi = ">=1.0"
+
+[package.extras]
+numpy = ["numpy"]
+
+[[package]]
+name = "starlette"
+version = "0.27.0"
+description = "The little ASGI library that shines."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "starlette-0.27.0-py3-none-any.whl", hash = "sha256:918416370e846586541235ccd38a474c08b80443ed31c578a418e2209b3eef91"},
+ {file = "starlette-0.27.0.tar.gz", hash = "sha256:6a6b0d042acb8d469a01eba54e9cda6cbd24ac602c4cd016723117d6a7e73b75"},
+]
+
+[package.dependencies]
+anyio = ">=3.4.0,<5"
+typing-extensions = {version = ">=3.10.0", markers = "python_version < \"3.10\""}
+
+[package.extras]
+full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart", "pyyaml"]
+
+[[package]]
+name = "sympy"
+version = "1.11.1"
+description = "Computer algebra system (CAS) in Python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "sympy-1.11.1-py3-none-any.whl", hash = "sha256:938f984ee2b1e8eae8a07b884c8b7a1146010040fccddc6539c54f401c8f6fcf"},
+ {file = "sympy-1.11.1.tar.gz", hash = "sha256:e32380dce63cb7c0108ed525570092fd45168bdae2faa17e528221ef72e88658"},
+]
+
+[package.dependencies]
+mpmath = ">=0.19"
+
+[[package]]
+name = "tabulate"
+version = "0.9.0"
+description = "Pretty-print tabular data"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "tabulate-0.9.0-py3-none-any.whl", hash = "sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f"},
+ {file = "tabulate-0.9.0.tar.gz", hash = "sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c"},
+]
+
+[package.extras]
+widechars = ["wcwidth"]
+
+[[package]]
+name = "tensorboard"
+version = "2.12.1"
+description = "TensorBoard lets you watch Tensors Flow"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "tensorboard-2.12.1-py3-none-any.whl", hash = "sha256:58f1c2a25b4829b9c48d2b1ec951dedc9325dcd1ea4b0f601d241d2887d0ed65"},
+]
+
+[package.dependencies]
+absl-py = ">=0.4"
+google-auth = ">=1.6.3,<3"
+google-auth-oauthlib = ">=0.5,<1.1"
+grpcio = ">=1.48.2"
+markdown = ">=2.6.8"
+numpy = ">=1.12.0"
+protobuf = ">=3.19.6"
+requests = ">=2.21.0,<3"
+setuptools = ">=41.0.0"
+tensorboard-data-server = ">=0.7.0,<0.8.0"
+tensorboard-plugin-wit = ">=1.6.0"
+werkzeug = ">=1.0.1"
+wheel = ">=0.26"
+
+[[package]]
+name = "tensorboard-data-server"
+version = "0.7.0"
+description = "Fast data loading for TensorBoard"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "tensorboard_data_server-0.7.0-py3-none-any.whl", hash = "sha256:753d4214799b31da7b6d93837959abebbc6afa86e69eacf1e9a317a48daa31eb"},
+ {file = "tensorboard_data_server-0.7.0-py3-none-macosx_10_9_x86_64.whl", hash = "sha256:eb7fa518737944dbf4f0cf83c2e40a7ac346bf91be2e6a0215de98be74e85454"},
+ {file = "tensorboard_data_server-0.7.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:64aa1be7c23e80b1a42c13b686eb0875bb70f5e755f4d2b8de5c1d880cf2267f"},
+]
+
+[[package]]
+name = "tensorboard-plugin-wit"
+version = "1.8.1"
+description = "What-If Tool TensorBoard plugin."
+optional = false
+python-versions = "*"
+files = [
+ {file = "tensorboard_plugin_wit-1.8.1-py3-none-any.whl", hash = "sha256:ff26bdd583d155aa951ee3b152b3d0cffae8005dc697f72b44a8e8c2a77a8cbe"},
+]
+
+[[package]]
+name = "tensorboardx"
+version = "2.6"
+description = "TensorBoardX lets you watch Tensors Flow without Tensorflow"
+optional = false
+python-versions = "*"
+files = [
+ {file = "tensorboardX-2.6-py2.py3-none-any.whl", hash = "sha256:24a7cd076488de1e9d15ef25371b8ebf90c4f8f622af2477c611198f03f4a606"},
+ {file = "tensorboardX-2.6.tar.gz", hash = "sha256:d4c036964dd2deb075a1909832b276daa383eab3f9db519ad90b99f5aea06b0c"},
+]
+
+[package.dependencies]
+numpy = "*"
+packaging = "*"
+protobuf = ">=3.8.0,<4"
+
+[[package]]
+name = "threadpoolctl"
+version = "3.1.0"
+description = "threadpoolctl"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "threadpoolctl-3.1.0-py3-none-any.whl", hash = "sha256:8b99adda265feb6773280df41eece7b2e6561b772d21ffd52e372f999024907b"},
+ {file = "threadpoolctl-3.1.0.tar.gz", hash = "sha256:a335baacfaa4400ae1f0d8e3a58d6674d2f8828e3716bb2802c44955ad391380"},
+]
+
+[[package]]
+name = "toolz"
+version = "0.12.0"
+description = "List processing tools and functional utilities"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "toolz-0.12.0-py3-none-any.whl", hash = "sha256:2059bd4148deb1884bb0eb770a3cde70e7f954cfbbdc2285f1f2de01fd21eb6f"},
+ {file = "toolz-0.12.0.tar.gz", hash = "sha256:88c570861c440ee3f2f6037c4654613228ff40c93a6c25e0eba70d17282c6194"},
+]
+
+[[package]]
+name = "torch"
+version = "2.0.0"
+description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration"
+optional = false
+python-versions = ">=3.8.0"
+files = [
+ {file = "torch-2.0.0-1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:c9090bda7d2eeeecd74f51b721420dbeb44f838d4536cc1b284e879417e3064a"},
+ {file = "torch-2.0.0-1-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:bd42db2a48a20574d2c33489e120e9f32789c4dc13c514b0c44272972d14a2d7"},
+ {file = "torch-2.0.0-1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:8969aa8375bcbc0c2993e7ede0a7f889df9515f18b9b548433f412affed478d9"},
+ {file = "torch-2.0.0-1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:ab2da16567cb55b67ae39e32d520d68ec736191d88ac79526ca5874754c32203"},
+ {file = "torch-2.0.0-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:7a9319a67294ef02459a19738bbfa8727bb5307b822dadd708bc2ccf6c901aca"},
+ {file = "torch-2.0.0-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:9f01fe1f6263f31bd04e1757946fd63ad531ae37f28bb2dbf66f5c826ee089f4"},
+ {file = "torch-2.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:527f4ae68df7b8301ee6b1158ca56350282ea633686537b30dbb5d7b4a52622a"},
+ {file = "torch-2.0.0-cp310-none-macosx_10_9_x86_64.whl", hash = "sha256:ce9b5a49bd513dff7950a5a07d6e26594dd51989cee05ba388b03e8e366fd5d5"},
+ {file = "torch-2.0.0-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:53e1c33c6896583cdb9a583693e22e99266444c4a43392dddc562640d39e542b"},
+ {file = "torch-2.0.0-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:09651bff72e439d004c991f15add0c397c66f98ab36fe60d5514b44e4da722e8"},
+ {file = "torch-2.0.0-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:d439aec349c98f12819e8564b8c54008e4613dd4428582af0e6e14c24ca85870"},
+ {file = "torch-2.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:2802f84f021907deee7e9470ed10c0e78af7457ac9a08a6cd7d55adef835fede"},
+ {file = "torch-2.0.0-cp311-none-macosx_10_9_x86_64.whl", hash = "sha256:01858620f25f25e7a9ec4b547ff38e5e27c92d38ec4ccba9cfbfb31d7071ed9c"},
+ {file = "torch-2.0.0-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:9a2e53b5783ef5896a6af338b36d782f28e83c8ddfc2ac44b67b066d9d76f498"},
+ {file = "torch-2.0.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:ec5fff2447663e369682838ff0f82187b4d846057ef4d119a8dea7772a0b17dd"},
+ {file = "torch-2.0.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:11b0384fe3c18c01b8fc5992e70fc519cde65e44c51cc87be1838c1803daf42f"},
+ {file = "torch-2.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:e54846aa63855298cfb1195487f032e413e7ac9cbfa978fda32354cc39551475"},
+ {file = "torch-2.0.0-cp38-none-macosx_10_9_x86_64.whl", hash = "sha256:cc788cbbbbc6eb4c90e52c550efd067586c2693092cf367c135b34893a64ae78"},
+ {file = "torch-2.0.0-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:d292640f0fd72b7a31b2a6e3b635eb5065fcbedd4478f9cad1a1e7a9ec861d35"},
+ {file = "torch-2.0.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:6befaad784004b7af357e3d87fa0863c1f642866291f12a4c2af2de435e8ac5c"},
+ {file = "torch-2.0.0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:a83b26bd6ae36fbf5fee3d56973d9816e2002e8a3b7d9205531167c28aaa38a7"},
+ {file = "torch-2.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:c7e67195e1c3e33da53954b026e89a8e1ff3bc1aeb9eb32b677172d4a9b5dcbf"},
+ {file = "torch-2.0.0-cp39-none-macosx_10_9_x86_64.whl", hash = "sha256:6e0b97beb037a165669c312591f242382e9109a240e20054d5a5782d9236cad0"},
+ {file = "torch-2.0.0-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:297a4919aff1c0f98a58ebe969200f71350a1d4d4f986dbfd60c02ffce780e99"},
+]
+
+[package.dependencies]
+filelock = "*"
+jinja2 = "*"
+networkx = "*"
+nvidia-cublas-cu11 = {version = "11.10.3.66", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""}
+nvidia-cuda-cupti-cu11 = {version = "11.7.101", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""}
+nvidia-cuda-nvrtc-cu11 = {version = "11.7.99", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""}
+nvidia-cuda-runtime-cu11 = {version = "11.7.99", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""}
+nvidia-cudnn-cu11 = {version = "8.5.0.96", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""}
+nvidia-cufft-cu11 = {version = "10.9.0.58", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""}
+nvidia-curand-cu11 = {version = "10.2.10.91", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""}
+nvidia-cusolver-cu11 = {version = "11.4.0.1", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""}
+nvidia-cusparse-cu11 = {version = "11.7.4.91", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""}
+nvidia-nccl-cu11 = {version = "2.14.3", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""}
+nvidia-nvtx-cu11 = {version = "11.7.91", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""}
+sympy = "*"
+triton = {version = "2.0.0", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""}
+typing-extensions = "*"
+
+[package.extras]
+opt-einsum = ["opt-einsum (>=3.3)"]
+
+[[package]]
+name = "torchaudio"
+version = "2.0.1"
+description = "An audio package for PyTorch"
+optional = false
+python-versions = "*"
+files = [
+ {file = "torchaudio-2.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5d21ebbb55e7040d418d5062b0e882f9660d68b477b38fd436fa6c92ccbb52a"},
+ {file = "torchaudio-2.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6dbcd93b29d71a2f500f36a34ea5e467f510f773da85322098e6bdd8c9dc9948"},
+ {file = "torchaudio-2.0.1-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:5fdaba10ff06d098d603d9eb8d2ff541c3f3fe28ba178a78787190cec0d5187f"},
+ {file = "torchaudio-2.0.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:6419199c773c5045c594ff950d5e5dbbfa6c830892ec09721d4ed8704b702bfd"},
+ {file = "torchaudio-2.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:a5c81e480e5dcdcba065af1e3e31678ac29518991f00260094d37a39e63d76e5"},
+ {file = "torchaudio-2.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e2a047675493c0aa258fec621ef40e8b01abe3d8dbc872152e4b5998418aa3c5"},
+ {file = "torchaudio-2.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:91a28e587f708a03320eddbcc4a7dd1ad7150b3d4846b6c1557d85cc89a8d06c"},
+ {file = "torchaudio-2.0.1-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:ba7740d98f601218ff667598ab3d9dab5f326878374fcb52d656f4ff033b9e96"},
+ {file = "torchaudio-2.0.1-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:f401b192921c8b77cc5e478ede589b256dba463f1cee91172ecb376fea45a288"},
+ {file = "torchaudio-2.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:0ef6754cf75ca5fd5117cb6243a6cf33552d67e9af0075aa6954b2c34bbf1036"},
+ {file = "torchaudio-2.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:022ca1baa4bb819b78343bd47b57ff6dc6f9fc19fa4ef269946aadf7e62db3c0"},
+ {file = "torchaudio-2.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a153ad5cdb62de8ec9fd1360a0d080bbaf39d578ae04e788db211571e675b7e0"},
+ {file = "torchaudio-2.0.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:aa7897774ab4156d0b72f7078b823ebc1371ee24c50df965447782889552367a"},
+ {file = "torchaudio-2.0.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:48d133593cddfe0424a350b566d54065bf6fe7469654de7add2f11b3ef03c5d9"},
+ {file = "torchaudio-2.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:ac65eb067feee435debba81adfe8337fa007a06de6508c0d80261c5562b6d098"},
+ {file = "torchaudio-2.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e3c6c8f9ea9f0e2df7a0b9375b0dcf955906e38fc12fab542b72a861564af8e7"},
+ {file = "torchaudio-2.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1d0cf0779a334ec1861e9fa28bceb66a633c42e8f6b3322e2e37ff9f20d0ae81"},
+ {file = "torchaudio-2.0.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:ab7acd2b5d351a2c65e4d935bb90b9256382bed93df57ee177bdbbe31c3cc984"},
+ {file = "torchaudio-2.0.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:77b953fd7278773269a9477315b8998ae7e5011cc4b2907e0df18162327482f1"},
+ {file = "torchaudio-2.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:c01bcea9d4c4a6616452e6cbd44d55913d8e6dee58191b925f35d46a2bf6e71b"},
+]
+
+[package.dependencies]
+torch = "2.0.0"
+
+[[package]]
+name = "torchgen"
+version = "0.0.1"
+description = "Ready to use implementations of state-of-the-art generative models in PyTorch"
+optional = false
+python-versions = ">=3.7, <4"
+files = [
+ {file = "torchgen-0.0.1-py3-none-any.whl", hash = "sha256:78d02b5e4ea0231ce46b4262564a05a9cb2047fcfcdcf4a4ab56230a0f21be66"},
+]
+
+[[package]]
+name = "tornado"
+version = "6.3.2"
+description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed."
+optional = false
+python-versions = ">= 3.8"
+files = [
+ {file = "tornado-6.3.2-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:c367ab6c0393d71171123ca5515c61ff62fe09024fa6bf299cd1339dc9456829"},
+ {file = "tornado-6.3.2-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:b46a6ab20f5c7c1cb949c72c1994a4585d2eaa0be4853f50a03b5031e964fc7c"},
+ {file = "tornado-6.3.2-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c2de14066c4a38b4ecbbcd55c5cc4b5340eb04f1c5e81da7451ef555859c833f"},
+ {file = "tornado-6.3.2-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:05615096845cf50a895026f749195bf0b10b8909f9be672f50b0fe69cba368e4"},
+ {file = "tornado-6.3.2-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5b17b1cf5f8354efa3d37c6e28fdfd9c1c1e5122f2cb56dac121ac61baa47cbe"},
+ {file = "tornado-6.3.2-cp38-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:29e71c847a35f6e10ca3b5c2990a52ce38b233019d8e858b755ea6ce4dcdd19d"},
+ {file = "tornado-6.3.2-cp38-abi3-musllinux_1_1_i686.whl", hash = "sha256:834ae7540ad3a83199a8da8f9f2d383e3c3d5130a328889e4cc991acc81e87a0"},
+ {file = "tornado-6.3.2-cp38-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:6a0848f1aea0d196a7c4f6772197cbe2abc4266f836b0aac76947872cd29b411"},
+ {file = "tornado-6.3.2-cp38-abi3-win32.whl", hash = "sha256:7efcbcc30b7c654eb6a8c9c9da787a851c18f8ccd4a5a3a95b05c7accfa068d2"},
+ {file = "tornado-6.3.2-cp38-abi3-win_amd64.whl", hash = "sha256:0c325e66c8123c606eea33084976c832aa4e766b7dff8aedd7587ea44a604cdf"},
+ {file = "tornado-6.3.2.tar.gz", hash = "sha256:4b927c4f19b71e627b13f3db2324e4ae660527143f9e1f2e2fb404f3a187e2ba"},
+]
+
+[[package]]
+name = "tqdm"
+version = "4.65.0"
+description = "Fast, Extensible Progress Meter"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "tqdm-4.65.0-py3-none-any.whl", hash = "sha256:c4f53a17fe37e132815abceec022631be8ffe1b9381c2e6e30aa70edc99e9671"},
+ {file = "tqdm-4.65.0.tar.gz", hash = "sha256:1871fb68a86b8fb3b59ca4cdd3dcccbc7e6d613eeed31f4c332531977b89beb5"},
+]
+
+[package.dependencies]
+colorama = {version = "*", markers = "platform_system == \"Windows\""}
+
+[package.extras]
+dev = ["py-make (>=0.1.0)", "twine", "wheel"]
+notebook = ["ipywidgets (>=6)"]
+slack = ["slack-sdk"]
+telegram = ["requests"]
+
+[[package]]
+name = "traitlets"
+version = "5.9.0"
+description = "Traitlets Python configuration system"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "traitlets-5.9.0-py3-none-any.whl", hash = "sha256:9e6ec080259b9a5940c797d58b613b5e31441c2257b87c2e795c5228ae80d2d8"},
+ {file = "traitlets-5.9.0.tar.gz", hash = "sha256:f6cde21a9c68cf756af02035f72d5a723bf607e862e7be33ece505abf4a3bad9"},
+]
+
+[package.extras]
+docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"]
+test = ["argcomplete (>=2.0)", "pre-commit", "pytest", "pytest-mock"]
+
+[[package]]
+name = "triton"
+version = "2.0.0"
+description = "A language and compiler for custom Deep Learning operations"
+optional = false
+python-versions = "*"
+files = [
+ {file = "triton-2.0.0-1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:38806ee9663f4b0f7cd64790e96c579374089e58f49aac4a6608121aa55e2505"},
+ {file = "triton-2.0.0-1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:226941c7b8595219ddef59a1fdb821e8c744289a132415ddd584facedeb475b1"},
+ {file = "triton-2.0.0-1-cp36-cp36m-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4c9fc8c89874bc48eb7e7b2107a9b8d2c0bf139778637be5bfccb09191685cfd"},
+ {file = "triton-2.0.0-1-cp37-cp37m-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d2684b6a60b9f174f447f36f933e9a45f31db96cb723723ecd2dcfd1c57b778b"},
+ {file = "triton-2.0.0-1-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9d4978298b74fcf59a75fe71e535c092b023088933b2f1df933ec32615e4beef"},
+ {file = "triton-2.0.0-1-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:74f118c12b437fb2ca25e1a04759173b517582fcf4c7be11913316c764213656"},
+ {file = "triton-2.0.0-1-pp37-pypy37_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9618815a8da1d9157514f08f855d9e9ff92e329cd81c0305003eb9ec25cc5add"},
+ {file = "triton-2.0.0-1-pp38-pypy38_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:1aca3303629cd3136375b82cb9921727f804e47ebee27b2677fef23005c3851a"},
+ {file = "triton-2.0.0-1-pp39-pypy39_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:e3e13aa8b527c9b642e3a9defcc0fbd8ffbe1c80d8ac8c15a01692478dc64d8a"},
+ {file = "triton-2.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f05a7e64e4ca0565535e3d5d3405d7e49f9d308505bb7773d21fb26a4c008c2"},
+ {file = "triton-2.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb4b99ca3c6844066e516658541d876c28a5f6e3a852286bbc97ad57134827fd"},
+ {file = "triton-2.0.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47b4d70dc92fb40af553b4460492c31dc7d3a114a979ffb7a5cdedb7eb546c08"},
+ {file = "triton-2.0.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fedce6a381901b1547e0e7e1f2546e4f65dca6d91e2d8a7305a2d1f5551895be"},
+ {file = "triton-2.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75834f27926eab6c7f00ce73aaf1ab5bfb9bec6eb57ab7c0bfc0a23fac803b4c"},
+ {file = "triton-2.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0117722f8c2b579cd429e0bee80f7731ae05f63fe8e9414acd9a679885fcbf42"},
+ {file = "triton-2.0.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bcd9be5d0c2e45d2b7e6ddc6da20112b6862d69741576f9c3dbaf941d745ecae"},
+ {file = "triton-2.0.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42a0d2c3fc2eab4ba71384f2e785fbfd47aa41ae05fa58bf12cb31dcbd0aeceb"},
+ {file = "triton-2.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:52c47b72c72693198163ece9d90a721299e4fb3b8e24fd13141e384ad952724f"},
+]
+
+[package.dependencies]
+cmake = "*"
+filelock = "*"
+lit = "*"
+torch = "*"
+
+[package.extras]
+tests = ["autopep8", "flake8", "isort", "numpy", "pytest", "scipy (>=1.7.1)"]
+tutorials = ["matplotlib", "pandas", "tabulate"]
+
+[[package]]
+name = "typing-extensions"
+version = "4.5.0"
+description = "Backported and Experimental Type Hints for Python 3.7+"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "typing_extensions-4.5.0-py3-none-any.whl", hash = "sha256:fb33085c39dd998ac16d1431ebc293a8b3eedd00fd4a32de0ff79002c19511b4"},
+ {file = "typing_extensions-4.5.0.tar.gz", hash = "sha256:5cb5f4a79139d699607b3ef622a1dedafa84e115ab0024e0d9c044a9479ca7cb"},
+]
+
+[[package]]
+name = "tzdata"
+version = "2023.3"
+description = "Provider of IANA time zone data"
+optional = false
+python-versions = ">=2"
+files = [
+ {file = "tzdata-2023.3-py2.py3-none-any.whl", hash = "sha256:7e65763eef3120314099b6939b5546db7adce1e7d6f2e179e3df563c70511eda"},
+ {file = "tzdata-2023.3.tar.gz", hash = "sha256:11ef1e08e54acb0d4f95bdb1be05da659673de4acbd21bf9c69e94cc5e907a3a"},
+]
+
+[[package]]
+name = "uc-micro-py"
+version = "1.0.1"
+description = "Micro subset of unicode data files for linkify-it-py projects."
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "uc-micro-py-1.0.1.tar.gz", hash = "sha256:b7cdf4ea79433043ddfe2c82210208f26f7962c0cfbe3bacb05ee879a7fdb596"},
+ {file = "uc_micro_py-1.0.1-py3-none-any.whl", hash = "sha256:316cfb8b6862a0f1d03540f0ae6e7b033ff1fa0ddbe60c12cbe0d4cec846a69f"},
+]
+
+[package.extras]
+test = ["coverage", "pytest", "pytest-cov"]
+
+[[package]]
+name = "urllib3"
+version = "1.26.15"
+description = "HTTP library with thread-safe connection pooling, file post, and more."
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*"
+files = [
+ {file = "urllib3-1.26.15-py2.py3-none-any.whl", hash = "sha256:aa751d169e23c7479ce47a0cb0da579e3ede798f994f5816a74e4f4500dcea42"},
+ {file = "urllib3-1.26.15.tar.gz", hash = "sha256:8a388717b9476f934a21484e8c8e61875ab60644d29b9b39e11e4b9dc1c6b305"},
+]
+
+[package.extras]
+brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"]
+secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"]
+socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]
+
+[[package]]
+name = "uvicorn"
+version = "0.21.1"
+description = "The lightning-fast ASGI server."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "uvicorn-0.21.1-py3-none-any.whl", hash = "sha256:e47cac98a6da10cd41e6fd036d472c6f58ede6c5dbee3dbee3ef7a100ed97742"},
+ {file = "uvicorn-0.21.1.tar.gz", hash = "sha256:0fac9cb342ba099e0d582966005f3fdba5b0290579fed4a6266dc702ca7bb032"},
+]
+
+[package.dependencies]
+click = ">=7.0"
+h11 = ">=0.8"
+
+[package.extras]
+standard = ["colorama (>=0.4)", "httptools (>=0.5.0)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1)", "watchfiles (>=0.13)", "websockets (>=10.4)"]
+
+[[package]]
+name = "websockets"
+version = "11.0"
+description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "websockets-11.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:269e3547877a6ca55f62acdf291b256b01bc3469535e892af36afd3e17de284a"},
+ {file = "websockets-11.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:70a4e03d2416c1dad16ccfab97c975192337c6481b07167c90221f1926893e1e"},
+ {file = "websockets-11.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4100dc8566ea3b9c0528dee73284be524ab053aebd77e3fc7439a90e0d57745b"},
+ {file = "websockets-11.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a8e0505c556b2b48078291b300d930f2fb8ba81d1e36379b637c060cfa561ae4"},
+ {file = "websockets-11.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d5bc68cec8269b4b52ab6d1d8690f56dba35f7bcb83a5487518406300f81cf1"},
+ {file = "websockets-11.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:067ac1f6153fc5218afc4563491dcbdb7384895cfc588a0afee962ca77fe0b58"},
+ {file = "websockets-11.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:910c84c0cfe4f872905b6ebe1866c579582070331abcb7a58621935eca95c18a"},
+ {file = "websockets-11.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:df0f7769450ca67a53182f917910e2b0b6dd3f8268f88cbfe54ee6be96812889"},
+ {file = "websockets-11.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fe23605f5c351773b6fb82fcf680549980d63e126fab5213ed875686c0cec25d"},
+ {file = "websockets-11.0-cp310-cp310-win32.whl", hash = "sha256:eb2e7cd654a05c36fccf726385c64a0e1027997d05ba0859f4d84c3d87db1623"},
+ {file = "websockets-11.0-cp310-cp310-win_amd64.whl", hash = "sha256:fb26c333751a1e3805ecc416a85dcfa3657676b185acd515fd6992f0cea898ef"},
+ {file = "websockets-11.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:b4a939963bae1055f14976ef2cf53e797c1997f8835ca9cf23060afc3e7d6718"},
+ {file = "websockets-11.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d7fc189fb632f8b31af8a5b32105919662a1bbaac20912320482415b7fed9c96"},
+ {file = "websockets-11.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6e3cfc890f1326c95fd7d4cc50f2bd496d3f014fb2da36b4525a10f226be565d"},
+ {file = "websockets-11.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9093f4c72c23ed5e475970c6a37e77c4f3a8856223421b9eb405b9fb2170629f"},
+ {file = "websockets-11.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5c56547f97bc76293522ccfcfbdde12442420f1a2c0218ff45d733a0030046df"},
+ {file = "websockets-11.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffb406b4449d4fa41ebc47faa3b9153a082f6fe0e4a0891f596a5ddb69fdeccd"},
+ {file = "websockets-11.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:8fad76be2c5e36fb3620ad507ac8004e9f358f5c4a9a1b756dbe7918d58884a0"},
+ {file = "websockets-11.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:17eb1988d320e2f1f20e4a3523f1068a0bb08318ab123962fc99fd90c90ab0d6"},
+ {file = "websockets-11.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9873288db9c673a2ba9c0f7b59a130576c50fc75f3336a706fff686009c41631"},
+ {file = "websockets-11.0-cp311-cp311-win32.whl", hash = "sha256:cf4ef6343478bf63098d3060fe06baf54d9c011b4b1b05e65e7957091cc87ef4"},
+ {file = "websockets-11.0-cp311-cp311-win_amd64.whl", hash = "sha256:713cd5fc1fd40436495c90a259274e1a4a39416c65447a256434941ddaf2f424"},
+ {file = "websockets-11.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:649ddddcbafd318d427b843425c92b1c035660c32507645c472c77356226cf07"},
+ {file = "websockets-11.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:564c53d84b95da527e96778f2cc873ef186038924abee601f9e8f12ebda9ad46"},
+ {file = "websockets-11.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:66d8df2db9801063e4093efe01458b1705c9f76382ad32617c005eeeb201a730"},
+ {file = "websockets-11.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbcd876ed166a82d250fcf012b729315489e9d653cb659c2e013c19daba2eb8f"},
+ {file = "websockets-11.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:cb00963b49d343210ebbdbe69a35004fbecad73da2158e83d481cd2a6716cf19"},
+ {file = "websockets-11.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:3d6f7c2f822e439f47f3492ee3e48c87c7d134d619a42c6dba1a318504501bfb"},
+ {file = "websockets-11.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:c4b2ae9c0f1acec5d2f8000eb496eebb9db19055a63716ee166cf0694b945982"},
+ {file = "websockets-11.0-cp37-cp37m-win32.whl", hash = "sha256:2b363e0f9b4247a0c7482e22c70ef39fb3259a14f7c0791c9200b93145f60b4b"},
+ {file = "websockets-11.0-cp37-cp37m-win_amd64.whl", hash = "sha256:3d372c3426f165a0a22be9250526b1cd12e3556e80b4b2afaa6fd6649c99b086"},
+ {file = "websockets-11.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:7eb914d37e0574246c63b995f9ca8d7bb7c2f2d53a8d4e9b00200ea856aa43c4"},
+ {file = "websockets-11.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a8717a5f3a00cde308e2971064bd5fcb14e0cc08f8234b97f4eb92b505ea95d4"},
+ {file = "websockets-11.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a17151722349c4af221616cca2f28e79237738bfbc53e7155240e2a8a7cc02f4"},
+ {file = "websockets-11.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4b60686d9b2ba500847c045595eb5887f4cca7102b4615773b6f490aa611107"},
+ {file = "websockets-11.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eededf25ef6b838e650eeeb1511804b82e9ece566fe6cdc11aa909d2992dcdaf"},
+ {file = "websockets-11.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7587f339f016f0e1b0b6f013e98c83e382c5929774f2b8234c1b2d3f01dd1339"},
+ {file = "websockets-11.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:26369646078e16e7364729ed3e3b1a4315ab1a22ca3c48b4e25dea48fcc1a881"},
+ {file = "websockets-11.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:92f51fbe87381ff76c1791dd44d599152b400f1adfa8453613f1ff6857200ee7"},
+ {file = "websockets-11.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b5bb04a77c326d727c0b986c37a76147916d79db95629267307d1be47788a020"},
+ {file = "websockets-11.0-cp38-cp38-win32.whl", hash = "sha256:50ac95111009178e58b9a25aa51702cdaad4ed843b98eb9b58d69b323ccb224e"},
+ {file = "websockets-11.0-cp38-cp38-win_amd64.whl", hash = "sha256:7a4076cd6a3678def988668fc4b1779da598e1e5c9fa26319af5499f00c23e1c"},
+ {file = "websockets-11.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:26559e8a385f71ce2a58f3bb1d005ddd3db7d3328ddbfbff1034f4039d46c4ec"},
+ {file = "websockets-11.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f10d283697dec8d91fa983eb8e217c9cac27bc1032057768129b89780009318e"},
+ {file = "websockets-11.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f74efe229e078bf5595e207e9a7b135ff37a10858263ed86be66003c4c98d47b"},
+ {file = "websockets-11.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f048c88bfcc5bf0e038630cfb970b2c479f913819fd9653db920eef3b105a2b1"},
+ {file = "websockets-11.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ceab6c1827fa14ad10c6b0806941d577b21d17012a3648787ac2b946182285b4"},
+ {file = "websockets-11.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:817227e23897808c4bb621da7f57b1f83ee18345bdc44f5c9c1bbd3a094a73f6"},
+ {file = "websockets-11.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6fdcc17348d8697c1f88bba38680cca94131f2a9db727a61fe067284e1e59e8d"},
+ {file = "websockets-11.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:8b21ad915b747075f29fe2fa5590111d98988d6730d2cd212acfe52bbe6a2545"},
+ {file = "websockets-11.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9ae401ad881d5329062b9b2d8160f0b2a147430974f2a3f32e6cedadddc2d634"},
+ {file = "websockets-11.0-cp39-cp39-win32.whl", hash = "sha256:ee84660927293f449760badfe010e06409edb99d72e1910e2e404d2eeff6990f"},
+ {file = "websockets-11.0-cp39-cp39-win_amd64.whl", hash = "sha256:2b4e704a9dac1faf4994e63dceae9e2f504913ff0f865bd3e5a097cbd5874a8f"},
+ {file = "websockets-11.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:c2d6429c9bcd70ed8126a1f9ca6069e4ab95c96a3cc141fc84ce02917f7b45ec"},
+ {file = "websockets-11.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ff3f67567862a853af2c0db362ede8249be50c576cd9eaf380736c6fce840414"},
+ {file = "websockets-11.0-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b86ce3d17bcc4b6556b2a2e1277beed74ff6b1de23f002f9763e9875e8ba361d"},
+ {file = "websockets-11.0-pp37-pypy37_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:59c4b458cc09ea6470a5eee98b06ccaa84f2a193b92e337a879612614df0f8eb"},
+ {file = "websockets-11.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:e5e21aeb350906dfcff321bfa6c60541a1d05cadb6d431ecf9d6376365be60d4"},
+ {file = "websockets-11.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:8731189f6985b239a6c34a353c36b45cb3c9fed1c287fbcf7f61df9e4a7ac392"},
+ {file = "websockets-11.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee3aa7660ae0d3a4e47517bb5a545b9a02ff7b9632a640f617e755990ef65f66"},
+ {file = "websockets-11.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:027aabfd053715ce0f5f6fc5107e5093e05b3c94fa555fb65375aa09cb845a66"},
+ {file = "websockets-11.0-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1e8c729aa179ef105f096cad12070aef230be9e2ae509eb47c3cdd9257213c14"},
+ {file = "websockets-11.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:ff607c6e16409ac83f1ae59cc96167fead577bc652e8dff48f7458ce082372ff"},
+ {file = "websockets-11.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ca3d7c08f472c40f28bb9fb99610d28dc97137612ab5308f80dac7ce79f87fe1"},
+ {file = "websockets-11.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3f179deef8288dd8ec227d644ba5b711609093b634008643561f6d9c74938c3c"},
+ {file = "websockets-11.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:269d33f1573a31130da9afd63a2558f60131522d3fe86d0aa2d1612ad065d27c"},
+ {file = "websockets-11.0-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb0b306c1180d0268341447982b415aca7c072c84b4a59688dbc1d7d2ec25df9"},
+ {file = "websockets-11.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:6ae209f11e433575e17d5d6e61a2f77ceda53b4bce07df55af614aa1d618e2e7"},
+ {file = "websockets-11.0-py3-none-any.whl", hash = "sha256:6ebd971b9b2c0aaa2188c472016e4dad93108b3db425a33ad584bdc41b22026d"},
+ {file = "websockets-11.0.tar.gz", hash = "sha256:19d638549c470f5fd3b67b52b2a08f2edba5a04e05323a706937e35f5f19d056"},
+]
+
+[[package]]
+name = "werkzeug"
+version = "2.2.3"
+description = "The comprehensive WSGI web application library."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "Werkzeug-2.2.3-py3-none-any.whl", hash = "sha256:56433961bc1f12533306c624f3be5e744389ac61d722175d543e1751285da612"},
+ {file = "Werkzeug-2.2.3.tar.gz", hash = "sha256:2e1ccc9417d4da358b9de6f174e3ac094391ea1d4fbef2d667865d819dfd0afe"},
+]
+
+[package.dependencies]
+MarkupSafe = ">=2.1.1"
+
+[package.extras]
+watchdog = ["watchdog"]
+
+[[package]]
+name = "wheel"
+version = "0.40.0"
+description = "A built-package format for Python"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "wheel-0.40.0-py3-none-any.whl", hash = "sha256:d236b20e7cb522daf2390fa84c55eea81c5c30190f90f29ae2ca1ad8355bf247"},
+ {file = "wheel-0.40.0.tar.gz", hash = "sha256:cd1196f3faee2b31968d626e1731c94f99cbdb67cf5a46e4f5656cbee7738873"},
+]
+
+[package.extras]
+test = ["pytest (>=6.0.0)"]
+
+[[package]]
+name = "yarl"
+version = "1.8.2"
+description = "Yet another URL library"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "yarl-1.8.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:bb81f753c815f6b8e2ddd2eef3c855cf7da193b82396ac013c661aaa6cc6b0a5"},
+ {file = "yarl-1.8.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:47d49ac96156f0928f002e2424299b2c91d9db73e08c4cd6742923a086f1c863"},
+ {file = "yarl-1.8.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3fc056e35fa6fba63248d93ff6e672c096f95f7836938241ebc8260e062832fe"},
+ {file = "yarl-1.8.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:58a3c13d1c3005dbbac5c9f0d3210b60220a65a999b1833aa46bd6677c69b08e"},
+ {file = "yarl-1.8.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:10b08293cda921157f1e7c2790999d903b3fd28cd5c208cf8826b3b508026996"},
+ {file = "yarl-1.8.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:de986979bbd87272fe557e0a8fcb66fd40ae2ddfe28a8b1ce4eae22681728fef"},
+ {file = "yarl-1.8.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c4fcfa71e2c6a3cb568cf81aadc12768b9995323186a10827beccf5fa23d4f8"},
+ {file = "yarl-1.8.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae4d7ff1049f36accde9e1ef7301912a751e5bae0a9d142459646114c70ecba6"},
+ {file = "yarl-1.8.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:bf071f797aec5b96abfc735ab97da9fd8f8768b43ce2abd85356a3127909d146"},
+ {file = "yarl-1.8.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:74dece2bfc60f0f70907c34b857ee98f2c6dd0f75185db133770cd67300d505f"},
+ {file = "yarl-1.8.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:df60a94d332158b444301c7f569659c926168e4d4aad2cfbf4bce0e8fb8be826"},
+ {file = "yarl-1.8.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:63243b21c6e28ec2375f932a10ce7eda65139b5b854c0f6b82ed945ba526bff3"},
+ {file = "yarl-1.8.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:cfa2bbca929aa742b5084fd4663dd4b87c191c844326fcb21c3afd2d11497f80"},
+ {file = "yarl-1.8.2-cp310-cp310-win32.whl", hash = "sha256:b05df9ea7496df11b710081bd90ecc3a3db6adb4fee36f6a411e7bc91a18aa42"},
+ {file = "yarl-1.8.2-cp310-cp310-win_amd64.whl", hash = "sha256:24ad1d10c9db1953291f56b5fe76203977f1ed05f82d09ec97acb623a7976574"},
+ {file = "yarl-1.8.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2a1fca9588f360036242f379bfea2b8b44cae2721859b1c56d033adfd5893634"},
+ {file = "yarl-1.8.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f37db05c6051eff17bc832914fe46869f8849de5b92dc4a3466cd63095d23dfd"},
+ {file = "yarl-1.8.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:77e913b846a6b9c5f767b14dc1e759e5aff05502fe73079f6f4176359d832581"},
+ {file = "yarl-1.8.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0978f29222e649c351b173da2b9b4665ad1feb8d1daa9d971eb90df08702668a"},
+ {file = "yarl-1.8.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:388a45dc77198b2460eac0aca1efd6a7c09e976ee768b0d5109173e521a19daf"},
+ {file = "yarl-1.8.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2305517e332a862ef75be8fad3606ea10108662bc6fe08509d5ca99503ac2aee"},
+ {file = "yarl-1.8.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42430ff511571940d51e75cf42f1e4dbdded477e71c1b7a17f4da76c1da8ea76"},
+ {file = "yarl-1.8.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3150078118f62371375e1e69b13b48288e44f6691c1069340081c3fd12c94d5b"},
+ {file = "yarl-1.8.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c15163b6125db87c8f53c98baa5e785782078fbd2dbeaa04c6141935eb6dab7a"},
+ {file = "yarl-1.8.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4d04acba75c72e6eb90745447d69f84e6c9056390f7a9724605ca9c56b4afcc6"},
+ {file = "yarl-1.8.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:e7fd20d6576c10306dea2d6a5765f46f0ac5d6f53436217913e952d19237efc4"},
+ {file = "yarl-1.8.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:75c16b2a900b3536dfc7014905a128a2bea8fb01f9ee26d2d7d8db0a08e7cb2c"},
+ {file = "yarl-1.8.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6d88056a04860a98341a0cf53e950e3ac9f4e51d1b6f61a53b0609df342cc8b2"},
+ {file = "yarl-1.8.2-cp311-cp311-win32.whl", hash = "sha256:fb742dcdd5eec9f26b61224c23baea46c9055cf16f62475e11b9b15dfd5c117b"},
+ {file = "yarl-1.8.2-cp311-cp311-win_amd64.whl", hash = "sha256:8c46d3d89902c393a1d1e243ac847e0442d0196bbd81aecc94fcebbc2fd5857c"},
+ {file = "yarl-1.8.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:ceff9722e0df2e0a9e8a79c610842004fa54e5b309fe6d218e47cd52f791d7ef"},
+ {file = "yarl-1.8.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3f6b4aca43b602ba0f1459de647af954769919c4714706be36af670a5f44c9c1"},
+ {file = "yarl-1.8.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1684a9bd9077e922300ecd48003ddae7a7474e0412bea38d4631443a91d61077"},
+ {file = "yarl-1.8.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ebb78745273e51b9832ef90c0898501006670d6e059f2cdb0e999494eb1450c2"},
+ {file = "yarl-1.8.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3adeef150d528ded2a8e734ebf9ae2e658f4c49bf413f5f157a470e17a4a2e89"},
+ {file = "yarl-1.8.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57a7c87927a468e5a1dc60c17caf9597161d66457a34273ab1760219953f7f4c"},
+ {file = "yarl-1.8.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:efff27bd8cbe1f9bd127e7894942ccc20c857aa8b5a0327874f30201e5ce83d0"},
+ {file = "yarl-1.8.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:a783cd344113cb88c5ff7ca32f1f16532a6f2142185147822187913eb989f739"},
+ {file = "yarl-1.8.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:705227dccbe96ab02c7cb2c43e1228e2826e7ead880bb19ec94ef279e9555b5b"},
+ {file = "yarl-1.8.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:34c09b43bd538bf6c4b891ecce94b6fa4f1f10663a8d4ca589a079a5018f6ed7"},
+ {file = "yarl-1.8.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:a48f4f7fea9a51098b02209d90297ac324241bf37ff6be6d2b0149ab2bd51b37"},
+ {file = "yarl-1.8.2-cp37-cp37m-win32.whl", hash = "sha256:0414fd91ce0b763d4eadb4456795b307a71524dbacd015c657bb2a39db2eab89"},
+ {file = "yarl-1.8.2-cp37-cp37m-win_amd64.whl", hash = "sha256:d881d152ae0007809c2c02e22aa534e702f12071e6b285e90945aa3c376463c5"},
+ {file = "yarl-1.8.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5df5e3d04101c1e5c3b1d69710b0574171cc02fddc4b23d1b2813e75f35a30b1"},
+ {file = "yarl-1.8.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7a66c506ec67eb3159eea5096acd05f5e788ceec7b96087d30c7d2865a243918"},
+ {file = "yarl-1.8.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2b4fa2606adf392051d990c3b3877d768771adc3faf2e117b9de7eb977741229"},
+ {file = "yarl-1.8.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e21fb44e1eff06dd6ef971d4bdc611807d6bd3691223d9c01a18cec3677939e"},
+ {file = "yarl-1.8.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:93202666046d9edadfe9f2e7bf5e0782ea0d497b6d63da322e541665d65a044e"},
+ {file = "yarl-1.8.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fc77086ce244453e074e445104f0ecb27530d6fd3a46698e33f6c38951d5a0f1"},
+ {file = "yarl-1.8.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64dd68a92cab699a233641f5929a40f02a4ede8c009068ca8aa1fe87b8c20ae3"},
+ {file = "yarl-1.8.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1b372aad2b5f81db66ee7ec085cbad72c4da660d994e8e590c997e9b01e44901"},
+ {file = "yarl-1.8.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:e6f3515aafe0209dd17fb9bdd3b4e892963370b3de781f53e1746a521fb39fc0"},
+ {file = "yarl-1.8.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:dfef7350ee369197106805e193d420b75467b6cceac646ea5ed3049fcc950a05"},
+ {file = "yarl-1.8.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:728be34f70a190566d20aa13dc1f01dc44b6aa74580e10a3fb159691bc76909d"},
+ {file = "yarl-1.8.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:ff205b58dc2929191f68162633d5e10e8044398d7a45265f90a0f1d51f85f72c"},
+ {file = "yarl-1.8.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:baf211dcad448a87a0d9047dc8282d7de59473ade7d7fdf22150b1d23859f946"},
+ {file = "yarl-1.8.2-cp38-cp38-win32.whl", hash = "sha256:272b4f1599f1b621bf2aabe4e5b54f39a933971f4e7c9aa311d6d7dc06965165"},
+ {file = "yarl-1.8.2-cp38-cp38-win_amd64.whl", hash = "sha256:326dd1d3caf910cd26a26ccbfb84c03b608ba32499b5d6eeb09252c920bcbe4f"},
+ {file = "yarl-1.8.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:f8ca8ad414c85bbc50f49c0a106f951613dfa5f948ab69c10ce9b128d368baf8"},
+ {file = "yarl-1.8.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:418857f837347e8aaef682679f41e36c24250097f9e2f315d39bae3a99a34cbf"},
+ {file = "yarl-1.8.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ae0eec05ab49e91a78700761777f284c2df119376e391db42c38ab46fd662b77"},
+ {file = "yarl-1.8.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:009a028127e0a1755c38b03244c0bea9d5565630db9c4cf9572496e947137a87"},
+ {file = "yarl-1.8.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3edac5d74bb3209c418805bda77f973117836e1de7c000e9755e572c1f7850d0"},
+ {file = "yarl-1.8.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:da65c3f263729e47351261351b8679c6429151ef9649bba08ef2528ff2c423b2"},
+ {file = "yarl-1.8.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ef8fb25e52663a1c85d608f6dd72e19bd390e2ecaf29c17fb08f730226e3a08"},
+ {file = "yarl-1.8.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bcd7bb1e5c45274af9a1dd7494d3c52b2be5e6bd8d7e49c612705fd45420b12d"},
+ {file = "yarl-1.8.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:44ceac0450e648de86da8e42674f9b7077d763ea80c8ceb9d1c3e41f0f0a9951"},
+ {file = "yarl-1.8.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:97209cc91189b48e7cfe777237c04af8e7cc51eb369004e061809bcdf4e55220"},
+ {file = "yarl-1.8.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:48dd18adcf98ea9cd721a25313aef49d70d413a999d7d89df44f469edfb38a06"},
+ {file = "yarl-1.8.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:e59399dda559688461762800d7fb34d9e8a6a7444fd76ec33220a926c8be1516"},
+ {file = "yarl-1.8.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d617c241c8c3ad5c4e78a08429fa49e4b04bedfc507b34b4d8dceb83b4af3588"},
+ {file = "yarl-1.8.2-cp39-cp39-win32.whl", hash = "sha256:cb6d48d80a41f68de41212f3dfd1a9d9898d7841c8f7ce6696cf2fd9cb57ef83"},
+ {file = "yarl-1.8.2-cp39-cp39-win_amd64.whl", hash = "sha256:6604711362f2dbf7160df21c416f81fac0de6dbcf0b5445a2ef25478ecc4c778"},
+ {file = "yarl-1.8.2.tar.gz", hash = "sha256:49d43402c6e3013ad0978602bf6bf5328535c48d192304b91b97a3c6790b1562"},
+]
+
+[package.dependencies]
+idna = ">=2.0"
+multidict = ">=4.0"
+
+[[package]]
+name = "zipp"
+version = "3.15.0"
+description = "Backport of pathlib-compatible object wrapper for zip files"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "zipp-3.15.0-py3-none-any.whl", hash = "sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556"},
+ {file = "zipp-3.15.0.tar.gz", hash = "sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b"},
+]
+
+[package.extras]
+docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
+testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"]
+
+[metadata]
+lock-version = "2.0"
+python-versions = "^3.8"
+content-hash = "400ac506bf8f14333fa2e073fd39cc765a1941aab895d5ed6f9dd264146fc726"
diff --git a/pretrained/.gitignore b/pretrained/.gitignore
new file mode 100644
index 000000000..d6b7ef32c
--- /dev/null
+++ b/pretrained/.gitignore
@@ -0,0 +1,2 @@
+*
+!.gitignore
diff --git a/pretrained_v2/.gitignore b/pretrained_v2/.gitignore
new file mode 100644
index 000000000..d6b7ef32c
--- /dev/null
+++ b/pretrained_v2/.gitignore
@@ -0,0 +1,2 @@
+*
+!.gitignore
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 000000000..5b1525835
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,62 @@
+[tool.poetry]
+name = "rvc-beta"
+version = "0.1.0"
+description = ""
+authors = ["lj1995"]
+license = "MIT"
+
+[tool.poetry.dependencies]
+python = "^3.8"
+torch = "^2.0.0"
+torchaudio = "^2.0.1"
+Cython = "^0.29.34"
+gradio = "^3.34.0"
+future = "^0.18.3"
+pydub = "^0.25.1"
+soundfile = "^0.12.1"
+ffmpeg-python = "^0.2.0"
+tensorboardX = "^2.6"
+functorch = "^2.0.0"
+fairseq = "^0.12.2"
+faiss-cpu = "^1.7.2"
+Jinja2 = "^3.1.2"
+json5 = "^0.9.11"
+librosa = "0.9.2"
+llvmlite = "0.39.0"
+Markdown = "^3.4.3"
+matplotlib = "^3.7.1"
+matplotlib-inline = "^0.1.6"
+numba = "0.56.4"
+numpy = "1.23.5"
+scipy = "1.9.3"
+praat-parselmouth = "^0.4.3"
+Pillow = "9.3.0"
+pyworld = "^0.3.2"
+resampy = "^0.4.2"
+scikit-learn = "^1.2.2"
+starlette = "^0.27.0"
+tensorboard = "^2.12.1"
+tensorboard-data-server = "^0.7.0"
+tensorboard-plugin-wit = "^1.8.1"
+torchgen = "^0.0.1"
+tqdm = "^4.65.0"
+tornado = "^6.3"
+Werkzeug = "^2.2.3"
+uc-micro-py = "^1.0.1"
+sympy = "^1.11.1"
+tabulate = "^0.9.0"
+PyYAML = "^6.0"
+pyasn1 = "^0.4.8"
+pyasn1-modules = "^0.2.8"
+fsspec = "^2023.3.0"
+absl-py = "^1.4.0"
+audioread = "^3.0.0"
+uvicorn = "^0.21.1"
+colorama = "^0.4.6"
+torchcrepe = "0.0.20"
+
+[tool.poetry.dev-dependencies]
+
+[build-system]
+requires = ["poetry-core>=1.0.0"]
+build-backend = "poetry.core.masonry.api"
diff --git a/requirements-gpu.txt b/requirements-gpu.txt
new file mode 100644
index 000000000..6042fb6e5
--- /dev/null
+++ b/requirements-gpu.txt
@@ -0,0 +1,48 @@
+tornado>=6.1
+setuptools
+pydantic
+wheel
+google-auth-oauthlib
+pedalboard
+pydub==0.25.1
+httpx==0.23.0
+fairseq==0.12.2
+tensorboardX
+faiss_cpu==1.7.3
+ffmpeg_python==0.2.0
+ffmpy==0.3.1
+websockets>=10.0
+gradio==3.34.0
+librosa==0.9.1
+elevenlabs
+gTTS==2.3.2
+wget
+matplotlib==3.7.2
+mega.py==1.0.8
+gdown
+noisereduce==2.0.1
+unidecode
+numba==0.57.1
+numpy==1.23.5
+onnxruntime
+onnxruntime_gpu==1.15.1
+opencv_python==4.8.0.74
+opencv_python_headless==4.8.0.74
+pandas==2.0.3
+praat-parselmouth==0.4.2
+PySimpleGUI==4.60.5
+pyworld==0.3.3
+requests==2.31.0
+resampy==0.4.2
+scikit_learn==1.3.0
+scipy==1.11.1
+yt_dlp==2023.7.6
+pyngrok==4.1.12
+sounddevice==0.4.6
+soundfile==0.12.1
+tb_nightly==2.14.0a20230803
+tensorboard==2.13.0
+torchcrepe
+torch_directml==0.2.0.dev230426
+torchgen>=0.0.1
+tqdm==4.65.0
diff --git a/requirements-realtime-vc.txt b/requirements-realtime-vc.txt
new file mode 100644
index 000000000..9d6935bfe
--- /dev/null
+++ b/requirements-realtime-vc.txt
@@ -0,0 +1,29 @@
+# 1. Install torch from pytorch.org first:
+#    torch 2.0 with CUDA 11.8:
+#    pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118
+#    torch 1.11.0 with CUDA 11.3:
+#    pip install torch==1.11.0+cu113 torchvision==0.12.0+cu113 torchaudio==0.11.0 --extra-index-url https://download.pytorch.org/whl/cu113
+einops
+fairseq
+flask
+flask_cors
+gin
+gin_config
+librosa
+local_attention
+matplotlib
+praat-parselmouth
+pyworld
+PyYAML
+resampy
+scikit_learn
+scipy
+SoundFile
+tensorboard
+tqdm
+wave
+PySimpleGUI
+sounddevice
+gradio
+noisereduce
+torchcrepe==0.0.20
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 000000000..4be5902bb
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,51 @@
+tornado>=6.1
+setuptools
+pydantic
+wheel
+google-auth-oauthlib
+pedalboard
+pydub==0.25.1
+httpx==0.23.0
+fairseq==0.12.2
+tensorboardX
+faiss_cpu==1.7.3
+ffmpeg_python==0.2.0
+ffmpy==0.3.1
+websockets>=10.0
+gradio==3.34.0
+librosa==0.9.1
+elevenlabs
+gTTS==2.3.2
+wget
+matplotlib==3.7.2
+mega.py==1.0.8
+gdown
+noisereduce==2.0.1
+unidecode
+numba==0.57.1
+numpy==1.23.5
+onnxruntime
+onnxruntime_gpu==1.15.1
+opencv_python==4.8.0.74
+opencv_python_headless==4.8.0.74
+pandas==2.0.3
+praat-parselmouth==0.4.2
+PySimpleGUI==4.60.5
+pyworld==0.3.3
+requests==2.31.0
+resampy==0.4.2
+scikit_learn==1.3.0
+scipy==1.11.1
+yt_dlp==2023.7.6
+pyngrok==4.1.12
+sounddevice==0.4.6
+soundfile==0.12.1
+tensorboard==2.13.0
+tb_nightly==2.14.0a20230803
+torch==2.0.0
+torchcrepe==0.0.21
+torch_directml==0.2.0.dev230426
+torchaudio==2.0.1
+torchvision==0.15.1
+torchgen>=0.0.1
+tqdm==4.65.0
diff --git a/rmvpe.py b/rmvpe.py
new file mode 100644
index 000000000..8ad0f4e2f
--- /dev/null
+++ b/rmvpe.py
@@ -0,0 +1,449 @@
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from librosa.filters import mel
+
+# RMVPE output geometry: 128 mel bins in, 360 pitch-class bins (20 cents each) out.
+N_MELS = 128
+N_CLASS = 360
+
+
+class BiGRU(nn.Module):
+ def __init__(self, input_features, hidden_features, num_layers):
+ super(BiGRU, self).__init__()
+ self.gru = nn.GRU(
+ input_features,
+ hidden_features,
+ num_layers=num_layers,
+ batch_first=True,
+ bidirectional=True,
+ )
+
+ def forward(self, x):
+ return self.gru(x)[0]
+
+
+class ConvBlockRes(nn.Module):
+ def __init__(self, in_channels, out_channels, momentum=0.01):
+ super(ConvBlockRes, self).__init__()
+ self.conv = nn.Sequential(
+ nn.Conv2d(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=(3, 3),
+ stride=(1, 1),
+ padding=(1, 1),
+ bias=False,
+ ),
+ nn.BatchNorm2d(out_channels, momentum=momentum),
+ nn.ReLU(),
+ nn.Conv2d(
+ in_channels=out_channels,
+ out_channels=out_channels,
+ kernel_size=(3, 3),
+ stride=(1, 1),
+ padding=(1, 1),
+ bias=False,
+ ),
+ nn.BatchNorm2d(out_channels, momentum=momentum),
+ nn.ReLU(),
+ )
+ if in_channels != out_channels:
+ self.shortcut = nn.Conv2d(in_channels, out_channels, (1, 1))
+ self.is_shortcut = True
+ else:
+ self.is_shortcut = False
+
+ def forward(self, x):
+ if self.is_shortcut:
+ return self.conv(x) + self.shortcut(x)
+ else:
+ return self.conv(x) + x
+
+
+class Encoder(nn.Module):
+ def __init__(
+ self,
+ in_channels,
+ in_size,
+ n_encoders,
+ kernel_size,
+ n_blocks,
+ out_channels=16,
+ momentum=0.01,
+ ):
+ super(Encoder, self).__init__()
+ self.n_encoders = n_encoders
+ self.bn = nn.BatchNorm2d(in_channels, momentum=momentum)
+ self.layers = nn.ModuleList()
+ self.latent_channels = []
+ for i in range(self.n_encoders):
+ self.layers.append(
+ ResEncoderBlock(
+ in_channels, out_channels, kernel_size, n_blocks, momentum=momentum
+ )
+ )
+ self.latent_channels.append([out_channels, in_size])
+ in_channels = out_channels
+ out_channels *= 2
+ in_size //= 2
+ self.out_size = in_size
+ self.out_channel = out_channels
+
+ def forward(self, x):
+ concat_tensors = []
+ x = self.bn(x)
+ for i in range(self.n_encoders):
+ _, x = self.layers[i](x)
+ concat_tensors.append(_)
+ return x, concat_tensors
+
+
+class ResEncoderBlock(nn.Module):
+ def __init__(
+ self, in_channels, out_channels, kernel_size, n_blocks=1, momentum=0.01
+ ):
+ super(ResEncoderBlock, self).__init__()
+ self.n_blocks = n_blocks
+ self.conv = nn.ModuleList()
+ self.conv.append(ConvBlockRes(in_channels, out_channels, momentum))
+ for i in range(n_blocks - 1):
+ self.conv.append(ConvBlockRes(out_channels, out_channels, momentum))
+ self.kernel_size = kernel_size
+ if self.kernel_size is not None:
+ self.pool = nn.AvgPool2d(kernel_size=kernel_size)
+
+ def forward(self, x):
+ for i in range(self.n_blocks):
+ x = self.conv[i](x)
+ if self.kernel_size is not None:
+ return x, self.pool(x)
+ else:
+ return x
+
+
+class Intermediate(nn.Module):
+ def __init__(self, in_channels, out_channels, n_inters, n_blocks, momentum=0.01):
+ super(Intermediate, self).__init__()
+ self.n_inters = n_inters
+ self.layers = nn.ModuleList()
+ self.layers.append(
+ ResEncoderBlock(in_channels, out_channels, None, n_blocks, momentum)
+ )
+ for i in range(self.n_inters - 1):
+ self.layers.append(
+ ResEncoderBlock(out_channels, out_channels, None, n_blocks, momentum)
+ )
+
+ def forward(self, x):
+ for i in range(self.n_inters):
+ x = self.layers[i](x)
+ return x
+
+
+class ResDecoderBlock(nn.Module):
+ def __init__(self, in_channels, out_channels, stride, n_blocks=1, momentum=0.01):
+ super(ResDecoderBlock, self).__init__()
+ out_padding = (0, 1) if stride == (1, 2) else (1, 1)
+ self.n_blocks = n_blocks
+ self.conv1 = nn.Sequential(
+ nn.ConvTranspose2d(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=(3, 3),
+ stride=stride,
+ padding=(1, 1),
+ output_padding=out_padding,
+ bias=False,
+ ),
+ nn.BatchNorm2d(out_channels, momentum=momentum),
+ nn.ReLU(),
+ )
+ self.conv2 = nn.ModuleList()
+ self.conv2.append(ConvBlockRes(out_channels * 2, out_channels, momentum))
+ for i in range(n_blocks - 1):
+ self.conv2.append(ConvBlockRes(out_channels, out_channels, momentum))
+
+ def forward(self, x, concat_tensor):
+ x = self.conv1(x)
+ x = torch.cat((x, concat_tensor), dim=1)
+ for i in range(self.n_blocks):
+ x = self.conv2[i](x)
+ return x
+
+
+class Decoder(nn.Module):
+ def __init__(self, in_channels, n_decoders, stride, n_blocks, momentum=0.01):
+ super(Decoder, self).__init__()
+ self.layers = nn.ModuleList()
+ self.n_decoders = n_decoders
+ for i in range(self.n_decoders):
+ out_channels = in_channels // 2
+ self.layers.append(
+ ResDecoderBlock(in_channels, out_channels, stride, n_blocks, momentum)
+ )
+ in_channels = out_channels
+
+ def forward(self, x, concat_tensors):
+ for i in range(self.n_decoders):
+ x = self.layers[i](x, concat_tensors[-1 - i])
+ return x
+
+
+class DeepUnet(nn.Module):
+ def __init__(
+ self,
+ kernel_size,
+ n_blocks,
+ en_de_layers=5,
+ inter_layers=4,
+ in_channels=1,
+ en_out_channels=16,
+ ):
+ super(DeepUnet, self).__init__()
+ self.encoder = Encoder(
+ in_channels, 128, en_de_layers, kernel_size, n_blocks, en_out_channels
+ )
+ self.intermediate = Intermediate(
+ self.encoder.out_channel // 2,
+ self.encoder.out_channel,
+ inter_layers,
+ n_blocks,
+ )
+ self.decoder = Decoder(
+ self.encoder.out_channel, en_de_layers, kernel_size, n_blocks
+ )
+
+ def forward(self, x):
+ x, concat_tensors = self.encoder(x)
+ x = self.intermediate(x)
+ x = self.decoder(x, concat_tensors)
+ return x
+
+
+class E2E(nn.Module):
+ def __init__(
+ self,
+ n_blocks,
+ n_gru,
+ kernel_size,
+ en_de_layers=5,
+ inter_layers=4,
+ in_channels=1,
+ en_out_channels=16,
+ ):
+ super(E2E, self).__init__()
+ self.unet = DeepUnet(
+ kernel_size,
+ n_blocks,
+ en_de_layers,
+ inter_layers,
+ in_channels,
+ en_out_channels,
+ )
+ self.cnn = nn.Conv2d(en_out_channels, 3, (3, 3), padding=(1, 1))
+ if n_gru:
+ self.fc = nn.Sequential(
+ BiGRU(3 * 128, 256, n_gru),
+ nn.Linear(512, 360),
+ nn.Dropout(0.25),
+ nn.Sigmoid(),
+ )
+ else:
+ self.fc = nn.Sequential(
+ nn.Linear(3 * N_MELS, N_CLASS), nn.Dropout(0.25), nn.Sigmoid()
+ )
+
+ def forward(self, mel):
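+ # mel: (batch, n_mels, time), reshaped into a (batch, 1, time, n_mels) image for
+ # the U-Net; the head then maps each frame to 360 pitch-bin probabilities.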
+ mel = mel.transpose(-1, -2).unsqueeze(1)
+ x = self.cnn(self.unet(mel)).transpose(1, 2).flatten(-2)
+ x = self.fc(x)
+ return x
+
+
+class MelSpectrogram(torch.nn.Module):
+ def __init__(
+ self,
+ is_half,
+ n_mel_channels,
+ sampling_rate,
+ win_length,
+ hop_length,
+ n_fft=None,
+ mel_fmin=0,
+ mel_fmax=None,
+ clamp=1e-5,
+ ):
+ super().__init__()
+ n_fft = win_length if n_fft is None else n_fft
+ self.hann_window = {}
+ mel_basis = mel(
+ sr=sampling_rate,
+ n_fft=n_fft,
+ n_mels=n_mel_channels,
+ fmin=mel_fmin,
+ fmax=mel_fmax,
+ htk=True,
+ )
+ mel_basis = torch.from_numpy(mel_basis).float()
+ self.register_buffer("mel_basis", mel_basis)
+ self.n_fft = n_fft # n_fft already defaults to win_length above
+ self.hop_length = hop_length
+ self.win_length = win_length
+ self.sampling_rate = sampling_rate
+ self.n_mel_channels = n_mel_channels
+ self.clamp = clamp
+ self.is_half = is_half
+
+ def forward(self, audio, keyshift=0, speed=1, center=True):
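+ # keyshift scales n_fft and win_length by 2^(keyshift/12), i.e. a pitch shift of
+ # keyshift semitones in the analysis; speed scales the hop length (time-stretch).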
+ factor = 2 ** (keyshift / 12)
+ n_fft_new = int(np.round(self.n_fft * factor))
+ win_length_new = int(np.round(self.win_length * factor))
+ hop_length_new = int(np.round(self.hop_length * speed))
+ keyshift_key = str(keyshift) + "_" + str(audio.device)
+ if keyshift_key not in self.hann_window:
+ self.hann_window[keyshift_key] = torch.hann_window(win_length_new).to(
+ audio.device
+ )
+ fft = torch.stft(
+ audio,
+ n_fft=n_fft_new,
+ hop_length=hop_length_new,
+ win_length=win_length_new,
+ window=self.hann_window[keyshift_key],
+ center=center,
+ return_complex=True,
+ )
+ magnitude = torch.sqrt(fft.real.pow(2) + fft.imag.pow(2))
+ if keyshift != 0:
+ size = self.n_fft // 2 + 1
+ resize = magnitude.size(1)
+ if resize < size:
+ magnitude = F.pad(magnitude, (0, 0, 0, size - resize))
+ magnitude = magnitude[:, :size, :] * self.win_length / win_length_new
+ mel_output = torch.matmul(self.mel_basis, magnitude)
+ if self.is_half:
+ mel_output = mel_output.half()
+ log_mel_spec = torch.log(torch.clamp(mel_output, min=self.clamp))
+ return log_mel_spec
+
+
+class RMVPE:
+ def __init__(self, model_path, is_half, device=None):
+ self.resample_kernel = {}
+ model = E2E(4, 1, (2, 2))
+ ckpt = torch.load(model_path, map_location="cpu")
+ model.load_state_dict(ckpt)
+ model.eval()
+ if is_half:
+ model = model.half()
+ self.model = model
+ self.is_half = is_half
+ if device is None:
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ self.device = device
+ self.mel_extractor = MelSpectrogram(
+ is_half, 128, 16000, 1024, 160, None, 30, 8000
+ ).to(device)
+ self.model = self.model.to(device)
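+ # The 360 bins are spaced 20 cents apart; bin 0 sits 1997.38 cents above the
+ # 10 Hz reference (about 31.7 Hz). Padding by 4 on each side supports the
+ # 9-bin local average in to_local_average_cents.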
+ cents_mapping = 20 * np.arange(360) + 1997.3794084376191
+ self.cents_mapping = np.pad(cents_mapping, (4, 4)) # 368
+
+ def mel2hidden(self, mel):
+ with torch.no_grad():
+ n_frames = mel.shape[-1]
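+ # Pad the time axis up to a multiple of 32 frames so the 5-level U-Net
+ # downsampling (2^5 = 32) divides evenly, then trim back to n_frames.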
+ mel = F.pad(
+ mel, (0, 32 * ((n_frames - 1) // 32 + 1) - n_frames), mode="reflect"
+ )
+ hidden = self.model(mel)
+ return hidden[:, :n_frames]
+
+ def decode(self, hidden, thred=0.03):
+ cents_pred = self.to_local_average_cents(hidden, thred=thred)
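+ # Cents -> Hz relative to the 10 Hz reference: f0 = 10 * 2^(cents/1200);
+ # unvoiced frames decode to cents == 0, i.e. exactly 10 Hz, and are zeroed.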
+ f0 = 10 * (2 ** (cents_pred / 1200))
+ f0[f0 == 10] = 0
+ # f0 = np.array([10 * (2 ** (cent_pred / 1200)) if cent_pred else 0 for cent_pred in cents_pred])
+ return f0
+
+ def infer_from_audio(self, audio, thred=0.03):
+ audio = torch.from_numpy(audio).float().to(self.device).unsqueeze(0)
+ mel = self.mel_extractor(audio, center=True)
+ hidden = self.mel2hidden(mel)
+ hidden = hidden.squeeze(0).cpu().numpy()
+ if self.is_half:
+ hidden = hidden.astype("float32")
+ f0 = self.decode(hidden, thred=thred)
+ return f0
+
+ def infer_from_audio_with_pitch(self, audio, thred=0.03, f0_min=50, f0_max=1100):
+ audio = torch.from_numpy(audio).float().to(self.device).unsqueeze(0)
+ mel = self.mel_extractor(audio, center=True)
+ hidden = self.mel2hidden(mel)
+ hidden = hidden.squeeze(0).cpu().numpy()
+ if self.is_half:
+ hidden = hidden.astype("float32")
+ f0 = self.decode(hidden, thred=thred)
+ f0[(f0 < f0_min) | (f0 > f0_max)] = 0
+ return f0
+
+ def to_local_average_cents(self, salience, thred=0.05):
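+ # Decode the salience map frame by frame: take the argmax bin, then a
+ # salience-weighted average of the 9 bins around it; frames whose peak
+ # salience is below `thred` are marked unvoiced (0).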
+ # t0 = ttime()
+ center = np.argmax(salience, axis=1) # (n_frames,) index of the peak bin per frame
+ salience = np.pad(salience, ((0, 0), (4, 4))) # (n_frames, 368)
+ # t1 = ttime()
+ center += 4
+ todo_salience = []
+ todo_cents_mapping = []
+ starts = center - 4
+ ends = center + 5
+ for idx in range(salience.shape[0]):
+ todo_salience.append(salience[:, starts[idx] : ends[idx]][idx])
+ todo_cents_mapping.append(self.cents_mapping[starts[idx] : ends[idx]])
+ # t2 = ttime()
+ todo_salience = np.array(todo_salience) # (n_frames, 9)
+ todo_cents_mapping = np.array(todo_cents_mapping) # (n_frames, 9)
+ product_sum = np.sum(todo_salience * todo_cents_mapping, 1)
+ weight_sum = np.sum(todo_salience, 1) # (n_frames,)
+ divided = product_sum / weight_sum # (n_frames,) salience-weighted cents
+ # t3 = ttime()
+ maxx = np.max(salience, axis=1) # (n_frames,) peak salience per frame
+ divided[maxx <= thred] = 0
+ # t4 = ttime()
+ # print("decode:%s\t%s\t%s\t%s" % (t1 - t0, t2 - t1, t3 - t2, t4 - t3))
+ return divided
+
+
+# if __name__ == '__main__':
+# audio, sampling_rate = sf.read("卢本伟语录~1.wav")
+# if len(audio.shape) > 1:
+# audio = librosa.to_mono(audio.transpose(1, 0))
+# audio_bak = audio.copy()
+# if sampling_rate != 16000:
+# audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
+# model_path = "/bili-coeus/jupyter/jupyterhub-liujing04/vits_ch/test-RMVPE/weights/rmvpe_llc_half.pt"
+# thred = 0.03 # 0.01
+# device = 'cuda' if torch.cuda.is_available() else 'cpu'
+# rmvpe = RMVPE(model_path,is_half=False, device=device)
+# t0=ttime()
+# f0 = rmvpe.infer_from_audio(audio, thred=thred)
+# f0 = rmvpe.infer_from_audio(audio, thred=thred)
+# f0 = rmvpe.infer_from_audio(audio, thred=thred)
+# f0 = rmvpe.infer_from_audio(audio, thred=thred)
+# f0 = rmvpe.infer_from_audio(audio, thred=thred)
+# t1=ttime()
+# print(f0.shape,t1-t0)
diff --git a/run.sh b/run.sh
new file mode 100644
index 000000000..61169ba0f
--- /dev/null
+++ b/run.sh
@@ -0,0 +1,153 @@
+#!/bin/bash
+
+# Define common paths for Homebrew
+BREW_PATHS=(
+ "/usr/local/bin"
+ "/opt/homebrew/bin"
+)
+
+if [[ "$(uname)" == "Darwin" ]]; then
+ # macOS specific env:
+ export PYTORCH_ENABLE_MPS_FALLBACK=1
+ export PYTORCH_MPS_HIGH_WATERMARK_RATIO=0.0
+elif [[ "$(uname)" != "Linux" ]]; then
+ echo "Unsupported operating system."
+ exit 1
+fi
+
+requirements_file="requirements.txt"
+
+# Function to add a path to PATH
+add_to_path() {
+ echo "Homebrew found in $1, which is not in your PATH."
+ read -p "Do you want to add this path to your PATH? (y/n) " -n 1 -r
+ echo
+ if [[ $REPLY =~ ^[Yy]$ ]]; then
+ echo "Adding $1 to PATH..."
+
+ # Detect the shell and choose the right profile file
+ local shell_profile
+ if [[ $SHELL == *"/bash"* ]]; then
+ shell_profile="$HOME/.bashrc"
+ [[ ! -f "$shell_profile" ]] && shell_profile="$HOME/.bash_profile"
+ elif [[ $SHELL == *"/zsh"* ]]; then
+ shell_profile="$HOME/.zshrc"
+ else
+ echo "Unsupported shell. Please add the following line to your shell profile file manually:"
+ echo "export PATH=\"$PATH:$1\""
+ return
+ fi
+
+ # Add the export line to the shell profile file
+ echo "export PATH=\"$PATH:$1\"" >> "$shell_profile"
+
+ # Source the shell profile file
+ source "$shell_profile"
+
+ # Verify that the new PATH includes Homebrew
+ if ! command -v brew &> /dev/null; then
+ echo "Failed to add Homebrew to the PATH."
+ fi
+ fi
+}
+
+# Check if Homebrew is in PATH
+if command -v brew &> /dev/null; then
+ echo "Homebrew is already in your PATH."
+else
+ # If not, check common paths for Homebrew
+ echo "Homebrew not found in PATH. Checking common paths..."
+ for path in "${BREW_PATHS[@]}"; do
+ if [[ -x "$path/brew" ]]; then
+ add_to_path "$path"
+ break
+ fi
+ done
+fi
+
+# Check again if Homebrew is in PATH
+if ! command -v brew &> /dev/null; then
+ echo "Homebrew still not found. Attempting to install..."
+ /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
+
+ # Check again if Homebrew is in PATH
+ if ! command -v brew &> /dev/null; then
+ echo "Homebrew not found in PATH even after installation. Checking common paths again..."
+ for path in "${BREW_PATHS[@]}"; do
+ if [[ -x "$path/brew" ]]; then
+ echo "Found post-install homebrew, adding to PATH...."
+ add_to_path "$path"
+ break
+ fi
+ done
+ fi
+fi
+
+# Verifying if Homebrew has been installed successfully
+if command -v brew &> /dev/null; then
+ echo "Homebrew installed successfully."
+else
+ echo "Homebrew installation failed."
+ exit 1
+fi
+
+# Installing ffmpeg with Homebrew
+if [[ "$(uname)" == "Darwin" ]]; then
+ echo "Installing ffmpeg..."
+ brew install ffmpeg
+fi
+
+# Check if Python 3.8 is installed
+if ! command -v python3.8 &> /dev/null; then
+ echo "Python 3.8 not found. Attempting to install..."
+ if [[ "$(uname)" == "Darwin" ]] && command -v brew &> /dev/null; then
+ brew install python@3.8
+ elif [[ "$(uname)" == "Linux" ]] && command -v apt-get &> /dev/null; then
+ sudo apt-get update
+ sudo apt-get install python3.8
+ else
+ echo "Please install Python 3.8 manually."
+ exit 1
+ fi
+fi
+
+# Check if required packages are installed and install them if not
+if [ -f "${requirements_file}" ]; then
+ installed_packages=$(python3.8 -m pip list --format=freeze)
+ while IFS= read -r package; do
+ [[ "${package}" =~ ^#.* ]] && continue
+ package_name=$(echo "${package}" | sed 's/[<>=!].*//')
+ if ! echo "${installed_packages}" | grep -q "${package_name}"; then
+ echo "${package_name} not found. Attempting to install..."
+ python3.8 -m pip install --upgrade "${package}"
+ fi
+ done < "${requirements_file}"
+else
+ echo "${requirements_file} not found. Please ensure the requirements file with required packages exists."
+ exit 1
+fi
+
+# Install onnxruntime package
+echo "Installing onnxruntime..."
+python3.8 -m pip install onnxruntime
+
+download_if_not_exists() {
+ local filename=$1
+ local url=$2
+ if [ ! -f "$filename" ]; then
+ echo "$filename does not exist, downloading..."
+ curl -# -L -o "$filename" "$url"
+ echo "Download finished."
+ else
+ echo "$filename already exists."
+ fi
+}
+
+# Check and download hubert_base.pt
+download_if_not_exists "hubert_base.pt" "https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/hubert_base.pt"
+
+# Check and download rmvpe.pt
+download_if_not_exists "rmvpe.pt" "https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/rmvpe.pt"
+
+# Run the main script
+python3.8 infer-web.py --pycmd python3.8
diff --git a/rvc_for_realtime.py b/rvc_for_realtime.py
new file mode 100644
index 000000000..74af22b54
--- /dev/null
+++ b/rvc_for_realtime.py
@@ -0,0 +1,311 @@
+import faiss, torch, traceback, parselmouth, numpy as np, torchcrepe, torch.nn as nn, pyworld
+from fairseq import checkpoint_utils
+from lib.infer_pack.models import (
+ SynthesizerTrnMs256NSFsid,
+ SynthesizerTrnMs256NSFsid_nono,
+ SynthesizerTrnMs768NSFsid,
+ SynthesizerTrnMs768NSFsid_nono,
+)
+import os, sys
+from time import time as ttime
+import torch.nn.functional as F
+import scipy.signal as signal
+
+now_dir = os.getcwd()
+sys.path.append(now_dir)
+from config import Config
+from multiprocessing import Manager as M
+
+mm = M()
+config = Config()
+
+
+class RVC:
+ def __init__(
+ self, key, pth_path, index_path, index_rate, n_cpu, inp_q, opt_q, device
+ ) -> None:
+ """
+ 初始化
+ """
+ try:
+ global config
+ self.inp_q = inp_q
+ self.opt_q = opt_q
+ self.device = device
+ self.f0_up_key = key
+ self.time_step = 160 / 16000 * 1000
+ self.f0_min = 50
+ self.f0_max = 1100
+ self.f0_mel_min = 1127 * np.log(1 + self.f0_min / 700)
+ self.f0_mel_max = 1127 * np.log(1 + self.f0_max / 700)
+ self.sr = 16000
+ self.window = 160
+ self.n_cpu = n_cpu
+ if index_rate != 0:
+ self.index = faiss.read_index(index_path)
+ self.big_npy = self.index.reconstruct_n(0, self.index.ntotal)
+ print("index search enabled")
+ self.index_rate = index_rate
+ models, _, _ = checkpoint_utils.load_model_ensemble_and_task(
+ ["hubert_base.pt"],
+ suffix="",
+ )
+ hubert_model = models[0]
+ hubert_model = hubert_model.to(config.device)
+ if config.is_half:
+ hubert_model = hubert_model.half()
+ else:
+ hubert_model = hubert_model.float()
+ hubert_model.eval()
+ self.model = hubert_model
+ cpt = torch.load(pth_path, map_location="cpu")
+ self.tgt_sr = cpt["config"][-1]
+ cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]
+ self.if_f0 = cpt.get("f0", 1)
+ self.version = cpt.get("version", "v1")
+ if self.version == "v1":
+ if self.if_f0 == 1:
+ self.net_g = SynthesizerTrnMs256NSFsid(
+ *cpt["config"], is_half=config.is_half
+ )
+ else:
+ self.net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
+ elif self.version == "v2":
+ if self.if_f0 == 1:
+ self.net_g = SynthesizerTrnMs768NSFsid(
+ *cpt["config"], is_half=config.is_half
+ )
+ else:
+ self.net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
+ del self.net_g.enc_q
+ print(self.net_g.load_state_dict(cpt["weight"], strict=False))
+ self.net_g.eval().to(device)
+ if config.is_half:
+ self.net_g = self.net_g.half()
+ else:
+ self.net_g = self.net_g.float()
+ self.is_half = config.is_half
+ except Exception: # keep the realtime loop alive but surface the traceback
+ print(traceback.format_exc())
+
+ def get_f0_post(self, f0):
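+ # Quantize f0 onto 255 coarse mel-scale bins (1 = unvoiced/floor) and also
+ # return the raw Hz contour; the synthesizer consumes both.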
+ f0_min = self.f0_min
+ f0_max = self.f0_max
+ f0_mel_min = 1127 * np.log(1 + f0_min / 700)
+ f0_mel_max = 1127 * np.log(1 + f0_max / 700)
+ f0bak = f0.copy()
+ f0_mel = 1127 * np.log(1 + f0 / 700)
+ f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
+ f0_mel_max - f0_mel_min
+ ) + 1
+ f0_mel[f0_mel <= 1] = 1
+ f0_mel[f0_mel > 255] = 255
+ f0_coarse = np.rint(f0_mel).astype(np.int_)
+ return f0_coarse, f0bak
+
+ def get_f0(self, x, f0_up_key, n_cpu, method="harvest"):
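+ # Dispatch on the requested f0 method: crepe / rmvpe / parselmouth ("pm"),
+ # falling through to pyworld harvest, optionally parallelized over n_cpu workers.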
+ n_cpu = int(n_cpu)
+ if method == "crepe":
+ return self.get_f0_crepe(x, f0_up_key)
+ if method == "rmvpe":
+ return self.get_f0_rmvpe(x, f0_up_key)
+ if method == "pm":
+ p_len = x.shape[0] // 160
+ f0 = (
+ parselmouth.Sound(x, 16000)
+ .to_pitch_ac(
+ time_step=0.01,
+ voicing_threshold=0.6,
+ pitch_floor=50,
+ pitch_ceiling=1100,
+ )
+ .selected_array["frequency"]
+ )
+
+ pad_size = (p_len - len(f0) + 1) // 2
+ if pad_size > 0 or p_len - len(f0) - pad_size > 0:
+ print(pad_size, p_len - len(f0) - pad_size)
+ f0 = np.pad(
+ f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant"
+ )
+
+ f0 *= pow(2, f0_up_key / 12)
+ return self.get_f0_post(f0)
+ if n_cpu == 1:
+ f0, t = pyworld.harvest(
+ x.astype(np.double),
+ fs=16000,
+ f0_ceil=1100,
+ f0_floor=50,
+ frame_period=10,
+ )
+ f0 = signal.medfilt(f0, 3)
+ f0 *= pow(2, f0_up_key / 12)
+ return self.get_f0_post(f0)
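+ # Parallel harvest: split the input into n_cpu chunks with a 320-sample
+ # overlap, dispatch them to worker processes via inp_q, then stitch the
+ # per-chunk f0 back together, trimming the overlapped frames.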
+ f0bak = np.zeros(x.shape[0] // 160, dtype=np.float64)
+ length = len(x)
+ part_length = int(length / n_cpu / 160) * 160
+ ts = ttime()
+ res_f0 = mm.dict()
+ for idx in range(n_cpu):
+ tail = part_length * (idx + 1) + 320
+ if idx == 0:
+ self.inp_q.put((idx, x[:tail], res_f0, n_cpu, ts))
+ else:
+ self.inp_q.put(
+ (idx, x[part_length * idx - 320 : tail], res_f0, n_cpu, ts)
+ )
+ while 1:
+ res_ts = self.opt_q.get()
+ if res_ts == ts:
+ break
+ f0s = [i[1] for i in sorted(res_f0.items(), key=lambda x: x[0])]
+ for idx, f0 in enumerate(f0s):
+ if idx == 0:
+ f0 = f0[:-3]
+ elif idx != n_cpu - 1:
+ f0 = f0[2:-3]
+ else:
+ f0 = f0[2:-1]
+ f0bak[
+ part_length * idx // 160 : part_length * idx // 160 + f0.shape[0]
+ ] = f0
+ f0bak = signal.medfilt(f0bak, 3)
+ f0bak *= pow(2, f0_up_key / 12)
+ return self.get_f0_post(f0bak)
+
+ def get_f0_crepe(self, x, f0_up_key):
+ audio = torch.tensor(np.copy(x))[None].float()
+ f0, pd = torchcrepe.predict(
+ audio,
+ self.sr,
+ 160,
+ self.f0_min,
+ self.f0_max,
+ "full",
+ batch_size=512,
+ device=self.device,
+ return_periodicity=True,
+ )
+ pd = torchcrepe.filter.median(pd, 3)
+ f0 = torchcrepe.filter.mean(f0, 3)
+ f0[pd < 0.1] = 0
+ f0 = f0[0].cpu().numpy()
+ f0 *= pow(2, f0_up_key / 12)
+ return self.get_f0_post(f0)
+
+ def get_f0_rmvpe(self, x, f0_up_key):
+ if not hasattr(self, "model_rmvpe"):
+ from rmvpe import RMVPE
+
+ print("loading rmvpe model")
+ self.model_rmvpe = RMVPE(
+ "rmvpe.pt", is_half=self.is_half, device=self.device
+ )
+ # self.model_rmvpe = RMVPE("aug2_58000_half.pt", is_half=self.is_half, device=self.device)
+ f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03)
+ f0 *= pow(2, f0_up_key / 12)
+ return self.get_f0_post(f0)
+
+ def infer(
+ self,
+ feats: torch.Tensor,
+ indata: np.ndarray,
+ rate1,
+ rate2,
+ cache_pitch,
+ cache_pitchf,
+ f0method,
+ ) -> np.ndarray:
+ feats = feats.view(1, -1)
+ if config.is_half:
+ feats = feats.half()
+ else:
+ feats = feats.float()
+ feats = feats.to(self.device)
+ t1 = ttime()
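+ # Stage 1: HuBERT feature extraction (layer 9 for v1 models, layer 12 for v2).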
+ with torch.no_grad():
+ padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False)
+ inputs = {
+ "source": feats,
+ "padding_mask": padding_mask,
+ "output_layer": 9 if self.version == "v1" else 12,
+ }
+ logits = self.model.extract_features(**inputs)
+ feats = (
+ self.model.final_proj(logits[0]) if self.version == "v1" else logits[0]
+ )
+ t2 = ttime()
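+ # Stage 2: optionally blend the tail of the feature sequence with its k=8
+ # nearest neighbours from the faiss index, weighted by inverse squared distance.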
+ try:
+ if hasattr(self, "index") and self.index_rate != 0:
+ leng_replace_head = int(rate1 * feats[0].shape[0])
+ npy = feats[0][-leng_replace_head:].cpu().numpy().astype("float32")
+ score, ix = self.index.search(npy, k=8)
+ weight = np.square(1 / score)
+ weight /= weight.sum(axis=1, keepdims=True)
+ npy = np.sum(self.big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)
+ if config.is_half:
+ npy = npy.astype("float16")
+ feats[0][-leng_replace_head:] = (
+ torch.from_numpy(npy).unsqueeze(0).to(self.device) * self.index_rate
+ + (1 - self.index_rate) * feats[0][-leng_replace_head:]
+ )
+ else:
+ print("index search FAIL or disabled")
+ except Exception:
+ traceback.print_exc()
+ print("index search FAIL")
+ feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
+ t3 = ttime()
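+ # Stage 3: estimate f0 on the incoming block and shift it into the rolling
+ # pitch caches so consecutive blocks stay continuous.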
+ if self.if_f0 == 1:
+ pitch, pitchf = self.get_f0(indata, self.f0_up_key, self.n_cpu, f0method)
+ cache_pitch[:] = np.append(cache_pitch[pitch[:-1].shape[0] :], pitch[:-1])
+ cache_pitchf[:] = np.append(
+ cache_pitchf[pitchf[:-1].shape[0] :], pitchf[:-1]
+ )
+ p_len = min(feats.shape[1], 13000, cache_pitch.shape[0])
+ else:
+ cache_pitch, cache_pitchf = None, None
+ p_len = min(feats.shape[1], 13000)
+ t4 = ttime()
+ feats = feats[:, :p_len, :]
+ if self.if_f0 == 1:
+ cache_pitch = cache_pitch[:p_len]
+ cache_pitchf = cache_pitchf[:p_len]
+ cache_pitch = torch.LongTensor(cache_pitch).unsqueeze(0).to(self.device)
+ cache_pitchf = torch.FloatTensor(cache_pitchf).unsqueeze(0).to(self.device)
+ p_len = torch.LongTensor([p_len]).to(self.device)
+ ii = 0 # sid: the realtime path always uses speaker 0
+ sid = torch.LongTensor([ii]).to(self.device)
+ with torch.no_grad():
+ if self.if_f0 == 1:
+ inferred_audio = (
+ self.net_g.infer(
+ feats, p_len, cache_pitch, cache_pitchf, sid, rate2
+ )[0][0, 0]
+ .data.cpu()
+ .float()
+ )
+ else:
+ inferred_audio = (
+ self.net_g.infer(feats, p_len, sid, rate2)[0][0, 0]
+ .data.cpu()
+ .float()
+ )
+ t5 = ttime()
+ print("time->fea-index-f0-model:", t2 - t1, t3 - t2, t4 - t3, t5 - t4)
+ return inferred_audio
diff --git a/slicer2.py b/slicer2.py
new file mode 100644
index 000000000..5b29ee262
--- /dev/null
+++ b/slicer2.py
@@ -0,0 +1,262 @@
+import numpy as np
+
+
+# This function is obtained from librosa.
+def get_rms(
+ y,
+ frame_length=2048,
+ hop_length=512,
+ pad_mode="constant",
+):
+ padding = (int(frame_length // 2), int(frame_length // 2))
+ y = np.pad(y, padding, mode=pad_mode)
+
+ axis = -1
+ # put our new within-frame axis at the end for now
+ out_strides = y.strides + tuple([y.strides[axis]])
+ # Reduce the shape on the framing axis
+ x_shape_trimmed = list(y.shape)
+ x_shape_trimmed[axis] -= frame_length - 1
+ out_shape = tuple(x_shape_trimmed) + tuple([frame_length])
+ xw = np.lib.stride_tricks.as_strided(y, shape=out_shape, strides=out_strides)
+ if axis < 0:
+ target_axis = axis - 1
+ else:
+ target_axis = axis + 1
+ xw = np.moveaxis(xw, -1, target_axis)
+ # Downsample along the target axis
+ slices = [slice(None)] * xw.ndim
+ slices[axis] = slice(0, None, hop_length)
+ x = xw[tuple(slices)]
+
+ # Calculate power
+ power = np.mean(np.abs(x) ** 2, axis=-2, keepdims=True)
+
+ return np.sqrt(power)
+
+
+class Slicer:
+ def __init__(
+ self,
+ sr: int,
+ threshold: float = -40.0,
+ min_length: int = 5000,
+ min_interval: int = 300,
+ hop_size: int = 20,
+ max_sil_kept: int = 5000,
+ ):
+ if not min_length >= min_interval >= hop_size:
+ raise ValueError(
+ "The following condition must be satisfied: min_length >= min_interval >= hop_size"
+ )
+ if not max_sil_kept >= hop_size:
+ raise ValueError(
+ "The following condition must be satisfied: max_sil_kept >= hop_size"
+ )
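+ # Convert the dB threshold to linear amplitude and all millisecond
+ # parameters to frame counts measured in hops.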
+ min_interval = sr * min_interval / 1000
+ self.threshold = 10 ** (threshold / 20.0)
+ self.hop_size = round(sr * hop_size / 1000)
+ self.win_size = min(round(min_interval), 4 * self.hop_size)
+ self.min_length = round(sr * min_length / 1000 / self.hop_size)
+ self.min_interval = round(min_interval / self.hop_size)
+ self.max_sil_kept = round(sr * max_sil_kept / 1000 / self.hop_size)
+
+ def _apply_slice(self, waveform, begin, end):
+ if len(waveform.shape) > 1:
+ return waveform[
+ :, begin * self.hop_size : min(waveform.shape[1], end * self.hop_size)
+ ]
+ else:
+ return waveform[
+ begin * self.hop_size : min(waveform.shape[0], end * self.hop_size)
+ ]
+
+ # @timeit
+ def slice(self, waveform):
+ if len(waveform.shape) > 1:
+ samples = waveform.mean(axis=0)
+ else:
+ samples = waveform
+ if samples.shape[0] <= self.min_length:
+ return [waveform]
+ rms_list = get_rms(
+ y=samples, frame_length=self.win_size, hop_length=self.hop_size
+ ).squeeze(0)
+ sil_tags = []
+ silence_start = None
+ clip_start = 0
+ for i, rms in enumerate(rms_list):
+ # Keep looping while frame is silent.
+ if rms < self.threshold:
+ # Record start of silent frames.
+ if silence_start is None:
+ silence_start = i
+ continue
+ # Keep looping while frame is not silent and silence start has not been recorded.
+ if silence_start is None:
+ continue
+ # Clear recorded silence start if interval is not enough or clip is too short
+ is_leading_silence = silence_start == 0 and i > self.max_sil_kept
+ need_slice_middle = (
+ i - silence_start >= self.min_interval
+ and i - clip_start >= self.min_length
+ )
+ if not is_leading_silence and not need_slice_middle:
+ silence_start = None
+ continue
+ # Need slicing. Record the range of silent frames to be removed.
+ if i - silence_start <= self.max_sil_kept:
+ pos = rms_list[silence_start : i + 1].argmin() + silence_start
+ if silence_start == 0:
+ sil_tags.append((0, pos))
+ else:
+ sil_tags.append((pos, pos))
+ clip_start = pos
+ elif i - silence_start <= self.max_sil_kept * 2:
+ pos = rms_list[
+ i - self.max_sil_kept : silence_start + self.max_sil_kept + 1
+ ].argmin()
+ pos += i - self.max_sil_kept
+ pos_l = (
+ rms_list[
+ silence_start : silence_start + self.max_sil_kept + 1
+ ].argmin()
+ + silence_start
+ )
+ pos_r = (
+ rms_list[i - self.max_sil_kept : i + 1].argmin()
+ + i
+ - self.max_sil_kept
+ )
+ if silence_start == 0:
+ sil_tags.append((0, pos_r))
+ clip_start = pos_r
+ else:
+ sil_tags.append((min(pos_l, pos), max(pos_r, pos)))
+ clip_start = max(pos_r, pos)
+ else:
+ pos_l = (
+ rms_list[
+ silence_start : silence_start + self.max_sil_kept + 1
+ ].argmin()
+ + silence_start
+ )
+ pos_r = (
+ rms_list[i - self.max_sil_kept : i + 1].argmin()
+ + i
+ - self.max_sil_kept
+ )
+ if silence_start == 0:
+ sil_tags.append((0, pos_r))
+ else:
+ sil_tags.append((pos_l, pos_r))
+ clip_start = pos_r
+ silence_start = None
+ # Deal with trailing silence.
+ total_frames = rms_list.shape[0]
+ if (
+ silence_start is not None
+ and total_frames - silence_start >= self.min_interval
+ ):
+ silence_end = min(total_frames, silence_start + self.max_sil_kept)
+ pos = rms_list[silence_start : silence_end + 1].argmin() + silence_start
+ sil_tags.append((pos, total_frames + 1))
+ # Apply and return slices.
+ if len(sil_tags) == 0:
+ return [waveform]
+ else:
+ chunks = []
+ if sil_tags[0][0] > 0:
+ chunks.append(self._apply_slice(waveform, 0, sil_tags[0][0]))
+ for i in range(len(sil_tags) - 1):
+ chunks.append(
+ self._apply_slice(waveform, sil_tags[i][1], sil_tags[i + 1][0])
+ )
+ if sil_tags[-1][1] < total_frames:
+ chunks.append(
+ self._apply_slice(waveform, sil_tags[-1][1], total_frames)
+ )
+ return chunks
+
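+# note (sketch): threshold is given in dB (converted to linear amplitude above);
+# min_length, min_interval and max_sil_kept are in milliseconds, hop_size is the
+# frame shift in milliseconds; slice() accepts mono (samples,) or (channels, samples).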
+
+def main():
+ import os.path
+ from argparse import ArgumentParser
+
+ import librosa
+ import soundfile
+
+ parser = ArgumentParser()
+ parser.add_argument("audio", type=str, help="The audio to be sliced")
+ parser.add_argument(
+ "--out", type=str, help="Output directory of the sliced audio clips"
+ )
+ parser.add_argument(
+ "--db_thresh",
+ type=float,
+ required=False,
+ default=-40,
+ help="The dB threshold for silence detection",
+ )
+ parser.add_argument(
+ "--min_length",
+ type=int,
+ required=False,
+ default=5000,
+ help="The minimum milliseconds required for each sliced audio clip",
+ )
+ parser.add_argument(
+ "--min_interval",
+ type=int,
+ required=False,
+ default=300,
+ help="The minimum milliseconds for a silence part to be sliced",
+ )
+ parser.add_argument(
+ "--hop_size",
+ type=int,
+ required=False,
+ default=10,
+ help="Frame length in milliseconds",
+ )
+ parser.add_argument(
+ "--max_sil_kept",
+ type=int,
+ required=False,
+ default=500,
+        help="The maximum silence length kept around the sliced clip, in milliseconds",
+ )
+ args = parser.parse_args()
+ out = args.out
+ if out is None:
+ out = os.path.dirname(os.path.abspath(args.audio))
+ audio, sr = librosa.load(args.audio, sr=None, mono=False)
+ slicer = Slicer(
+ sr=sr,
+ threshold=args.db_thresh,
+ min_length=args.min_length,
+ min_interval=args.min_interval,
+ hop_size=args.hop_size,
+ max_sil_kept=args.max_sil_kept,
+ )
+ chunks = slicer.slice(audio)
+ if not os.path.exists(out):
+ os.makedirs(out)
+ for i, chunk in enumerate(chunks):
+ if len(chunk.shape) > 1:
+ chunk = chunk.T
+ soundfile.write(
+ os.path.join(
+ out,
+                "%s_%d.wav"
+ % (os.path.basename(args.audio).rsplit(".", maxsplit=1)[0], i),
+ ),
+ chunk,
+ sr,
+ )
+
+
+if __name__ == "__main__":
+ main()
diff --git a/stftpitchshift b/stftpitchshift
new file mode 100644
index 000000000..310e89c61
Binary files /dev/null and b/stftpitchshift differ
diff --git a/stftpitchshift.exe b/stftpitchshift.exe
new file mode 100644
index 000000000..39c73ad88
Binary files /dev/null and b/stftpitchshift.exe differ
diff --git a/tensorlowest.py b/tensorlowest.py
new file mode 100644
index 000000000..eccd4dbf3
--- /dev/null
+++ b/tensorlowest.py
@@ -0,0 +1,123 @@
+from tensorboard.backend.event_processing import event_accumulator
+
+import os
+from shutil import copy2
+from re import search as RSearch
+import pandas as pd
+from ast import literal_eval as LEval
+
+weights_dir = 'weights/'
+
+def find_biggest_tensorboard(tensordir):
+ try:
+ files = [f for f in os.listdir(tensordir) if f.endswith('.0')]
+ if not files:
+ print("No files with the '.0' extension found!")
+ return
+
+ max_size = 0
+ biggest_file = ""
+
+ for file in files:
+ file_path = os.path.join(tensordir, file)
+ if os.path.isfile(file_path):
+ file_size = os.path.getsize(file_path)
+ if file_size > max_size:
+ max_size = file_size
+ biggest_file = file
+
+ return biggest_file
+
+ except FileNotFoundError:
+ print("Couldn't find your model!")
+ return
+
+def main(model_name, save_freq, lastmdls):
+ global lowestval_weight_dir, scl
+
+ tensordir = os.path.join('logs', model_name)
+ lowestval_weight_dir = os.path.join(tensordir, "lowestvals")
+
+ latest_file = find_biggest_tensorboard(tensordir)
+
+ if latest_file is None:
+ print("Couldn't find a valid tensorboard file!")
+ return
+
+ tfile = os.path.join(tensordir, latest_file)
+
+ ea = event_accumulator.EventAccumulator(tfile,
+ size_guidance={
+ event_accumulator.COMPRESSED_HISTOGRAMS: 500,
+ event_accumulator.IMAGES: 4,
+ event_accumulator.AUDIO: 4,
+ event_accumulator.SCALARS: 0,
+ event_accumulator.HISTOGRAMS: 1,
+ })
+
+ ea.Reload()
+ ea.Tags()
+
+ scl = ea.Scalars('loss/g/total')
+
+ listwstep = {}
+
+    steps = {v.step for v in scl}
+    for val in scl:
+        rounded_step = (val.step // save_freq) * save_freq
+        if rounded_step in steps:
+            listwstep[float(val.value)] = rounded_step
+
+ lowest_vals = sorted(listwstep.keys())[:lastmdls]
+
+ sorted_dict = {value: step for value, step in listwstep.items() if value in lowest_vals}
+
+ return sorted_dict
+
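+# Example (illustrative values): main("mymodel", 50, 3) returns something like
+# {0.123: 1500, 0.130: 1200, 0.141: 900}, mapping loss value -> checkpoint step.
+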
+def selectweights(model_name, file_dict, weights_dir, lowestval_weight_dir):
+ os.makedirs(lowestval_weight_dir, exist_ok=True)
+ logdir = []
+ files = []
+ lbldict = {
+ 'Values': {},
+ 'Names': {}
+ }
+ weights_dir_path = os.path.join(weights_dir, "")
+ low_val_path = os.path.join(os.getcwd(), os.path.join(lowestval_weight_dir, ""))
+
+ try:
+ file_dict = LEval(file_dict)
+ except Exception as e:
+ print(f"Error! {e}")
+ return f"Couldn't load tensorboard file! {e}"
+
+ weights = [f for f in os.scandir(weights_dir)]
+ for key, value in file_dict.items():
+ pattern = fr"^{model_name}_.*_s{value}\.pth$"
+ matching_weights = [f.name for f in weights if f.is_file() and RSearch(pattern, f.name)]
+ for weight in matching_weights:
+ source_path = weights_dir_path + weight
+ destination_path = os.path.join(lowestval_weight_dir, weight)
+
+ copy2(source_path, destination_path)
+
+ logdir.append(f"File = {weight} Value: {key}, Step: {value}")
+
+ lbldict['Names'][weight] = weight
+ lbldict['Values'][weight] = key
+
+ files.append(low_val_path + weight)
+
+ print(f"File = {weight} Value: {key}, Step: {value}")
+
+ yield ('\n'.join(logdir), files, pd.DataFrame(lbldict))
+
+
+    # final combined value (delivered via StopIteration once the generator is exhausted)
+    return '\n'.join(logdir), files, pd.DataFrame(lbldict)
+
+
+if __name__ == "__main__":
+    model = str(input("Enter the name of the model: "))
+    sav_freq = int(input("Enter save frequency of the model: "))
+    lastmdls = int(input("Enter how many lowest-loss checkpoints to keep: "))
+    ds = main(model, sav_freq, lastmdls)
+
+    if ds:
+        # selectweights is a generator and parses its dict argument from a string,
+        # so stringify ds and iterate so the copies actually run
+        for _ in selectweights(model, str(ds), weights_dir, lowestval_weight_dir):
+            pass
+
\ No newline at end of file
diff --git a/tools/dlmodels.bat b/tools/dlmodels.bat
new file mode 100644
index 000000000..547f2aef8
--- /dev/null
+++ b/tools/dlmodels.bat
@@ -0,0 +1,348 @@
+@echo off && chcp 65001
+
+echo working dir is %cd%
+echo checking for required download tool aria2.
+echo=
+dir /a:d/b | findstr "aria2" > flag.txt
+findstr "aria2" flag.txt >nul
+if %errorlevel% ==0 (
+ echo aria2 checked.
+ echo=
+) else (
+    echo failed. please download aria2 from the release page!
+    echo unzip it and put it in this directory!
+ timeout /T 5
+ start https://github.com/aria2/aria2/releases/tag/release-1.36.0
+ echo=
+ goto end
+)
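+
+rem note (assumed layout): aria2c.exe lives in an "aria2*" subfolder of this directory;
+rem models are downloaded into .\pretrained, .\pretrained_v2 and .\uvr5_weights.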
+
+echo envfiles checking start.
+echo=
+
+for /f %%x in ('findstr /i /c:"aria2" "flag.txt"') do (set aria2=%%x)&goto endSch
+:endSch
+
+set d32=f0D32k.pth
+set d40=f0D40k.pth
+set d48=f0D48k.pth
+set g32=f0G32k.pth
+set g40=f0G40k.pth
+set g48=f0G48k.pth
+
+set d40v2=f0D40k.pth
+set g40v2=f0G40k.pth
+
+set dld32=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D32k.pth
+set dld40=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D40k.pth
+set dld48=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D48k.pth
+set dlg32=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G32k.pth
+set dlg40=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G40k.pth
+set dlg48=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G48k.pth
+
+set dld40v2=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0D40k.pth
+set dlg40v2=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0G40k.pth
+
+set hp2_all=HP2_all_vocals.pth
+set hp3_all=HP3_all_vocals.pth
+set hp5_only=HP5_only_main_vocal.pth
+set VR_DeEchoAggressive=VR-DeEchoAggressive.pth
+set VR_DeEchoDeReverb=VR-DeEchoDeReverb.pth
+set VR_DeEchoNormal=VR-DeEchoNormal.pth
+set onnx_dereverb=vocals.onnx
+
+set dlhp2_all=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP2_all_vocals.pth
+set dlhp3_all=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP3_all_vocals.pth
+set dlhp5_only=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP5_only_main_vocal.pth
+set dlVR_DeEchoAggressive=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/VR-DeEchoAggressive.pth
+set dlVR_DeEchoDeReverb=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/VR-DeEchoDeReverb.pth
+set dlVR_DeEchoNormal=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/VR-DeEchoNormal.pth
+set dlonnx_dereverb=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/onnx_dereverb_By_FoxJoy/vocals.onnx
+
+set hb=hubert_base.pt
+
+set dlhb=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/hubert_base.pt
+
+echo dir check start.
+echo=
+
+if exist "%~dp0pretrained" (
+ echo dir .\pretrained checked.
+ ) else (
+ echo failed. generating dir .\pretrained.
+ mkdir pretrained
+ )
+if exist "%~dp0pretrained_v2" (
+ echo dir .\pretrained_v2 checked.
+ ) else (
+ echo failed. generating dir .\pretrained_v2.
+ mkdir pretrained_v2
+ )
+if exist "%~dp0uvr5_weights" (
+ echo dir .\uvr5_weights checked.
+ ) else (
+ echo failed. generating dir .\uvr5_weights.
+ mkdir uvr5_weights
+ )
+if exist "%~dp0uvr5_weights\onnx_dereverb_By_FoxJoy" (
+ echo dir .\uvr5_weights\onnx_dereverb_By_FoxJoy checked.
+ ) else (
+ echo failed. generating dir .\uvr5_weights\onnx_dereverb_By_FoxJoy.
+ mkdir uvr5_weights\onnx_dereverb_By_FoxJoy
+ )
+
+echo=
+echo dir check finished.
+
+echo=
+echo required files check start.
+
+echo checking D32k.pth
+if exist "%~dp0pretrained\D32k.pth" (
+ echo D32k.pth in .\pretrained checked.
+ echo=
+ ) else (
+ echo failed. starting download from huggingface.
+ %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D32k.pth -d %~dp0pretrained -o D32k.pth
+ if exist "%~dp0pretrained\D32k.pth" (echo download successful.) else (echo please try again!
+ echo=)
+ )
+echo checking D40k.pth
+if exist "%~dp0pretrained\D40k.pth" (
+ echo D40k.pth in .\pretrained checked.
+ echo=
+ ) else (
+ echo failed. starting download from huggingface.
+ %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D40k.pth -d %~dp0pretrained -o D40k.pth
+ if exist "%~dp0pretrained\D40k.pth" (echo download successful.) else (echo please try again!
+ echo=)
+ )
+echo checking D40k.pth
+if exist "%~dp0pretrained_v2\D40k.pth" (
+ echo D40k.pth in .\pretrained_v2 checked.
+ echo=
+ ) else (
+ echo failed. starting download from huggingface.
+ %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/D40k.pth -d %~dp0pretrained_v2 -o D40k.pth
+ if exist "%~dp0pretrained_v2\D40k.pth" (echo download successful.) else (echo please try again!
+ echo=)
+ )
+echo checking D48k.pth
+if exist "%~dp0pretrained\D48k.pth" (
+ echo D48k.pth in .\pretrained checked.
+ echo=
+ ) else (
+ echo failed. starting download from huggingface.
+ %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D48k.pth -d %~dp0pretrained -o D48k.pth
+ if exist "%~dp0pretrained\D48k.pth" (echo download successful.) else (echo please try again!
+ echo=)
+ )
+echo checking G32k.pth
+if exist "%~dp0pretrained\G32k.pth" (
+ echo G32k.pth in .\pretrained checked.
+ echo=
+ ) else (
+ echo failed. starting download from huggingface.
+ %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G32k.pth -d %~dp0pretrained -o G32k.pth
+ if exist "%~dp0pretrained\G32k.pth" (echo download successful.) else (echo please try again!
+ echo=)
+ )
+echo checking G40k.pth
+if exist "%~dp0pretrained\G40k.pth" (
+ echo G40k.pth in .\pretrained checked.
+ echo=
+ ) else (
+ echo failed. starting download from huggingface.
+ %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G40k.pth -d %~dp0pretrained -o G40k.pth
+ if exist "%~dp0pretrained\G40k.pth" (echo download successful.) else (echo please try again!
+ echo=)
+ )
+echo checking G40k.pth
+if exist "%~dp0pretrained_v2\G40k.pth" (
+ echo G40k.pth in .\pretrained_v2 checked.
+ echo=
+ ) else (
+ echo failed. starting download from huggingface.
+ %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/G40k.pth -d %~dp0pretrained_v2 -o G40k.pth
+ if exist "%~dp0pretrained_v2\G40k.pth" (echo download successful.) else (echo please try again!
+ echo=)
+ )
+echo checking G48k.pth
+if exist "%~dp0pretrained\G48k.pth" (
+ echo G48k.pth in .\pretrained checked.
+ echo=
+ ) else (
+ echo failed. starting download from huggingface.
+ %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G48k.pth -d %~dp0pretrained -o G48k.pth
+ if exist "%~dp0pretrained\G48k.pth" (echo download successful.) else (echo please try again!
+ echo=)
+ )
+
+echo checking %d32%
+if exist "%~dp0pretrained\%d32%" (
+ echo %d32% in .\pretrained checked.
+ echo=
+ ) else (
+ echo failed. starting download from huggingface.
+ %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dld32% -d %~dp0pretrained -o %d32%
+ if exist "%~dp0pretrained\%d32%" (echo download successful.) else (echo please try again!
+ echo=)
+ )
+echo checking %d40%
+if exist "%~dp0pretrained\%d40%" (
+ echo %d40% in .\pretrained checked.
+ echo=
+ ) else (
+ echo failed. starting download from huggingface.
+ %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dld40% -d %~dp0pretrained -o %d40%
+ if exist "%~dp0pretrained\%d40%" (echo download successful.) else (echo please try again!
+ echo=)
+ )
+echo checking %d40v2%
+if exist "%~dp0pretrained_v2\%d40v2%" (
+ echo %d40v2% in .\pretrained_v2 checked.
+ echo=
+ ) else (
+ echo failed. starting download from huggingface.
+ %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dld40v2% -d %~dp0pretrained_v2 -o %d40v2%
+ if exist "%~dp0pretrained_v2\%d40v2%" (echo download successful.) else (echo please try again!
+ echo=)
+ )
+echo checking %d48%
+if exist "%~dp0pretrained\%d48%" (
+ echo %d48% in .\pretrained checked.
+ echo=
+ ) else (
+ echo failed. starting download from huggingface.
+ %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dld48% -d %~dp0pretrained -o %d48%
+ if exist "%~dp0pretrained\%d48%" (echo download successful.) else (echo please try again!
+ echo=)
+ )
+echo checking %g32%
+if exist "%~dp0pretrained\%g32%" (
+ echo %g32% in .\pretrained checked.
+ echo=
+ ) else (
+ echo failed. starting download from huggingface.
+ %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlg32% -d %~dp0pretrained -o %g32%
+ if exist "%~dp0pretrained\%g32%" (echo download successful.) else (echo please try again!
+ echo=)
+ )
+echo checking %g40%
+if exist "%~dp0pretrained\%g40%" (
+ echo %g40% in .\pretrained checked.
+ echo=
+ ) else (
+ echo failed. starting download from huggingface.
+ %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlg40% -d %~dp0pretrained -o %g40%
+ if exist "%~dp0pretrained\%g40%" (echo download successful.) else (echo please try again!
+ echo=)
+ )
+echo checking %g40v2%
+if exist "%~dp0pretrained_v2\%g40v2%" (
+ echo %g40v2% in .\pretrained_v2 checked.
+ echo=
+ ) else (
+ echo failed. starting download from huggingface.
+ %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlg40v2% -d %~dp0pretrained_v2 -o %g40v2%
+ if exist "%~dp0pretrained_v2\%g40v2%" (echo download successful.) else (echo please try again!
+ echo=)
+ )
+echo checking %g48%
+if exist "%~dp0pretrained\%g48%" (
+ echo %g48% in .\pretrained checked.
+ echo=
+ ) else (
+ echo failed. starting download from huggingface.
+    %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlg48% -d %~dp0pretrained -o %g48%
+ if exist "%~dp0pretrained\%g48%" (echo download successful.) else (echo please try again!
+ echo=)
+ )
+
+echo checking %hp2_all%
+if exist "%~dp0uvr5_weights\%hp2_all%" (
+ echo %hp2_all% in .\uvr5_weights checked.
+ echo=
+ ) else (
+ echo failed. starting download from huggingface.
+ %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlhp2_all% -d %~dp0\uvr5_weights -o %hp2_all%
+ if exist "%~dp0uvr5_weights\%hp2_all%" (echo download successful.) else (echo please try again!
+ echo=)
+ )
+echo checking %hp3_all%
+if exist "%~dp0uvr5_weights\%hp3_all%" (
+ echo %hp3_all% in .\uvr5_weights checked.
+ echo=
+ ) else (
+ echo failed. starting download from huggingface.
+ %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlhp3_all% -d %~dp0\uvr5_weights -o %hp3_all%
+ if exist "%~dp0uvr5_weights\%hp3_all%" (echo download successful.) else (echo please try again!
+ echo=)
+ )
+echo checking %hp5_only%
+if exist "%~dp0uvr5_weights\%hp5_only%" (
+ echo %hp5_only% in .\uvr5_weights checked.
+ echo=
+ ) else (
+ echo failed. starting download from huggingface.
+ %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlhp5_only% -d %~dp0\uvr5_weights -o %hp5_only%
+ if exist "%~dp0uvr5_weights\%hp5_only%" (echo download successful.) else (echo please try again!
+ echo=)
+ )
+echo checking %VR_DeEchoAggressive%
+if exist "%~dp0uvr5_weights\%VR_DeEchoAggressive%" (
+ echo %VR_DeEchoAggressive% in .\uvr5_weights checked.
+ echo=
+ ) else (
+ echo failed. starting download from huggingface.
+ %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlVR_DeEchoAggressive% -d %~dp0\uvr5_weights -o %VR_DeEchoAggressive%
+ if exist "%~dp0uvr5_weights\%VR_DeEchoAggressive%" (echo download successful.) else (echo please try again!
+ echo=)
+ )
+echo checking %VR_DeEchoDeReverb%
+if exist "%~dp0uvr5_weights\%VR_DeEchoDeReverb%" (
+ echo %VR_DeEchoDeReverb% in .\uvr5_weights checked.
+ echo=
+ ) else (
+ echo failed. starting download from huggingface.
+ %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlVR_DeEchoDeReverb% -d %~dp0\uvr5_weights -o %VR_DeEchoDeReverb%
+ if exist "%~dp0uvr5_weights\%VR_DeEchoDeReverb%" (echo download successful.) else (echo please try again!
+ echo=)
+ )
+echo checking %VR_DeEchoNormal%
+if exist "%~dp0uvr5_weights\%VR_DeEchoNormal%" (
+ echo %VR_DeEchoNormal% in .\uvr5_weights checked.
+ echo=
+ ) else (
+ echo failed. starting download from huggingface.
+ %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlVR_DeEchoNormal% -d %~dp0\uvr5_weights -o %VR_DeEchoNormal%
+ if exist "%~dp0uvr5_weights\%VR_DeEchoNormal%" (echo download successful.) else (echo please try again!
+ echo=)
+ )
+echo checking %onnx_dereverb%
+if exist "%~dp0uvr5_weights\onnx_dereverb_By_FoxJoy\%onnx_dereverb%" (
+ echo %onnx_dereverb% in .\uvr5_weights\onnx_dereverb_By_FoxJoy checked.
+ echo=
+ ) else (
+ echo failed. starting download from huggingface.
+ %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlonnx_dereverb% -d %~dp0\uvr5_weights\onnx_dereverb_By_FoxJoy -o %onnx_dereverb%
+ if exist "%~dp0uvr5_weights\onnx_dereverb_By_FoxJoy\%onnx_dereverb%" (echo download successful.) else (echo please try again!
+ echo=)
+ )
+
+echo checking %hb%
+if exist "%~dp0%hb%" (
+    echo %hb% in working dir checked.
+ echo=
+ ) else (
+ echo failed. starting download from huggingface.
+ %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlhb% -d %~dp0 -o %hb%
+ if exist "%~dp0%hb%" (echo download successful.) else (echo please try again!
+ echo=)
+ )
+
+echo required files check finished.
+echo envfiles check complete.
+pause
+:end
+del flag.txt
diff --git a/tools/dlmodels.sh b/tools/dlmodels.sh
new file mode 100644
index 000000000..0ae7f7eb8
--- /dev/null
+++ b/tools/dlmodels.sh
@@ -0,0 +1,546 @@
+#!/bin/bash
+
+echo working dir is $(pwd)
+echo checking for required download tool aria2.
+
+if command -v aria2c &> /dev/null
+then
+ echo "aria2c command found"
+else
+ echo failed. please install aria2
+ sleep 5
+ exit 1
+fi
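+
+# note (assumed usage): run from the repository root, e.g. `sh ./tools/dlmodels.sh`;
+# models are downloaded into ./pretrained, ./pretrained_v2 and ./uvr5_weights.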
+
+d32="f0D32k.pth"
+d40="f0D40k.pth"
+d48="f0D48k.pth"
+g32="f0G32k.pth"
+g40="f0G40k.pth"
+g48="f0G48k.pth"
+
+d40v2="f0D40k.pth"
+g40v2="f0G40k.pth"
+
+dld32="https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D32k.pth"
+dld40="https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D40k.pth"
+dld48="https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D48k.pth"
+dlg32="https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G32k.pth"
+dlg40="https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G40k.pth"
+dlg48="https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G48k.pth"
+
+dld40v2="https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0D40k.pth"
+dlg40v2="https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0G40k.pth"
+
+hp2_all="HP2_all_vocals.pth"
+hp3_all="HP3_all_vocals.pth"
+hp5_only="HP5_only_main_vocal.pth"
+VR_DeEchoAggressive="VR-DeEchoAggressive.pth"
+VR_DeEchoDeReverb="VR-DeEchoDeReverb.pth"
+VR_DeEchoNormal="VR-DeEchoNormal.pth"
+onnx_dereverb="vocals.onnx"
+
+dlhp2_all="https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP2_all_vocals.pth"
+dlhp3_all="https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP3_all_vocals.pth"
+dlhp5_only="https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP5_only_main_vocal.pth"
+dlVR_DeEchoAggressive="https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/VR-DeEchoAggressive.pth"
+dlVR_DeEchoDeReverb="https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/VR-DeEchoDeReverb.pth"
+dlVR_DeEchoNormal="https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/VR-DeEchoNormal.pth"
+dlonnx_dereverb="https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/onnx_dereverb_By_FoxJoy/vocals.onnx"
+
+hb="hubert_base.pt"
+
+dlhb="https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/hubert_base.pt"
+
+echo dir check start.
+
+if [ -d "./pretrained" ]; then
+ echo dir ./pretrained checked.
+else
+ echo failed. generating dir ./pretrained.
+ mkdir pretrained
+fi
+
+if [ -d "./pretrained_v2" ]; then
+ echo dir ./pretrained_v2 checked.
+else
+ echo failed. generating dir ./pretrained_v2.
+ mkdir pretrained_v2
+fi
+
+if [ -d "./uvr5_weights" ]; then
+ echo dir ./uvr5_weights checked.
+else
+ echo failed. generating dir ./uvr5_weights.
+ mkdir uvr5_weights
+fi
+
+if [ -d "./uvr5_weights/onnx_dereverb_By_FoxJoy" ]; then
+ echo dir ./uvr5_weights/onnx_dereverb_By_FoxJoy checked.
+else
+ echo failed. generating dir ./uvr5_weights/onnx_dereverb_By_FoxJoy.
+ mkdir uvr5_weights/onnx_dereverb_By_FoxJoy
+fi
+
+echo dir check finished.
+
+echo required files check start.
+
+echo checking D32k.pth
+if [ -f "./pretrained/D32k.pth" ]; then
+ echo D32k.pth in ./pretrained checked.
+else
+ echo failed. starting download from huggingface.
+ if command -v aria2c &> /dev/null; then
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D32k.pth -d ./pretrained -o D32k.pth
+ if [ -f "./pretrained/D32k.pth" ]; then
+ echo download successful.
+ else
+ echo please try again!
+ exit 1
+ fi
+ else
+ echo aria2c command not found. Please install aria2c and try again.
+ exit 1
+ fi
+fi
+
+echo checking D40k.pth
+if [ -f "./pretrained/D40k.pth" ]; then
+ echo D40k.pth in ./pretrained checked.
+else
+ echo failed. starting download from huggingface.
+ if command -v aria2c &> /dev/null; then
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D40k.pth -d ./pretrained -o D40k.pth
+ if [ -f "./pretrained/D40k.pth" ]; then
+ echo download successful.
+ else
+ echo please try again!
+ exit 1
+ fi
+ else
+ echo aria2c command not found. Please install aria2c and try again.
+ exit 1
+ fi
+fi
+
+echo checking D40k.pth
+if [ -f "./pretrained_v2/D40k.pth" ]; then
+ echo D40k.pth in ./pretrained_v2 checked.
+else
+ echo failed. starting download from huggingface.
+ if command -v aria2c &> /dev/null; then
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/D40k.pth -d ./pretrained_v2 -o D40k.pth
+ if [ -f "./pretrained_v2/D40k.pth" ]; then
+ echo download successful.
+ else
+ echo please try again!
+ exit 1
+ fi
+ else
+ echo aria2c command not found. Please install aria2c and try again.
+ exit 1
+ fi
+fi
+
+echo checking D48k.pth
+if [ -f "./pretrained/D48k.pth" ]; then
+ echo D48k.pth in ./pretrained checked.
+else
+ echo failed. starting download from huggingface.
+ if command -v aria2c &> /dev/null; then
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D48k.pth -d ./pretrained -o D48k.pth
+ if [ -f "./pretrained/D48k.pth" ]; then
+ echo download successful.
+ else
+ echo please try again!
+ exit 1
+ fi
+ else
+ echo aria2c command not found. Please install aria2c and try again.
+ exit 1
+ fi
+fi
+
+echo checking G32k.pth
+if [ -f "./pretrained/G32k.pth" ]; then
+ echo G32k.pth in ./pretrained checked.
+else
+ echo failed. starting download from huggingface.
+ if command -v aria2c &> /dev/null; then
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G32k.pth -d ./pretrained -o G32k.pth
+ if [ -f "./pretrained/G32k.pth" ]; then
+ echo download successful.
+ else
+ echo please try again!
+ exit 1
+ fi
+ else
+ echo aria2c command not found. Please install aria2c and try again.
+ exit 1
+ fi
+fi
+
+echo checking G40k.pth
+if [ -f "./pretrained/G40k.pth" ]; then
+ echo G40k.pth in ./pretrained checked.
+else
+ echo failed. starting download from huggingface.
+ if command -v aria2c &> /dev/null; then
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G40k.pth -d ./pretrained -o G40k.pth
+ if [ -f "./pretrained/G40k.pth" ]; then
+ echo download successful.
+ else
+ echo please try again!
+ exit 1
+ fi
+ else
+ echo aria2c command not found. Please install aria2c and try again.
+ exit 1
+ fi
+fi
+
+echo checking G40k.pth
+if [ -f "./pretrained_v2/G40k.pth" ]; then
+ echo G40k.pth in ./pretrained_v2 checked.
+else
+ echo failed. starting download from huggingface.
+ if command -v aria2c &> /dev/null; then
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/G40k.pth -d ./pretrained_v2 -o G40k.pth
+ if [ -f "./pretrained_v2/G40k.pth" ]; then
+ echo download successful.
+ else
+ echo please try again!
+ exit 1
+ fi
+ else
+ echo aria2c command not found. Please install aria2c and try again.
+ exit 1
+ fi
+fi
+
+echo checking G48k.pth
+if [ -f "./pretrained/G48k.pth" ]; then
+ echo G48k.pth in ./pretrained checked.
+else
+ echo failed. starting download from huggingface.
+ if command -v aria2c &> /dev/null; then
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G48k.pth -d ./pretrained -o G48k.pth
+ if [ -f "./pretrained/G48k.pth" ]; then
+ echo download successful.
+ else
+ echo please try again!
+ exit 1
+ fi
+ else
+ echo aria2c command not found. Please install aria2c and try again.
+ exit 1
+ fi
+fi
+
+echo checking $d32
+if [ -f "./pretrained/$d32" ]; then
+ echo $d32 in ./pretrained checked.
+else
+ echo failed. starting download from huggingface.
+ if command -v aria2c &> /dev/null; then
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dld32 -d ./pretrained -o $d32
+ if [ -f "./pretrained/$d32" ]; then
+ echo download successful.
+ else
+ echo please try again!
+ exit 1
+ fi
+ else
+ echo aria2c command not found. Please install aria2c and try again.
+ exit 1
+ fi
+fi
+
+echo checking $d40
+if [ -f "./pretrained/$d40" ]; then
+ echo $d40 in ./pretrained checked.
+else
+ echo failed. starting download from huggingface.
+ if command -v aria2c &> /dev/null; then
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dld40 -d ./pretrained -o $d40
+ if [ -f "./pretrained/$d40" ]; then
+ echo download successful.
+ else
+ echo please try again!
+ exit 1
+ fi
+ else
+ echo aria2c command not found. Please install aria2c and try again.
+ exit 1
+ fi
+fi
+
+echo checking $d40v2
+if [ -f "./pretrained_v2/$d40v2" ]; then
+ echo $d40v2 in ./pretrained_v2 checked.
+else
+ echo failed. starting download from huggingface.
+ if command -v aria2c &> /dev/null; then
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dld40v2 -d ./pretrained_v2 -o $d40v2
+ if [ -f "./pretrained_v2/$d40v2" ]; then
+ echo download successful.
+ else
+ echo please try again!
+ exit 1
+ fi
+ else
+ echo aria2c command not found. Please install aria2c and try again.
+ exit 1
+ fi
+fi
+
+echo checking $d48
+if [ -f "./pretrained/$d48" ]; then
+ echo $d48 in ./pretrained checked.
+else
+ echo failed. starting download from huggingface.
+ if command -v aria2c &> /dev/null; then
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dld48 -d ./pretrained -o $d48
+ if [ -f "./pretrained/$d48" ]; then
+ echo download successful.
+ else
+ echo please try again!
+ exit 1
+ fi
+ else
+ echo aria2c command not found. Please install aria2c and try again.
+ exit 1
+ fi
+fi
+
+echo checking $g32
+if [ -f "./pretrained/$g32" ]; then
+ echo $g32 in ./pretrained checked.
+else
+ echo failed. starting download from huggingface.
+ if command -v aria2c &> /dev/null; then
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dlg32 -d ./pretrained -o $g32
+ if [ -f "./pretrained/$g32" ]; then
+ echo download successful.
+ else
+ echo please try again!
+ exit 1
+ fi
+ else
+ echo aria2c command not found. Please install aria2c and try again.
+ exit 1
+ fi
+fi
+
+echo checking $g40
+if [ -f "./pretrained/$g40" ]; then
+ echo $g40 in ./pretrained checked.
+else
+ echo failed. starting download from huggingface.
+ if command -v aria2c &> /dev/null; then
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dlg40 -d ./pretrained -o $g40
+ if [ -f "./pretrained/$g40" ]; then
+ echo download successful.
+ else
+ echo please try again!
+ exit 1
+ fi
+ else
+ echo aria2c command not found. Please install aria2c and try again.
+ exit 1
+ fi
+fi
+
+echo checking $g40v2
+if [ -f "./pretrained_v2/$g40v2" ]; then
+ echo $g40v2 in ./pretrained_v2 checked.
+else
+ echo failed. starting download from huggingface.
+ if command -v aria2c &> /dev/null; then
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dlg40v2 -d ./pretrained_v2 -o $g40v2
+ if [ -f "./pretrained_v2/$g40v2" ]; then
+ echo download successful.
+ else
+ echo please try again!
+ exit 1
+ fi
+ else
+ echo aria2c command not found. Please install aria2c and try again.
+ exit 1
+ fi
+fi
+
+echo checking $g48
+if [ -f "./pretrained/$g48" ]; then
+ echo $g48 in ./pretrained checked.
+else
+ echo failed. starting download from huggingface.
+ if command -v aria2c &> /dev/null; then
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dlg48 -d ./pretrained -o $g48
+ if [ -f "./pretrained/$g48" ]; then
+ echo download successful.
+ else
+ echo please try again!
+ exit 1
+ fi
+ else
+ echo aria2c command not found. Please install aria2c and try again.
+ exit 1
+ fi
+fi
+
+echo checking $hp2_all
+if [ -f "./uvr5_weights/$hp2_all" ]; then
+ echo $hp2_all in ./uvr5_weights checked.
+else
+ echo failed. starting download from huggingface.
+ if command -v aria2c &> /dev/null; then
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dlhp2_all -d ./uvr5_weights -o $hp2_all
+ if [ -f "./uvr5_weights/$hp2_all" ]; then
+ echo download successful.
+ else
+ echo please try again!
+ exit 1
+ fi
+ else
+ echo aria2c command not found. Please install aria2c and try again.
+ exit 1
+ fi
+fi
+
+echo checking $hp3_all
+if [ -f "./uvr5_weights/$hp3_all" ]; then
+ echo $hp3_all in ./uvr5_weights checked.
+else
+ echo failed. starting download from huggingface.
+ if command -v aria2c &> /dev/null; then
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dlhp3_all -d ./uvr5_weights -o $hp3_all
+ if [ -f "./uvr5_weights/$hp3_all" ]; then
+ echo download successful.
+ else
+ echo please try again!
+ exit 1
+ fi
+ else
+ echo aria2c command not found. Please install aria2c and try again.
+ exit 1
+ fi
+fi
+
+echo checking $hp5_only
+if [ -f "./uvr5_weights/$hp5_only" ]; then
+ echo $hp5_only in ./uvr5_weights checked.
+else
+ echo failed. starting download from huggingface.
+ if command -v aria2c &> /dev/null; then
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dlhp5_only -d ./uvr5_weights -o $hp5_only
+ if [ -f "./uvr5_weights/$hp5_only" ]; then
+ echo download successful.
+ else
+ echo please try again!
+ exit 1
+ fi
+ else
+ echo aria2c command not found. Please install aria2c and try again.
+ exit 1
+ fi
+fi
+
+echo checking $VR_DeEchoAggressive
+if [ -f "./uvr5_weights/$VR_DeEchoAggressive" ]; then
+ echo $VR_DeEchoAggressive in ./uvr5_weights checked.
+else
+ echo failed. starting download from huggingface.
+ if command -v aria2c &> /dev/null; then
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dlVR_DeEchoAggressive -d ./uvr5_weights -o $VR_DeEchoAggressive
+ if [ -f "./uvr5_weights/$VR_DeEchoAggressive" ]; then
+ echo download successful.
+ else
+ echo please try again!
+ exit 1
+ fi
+ else
+ echo aria2c command not found. Please install aria2c and try again.
+ exit 1
+ fi
+fi
+
+echo checking $VR_DeEchoDeReverb
+if [ -f "./uvr5_weights/$VR_DeEchoDeReverb" ]; then
+ echo $VR_DeEchoDeReverb in ./uvr5_weights checked.
+else
+ echo failed. starting download from huggingface.
+ if command -v aria2c &> /dev/null; then
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dlVR_DeEchoDeReverb -d ./uvr5_weights -o $VR_DeEchoDeReverb
+ if [ -f "./uvr5_weights/$VR_DeEchoDeReverb" ]; then
+ echo download successful.
+ else
+ echo please try again!
+ exit 1
+ fi
+ else
+ echo aria2c command not found. Please install aria2c and try again.
+ exit 1
+ fi
+fi
+
+echo checking $VR_DeEchoNormal
+if [ -f "./uvr5_weights/$VR_DeEchoNormal" ]; then
+ echo $VR_DeEchoNormal in ./uvr5_weights checked.
+else
+ echo failed. starting download from huggingface.
+ if command -v aria2c &> /dev/null; then
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dlVR_DeEchoNormal -d ./uvr5_weights -o $VR_DeEchoNormal
+ if [ -f "./uvr5_weights/$VR_DeEchoNormal" ]; then
+ echo download successful.
+ else
+ echo please try again!
+ exit 1
+ fi
+ else
+ echo aria2c command not found. Please install aria2c and try again.
+ exit 1
+ fi
+fi
+
+echo checking $onnx_dereverb
+if [ -f "./uvr5_weights/onnx_dereverb_By_FoxJoy/$onnx_dereverb" ]; then
+ echo $onnx_dereverb in ./uvr5_weights/onnx_dereverb_By_FoxJoy checked.
+else
+ echo failed. starting download from huggingface.
+ if command -v aria2c &> /dev/null; then
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dlonnx_dereverb -d ./uvr5_weights/onnx_dereverb_By_FoxJoy -o $onnx_dereverb
+ if [ -f "./uvr5_weights/onnx_dereverb_By_FoxJoy/$onnx_dereverb" ]; then
+ echo download successful.
+ else
+ echo please try again!
+ exit 1
+ fi
+ else
+ echo aria2c command not found. Please install aria2c and try again.
+ exit 1
+ fi
+fi
+
+echo checking $hb
+if [ -f "./$hb" ]; then
+    echo $hb in working dir checked.
+else
+ echo failed. starting download from huggingface.
+ if command -v aria2c &> /dev/null; then
+ aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dlhb -d ./ -o $hb
+ if [ -f "./$hb" ]; then
+ echo download successful.
+ else
+ echo please try again!
+ exit 1
+ fi
+ else
+ echo aria2c command not found. Please install aria2c and try again.
+ exit 1
+ fi
+fi
+
+echo required files check finished.
+read -p "Press any key to continue..." -n1 -s
diff --git a/tools/infer/infer-pm-index256.py b/tools/infer/infer-pm-index256.py
new file mode 100644
index 000000000..ead4dcb56
--- /dev/null
+++ b/tools/infer/infer-pm-index256.py
@@ -0,0 +1,199 @@
+"""
+
+对源特征进行检索
+"""
+import torch, pdb, os, parselmouth
+
+os.environ["CUDA_VISIBLE_DEVICES"] = "0"
+import numpy as np
+import soundfile as sf
+
+# from models import SynthesizerTrn256#hifigan_nonsf
+# from lib.infer_pack.models import SynthesizerTrn256NSF as SynthesizerTrn256#hifigan_nsf
+from lib.infer_pack.models import (
+ SynthesizerTrnMs256NSFsid as SynthesizerTrn256,
+) # hifigan_nsf
+
+# from lib.infer_pack.models import SynthesizerTrnMs256NSFsid_sim as SynthesizerTrn256#hifigan_nsf
+# from models import SynthesizerTrn256NSFsim as SynthesizerTrn256#hifigan_nsf
+# from models import SynthesizerTrn256NSFsimFlow as SynthesizerTrn256#hifigan_nsf
+
+
+from scipy.io import wavfile
+from fairseq import checkpoint_utils
+
+# import pyworld
+import librosa
+import torch.nn.functional as F
+import scipy.signal as signal
+
+# import torchcrepe
+from time import time as ttime
+
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+model_path = r"E:\codes\py39\vits_vc_gpu_train\hubert_base.pt" #
+print("load model(s) from {}".format(model_path))
+models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
+ [model_path],
+ suffix="",
+)
+model = models[0]
+model = model.to(device)
+model = model.half()
+model.eval()
+
+# net_g = SynthesizerTrn256(1025,32,192,192,768,2,6,3,0.1,"1", [3,7,11],[[1,3,5], [1,3,5], [1,3,5]],[10,10,2,2],512,[16,16,4,4],183,256,is_half=True)#hifigan#512#256
+# net_g = SynthesizerTrn256(1025,32,192,192,768,2,6,3,0.1,"1", [3,7,11],[[1,3,5], [1,3,5], [1,3,5]],[10,10,2,2],512,[16,16,4,4],109,256,is_half=True)#hifigan#512#256
+net_g = SynthesizerTrn256(
+ 1025,
+ 32,
+ 192,
+ 192,
+ 768,
+ 2,
+ 6,
+ 3,
+ 0,
+ "1",
+ [3, 7, 11],
+ [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
+ [10, 10, 2, 2],
+ 512,
+ [16, 16, 4, 4],
+ 183,
+ 256,
+ is_half=True,
+) # hifigan#512#256#no_dropout
+# net_g = SynthesizerTrn256(1025,32,192,192,768,2,3,3,0.1,"1", [3,7,11],[[1,3,5], [1,3,5], [1,3,5]],[10,10,2,2],512,[16,16,4,4],0)#ts3
+# net_g = SynthesizerTrn256(1025,32,192,192,768,2,6,3,0.1,"1", [3,7,11],[[1,3,5], [1,3,5], [1,3,5]],[10,10,2],512,[16,16,4],0)#hifigan-ps-sr
+#
+# net_g = SynthesizerTrn(1025, 32, 192, 192, 768, 2, 6, 3, 0.1, "1", [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]], [5,5], 512, [15,15], 0)#ms
+# net_g = SynthesizerTrn(1025, 32, 192, 192, 768, 2, 6, 3, 0.1, "1", [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]], [10,10], 512, [16,16], 0)#idwt2
+
+# weights=torch.load("infer/ft-mi_1k-noD.pt")
+# weights=torch.load("infer/ft-mi-freeze-vocoder-flow-enc_q_1k.pt")
+# weights=torch.load("infer/ft-mi-freeze-vocoder_true_1k.pt")
+# weights=torch.load("infer/ft-mi-sim1k.pt")
+weights = torch.load("infer/ft-mi-no_opt-no_dropout.pt")
+print(net_g.load_state_dict(weights, strict=True))
+
+net_g.eval().to(device)
+net_g.half()
+
+
+def get_f0(x, p_len, f0_up_key=0):
+ time_step = 160 / 16000 * 1000
+ f0_min = 50
+ f0_max = 1100
+ f0_mel_min = 1127 * np.log(1 + f0_min / 700)
+ f0_mel_max = 1127 * np.log(1 + f0_max / 700)
+
+ f0 = (
+ parselmouth.Sound(x, 16000)
+ .to_pitch_ac(
+ time_step=time_step / 1000,
+ voicing_threshold=0.6,
+ pitch_floor=f0_min,
+ pitch_ceiling=f0_max,
+ )
+ .selected_array["frequency"]
+ )
+
+ pad_size = (p_len - len(f0) + 1) // 2
+ if pad_size > 0 or p_len - len(f0) - pad_size > 0:
+ f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant")
+ f0 *= pow(2, f0_up_key / 12)
+ f0bak = f0.copy()
+
+ f0_mel = 1127 * np.log(1 + f0 / 700)
+ f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
+ f0_mel_max - f0_mel_min
+ ) + 1
+ f0_mel[f0_mel <= 1] = 1
+ f0_mel[f0_mel > 255] = 255
+ # f0_mel[f0_mel > 188] = 188
+    f0_coarse = np.rint(f0_mel).astype(int)  # np.int was removed in NumPy >= 1.24
+ return f0_coarse, f0bak
+
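+# note (sketch): for 16 kHz input with hop 160, p_len is roughly len(x) // 160;
+# get_f0 returns the mel-quantized coarse f0 (1..255) plus the raw f0 in Hz.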
+
+import faiss
+
+index = faiss.read_index("infer/added_IVF512_Flat_mi_baseline_src_feat.index")
+big_npy = np.load("infer/big_src_feature_mi.npy")
+ta0 = ta1 = ta2 = 0
+for idx, name in enumerate(
+ [
+ "冬之花clip1.wav",
+ ]
+): ##
+ wav_path = "todo-songs/%s" % name #
+ f0_up_key = -2 #
+ audio, sampling_rate = sf.read(wav_path)
+ if len(audio.shape) > 1:
+ audio = librosa.to_mono(audio.transpose(1, 0))
+ if sampling_rate != 16000:
+ audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
+
+ feats = torch.from_numpy(audio).float()
+ if feats.dim() == 2: # double channels
+ feats = feats.mean(-1)
+ assert feats.dim() == 1, feats.dim()
+ feats = feats.view(1, -1)
+ padding_mask = torch.BoolTensor(feats.shape).fill_(False)
+ inputs = {
+ "source": feats.half().to(device),
+ "padding_mask": padding_mask.to(device),
+ "output_layer": 9, # layer 9
+ }
+ if torch.cuda.is_available():
+ torch.cuda.synchronize()
+ t0 = ttime()
+ with torch.no_grad():
+ logits = model.extract_features(**inputs)
+ feats = model.final_proj(logits[0])
+
+    #### index-based retrieval of nearest source features
+ npy = feats[0].cpu().numpy().astype("float32")
+ D, I = index.search(npy, 1)
+ feats = (
+ torch.from_numpy(big_npy[I.squeeze()].astype("float16")).unsqueeze(0).to(device)
+ )
+
+ feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
+ if torch.cuda.is_available():
+ torch.cuda.synchronize()
+ t1 = ttime()
+    # p_len = min(feats.shape[1],10000,pitch.shape[0])  # too large values overflow GPU memory
+ p_len = min(feats.shape[1], 10000) #
+ pitch, pitchf = get_f0(audio, p_len, f0_up_key)
+    p_len = min(feats.shape[1], 10000, pitch.shape[0])  # too large values overflow GPU memory
+ if torch.cuda.is_available():
+ torch.cuda.synchronize()
+ t2 = ttime()
+ feats = feats[:, :p_len, :]
+ pitch = pitch[:p_len]
+ pitchf = pitchf[:p_len]
+ p_len = torch.LongTensor([p_len]).to(device)
+ pitch = torch.LongTensor(pitch).unsqueeze(0).to(device)
+ sid = torch.LongTensor([0]).to(device)
+ pitchf = torch.FloatTensor(pitchf).unsqueeze(0).to(device)
+ with torch.no_grad():
+ audio = (
+ net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0]
+ .data.cpu()
+ .float()
+ .numpy()
+ ) # nsf
+ if torch.cuda.is_available():
+ torch.cuda.synchronize()
+ t3 = ttime()
+ ta0 += t1 - t0
+ ta1 += t2 - t1
+ ta2 += t3 - t2
+ # wavfile.write("ft-mi_1k-index256-noD-%s.wav"%name, 40000, audio)##
+ # wavfile.write("ft-mi-freeze-vocoder-flow-enc_q_1k-%s.wav"%name, 40000, audio)##
+ # wavfile.write("ft-mi-sim1k-%s.wav"%name, 40000, audio)##
+ wavfile.write("ft-mi-no_opt-no_dropout-%s.wav" % name, 40000, audio) ##
+
+
+print(ta0, ta1, ta2) #
diff --git a/tools/infer/train-index-v2.py b/tools/infer/train-index-v2.py
new file mode 100644
index 000000000..67b6162b4
--- /dev/null
+++ b/tools/infer/train-index-v2.py
@@ -0,0 +1,72 @@
+"""
+格式:直接cid为自带的index位;aid放不下了,通过字典来查,反正就5w个
+"""
+import faiss, numpy as np, os
+from sklearn.cluster import MiniBatchKMeans
+import traceback
+from multiprocessing import cpu_count
+
+# ########### if starting from raw features, save them first
+n_cpu = 0
+if n_cpu == 0:
+ n_cpu = cpu_count()
+inp_root = r"./logs/anz/3_feature768"
+npys = []
+listdir_res = list(os.listdir(inp_root))
+for name in sorted(listdir_res):
+ phone = np.load("%s/%s" % (inp_root, name))
+ npys.append(phone)
+big_npy = np.concatenate(npys, 0)
+big_npy_idx = np.arange(big_npy.shape[0])
+np.random.shuffle(big_npy_idx)
+big_npy = big_npy[big_npy_idx]
+print(big_npy.shape) # (6196072, 192)#fp32#4.43G
+if big_npy.shape[0] > 2e5:
+ # if(1):
+    info = "Trying kmeans to reduce %s vectors to 10k centers." % big_npy.shape[0]
+ print(info)
+ try:
+ big_npy = (
+ MiniBatchKMeans(
+ n_clusters=10000,
+ verbose=True,
+ batch_size=256 * n_cpu,
+ compute_labels=False,
+ init="random",
+ )
+ .fit(big_npy)
+ .cluster_centers_
+ )
+ except:
+ info = traceback.format_exc()
+ print(info)
+
+np.save("tools/infer/big_src_feature_mi.npy", big_npy)
+
+##################train+add
+# big_npy=np.load("/bili-coeus/jupyter/jupyterhub-liujing04/vits_ch/inference_f0/big_src_feature_mi.npy")
+n_ivf = min(int(16 * np.sqrt(big_npy.shape[0])), big_npy.shape[0] // 39)
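+# note (sketch): this follows the common faiss heuristic of ~16*sqrt(N) IVF lists,
+# capped so each inverted list keeps at least ~39 training vectors.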
+index = faiss.index_factory(768, "IVF%s,Flat" % n_ivf) # mi
+print("training")
+index_ivf = faiss.extract_index_ivf(index) #
+index_ivf.nprobe = 1
+index.train(big_npy)
+faiss.write_index(
+ index, "tools/infer/trained_IVF%s_Flat_baseline_src_feat_v2.index" % (n_ivf)
+)
+print("adding")
+batch_size_add = 8192
+for i in range(0, big_npy.shape[0], batch_size_add):
+ index.add(big_npy[i : i + batch_size_add])
+faiss.write_index(
+ index, "tools/infer/added_IVF%s_Flat_mi_baseline_src_feat.index" % (n_ivf)
+)
+"""
+大小(都是FP32)
+big_src_feature 2.95G
+ (3098036, 256)
+big_emb 4.43G
+ (6196072, 192)
+big_emb双倍是因为求特征要repeat后再加pitch
+
+"""
diff --git a/tools/infer/train-index.py b/tools/infer/train-index.py
new file mode 100644
index 000000000..04396a224
--- /dev/null
+++ b/tools/infer/train-index.py
@@ -0,0 +1,36 @@
+"""
+格式:直接cid为自带的index位;aid放不下了,通过字典来查,反正就5w个
+"""
+import faiss, numpy as np, os
+
+# ########### if starting from raw features, save them first
+inp_root = r"E:\codes\py39\dataset\mi\2-co256"
+npys = []
+for name in sorted(list(os.listdir(inp_root))):
+ phone = np.load("%s/%s" % (inp_root, name))
+ npys.append(phone)
+big_npy = np.concatenate(npys, 0)
+print(big_npy.shape) # (6196072, 192)#fp32#4.43G
+np.save("infer/big_src_feature_mi.npy", big_npy)
+
+##################train+add
+# big_npy=np.load("/bili-coeus/jupyter/jupyterhub-liujing04/vits_ch/inference_f0/big_src_feature_mi.npy")
+print(big_npy.shape)
+index = faiss.index_factory(256, "IVF512,Flat") # mi
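+# note (sketch): "IVF512,Flat" builds an inverted-file index with 512 coarse centroids
+# over raw (uncompressed) 256-dim vectors; nprobe below trades recall for speed.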
+print("training")
+index_ivf = faiss.extract_index_ivf(index) #
+index_ivf.nprobe = 9
+index.train(big_npy)
+faiss.write_index(index, "infer/trained_IVF512_Flat_mi_baseline_src_feat.index")
+print("adding")
+index.add(big_npy)
+faiss.write_index(index, "infer/added_IVF512_Flat_mi_baseline_src_feat.index")
+"""
+大小(都是FP32)
+big_src_feature 2.95G
+ (3098036, 256)
+big_emb 4.43G
+ (6196072, 192)
+big_emb双倍是因为求特征要repeat后再加pitch
+
+"""
diff --git a/tools/infer/trans_weights.py b/tools/infer/trans_weights.py
new file mode 100644
index 000000000..da0759627
--- /dev/null
+++ b/tools/infer/trans_weights.py
@@ -0,0 +1,16 @@
+import torch, pdb
+
+# a=torch.load(r"E:\codes\py39\vits_vc_gpu_train\logs\ft-mi-suc\G_1000.pth")["model"]#sim_nsf#
+# a=torch.load(r"E:\codes\py39\vits_vc_gpu_train\logs\ft-mi-freeze-vocoder-flow-enc_q\G_1000.pth")["model"]#sim_nsf#
+# a=torch.load(r"E:\codes\py39\vits_vc_gpu_train\logs\ft-mi-freeze-vocoder\G_1000.pth")["model"]#sim_nsf#
+# a=torch.load(r"E:\codes\py39\vits_vc_gpu_train\logs\ft-mi-test\G_1000.pth")["model"]#sim_nsf#
+a = torch.load(
+ r"E:\codes\py39\vits_vc_gpu_train\logs\ft-mi-no_opt-no_dropout\G_1000.pth"
+)[
+ "model"
+] # sim_nsf#
+for key in a.keys():
+ a[key] = a[key].half()
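+# casting every tensor to fp16 roughly halves checkpoint size for inference-only use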
+# torch.save(a,"ft-mi-freeze-vocoder_true_1k.pt")#
+# torch.save(a,"ft-mi-sim1k.pt")#
+torch.save(a, "ft-mi-no_opt-no_dropout.pt") #
diff --git a/train/cmd.txt b/train/cmd.txt
new file mode 100644
index 000000000..e4b895e54
--- /dev/null
+++ b/train/cmd.txt
@@ -0,0 +1 @@
+python train_nsf_sim_cache_sid.py -c configs/mi_mix40k_nsf_co256_cs1sid_ms2048.json -m ft-mi
\ No newline at end of file
diff --git a/train/data_utils.py b/train/data_utils.py
new file mode 100644
index 000000000..71c0eff18
--- /dev/null
+++ b/train/data_utils.py
@@ -0,0 +1,512 @@
+import os, traceback
+import numpy as np
+import torch
+import torch.utils.data
+
+from mel_processing import spectrogram_torch
+from utils import load_wav_to_torch, load_filepaths_and_text
+
+
+class TextAudioLoaderMultiNSFsid(torch.utils.data.Dataset):
+ """
+ 1) loads audio, text pairs
+ 2) normalizes text and converts them to sequences of integers
+ 3) computes spectrograms from audio files.
+ """
+
+ def __init__(self, audiopaths_and_text, hparams):
+ self.audiopaths_and_text = load_filepaths_and_text(audiopaths_and_text)
+ self.max_wav_value = hparams.max_wav_value
+ self.sampling_rate = hparams.sampling_rate
+ self.filter_length = hparams.filter_length
+ self.hop_length = hparams.hop_length
+ self.win_length = hparams.win_length
+ self.min_text_len = getattr(hparams, "min_text_len", 1)
+ self.max_text_len = getattr(hparams, "max_text_len", 5000)
+ self._filter()
+
+ def _filter(self):
+ """
+ Filter text & store spec lengths
+ """
+ # Store spectrogram lengths for Bucketing
+ # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)
+ # spec_length = wav_length // hop_length
+ audiopaths_and_text_new = []
+ lengths = []
+ for audiopath, text, pitch, pitchf, dv in self.audiopaths_and_text:
+ if self.min_text_len <= len(text) and len(text) <= self.max_text_len:
+ audiopaths_and_text_new.append([audiopath, text, pitch, pitchf, dv])
+ lengths.append(os.path.getsize(audiopath) // (3 * self.hop_length))
+ self.audiopaths_and_text = audiopaths_and_text_new
+ self.lengths = lengths
+
+ def get_sid(self, sid):
+ sid = torch.LongTensor([int(sid)])
+ return sid
+
+ def get_audio_text_pair(self, audiopath_and_text):
+ # separate filename and text
+ file = audiopath_and_text[0]
+ phone = audiopath_and_text[1]
+ pitch = audiopath_and_text[2]
+ pitchf = audiopath_and_text[3]
+ dv = audiopath_and_text[4]
+
+ phone, pitch, pitchf = self.get_labels(phone, pitch, pitchf)
+ spec, wav = self.get_audio(file)
+ dv = self.get_sid(dv)
+
+ len_phone = phone.size()[0]
+ len_spec = spec.size()[-1]
+ # print(123,phone.shape,pitch.shape,spec.shape)
+ if len_phone != len_spec:
+ len_min = min(len_phone, len_spec)
+ # amor
+ len_wav = len_min * self.hop_length
+
+ spec = spec[:, :len_min]
+ wav = wav[:, :len_wav]
+
+ phone = phone[:len_min, :]
+ pitch = pitch[:len_min]
+ pitchf = pitchf[:len_min]
+
+ return (spec, wav, phone, pitch, pitchf, dv)
+
+ def get_labels(self, phone, pitch, pitchf):
+ phone = np.load(phone)
+ phone = np.repeat(phone, 2, axis=0)
+ pitch = np.load(pitch)
+ pitchf = np.load(pitchf)
+ n_num = min(phone.shape[0], 900) # DistributedBucketSampler
+ # print(234,phone.shape,pitch.shape)
+ phone = phone[:n_num, :]
+ pitch = pitch[:n_num]
+ pitchf = pitchf[:n_num]
+ phone = torch.FloatTensor(phone)
+ pitch = torch.LongTensor(pitch)
+ pitchf = torch.FloatTensor(pitchf)
+ return phone, pitch, pitchf
+
+ def get_audio(self, filename):
+ audio, sampling_rate = load_wav_to_torch(filename)
+ if sampling_rate != self.sampling_rate:
+ raise ValueError(
+ "{} SR doesn't match target {} SR".format(
+ sampling_rate, self.sampling_rate
+ )
+ )
+ audio_norm = audio
+ # audio_norm = audio / self.max_wav_value
+ # audio_norm = audio / np.abs(audio).max()
+
+ audio_norm = audio_norm.unsqueeze(0)
+ spec_filename = filename.replace(".wav", ".spec.pt")
+ if os.path.exists(spec_filename):
+ try:
+ spec = torch.load(spec_filename)
+ except:
+ print(spec_filename, traceback.format_exc())
+ spec = spectrogram_torch(
+ audio_norm,
+ self.filter_length,
+ self.sampling_rate,
+ self.hop_length,
+ self.win_length,
+ center=False,
+ )
+ spec = torch.squeeze(spec, 0)
+ torch.save(spec, spec_filename, _use_new_zipfile_serialization=False)
+ else:
+ spec = spectrogram_torch(
+ audio_norm,
+ self.filter_length,
+ self.sampling_rate,
+ self.hop_length,
+ self.win_length,
+ center=False,
+ )
+ spec = torch.squeeze(spec, 0)
+ torch.save(spec, spec_filename, _use_new_zipfile_serialization=False)
+ return spec, audio_norm
+
+ def __getitem__(self, index):
+ return self.get_audio_text_pair(self.audiopaths_and_text[index])
+
+ def __len__(self):
+ return len(self.audiopaths_and_text)
+
+
+class TextAudioCollateMultiNSFsid:
+ """Zero-pads model inputs and targets"""
+
+ def __init__(self, return_ids=False):
+ self.return_ids = return_ids
+
+ def __call__(self, batch):
+        """Collates a training batch from normalized text and audio.
+ PARAMS
+ ------
+ batch: [text_normalized, spec_normalized, wav_normalized]
+ """
+ # Right zero-pad all one-hot text sequences to max input length
+ _, ids_sorted_decreasing = torch.sort(
+ torch.LongTensor([x[0].size(1) for x in batch]), dim=0, descending=True
+ )
+
+ max_spec_len = max([x[0].size(1) for x in batch])
+ max_wave_len = max([x[1].size(1) for x in batch])
+ spec_lengths = torch.LongTensor(len(batch))
+ wave_lengths = torch.LongTensor(len(batch))
+ spec_padded = torch.FloatTensor(len(batch), batch[0][0].size(0), max_spec_len)
+ wave_padded = torch.FloatTensor(len(batch), 1, max_wave_len)
+ spec_padded.zero_()
+ wave_padded.zero_()
+
+ max_phone_len = max([x[2].size(0) for x in batch])
+ phone_lengths = torch.LongTensor(len(batch))
+ phone_padded = torch.FloatTensor(
+ len(batch), max_phone_len, batch[0][2].shape[1]
+ ) # (spec, wav, phone, pitch)
+ pitch_padded = torch.LongTensor(len(batch), max_phone_len)
+ pitchf_padded = torch.FloatTensor(len(batch), max_phone_len)
+ phone_padded.zero_()
+ pitch_padded.zero_()
+ pitchf_padded.zero_()
+ # dv = torch.FloatTensor(len(batch), 256)#gin=256
+ sid = torch.LongTensor(len(batch))
+
+ for i in range(len(ids_sorted_decreasing)):
+ row = batch[ids_sorted_decreasing[i]]
+
+ spec = row[0]
+ spec_padded[i, :, : spec.size(1)] = spec
+ spec_lengths[i] = spec.size(1)
+
+ wave = row[1]
+ wave_padded[i, :, : wave.size(1)] = wave
+ wave_lengths[i] = wave.size(1)
+
+ phone = row[2]
+ phone_padded[i, : phone.size(0), :] = phone
+ phone_lengths[i] = phone.size(0)
+
+ pitch = row[3]
+ pitch_padded[i, : pitch.size(0)] = pitch
+ pitchf = row[4]
+ pitchf_padded[i, : pitchf.size(0)] = pitchf
+
+ # dv[i] = row[5]
+ sid[i] = row[5]
+
+ return (
+ phone_padded,
+ phone_lengths,
+ pitch_padded,
+ pitchf_padded,
+ spec_padded,
+ spec_lengths,
+ wave_padded,
+ wave_lengths,
+ # dv
+ sid,
+ )
+
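+# Example (a minimal sketch, assuming `hparams` carries the fields read in __init__):
+#   dataset = TextAudioLoaderMultiNSFsid("filelist.txt", hparams)
+#   loader = torch.utils.data.DataLoader(
+#       dataset, batch_size=8, collate_fn=TextAudioCollateMultiNSFsid())
+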
+
+class TextAudioLoader(torch.utils.data.Dataset):
+ """
+ 1) loads audio, text pairs
+ 2) normalizes text and converts them to sequences of integers
+ 3) computes spectrograms from audio files.
+ """
+
+ def __init__(self, audiopaths_and_text, hparams):
+ self.audiopaths_and_text = load_filepaths_and_text(audiopaths_and_text)
+ self.max_wav_value = hparams.max_wav_value
+ self.sampling_rate = hparams.sampling_rate
+ self.filter_length = hparams.filter_length
+ self.hop_length = hparams.hop_length
+ self.win_length = hparams.win_length
+ self.min_text_len = getattr(hparams, "min_text_len", 1)
+ self.max_text_len = getattr(hparams, "max_text_len", 5000)
+ self._filter()
+
+ def _filter(self):
+ """
+ Filter text & store spec lengths
+ """
+ # Store spectrogram lengths for Bucketing
+ # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)
+ # spec_length = wav_length // hop_length
+ audiopaths_and_text_new = []
+ lengths = []
+ for audiopath, text, dv in self.audiopaths_and_text:
+ if self.min_text_len <= len(text) and len(text) <= self.max_text_len:
+ audiopaths_and_text_new.append([audiopath, text, dv])
+ lengths.append(os.path.getsize(audiopath) // (3 * self.hop_length))
+ self.audiopaths_and_text = audiopaths_and_text_new
+ self.lengths = lengths
+
+ def get_sid(self, sid):
+ sid = torch.LongTensor([int(sid)])
+ return sid
+
+ def get_audio_text_pair(self, audiopath_and_text):
+ # separate filename and text
+ file = audiopath_and_text[0]
+ phone = audiopath_and_text[1]
+ dv = audiopath_and_text[2]
+
+ phone = self.get_labels(phone)
+ spec, wav = self.get_audio(file)
+ dv = self.get_sid(dv)
+
+ len_phone = phone.size()[0]
+ len_spec = spec.size()[-1]
+ if len_phone != len_spec:
+ len_min = min(len_phone, len_spec)
+ len_wav = len_min * self.hop_length
+ spec = spec[:, :len_min]
+ wav = wav[:, :len_wav]
+ phone = phone[:len_min, :]
+ return (spec, wav, phone, dv)
+
+ def get_labels(self, phone):
+ phone = np.load(phone)
+ phone = np.repeat(phone, 2, axis=0)
+ n_num = min(phone.shape[0], 900) # DistributedBucketSampler
+ phone = phone[:n_num, :]
+ phone = torch.FloatTensor(phone)
+ return phone
+
+ def get_audio(self, filename):
+ audio, sampling_rate = load_wav_to_torch(filename)
+ if sampling_rate != self.sampling_rate:
+ raise ValueError(
+ "{} SR doesn't match target {} SR".format(
+ sampling_rate, self.sampling_rate
+ )
+ )
+ audio_norm = audio
+ # audio_norm = audio / self.max_wav_value
+ # audio_norm = audio / np.abs(audio).max()
+
+ audio_norm = audio_norm.unsqueeze(0)
+ spec_filename = filename.replace(".wav", ".spec.pt")
+ if os.path.exists(spec_filename):
+ try:
+ spec = torch.load(spec_filename)
+ except:
+ print(spec_filename, traceback.format_exc())
+ spec = spectrogram_torch(
+ audio_norm,
+ self.filter_length,
+ self.sampling_rate,
+ self.hop_length,
+ self.win_length,
+ center=False,
+ )
+ spec = torch.squeeze(spec, 0)
+ torch.save(spec, spec_filename, _use_new_zipfile_serialization=False)
+ else:
+ spec = spectrogram_torch(
+ audio_norm,
+ self.filter_length,
+ self.sampling_rate,
+ self.hop_length,
+ self.win_length,
+ center=False,
+ )
+ spec = torch.squeeze(spec, 0)
+ torch.save(spec, spec_filename, _use_new_zipfile_serialization=False)
+ return spec, audio_norm
+
+ def __getitem__(self, index):
+ return self.get_audio_text_pair(self.audiopaths_and_text[index])
+
+ def __len__(self):
+ return len(self.audiopaths_and_text)
+
+
+class TextAudioCollate:
+ """Zero-pads model inputs and targets"""
+
+ def __init__(self, return_ids=False):
+ self.return_ids = return_ids
+
+ def __call__(self, batch):
+ """Collate's training batch from normalized text and aduio
+ PARAMS
+ ------
+ batch: [text_normalized, spec_normalized, wav_normalized]
+ """
+ # Right zero-pad all one-hot text sequences to max input length
+ _, ids_sorted_decreasing = torch.sort(
+ torch.LongTensor([x[0].size(1) for x in batch]), dim=0, descending=True
+ )
+
+ max_spec_len = max([x[0].size(1) for x in batch])
+ max_wave_len = max([x[1].size(1) for x in batch])
+ spec_lengths = torch.LongTensor(len(batch))
+ wave_lengths = torch.LongTensor(len(batch))
+ spec_padded = torch.FloatTensor(len(batch), batch[0][0].size(0), max_spec_len)
+ wave_padded = torch.FloatTensor(len(batch), 1, max_wave_len)
+ spec_padded.zero_()
+ wave_padded.zero_()
+
+ max_phone_len = max([x[2].size(0) for x in batch])
+ phone_lengths = torch.LongTensor(len(batch))
+ phone_padded = torch.FloatTensor(
+ len(batch), max_phone_len, batch[0][2].shape[1]
+ )
+ phone_padded.zero_()
+ sid = torch.LongTensor(len(batch))
+
+ for i in range(len(ids_sorted_decreasing)):
+ row = batch[ids_sorted_decreasing[i]]
+
+ spec = row[0]
+ spec_padded[i, :, : spec.size(1)] = spec
+ spec_lengths[i] = spec.size(1)
+
+ wave = row[1]
+ wave_padded[i, :, : wave.size(1)] = wave
+ wave_lengths[i] = wave.size(1)
+
+ phone = row[2]
+ phone_padded[i, : phone.size(0), :] = phone
+ phone_lengths[i] = phone.size(0)
+
+ sid[i] = row[3]
+
+ return (
+ phone_padded,
+ phone_lengths,
+ spec_padded,
+ spec_lengths,
+ wave_padded,
+ wave_lengths,
+ sid,
+ )
+
+
+class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):
+ """
+ Maintain similar input lengths in a batch.
+ Length groups are specified by boundaries.
+    Ex) boundaries = [b1, b2, b3] -> every batch lies entirely within {x | b1 < length(x) <= b2} or entirely within {x | b2 < length(x) <= b3}.
+
+ It removes samples which are not included in the boundaries.
+    Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 is discarded.
+ """
+
+ def __init__(
+ self,
+ dataset,
+ batch_size,
+ boundaries,
+ num_replicas=None,
+ rank=None,
+ shuffle=True,
+ ):
+ super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
+ self.lengths = dataset.lengths
+ self.batch_size = batch_size
+ self.boundaries = boundaries
+
+ self.buckets, self.num_samples_per_bucket = self._create_buckets()
+ self.total_size = sum(self.num_samples_per_bucket)
+ self.num_samples = self.total_size // self.num_replicas
+
+ def _create_buckets(self):
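+        # Assign each sample to its length bucket, drop empty buckets, then pad
+        # every bucket to a multiple of num_replicas * batch_size.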
+ buckets = [[] for _ in range(len(self.boundaries) - 1)]
+ for i in range(len(self.lengths)):
+ length = self.lengths[i]
+ idx_bucket = self._bisect(length)
+ if idx_bucket != -1:
+ buckets[idx_bucket].append(i)
+
+        for i in range(len(buckets) - 1, -1, -1):  # iterate in reverse so pops don't shift pending indices
+ if len(buckets[i]) == 0:
+ buckets.pop(i)
+ self.boundaries.pop(i + 1)
+
+ num_samples_per_bucket = []
+ for i in range(len(buckets)):
+ len_bucket = len(buckets[i])
+ total_batch_size = self.num_replicas * self.batch_size
+ rem = (
+ total_batch_size - (len_bucket % total_batch_size)
+ ) % total_batch_size
+ num_samples_per_bucket.append(len_bucket + rem)
+ return buckets, num_samples_per_bucket
+
+ def __iter__(self):
+ # deterministically shuffle based on epoch
+ g = torch.Generator()
+ g.manual_seed(self.epoch)
+
+ indices = []
+ if self.shuffle:
+ for bucket in self.buckets:
+ indices.append(torch.randperm(len(bucket), generator=g).tolist())
+ else:
+ for bucket in self.buckets:
+ indices.append(list(range(len(bucket))))
+
+ batches = []
+ for i in range(len(self.buckets)):
+ bucket = self.buckets[i]
+ len_bucket = len(bucket)
+ ids_bucket = indices[i]
+ num_samples_bucket = self.num_samples_per_bucket[i]
+
+ # add extra samples to make it evenly divisible
+ rem = num_samples_bucket - len_bucket
+ ids_bucket = (
+ ids_bucket
+ + ids_bucket * (rem // len_bucket)
+ + ids_bucket[: (rem % len_bucket)]
+ )
+
+ # subsample
+ ids_bucket = ids_bucket[self.rank :: self.num_replicas]
+
+ # batching
+ for j in range(len(ids_bucket) // self.batch_size):
+ batch = [
+ bucket[idx]
+ for idx in ids_bucket[
+ j * self.batch_size : (j + 1) * self.batch_size
+ ]
+ ]
+ batches.append(batch)
+
+ if self.shuffle:
+ batch_ids = torch.randperm(len(batches), generator=g).tolist()
+ batches = [batches[i] for i in batch_ids]
+ self.batches = batches
+
+ assert len(self.batches) * self.batch_size == self.num_samples
+ return iter(self.batches)
+
+ def _bisect(self, x, lo=0, hi=None):
+ if hi is None:
+ hi = len(self.boundaries) - 1
+
+ if hi > lo:
+ mid = (hi + lo) // 2
+ if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:
+ return mid
+ elif x <= self.boundaries[mid]:
+ return self._bisect(x, lo, mid)
+ else:
+ return self._bisect(x, mid + 1, hi)
+ else:
+ return -1
+
+ def __len__(self):
+ return self.num_samples // self.batch_size
diff --git a/train/losses.py b/train/losses.py
new file mode 100644
index 000000000..b89038f14
--- /dev/null
+++ b/train/losses.py
@@ -0,0 +1,59 @@
+import torch
+from torch.nn import functional as F
+
+
+def feature_loss(fmap_r, fmap_g):
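+    # L1 feature-matching loss between real (detached) and generated discriminator
+    # feature maps, summed over all discriminators and layers.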
+ loss = 0
+ for dr, dg in zip(fmap_r, fmap_g):
+ for rl, gl in zip(dr, dg):
+ rl = rl.float().detach()
+ gl = gl.float()
+ loss += torch.mean(torch.abs(rl - gl))
+
+ return loss * 2
+
+
+def discriminator_loss(disc_real_outputs, disc_generated_outputs):
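+    # LSGAN discriminator loss: real outputs are pushed toward 1, generated toward 0.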
+ loss = 0
+ r_losses = []
+ g_losses = []
+ for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
+ dr = dr.float()
+ dg = dg.float()
+ r_loss = torch.mean((1 - dr) ** 2)
+ g_loss = torch.mean(dg**2)
+ loss += r_loss + g_loss
+ r_losses.append(r_loss.item())
+ g_losses.append(g_loss.item())
+
+ return loss, r_losses, g_losses
+
+
+def generator_loss(disc_outputs):
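+    # LSGAN generator loss: generated outputs are pushed toward 1.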
+ loss = 0
+ gen_losses = []
+ for dg in disc_outputs:
+ dg = dg.float()
+ l = torch.mean((1 - dg) ** 2)
+ gen_losses.append(l)
+ loss += l
+
+ return loss, gen_losses
+
+
+def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):
+ """
+ z_p, logs_q: [b, h, t_t]
+ m_p, logs_p: [b, h, t_t]
+ """
+ z_p = z_p.float()
+ logs_q = logs_q.float()
+ m_p = m_p.float()
+ logs_p = logs_p.float()
+ z_mask = z_mask.float()
+
+ kl = logs_p - logs_q - 0.5
+ kl += 0.5 * ((z_p - m_p) ** 2) * torch.exp(-2.0 * logs_p)
+ kl = torch.sum(kl * z_mask)
+ l = kl / torch.sum(z_mask)
+ return l
diff --git a/train/mel_processing.py b/train/mel_processing.py
new file mode 100644
index 000000000..1c871ab6b
--- /dev/null
+++ b/train/mel_processing.py
@@ -0,0 +1,130 @@
+import torch
+import torch.utils.data
+from librosa.filters import mel as librosa_mel_fn
+
+
+MAX_WAV_VALUE = 32768.0
+
+
+def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
+ """
+ PARAMS
+ ------
+ C: compression factor
+ """
+ return torch.log(torch.clamp(x, min=clip_val) * C)
+
+
+def dynamic_range_decompression_torch(x, C=1):
+ """
+ PARAMS
+ ------
+ C: compression factor used to compress
+ """
+ return torch.exp(x) / C
+
+
+def spectral_normalize_torch(magnitudes):
+ return dynamic_range_compression_torch(magnitudes)
+
+
+def spectral_de_normalize_torch(magnitudes):
+ return dynamic_range_decompression_torch(magnitudes)
+
+
+# Reusable banks
+mel_basis = {}
+hann_window = {}
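+# Both caches are keyed by parameter/dtype/device strings, so each (dtype, device)
+# combination reuses its own Hann window and mel filterbank.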
+
+
+def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
+ """Convert waveform into Linear-frequency Linear-amplitude spectrogram.
+
+ Args:
+ y :: (B, T) - Audio waveforms
+ n_fft
+ sampling_rate
+ hop_size
+ win_size
+ center
+ Returns:
+ :: (B, Freq, Frame) - Linear-frequency Linear-amplitude spectrogram
+ """
+ # Validation
+ if torch.min(y) < -1.07:
+ print("min value is ", torch.min(y))
+ if torch.max(y) > 1.07:
+ print("max value is ", torch.max(y))
+
+ # Window - Cache if needed
+ global hann_window
+ dtype_device = str(y.dtype) + "_" + str(y.device)
+ wnsize_dtype_device = str(win_size) + "_" + dtype_device
+ if wnsize_dtype_device not in hann_window:
+ hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(
+ dtype=y.dtype, device=y.device
+ )
+
+ # Padding
+ y = torch.nn.functional.pad(
+ y.unsqueeze(1),
+ (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)),
+ mode="reflect",
+ )
+ y = y.squeeze(1)
+
+ # Complex Spectrogram :: (B, T) -> (B, Freq, Frame, RealComplex=2)
+ spec = torch.stft(
+ y,
+ n_fft,
+ hop_length=hop_size,
+ win_length=win_size,
+ window=hann_window[wnsize_dtype_device],
+ center=center,
+ pad_mode="reflect",
+ normalized=False,
+ onesided=True,
+ return_complex=False,
+ )
+
+ # Linear-frequency Linear-amplitude spectrogram :: (B, Freq, Frame, RealComplex=2) -> (B, Freq, Frame)
+ spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
+ return spec
+
+
+def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
+ # MelBasis - Cache if needed
+ global mel_basis
+ dtype_device = str(spec.dtype) + "_" + str(spec.device)
+ fmax_dtype_device = str(fmax) + "_" + dtype_device
+ if fmax_dtype_device not in mel_basis:
+ mel = librosa_mel_fn(
+ sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax
+ )
+ mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(
+ dtype=spec.dtype, device=spec.device
+ )
+
+ # Mel-frequency Log-amplitude spectrogram :: (B, Freq=num_mels, Frame)
+ melspec = torch.matmul(mel_basis[fmax_dtype_device], spec)
+ melspec = spectral_normalize_torch(melspec)
+ return melspec
+
+
+def mel_spectrogram_torch(
+ y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False
+):
+ """Convert waveform into Mel-frequency Log-amplitude spectrogram.
+
+ Args:
+ y :: (B, T) - Waveforms
+ Returns:
+ melspec :: (B, Freq, Frame) - Mel-frequency Log-amplitude spectrogram
+ """
+ # Linear-frequency Linear-amplitude spectrogram :: (B, T) -> (B, Freq, Frame)
+ spec = spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center)
+
+ # Mel-frequency Log-amplitude spectrogram :: (B, Freq, Frame) -> (B, Freq=num_mels, Frame)
+ melspec = spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax)
+
+ return melspec
diff --git a/train/process_ckpt.py b/train/process_ckpt.py
new file mode 100644
index 000000000..19de5f963
--- /dev/null
+++ b/train/process_ckpt.py
@@ -0,0 +1,259 @@
+import torch, traceback, os, pdb, sys
+
+now_dir = os.getcwd()
+sys.path.append(now_dir)
+from collections import OrderedDict
+from i18n import I18nAuto
+
+i18n = I18nAuto()
+
+
+def savee(ckpt, sr, if_f0, name, epoch, version, hps):
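+    # Save an inference-only checkpoint: drop the training-only posterior encoder
+    # (enc_q), cast weights to fp16, and bundle the model config and metadata.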
+ try:
+ opt = OrderedDict()
+ opt["weight"] = {}
+ for key in ckpt.keys():
+ if "enc_q" in key:
+ continue
+ opt["weight"][key] = ckpt[key].half()
+ opt["config"] = [
+ hps.data.filter_length // 2 + 1,
+ 32,
+ hps.model.inter_channels,
+ hps.model.hidden_channels,
+ hps.model.filter_channels,
+ hps.model.n_heads,
+ hps.model.n_layers,
+ hps.model.kernel_size,
+ hps.model.p_dropout,
+ hps.model.resblock,
+ hps.model.resblock_kernel_sizes,
+ hps.model.resblock_dilation_sizes,
+ hps.model.upsample_rates,
+ hps.model.upsample_initial_channel,
+ hps.model.upsample_kernel_sizes,
+ hps.model.spk_embed_dim,
+ hps.model.gin_channels,
+ hps.data.sampling_rate,
+ ]
+ opt["info"] = "%sepoch" % epoch
+ opt["sr"] = sr
+ opt["f0"] = if_f0
+ opt["version"] = version
+ torch.save(opt, "weights/%s.pth" % name)
+ return "Success."
+ except:
+ return traceback.format_exc()
+
+
+def show_info(path):
+ try:
+ a = torch.load(path, map_location="cpu")
+ return "模型信息:%s\n采样率:%s\n模型是否输入音高引导:%s\n版本:%s" % (
+ a.get("info", "None"),
+ a.get("sr", "None"),
+ a.get("f0", "None"),
+ a.get("version", "None"),
+ )
+ except:
+ return traceback.format_exc()
+
+
+def extract_small_model(path, name, sr, if_f0, info, version):
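+    # Rebuild a small inference-only .pth from a full training checkpoint,
+    # reconstructing the model config from the given sample rate and version.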
+ try:
+ ckpt = torch.load(path, map_location="cpu")
+ if "model" in ckpt:
+ ckpt = ckpt["model"]
+ opt = OrderedDict()
+ opt["weight"] = {}
+ for key in ckpt.keys():
+ if "enc_q" in key:
+ continue
+ opt["weight"][key] = ckpt[key].half()
+ if sr == "40k":
+ opt["config"] = [
+ 1025,
+ 32,
+ 192,
+ 192,
+ 768,
+ 2,
+ 6,
+ 3,
+ 0,
+ "1",
+ [3, 7, 11],
+ [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
+ [10, 10, 2, 2],
+ 512,
+ [16, 16, 4, 4],
+ 109,
+ 256,
+ 40000,
+ ]
+ elif sr == "48k":
+ if version == "v1":
+ opt["config"] = [
+ 1025,
+ 32,
+ 192,
+ 192,
+ 768,
+ 2,
+ 6,
+ 3,
+ 0,
+ "1",
+ [3, 7, 11],
+ [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
+ [10, 6, 2, 2, 2],
+ 512,
+ [16, 16, 4, 4, 4],
+ 109,
+ 256,
+ 48000,
+ ]
+ else:
+ opt["config"] = [
+ 1025,
+ 32,
+ 192,
+ 192,
+ 768,
+ 2,
+ 6,
+ 3,
+ 0,
+ "1",
+ [3, 7, 11],
+ [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
+ [12, 10, 2, 2],
+ 512,
+ [24, 20, 4, 4],
+ 109,
+ 256,
+ 48000,
+ ]
+ elif sr == "32k":
+ if version == "v1":
+ opt["config"] = [
+ 513,
+ 32,
+ 192,
+ 192,
+ 768,
+ 2,
+ 6,
+ 3,
+ 0,
+ "1",
+ [3, 7, 11],
+ [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
+ [10, 4, 2, 2, 2],
+ 512,
+ [16, 16, 4, 4, 4],
+ 109,
+ 256,
+ 32000,
+ ]
+ else:
+ opt["config"] = [
+ 513,
+ 32,
+ 192,
+ 192,
+ 768,
+ 2,
+ 6,
+ 3,
+ 0,
+ "1",
+ [3, 7, 11],
+ [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
+ [10, 8, 2, 2],
+ 512,
+ [20, 16, 4, 4],
+ 109,
+ 256,
+ 32000,
+ ]
+ if info == "":
+ info = "Extracted model."
+ opt["info"] = info
+ opt["version"] = version
+ opt["sr"] = sr
+ opt["f0"] = int(if_f0)
+ torch.save(opt, "weights/%s.pth" % name)
+ return "Success."
+ except:
+ return traceback.format_exc()
+
+
+def change_info(path, info, name):
+ try:
+ ckpt = torch.load(path, map_location="cpu")
+ ckpt["info"] = info
+ if name == "":
+ name = os.path.basename(path)
+ torch.save(ckpt, "weights/%s" % name)
+ return "Success."
+ except:
+ return traceback.format_exc()
+
+
+def merge(path1, path2, alpha1, sr, f0, info, name, version):
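+    # Linearly interpolate two models' weights: alpha1 * model1 + (1 - alpha1) * model2.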
+ try:
+
+ def extract(ckpt):
+ a = ckpt["model"]
+ opt = OrderedDict()
+ opt["weight"] = {}
+ for key in a.keys():
+ if "enc_q" in key:
+ continue
+ opt["weight"][key] = a[key]
+ return opt
+
+ ckpt1 = torch.load(path1, map_location="cpu")
+ ckpt2 = torch.load(path2, map_location="cpu")
+ cfg = ckpt1["config"]
+ if "model" in ckpt1:
+ ckpt1 = extract(ckpt1)
+ else:
+ ckpt1 = ckpt1["weight"]
+ if "model" in ckpt2:
+ ckpt2 = extract(ckpt2)
+ else:
+ ckpt2 = ckpt2["weight"]
+ if sorted(list(ckpt1.keys())) != sorted(list(ckpt2.keys())):
+ return "Fail to merge the models. The model architectures are not the same."
+ opt = OrderedDict()
+ opt["weight"] = {}
+ for key in ckpt1.keys():
+ if key == "emb_g.weight" and ckpt1[key].shape != ckpt2[key].shape:
+ min_shape0 = min(ckpt1[key].shape[0], ckpt2[key].shape[0])
+ opt["weight"][key] = (
+ alpha1 * (ckpt1[key][:min_shape0].float())
+ + (1 - alpha1) * (ckpt2[key][:min_shape0].float())
+ ).half()
+ else:
+ opt["weight"][key] = (
+ alpha1 * (ckpt1[key].float()) + (1 - alpha1) * (ckpt2[key].float())
+ ).half()
+ opt["config"] = cfg
+ """
+ if(sr=="40k"):opt["config"] = [1025, 32, 192, 192, 768, 2, 6, 3, 0, "1", [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]], [10, 10, 2, 2], 512, [16, 16, 4, 4,4], 109, 256, 40000]
+ elif(sr=="48k"):opt["config"] = [1025, 32, 192, 192, 768, 2, 6, 3, 0, "1", [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]], [10,6,2,2,2], 512, [16, 16, 4, 4], 109, 256, 48000]
+ elif(sr=="32k"):opt["config"] = [513, 32, 192, 192, 768, 2, 6, 3, 0, "1", [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]], [10, 4, 2, 2, 2], 512, [16, 16, 4, 4,4], 109, 256, 32000]
+ """
+ opt["sr"] = sr
+ opt["f0"] = 1 if f0 else 0
+ opt["version"] = version
+ opt["info"] = info
+ torch.save(opt, "weights/%s.pth" % name)
+ return "Success."
+ except:
+ return traceback.format_exc()
diff --git a/train/utils.py b/train/utils.py
new file mode 100644
index 000000000..aae833b08
--- /dev/null
+++ b/train/utils.py
@@ -0,0 +1,500 @@
+import os, traceback
+import glob
+import sys
+import argparse
+import logging
+import json
+import subprocess
+import numpy as np
+from scipy.io.wavfile import read
+import torch
+
+MATPLOTLIB_FLAG = False
+
+logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
+logger = logging
+
+
+def load_checkpoint_d(checkpoint_path, combd, sbd, optimizer=None, load_opt=1):
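+    # Tolerantly load the combined (combd) and sub-band (sbd) discriminators:
+    # keys that are missing or shape-mismatched keep their freshly initialized values.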
+ assert os.path.isfile(checkpoint_path)
+ checkpoint_dict = torch.load(checkpoint_path, map_location="cpu")
+
+ ##################
+ def go(model, bkey):
+ saved_state_dict = checkpoint_dict[bkey]
+ if hasattr(model, "module"):
+ state_dict = model.module.state_dict()
+ else:
+ state_dict = model.state_dict()
+ new_state_dict = {}
+        for k, v in state_dict.items():  # iterate over the shapes the model expects
+ try:
+ new_state_dict[k] = saved_state_dict[k]
+ if saved_state_dict[k].shape != state_dict[k].shape:
+ print(
+ "shape-%s-mismatch|need-%s|get-%s"
+ % (k, state_dict[k].shape, saved_state_dict[k].shape)
+ ) #
+ raise KeyError
+ except:
+ # logger.info(traceback.format_exc())
+ logger.info("%s is not in the checkpoint" % k) # pretrain缺失的
+ new_state_dict[k] = v # 模型自带的随机值
+ if hasattr(model, "module"):
+ model.module.load_state_dict(new_state_dict, strict=False)
+ else:
+ model.load_state_dict(new_state_dict, strict=False)
+
+ go(combd, "combd")
+ go(sbd, "sbd")
+ #############
+ logger.info("Loaded model weights")
+
+ iteration = checkpoint_dict["iteration"]
+ learning_rate = checkpoint_dict["learning_rate"]
+ if (
+ optimizer is not None and load_opt == 1
+    ):  # if this fails (e.g. empty optimizer state) it stays freshly initialized; that can also
+        # disturb the LR schedule update, so failures are caught at the outermost level of the train script
+ # try:
+ optimizer.load_state_dict(checkpoint_dict["optimizer"])
+ # except:
+ # traceback.print_exc()
+ logger.info("Loaded checkpoint '{}' (epoch {})".format(checkpoint_path, iteration))
+    return (combd, sbd), optimizer, learning_rate, iteration
+
+
+def load_checkpoint(checkpoint_path, model, optimizer=None, load_opt=1):
+ assert os.path.isfile(checkpoint_path)
+ checkpoint_dict = torch.load(checkpoint_path, map_location="cpu")
+
+ saved_state_dict = checkpoint_dict["model"]
+ if hasattr(model, "module"):
+ state_dict = model.module.state_dict()
+ else:
+ state_dict = model.state_dict()
+ new_state_dict = {}
+    for k, v in state_dict.items():  # iterate over the shapes the model expects
+ try:
+ new_state_dict[k] = saved_state_dict[k]
+ if saved_state_dict[k].shape != state_dict[k].shape:
+ print(
+ "shape-%s-mismatch|need-%s|get-%s"
+ % (k, state_dict[k].shape, saved_state_dict[k].shape)
+ ) #
+ raise KeyError
+ except:
+ # logger.info(traceback.format_exc())
+ logger.info("%s is not in the checkpoint" % k) # pretrain缺失的
+ new_state_dict[k] = v # 模型自带的随机值
+ if hasattr(model, "module"):
+ model.module.load_state_dict(new_state_dict, strict=False)
+ else:
+ model.load_state_dict(new_state_dict, strict=False)
+ logger.info("Loaded model weights")
+
+ iteration = checkpoint_dict["iteration"]
+ learning_rate = checkpoint_dict["learning_rate"]
+ if (
+ optimizer is not None and load_opt == 1
+    ):  # if this fails (e.g. empty optimizer state) it stays freshly initialized; that can also
+        # disturb the LR schedule update, so failures are caught at the outermost level of the train script
+ # try:
+ optimizer.load_state_dict(checkpoint_dict["optimizer"])
+ # except:
+ # traceback.print_exc()
+ logger.info("Loaded checkpoint '{}' (epoch {})".format(checkpoint_path, iteration))
+ return model, optimizer, learning_rate, iteration
+
+
+def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path):
+ logger.info(
+ "Saving model and optimizer state at epoch {} to {}".format(
+ iteration, checkpoint_path
+ )
+ )
+ if hasattr(model, "module"):
+ state_dict = model.module.state_dict()
+ else:
+ state_dict = model.state_dict()
+ torch.save(
+ {
+ "model": state_dict,
+ "iteration": iteration,
+ "optimizer": optimizer.state_dict(),
+ "learning_rate": learning_rate,
+ },
+ checkpoint_path,
+ )
+
+
+def save_checkpoint_d(combd, sbd, optimizer, learning_rate, iteration, checkpoint_path):
+ logger.info(
+ "Saving model and optimizer state at epoch {} to {}".format(
+ iteration, checkpoint_path
+ )
+ )
+ if hasattr(combd, "module"):
+ state_dict_combd = combd.module.state_dict()
+ else:
+ state_dict_combd = combd.state_dict()
+ if hasattr(sbd, "module"):
+ state_dict_sbd = sbd.module.state_dict()
+ else:
+ state_dict_sbd = sbd.state_dict()
+ torch.save(
+ {
+ "combd": state_dict_combd,
+ "sbd": state_dict_sbd,
+ "iteration": iteration,
+ "optimizer": optimizer.state_dict(),
+ "learning_rate": learning_rate,
+ },
+ checkpoint_path,
+ )
+
+
+def summarize(
+ writer,
+ global_step,
+ scalars={},
+ histograms={},
+ images={},
+ audios={},
+ audio_sampling_rate=22050,
+):
+ for k, v in scalars.items():
+ writer.add_scalar(k, v, global_step)
+ for k, v in histograms.items():
+ writer.add_histogram(k, v, global_step)
+ for k, v in images.items():
+ writer.add_image(k, v, global_step, dataformats="HWC")
+ for k, v in audios.items():
+ writer.add_audio(k, v, global_step, audio_sampling_rate)
+
+
+def latest_checkpoint_path(dir_path, regex="G_*.pth"):
+ f_list = glob.glob(os.path.join(dir_path, regex))
+ f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f))))
+ x = f_list[-1]
+ print(x)
+ return x
+
+
+def plot_spectrogram_to_numpy(spectrogram):
+ global MATPLOTLIB_FLAG
+ if not MATPLOTLIB_FLAG:
+ import matplotlib
+
+ matplotlib.use("Agg")
+ MATPLOTLIB_FLAG = True
+ mpl_logger = logging.getLogger("matplotlib")
+ mpl_logger.setLevel(logging.WARNING)
+ import matplotlib.pylab as plt
+ import numpy as np
+
+ fig, ax = plt.subplots(figsize=(10, 2))
+ im = ax.imshow(spectrogram, aspect="auto", origin="lower", interpolation="none")
+ plt.colorbar(im, ax=ax)
+ plt.xlabel("Frames")
+ plt.ylabel("Channels")
+ plt.tight_layout()
+
+ fig.canvas.draw()
+ data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep="")
+ data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
+ plt.close()
+ return data
+
+
+def plot_alignment_to_numpy(alignment, info=None):
+ global MATPLOTLIB_FLAG
+ if not MATPLOTLIB_FLAG:
+ import matplotlib
+
+ matplotlib.use("Agg")
+ MATPLOTLIB_FLAG = True
+ mpl_logger = logging.getLogger("matplotlib")
+ mpl_logger.setLevel(logging.WARNING)
+ import matplotlib.pylab as plt
+ import numpy as np
+
+ fig, ax = plt.subplots(figsize=(6, 4))
+ im = ax.imshow(
+ alignment.transpose(), aspect="auto", origin="lower", interpolation="none"
+ )
+ fig.colorbar(im, ax=ax)
+ xlabel = "Decoder timestep"
+ if info is not None:
+ xlabel += "\n\n" + info
+ plt.xlabel(xlabel)
+ plt.ylabel("Encoder timestep")
+ plt.tight_layout()
+
+ fig.canvas.draw()
+ data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep="")
+ data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
+ plt.close()
+ return data
+
+
+def load_wav_to_torch(full_path):
+ sampling_rate, data = read(full_path)
+ return torch.FloatTensor(data.astype(np.float32)), sampling_rate
+
+
+def load_filepaths_and_text(filename, split="|"):
+ with open(filename, encoding='utf-8') as f:
+ filepaths_and_text = [line.strip().split(split) for line in f]
+    filepaths_and_text = [item for item in filepaths_and_text if len(item) == 5]  # keep only complete rows: path|phone|pitch|pitchf|sid
+ return filepaths_and_text
+
+
+def get_hparams(init=True):
+ """
+ todo:
+ 结尾七人组:
+ 保存频率、总epoch done
+ bs done
+ pretrainG、pretrainD done
+ 卡号:os.en["CUDA_VISIBLE_DEVICES"] done
+ if_latest done
+ 模型:if_f0 done
+ 采样率:自动选择config done
+ 是否缓存数据集进GPU:if_cache_data_in_gpu done
+
+ -m:
+ 自动决定training_files路径,改掉train_nsf_load_pretrain.py里的hps.data.training_files done
+ -c不要了
+ """
+ parser = argparse.ArgumentParser()
+ # parser.add_argument('-c', '--config', type=str, default="configs/40k.json",help='JSON file for configuration')
+ parser.add_argument(
+ "-se",
+ "--save_every_epoch",
+ type=int,
+ required=True,
+ help="checkpoint save frequency (epoch)",
+ )
+ parser.add_argument(
+ "-te", "--total_epoch", type=int, required=True, help="total_epoch"
+ )
+ parser.add_argument(
+ "-pg", "--pretrainG", type=str, default="", help="Pretrained Discriminator path"
+ )
+ parser.add_argument(
+ "-pd", "--pretrainD", type=str, default="", help="Pretrained Generator path"
+ )
+ parser.add_argument("-g", "--gpus", type=str, default="0", help="split by -")
+ parser.add_argument(
+ "-bs", "--batch_size", type=int, required=True, help="batch size"
+ )
+ parser.add_argument(
+ "-e", "--experiment_dir", type=str, required=True, help="experiment dir"
+ ) # -m
+ parser.add_argument(
+ "-sr", "--sample_rate", type=str, required=True, help="sample rate, 32k/40k/48k"
+ )
+ parser.add_argument(
+ "-sw",
+ "--save_every_weights",
+ type=str,
+ default="0",
+ help="save the extracted model in weights directory when saving checkpoints",
+ )
+ parser.add_argument(
+ "-v", "--version", type=str, required=True, help="model version"
+ )
+ parser.add_argument(
+ "-f0",
+ "--if_f0",
+ type=int,
+ required=True,
+ help="use f0 as one of the inputs of the model, 1 or 0",
+ )
+ parser.add_argument(
+ "-l",
+ "--if_latest",
+ type=int,
+ required=True,
+ help="if only save the latest G/D pth file, 1 or 0",
+ )
+ parser.add_argument(
+ "-c",
+ "--if_cache_data_in_gpu",
+ type=int,
+ required=True,
+ help="if caching the dataset in GPU memory, 1 or 0",
+ )
+ parser.add_argument(
+ "-li", "--log_interval", type=int, required=True, help="log interval"
+ )
+
+ args = parser.parse_args()
+ name = args.experiment_dir
+ experiment_dir = os.path.join("./logs", args.experiment_dir)
+
+ if not os.path.exists(experiment_dir):
+ os.makedirs(experiment_dir)
+
+ if args.version == "v1" or args.sample_rate == "40k":
+ config_path = "configs/%s.json" % args.sample_rate
+ else:
+ config_path = "configs/%s_v2.json" % args.sample_rate
+ config_save_path = os.path.join(experiment_dir, "config.json")
+ if init:
+ with open(config_path, "r") as f:
+ data = f.read()
+ with open(config_save_path, "w") as f:
+ f.write(data)
+ else:
+ with open(config_save_path, "r") as f:
+ data = f.read()
+ config = json.loads(data)
+
+ hparams = HParams(**config)
+ hparams.model_dir = hparams.experiment_dir = experiment_dir
+ hparams.save_every_epoch = args.save_every_epoch
+ hparams.name = name
+ hparams.total_epoch = args.total_epoch
+ hparams.pretrainG = args.pretrainG
+ hparams.pretrainD = args.pretrainD
+ hparams.version = args.version
+ hparams.gpus = args.gpus
+ hparams.train.batch_size = args.batch_size
+ hparams.sample_rate = args.sample_rate
+ hparams.if_f0 = args.if_f0
+ hparams.if_latest = args.if_latest
+ hparams.save_every_weights = args.save_every_weights
+ hparams.if_cache_data_in_gpu = args.if_cache_data_in_gpu
+ hparams.data.training_files = "%s/filelist.txt" % experiment_dir
+
+ hparams.train.log_interval = args.log_interval
+
+ # Update log_interval in the 'train' section of the config dictionary
+ config["train"]["log_interval"] = args.log_interval
+
+ # Save the updated config back to the config_save_path
+ with open(config_save_path, "w") as f:
+ json.dump(config, f, indent=4)
+
+ return hparams
+
+
+def get_hparams_from_dir(model_dir):
+ config_save_path = os.path.join(model_dir, "config.json")
+ with open(config_save_path, "r") as f:
+ data = f.read()
+ config = json.loads(data)
+
+ hparams = HParams(**config)
+ hparams.model_dir = model_dir
+ return hparams
+
+
+def get_hparams_from_file(config_path):
+ with open(config_path, "r") as f:
+ data = f.read()
+ config = json.loads(data)
+
+ hparams = HParams(**config)
+ return hparams
+
+
+def check_git_hash(model_dir):
+ source_dir = os.path.dirname(os.path.realpath(__file__))
+ if not os.path.exists(os.path.join(source_dir, ".git")):
+        logger.warning(
+ "{} is not a git repository, therefore hash value comparison will be ignored.".format(
+ source_dir
+ )
+ )
+ return
+
+ cur_hash = subprocess.getoutput("git rev-parse HEAD")
+
+ path = os.path.join(model_dir, "githash")
+    if os.path.exists(path):
+        with open(path) as f:
+            saved_hash = f.read()
+        if saved_hash != cur_hash:
+            logger.warning(
+                "git hash values are different. {}(saved) != {}(current)".format(
+                    saved_hash[:8], cur_hash[:8]
+                )
+            )
+    else:
+        with open(path, "w") as f:
+            f.write(cur_hash)
+
+
+def get_logger(model_dir, filename="train.log"):
+ global logger
+ logger = logging.getLogger(os.path.basename(model_dir))
+ logger.setLevel(logging.DEBUG)
+
+ formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
+ if not os.path.exists(model_dir):
+ os.makedirs(model_dir)
+ h = logging.FileHandler(os.path.join(model_dir, filename))
+ h.setLevel(logging.DEBUG)
+ h.setFormatter(formatter)
+ logger.addHandler(h)
+ return logger
+
+
+class HParams:
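+    # Dict-backed hyperparameter container with both attribute and item access;
+    # nested dicts become nested HParams instances.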
+ def __init__(self, **kwargs):
+ for k, v in kwargs.items():
+            if isinstance(v, dict):
+ v = HParams(**v)
+ self[k] = v
+
+ def keys(self):
+ return self.__dict__.keys()
+
+ def items(self):
+ return self.__dict__.items()
+
+ def values(self):
+ return self.__dict__.values()
+
+ def __len__(self):
+ return len(self.__dict__)
+
+ def __getitem__(self, key):
+ return getattr(self, key)
+
+ def __setitem__(self, key, value):
+ return setattr(self, key, value)
+
+ def __contains__(self, key):
+ return key in self.__dict__
+
+ def __repr__(self):
+ return self.__dict__.__repr__()
diff --git a/train_nsf_sim_cache_sid_load_pretrain.py b/train_nsf_sim_cache_sid_load_pretrain.py
new file mode 100644
index 000000000..2887a97bb
--- /dev/null
+++ b/train_nsf_sim_cache_sid_load_pretrain.py
@@ -0,0 +1,510 @@
+import sys, os
+
+now_dir = os.getcwd()
+sys.path.append(os.path.join(now_dir))
+sys.path.append(os.path.join(now_dir, "train"))
+import utils
+import datetime
+
+hps = utils.get_hparams()
+os.environ["CUDA_VISIBLE_DEVICES"] = hps.gpus.replace("-", ",")
+n_gpus = len(hps.gpus.split("-"))
+from random import shuffle, randint
+import traceback, json, argparse, itertools, math, torch, pdb
+
+torch.backends.cudnn.deterministic = False
+torch.backends.cudnn.benchmark = False
+from torch import nn, optim
+from torch.nn import functional as F
+from torch.utils.data import DataLoader
+from torch.utils.tensorboard import SummaryWriter
+import torch.multiprocessing as mp
+import torch.distributed as dist
+from torch.nn.parallel import DistributedDataParallel as DDP
+from torch.cuda.amp import autocast, GradScaler
+from lib.infer_pack import commons
+from time import sleep
+from time import time as ttime
+from data_utils import (
+ TextAudioLoaderMultiNSFsid,
+ TextAudioLoader,
+ TextAudioCollateMultiNSFsid,
+ TextAudioCollate,
+ DistributedBucketSampler,
+)
+
+import csv
+
+if hps.version == "v1":
+ from lib.infer_pack.models import (
+ SynthesizerTrnMs256NSFsid as RVC_Model_f0,
+ SynthesizerTrnMs256NSFsid_nono as RVC_Model_nof0,
+ MultiPeriodDiscriminator,
+ )
+else:
+ from lib.infer_pack.models import (
+ SynthesizerTrnMs768NSFsid as RVC_Model_f0,
+ SynthesizerTrnMs768NSFsid_nono as RVC_Model_nof0,
+ MultiPeriodDiscriminatorV2 as MultiPeriodDiscriminator,
+ )
+from losses import generator_loss, discriminator_loss, feature_loss, kl_loss
+from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
+from process_ckpt import savee
+
+global global_step
+global_step = 0
+
+
+class EpochRecorder:
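+    # Tracks wall-clock time between epochs for log messages.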
+ def __init__(self):
+ self.last_time = ttime()
+
+ def record(self):
+ now_time = ttime()
+ elapsed_time = now_time - self.last_time
+ self.last_time = now_time
+ elapsed_time_str = str(datetime.timedelta(seconds=elapsed_time))
+ current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+ return f"[{current_time}] | ({elapsed_time_str})"
+
+
+def main():
+ n_gpus = torch.cuda.device_count()
+    if not torch.cuda.is_available() and torch.backends.mps.is_available():
+ n_gpus = 1
+ os.environ["MASTER_ADDR"] = "localhost"
+ os.environ["MASTER_PORT"] = str(randint(20000, 55555))
+ children = []
+ for i in range(n_gpus):
+ subproc = mp.Process(
+ target=run,
+ args=(
+ i,
+ n_gpus,
+ hps,
+ ),
+ )
+ children.append(subproc)
+ subproc.start()
+
+ for i in range(n_gpus):
+ children[i].join()
+
+def reset_stop_flag():
+ with open("csvdb/stop.csv", "w+", newline="") as STOPCSVwrite:
+ csv_writer = csv.writer(STOPCSVwrite, delimiter=",")
+ csv_writer.writerow(["False"])
+
+def create_model(hps, model_f0, model_nof0):
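+    # Build the synthesizer, choosing the f0-conditioned class when hps.if_f0 == 1.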
+ filter_length_adjusted = hps.data.filter_length // 2 + 1
+ segment_size_adjusted = hps.train.segment_size // hps.data.hop_length
+ is_half = hps.train.fp16_run
+ sr = hps.sample_rate
+
+ model = model_f0 if hps.if_f0 == 1 else model_nof0
+
+ return model(
+ filter_length_adjusted,
+ segment_size_adjusted,
+ **hps.model,
+ is_half=is_half,
+ sr=sr
+ )
+
+def move_model_to_cuda_if_available(model, rank):
+ if torch.cuda.is_available():
+ return model.cuda(rank)
+ else:
+ return model
+
+def create_optimizer(model, hps):
+ return torch.optim.AdamW(
+ model.parameters(),
+ hps.train.learning_rate,
+ betas=hps.train.betas,
+ eps=hps.train.eps,
+ )
+
+def create_ddp_model(model, rank):
+ if torch.cuda.is_available():
+ return DDP(model, device_ids=[rank])
+ else:
+ return DDP(model)
+
+def create_dataset(hps, if_f0=True):
+ return TextAudioLoaderMultiNSFsid(hps.data.training_files, hps.data) if if_f0 else TextAudioLoader(hps.data.training_files, hps.data)
+
+def create_sampler(dataset, batch_size, n_gpus, rank):
+ return DistributedBucketSampler(
+ dataset,
+ batch_size * n_gpus,
+ # [100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1200,1400], # 16s
+ [100, 200, 300, 400, 500, 600, 700, 800, 900], # 16s
+ num_replicas=n_gpus,
+ rank=rank,
+ shuffle=True,
+ )
+
+def set_collate_fn(if_f0=True):
+ return TextAudioCollateMultiNSFsid() if if_f0 else TextAudioCollate()
+
+def run(rank, n_gpus, hps):
+ global global_step
+ if rank == 0:
+ logger = utils.get_logger(hps.model_dir)
+ logger.info(hps)
+ # utils.check_git_hash(hps.model_dir)
+ writer = SummaryWriter(log_dir=hps.model_dir)
+ writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval"))
+
+ dist.init_process_group(
+ backend="gloo", init_method="env://", world_size=n_gpus, rank=rank
+ )
+ torch.manual_seed(hps.train.seed)
+ if torch.cuda.is_available():
+ torch.cuda.set_device(rank)
+
+
+ train_dataset = TextAudioLoaderMultiNSFsid(
+ hps.data.training_files, hps.data
+ ) if hps.if_f0 == 1 else TextAudioLoader(hps.data.training_files, hps.data)
+
+ train_sampler = DistributedBucketSampler(
+ train_dataset,
+ hps.train.batch_size * n_gpus,
+ # [100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1200,1400], # 16s
+ [100, 200, 300, 400, 500, 600, 700, 800, 900], # 16s
+ num_replicas=n_gpus,
+ rank=rank,
+ shuffle=True,
+ )
+    # If the dataloader workers run out of shared memory, try raising the shared memory limit.
+    # num_workers=8 -> num_workers=4
+
+ collate_fn = TextAudioCollateMultiNSFsid() if hps.if_f0 == 1 else TextAudioCollate()
+ train_loader = DataLoader(
+ train_dataset,
+ num_workers=4,
+ shuffle=False,
+ pin_memory=True,
+ collate_fn=collate_fn,
+ batch_sampler=train_sampler,
+ persistent_workers=True,
+ prefetch_factor=8,
+ )
+
+ net_g = create_model(hps, RVC_Model_f0, RVC_Model_nof0)
+
+ net_g = move_model_to_cuda_if_available(net_g, rank)
+ net_d = move_model_to_cuda_if_available(MultiPeriodDiscriminator(hps.model.use_spectral_norm), rank)
+
+ optim_g = create_optimizer(net_g, hps)
+ optim_d = create_optimizer(net_d, hps)
+ # net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True)
+ # net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True)
+ net_g = create_ddp_model(net_g, rank)
+ net_d = create_ddp_model(net_d, rank)
+
+    try:  # auto-resume if an existing checkpoint can be loaded
+ _, _, _, epoch_str = utils.load_checkpoint(
+ utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d
+        )  # loading D usually succeeds
+ if rank == 0:
+ logger.info("loaded D")
+ # _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g,load_opt=0)
+ _, _, _, epoch_str = utils.load_checkpoint(
+ utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g
+ )
+ global_step = (epoch_str - 1) * len(train_loader)
+ # epoch_str = 1
+ # global_step = 0
+    except:  # nothing to resume on a first run, so load the pretrained weights instead
+ # traceback.print_exc()
+ epoch_str = 1
+ global_step = 0
+ if hps.pretrainG != "":
+ if rank == 0:
+ logger.info(f"loaded pretrained {hps.pretrainG}")
+ print(
+ net_g.module.load_state_dict(
+ torch.load(hps.pretrainG, map_location="cpu")["model"]
+ )
+            )  # note: the optimizer state is intentionally not loaded here
+ if hps.pretrainD != "":
+ if rank == 0:
+ logger.info("loaded pretrained %s" % (hps.pretrainD))
+ print(
+ net_d.module.load_state_dict(
+ torch.load(hps.pretrainD, map_location="cpu")["model"]
+ )
+ )
+
+ scheduler_g = torch.optim.lr_scheduler.ExponentialLR(
+ optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2
+ )
+ scheduler_d = torch.optim.lr_scheduler.ExponentialLR(
+ optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2
+ )
+
+ scaler = GradScaler(enabled=hps.train.fp16_run)
+
+ cache = []
+ for epoch in range(epoch_str, hps.train.epochs + 1):
+ if rank == 0:
+ train_and_evaluate(
+ rank,
+ epoch,
+ hps,
+ [net_g, net_d],
+ [optim_g, optim_d],
+ [scheduler_g, scheduler_d],
+ scaler,
+ [train_loader, None],
+ logger,
+ [writer, writer_eval],
+ cache,
+ )
+ else:
+ train_and_evaluate(
+ rank,
+ epoch,
+ hps,
+ [net_g, net_d],
+ [optim_g, optim_d],
+ [scheduler_g, scheduler_d],
+ scaler,
+ [train_loader, None],
+ None,
+ None,
+ cache,
+ )
+ scheduler_g.step()
+ scheduler_d.step()
+
+
+def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers, cache):
+ net_g, net_d = nets
+ optim_g, optim_d = optims
+ train_loader, eval_loader = loaders
+ writer, writer_eval = (writers if writers is not None else (None, None))
+
+ train_loader.batch_sampler.set_epoch(epoch)
+ global global_step
+
+ nets = [net_g, net_d]
+ for net in nets:
+ net.train()
+
+ def save_checkpoint(name):
+ ckpt = net_g.module.state_dict() if hasattr(net_g, "module") else net_g.state_dict()
+ result = savee(ckpt, hps.sample_rate, hps.if_f0, name, epoch, hps.version, hps)
+ logger.info("Saving final ckpt: {}".format(result))
+ sleep(1)
+
+ if hps.if_cache_data_in_gpu:
+ # Use Cache
+ data_iterator = cache
+ if len(cache) == 0:
+ gpu_available = torch.cuda.is_available()
+
+ for batch_idx, info in enumerate(train_loader):
+ # Unpack
+ info = list(info)
+ if hps.if_f0:
+ tensors = info
+ else:
+ # We consider that pitch and pitchf are not included in this case
+ tensors = info[:2] + info[4:]
+
+ # Load on CUDA
+ if gpu_available:
+ tensors = [tensor.cuda(rank, non_blocking=True) for tensor in tensors]
+
+                    # Cache on list
+                    cache.append((batch_idx, tuple(t for t in tensors if t is not None)))
+ else:
+ shuffle(cache)
+ else:
+ data_iterator = enumerate(train_loader)
+
+ def to_gpu_if_available(tensor):
+ return tensor.cuda(rank, non_blocking=True) if torch.cuda.is_available() else tensor
+
+ # Run steps
+ gpu_available = torch.cuda.is_available()
+ epoch_recorder = EpochRecorder()
+ fp16_run = hps.train.fp16_run
+ c_mel = hps.train.c_mel
+
+ for batch_idx, info in data_iterator:
+ # Data
+ ## Unpack
+ if hps.if_f0 == 1:
+ phone, phone_lengths, pitch, pitchf, spec, spec_lengths, wave, wave_lengths, sid = info
+ else:
+ phone, phone_lengths, spec, spec_lengths, wave, wave_lengths, sid = info
+ ## Load on CUDA
+ if (not hps.if_cache_data_in_gpu) and gpu_available:
+ phone = to_gpu_if_available(phone)
+ phone_lengths = to_gpu_if_available(phone_lengths)
+ sid = to_gpu_if_available(sid)
+ spec = to_gpu_if_available(spec)
+ spec_lengths = to_gpu_if_available(spec_lengths)
+ wave = to_gpu_if_available(wave)
+
+ if hps.if_f0 == 1:
+ pitch = to_gpu_if_available(pitch)
+ pitchf = to_gpu_if_available(pitchf)
+
+ # Calculate
+ with autocast(enabled=fp16_run):
+ if hps.if_f0 == 1:
+ y_hat, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q) = \
+ net_g(phone, phone_lengths, pitch, pitchf, spec, spec_lengths, sid)
+ else:
+ y_hat, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q) = \
+ net_g(phone, phone_lengths, spec, spec_lengths, sid)
+ mel = spec_to_mel_torch(spec, hps.data.filter_length, hps.data.n_mel_channels,
+ hps.data.sampling_rate, hps.data.mel_fmin, hps.data.mel_fmax)
+
+ y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length)
+ y_hat_mel = mel_spectrogram_torch(
+ y_hat.float().squeeze(1),
+ hps.data.filter_length,
+ hps.data.n_mel_channels,
+ hps.data.sampling_rate,
+ hps.data.hop_length,
+ hps.data.win_length,
+ hps.data.mel_fmin,
+ hps.data.mel_fmax,
+ )
+
+                if fp16_run:
+                    y_hat_mel = y_hat_mel.half()
+
+ wave = commons.slice_segments(wave, ids_slice * hps.data.hop_length,
+ hps.train.segment_size) # slice
+
+ y_d_hat_r, y_d_hat_g, _, _ = net_d(wave, y_hat.detach())
+
+ loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g)
+ net_d_params = net_d.parameters()
+ net_g_params = net_g.parameters()
+ lr_scalar = optim_g.param_groups[0]["lr"]
+
+ optim_d.zero_grad()
+ scaler.scale(loss_disc).backward()
+ scaler.unscale_(optim_d)
+ grad_norm_d = commons.clip_grad_value_(net_d_params, None)
+ scaler.step(optim_d)
+
+ with autocast(enabled=fp16_run):
+ y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(wave, y_hat)
+
+ loss_mel = F.l1_loss(y_mel, y_hat_mel) * c_mel
+ loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl
+ loss_fm = feature_loss(fmap_r, fmap_g)
+ loss_gen, losses_gen = generator_loss(y_d_hat_g)
+ loss_gen_all = loss_gen + loss_fm + loss_mel + loss_kl
+
+ optim_g.zero_grad()
+ scaler.scale(loss_gen_all).backward()
+ scaler.unscale_(optim_g)
+ grad_norm_g = commons.clip_grad_value_(net_g_params, None)
+ scaler.step(optim_g)
+ scaler.update()
+
+ if rank == 0 and global_step % hps.train.log_interval == 0:
+ lr = lr_scalar # use stored lr scalar here
+ logger.info("Train Epoch: {} [{:.0f}%]".format(epoch, 100.0 * batch_idx / len(train_loader)))
+
+ # Amor For Tensorboard display
+ loss_mel, loss_kl = min(loss_mel, 75), min(loss_kl, 9)
+
+ scalar_dict = {
+ "loss/g/total": loss_gen_all,
+ "loss/d/total": loss_disc,
+ "learning_rate": lr,
+ "grad_norm_d": grad_norm_d,
+ "grad_norm_g": grad_norm_g,
+ "loss/g/fm": loss_fm,
+ "loss/g/mel": loss_mel,
+ "loss/g/kl": loss_kl,
+ **{"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)},
+ **{"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)},
+ **{"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)},
+ }
+
+ image_dict = {
+ "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()),
+ "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()),
+ "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()),
+ }
+
+ utils.summarize(
+ writer=writer,
+ global_step=global_step,
+ images=image_dict,
+ scalars=scalar_dict,
+ )
+ global_step += 1
+
+ if epoch % hps.save_every_epoch == 0:
+ if rank == 0:
+ save_format = str(2333333) if hps.if_latest else str(global_step)
+ model_dir = hps.model_dir
+ learning_rate = hps.train.learning_rate
+ name_epoch = f"{hps.name}_e{epoch}"
+ models = {'G': net_g, 'D': net_d}
+ optims = {'G': optim_g, 'D': optim_d}
+
+ for model_name, model in models.items():
+ path = os.path.join(model_dir, f"{model_name}_{save_format}.pth")
+ utils.save_checkpoint(model, optims[model_name], learning_rate, epoch, path)
+
+ if hps.save_every_weights == "1":
+ ckpt = net_g.module.state_dict() if hasattr(net_g, "module") else net_g.state_dict()
+ logger.info(
+ "saving ckpt %s_%s"
+ % (
+ name_epoch,
+ savee(
+ ckpt,
+ hps.sample_rate,
+ hps.if_f0,
+ f"{name_epoch}_s{global_step}",
+ epoch,
+ hps.version,
+ hps,
+ ),
+ )
+ )
+
+ stopbtn = False
+ try:
+ with open("csvdb/stop.csv", 'r') as csv_file:
+ stopbtn_str = next(csv.reader(csv_file), [None])[0]
+ if stopbtn_str is not None: stopbtn = stopbtn_str.lower() == 'true'
+ except (ValueError, TypeError, FileNotFoundError, IndexError) as e:
+ print(f"Handling exception: {e}")
+ stopbtn = False
+
+ if stopbtn:
+ logger.info("Stop Button was pressed. The program is closed.")
+ ckpt = net_g.module.state_dict() if hasattr(net_g, "module") else net_g.state_dict()
+ logger.info(f"Saving final ckpt:{savee(ckpt, hps.sample_rate, hps.if_f0, hps.name, epoch, hps.version, hps)}")
+ sleep(1)
+ reset_stop_flag()
+ os._exit(2333333)
+
+ if rank == 0:
+ logger.info(f"====> Epoch: {epoch} {epoch_recorder.record()}")
+
+ if epoch >= hps.total_epoch:
+ logger.info("Training is done. The program is closed.")
+ save_checkpoint(hps.name)
+ os._exit(2333333)
+
+
+if __name__ == "__main__":
+ torch.multiprocessing.set_start_method("spawn")
+ main()
diff --git a/trainset_preprocess_pipeline_print.py b/trainset_preprocess_pipeline_print.py
new file mode 100644
index 000000000..f20845839
--- /dev/null
+++ b/trainset_preprocess_pipeline_print.py
@@ -0,0 +1,155 @@
+import sys, os, multiprocessing
+from scipy import signal
+
+now_dir = os.getcwd()
+sys.path.append(now_dir)
+
+inp_root = sys.argv[1]
+sr = int(sys.argv[2])
+n_p = int(sys.argv[3])
+exp_dir = sys.argv[4]
+noparallel = sys.argv[5] == "True"
+import numpy as np, os, traceback
+from slicer2 import Slicer
+import librosa, traceback
+from scipy.io import wavfile
+import multiprocessing
+from my_utils import load_audio, check_audio_duration
+import tqdm
+
+DoFormant = False
+Quefrency = 1.0
+Timbre = 1.0
+
+mutex = multiprocessing.Lock()
+f = open(f"{exp_dir}/preprocess.log", "a+")
+
+
+def println(strr):
+ mutex.acquire()
+ print(strr)
+ f.write("%s\n" % strr)
+ f.flush()
+ mutex.release()
+
+
+class PreProcess:
+ def __init__(self, sr, exp_dir):
+ self.slicer = Slicer(
+ sr=sr,
+ threshold=-42,
+ min_length=1500,
+ min_interval=400,
+ hop_size=15,
+ max_sil_kept=500,
+ )
+ self.sr = sr
+ self.bh, self.ah = signal.butter(N=5, Wn=48, btype="high", fs=self.sr)
+ self.per = 3.0
+ self.overlap = 0.3
+ self.tail = self.per + self.overlap
+ self.max = 0.9
+ self.alpha = 0.75
+ self.exp_dir = exp_dir
+ self.gt_wavs_dir = "%s/0_gt_wavs" % exp_dir
+ self.wavs16k_dir = "%s/1_16k_wavs" % exp_dir
+ os.makedirs(self.exp_dir, exist_ok=True)
+ os.makedirs(self.gt_wavs_dir, exist_ok=True)
+ os.makedirs(self.wavs16k_dir, exist_ok=True)
+
+ def norm_write(self, tmp_audio, idx0, idx1):
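+        # Blend a peak-normalized copy (scaled to self.max) with the raw signal
+        # by self.alpha, then write the target-rate wav and a 16 kHz resample.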
+ tmp_max = np.abs(tmp_audio).max()
+ if tmp_max > 2.5:
+ print("%s-%s-%s-filtered" % (idx0, idx1, tmp_max))
+ return
+ tmp_audio = (tmp_audio / tmp_max * (self.max * self.alpha)) + (
+ 1 - self.alpha
+ ) * tmp_audio
+ wavfile.write(
+ "%s/%s_%s.wav" % (self.gt_wavs_dir, idx0, idx1),
+ self.sr,
+ tmp_audio.astype(np.float32),
+ )
+ tmp_audio = librosa.resample(
+ tmp_audio, orig_sr=self.sr, target_sr=16000
+ ) # , res_type="soxr_vhq"
+ wavfile.write(
+ "%s/%s_%s.wav" % (self.wavs16k_dir, idx0, idx1),
+ 16000,
+ tmp_audio.astype(np.float32),
+ )
+
+ def pipeline(self, path, idx0):
+
+ file_extension = path.split('.')[-1]
+ supported_file_extensions = {'wav', 'mp3', 'flac', 'ogg', 'opus',
+ 'm4a', 'mp4', 'aac', 'alac', 'wma',
+ 'aiff', 'webm', 'ac3'}
+
+ try:
+ if file_extension in supported_file_extensions:
+ if not check_audio_duration(path): return
+ audio = load_audio(path, self.sr, DoFormant=False)
+ # zero phased digital filter cause pre-ringing noise...
+ # audio = signal.filtfilt(self.bh, self.ah, audio)
+ audio = signal.lfilter(self.bh, self.ah, audio)
+
+                idx1 = 0
+                for audio in self.slicer.slice(audio):
+                    # step through the slice in (per - overlap)-second hops
+                    frame_start_points = range(0, len(audio), int(self.sr * (self.per - self.overlap)))
+
+                    for start in frame_start_points:
+                        if len(audio[start:]) <= self.tail * self.sr:
+                            # the remainder is short enough to keep whole as the tail chunk
+                            tmp_audio = audio[start:]
+                            idx1 += 1
+                            break
+
+                        # full chunk of self.per seconds
+                        tmp_audio = audio[start : start + int(self.per * self.sr)]
+                        self.norm_write(tmp_audio, idx0, idx1)
+                        idx1 += 1
+                    self.norm_write(tmp_audio, idx0, idx1)  # write the tail chunk
+ # println("%s->Suc." % path)
+ else:
+ print(f"Unsupported audio format! - {path.split('/')[-1]}")
+ except:
+ println("%s->%s" % (path, traceback.format_exc()))
+
+ def pipeline_mp(self, infos, thread_n):
+ for path, idx0 in tqdm.tqdm(
+ infos, position=thread_n, leave=True, desc="thread:%s" % thread_n
+ ):
+ self.pipeline(path, idx0)
+
+ def pipeline_mp_inp_dir(self, inp_root, n_p):
+ try:
+ infos = [
+ ("%s/%s" % (inp_root, name), idx)
+ for idx, name in enumerate(sorted(list(os.listdir(inp_root))))
+ ]
+            if noparallel:
+                for i in range(n_p):
+                    self.pipeline_mp(infos[i::n_p], i)
+ else:
+ ps = []
+ for i in range(n_p):
+ p = multiprocessing.Process(
+ target=self.pipeline_mp, args=(infos[i::n_p], i)
+ )
+ ps.append(p)
+ p.start()
+ for i in range(n_p):
+ ps[i].join()
+ except:
+ println("Fail. %s" % traceback.format_exc())
+
+
+def preprocess_trainset(inp_root, sr, n_p, exp_dir):
+ pp = PreProcess(sr, exp_dir)
+ println("start preprocess")
+ println(sys.argv)
+ pp.pipeline_mp_inp_dir(inp_root, n_p)
+ println("end preprocess")
+
+
+if __name__ == "__main__":
+ preprocess_trainset(inp_root, sr, n_p, exp_dir)
diff --git a/uvr5_weights/.gitignore b/uvr5_weights/.gitignore
new file mode 100644
index 000000000..d6b7ef32c
--- /dev/null
+++ b/uvr5_weights/.gitignore
@@ -0,0 +1,2 @@
+*
+!.gitignore
diff --git a/vc_infer_pipeline.py b/vc_infer_pipeline.py
new file mode 100644
index 000000000..eed21b3eb
--- /dev/null
+++ b/vc_infer_pipeline.py
@@ -0,0 +1,594 @@
+from scipy.io import wavfile
+import numpy as np, parselmouth, torch, pdb, sys, os
+from time import time as ttime
+import torch.nn.functional as F
+from torch import Tensor
+import scipy.signal as signal
+import pyworld, traceback, faiss, librosa
+import torchcrepe  # Fork feature: crepe f0 algorithm. New dependency (pip install torchcrepe)
+from functools import lru_cache, partial
+import re
+
+from tqdm import tqdm
+
+now_dir = os.getcwd()
+sys.path.append(now_dir)
+
+from LazyImport import lazyload
+
+torchcrepe = lazyload("torchcrepe") # Fork Feature. Crepe algo for training and preprocess
+torch = lazyload("torch")
+rmvpe = lazyload("rmvpe")
+
+bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000)
+
+input_audio_path2wav = {}
+
+
+@lru_cache
+def cache_harvest_f0(input_audio_path, fs, f0max, f0min, frame_period):
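+    # lru_cache cannot hash numpy arrays, so the waveform is passed out-of-band
+    # via the module-level input_audio_path2wav dict, keyed by file path.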
+ audio = input_audio_path2wav[input_audio_path]
+ f0, t = pyworld.harvest(
+ audio,
+ fs=fs,
+ f0_ceil=f0max,
+ f0_floor=f0min,
+ frame_period=frame_period,
+ )
+ f0 = pyworld.stonemask(audio, f0, t, fs)
+ return f0
+
+
+def change_rms(data1, sr1, data2, sr2, rate):  # data1: input audio, data2: output audio; rate is data2's share of the RMS mix
+ # print(data1.max(),data2.max())
+ rms1 = librosa.feature.rms(
+ y=data1, frame_length=sr1 // 2 * 2, hop_length=sr1 // 2
+    )  # one RMS point per half second
+ rms2 = librosa.feature.rms(y=data2, frame_length=sr2 // 2 * 2, hop_length=sr2 // 2)
+ rms1 = torch.from_numpy(rms1)
+ rms1 = F.interpolate(
+ rms1.unsqueeze(0), size=data2.shape[0], mode="linear"
+ ).squeeze()
+ rms2 = torch.from_numpy(rms2)
+ rms2 = F.interpolate(
+ rms2.unsqueeze(0), size=data2.shape[0], mode="linear"
+ ).squeeze()
+ rms2 = torch.max(rms2, torch.zeros_like(rms2) + 1e-6)
+ data2 *= (
+ torch.pow(rms1, torch.tensor(1 - rate))
+ * torch.pow(rms2, torch.tensor(rate - 1))
+ ).numpy()
+ return data2
+
+
+class VC(object):
+ def __init__(self, tgt_sr, config):
+ self.x_pad, self.x_query, self.x_center, self.x_max, self.is_half = (
+ config.x_pad,
+ config.x_query,
+ config.x_center,
+ config.x_max,
+ config.is_half,
+ )
+
+        self.sr = 16000  # Hubert input sample rate
+        self.window = 160  # samples per frame
+        self.t_pad = self.sr * self.x_pad  # padding length before/after each clip
+        self.t_pad_tgt = tgt_sr * self.x_pad
+        self.t_pad2 = self.t_pad * 2
+        self.t_query = self.sr * self.x_query  # search window around each cut point
+        self.t_center = self.sr * self.x_center  # cut-point query position
+        self.t_max = self.sr * self.x_max  # duration threshold below which no cut-point search is needed
+        self.device = config.device
+        self.model_rmvpe = rmvpe.RMVPE("rmvpe.pt", is_half=False, device=self.device)
+ self.f0_method_dict = {
+ "pm": self.get_pm,
+ "harvest": self.get_harvest,
+ "dio": self.get_dio,
+ "rmvpe": self.get_rmvpe,
+ "rmvpe+": self.get_pitch_dependant_rmvpe,
+ "crepe": self.get_f0_official_crepe_computation,
+ "crepe-tiny": partial(self.get_f0_official_crepe_computation, model='model'),
+ "mangio-crepe": self.get_f0_crepe_computation,
+ "mangio-crepe-tiny": partial(self.get_f0_crepe_computation, model='model'),
+
+ }
+
+ # Fork Feature: Get the best torch device to use for f0 algorithms that require a torch device. Will return the type (torch.device)
+ def get_optimal_torch_device(self, index: int = 0) -> torch.device:
+ if torch.cuda.is_available():
+ return torch.device(
+ f"cuda:{index % torch.cuda.device_count()}"
+ ) # Very fast
+ elif torch.backends.mps.is_available():
+ return torch.device("mps")
+ return torch.device("cpu")
+
+ # Fork Feature: Compute f0 with the crepe method
+ def get_f0_crepe_computation(
+ self,
+ x,
+ f0_min,
+ f0_max,
+ p_len,
+ *args, # 512 before. Hop length changes the speed that the voice jumps to a different dramatic pitch. Lower hop lengths means more pitch accuracy but longer inference time.
+ **kwargs, # Either use crepe-tiny "tiny" or crepe "full". Default is full
+ ):
+ x = x.astype(
+ np.float32
+ ) # fixes the F.conv2D exception. We needed to convert double to float.
+ x /= np.quantile(np.abs(x), 0.999)
+ torch_device = self.get_optimal_torch_device()
+ audio = torch.from_numpy(x).to(torch_device, copy=True)
+ audio = torch.unsqueeze(audio, dim=0)
+ if audio.ndim == 2 and audio.shape[0] > 1:
+ audio = torch.mean(audio, dim=0, keepdim=True).detach()
+ audio = audio.detach()
+ hop_length = kwargs.get('crepe_hop_length', 160)
+ model = kwargs.get('model', 'full')
+ print("Initiating prediction with a crepe_hop_length of: " + str(hop_length))
+ pitch: Tensor = torchcrepe.predict(
+ audio,
+ self.sr,
+ hop_length,
+ f0_min,
+ f0_max,
+ model,
+ batch_size=hop_length * 2,
+ device=torch_device,
+ pad=True,
+ )
+ p_len = p_len or x.shape[0] // hop_length
+ # Resize the pitch for final f0
+ source = np.array(pitch.squeeze(0).cpu().float().numpy())
+ source[source < 0.001] = np.nan
+ target = np.interp(
+ np.arange(0, len(source) * p_len, len(source)) / p_len,
+ np.arange(0, len(source)),
+ source,
+ )
+ f0 = np.nan_to_num(target)
+ return f0 # Resized f0
+
+ def get_f0_official_crepe_computation(
+ self,
+ x,
+ f0_min,
+ f0_max,
+ *args,
+ **kwargs
+ ):
+ # Pick a batch size that doesn't cause memory errors on your gpu
+ batch_size = 512
+        # Compute pitch on the configured device
+ audio = torch.tensor(np.copy(x))[None].float()
+ model = kwargs.get('model', 'full')
+ f0, pd = torchcrepe.predict(
+ audio,
+ self.sr,
+ self.window,
+ f0_min,
+ f0_max,
+ model,
+ batch_size=batch_size,
+ device=self.device,
+ return_periodicity=True,
+ )
+ pd = torchcrepe.filter.median(pd, 3)
+ f0 = torchcrepe.filter.mean(f0, 3)
+ f0[pd < 0.1] = 0
+ f0 = f0[0].cpu().numpy()
+ return f0
+
+ # Fork Feature: Compute pYIN f0 method
+ def get_f0_pyin_computation(self, x, f0_min, f0_max):
+ y, sr = librosa.load("saudio/Sidney.wav", self.sr, mono=True)
+ f0, _, _ = librosa.pyin(y, sr=self.sr, fmin=f0_min, fmax=f0_max)
+ f0 = f0[1:] # Get rid of extra first frame
+ return f0
+
+ def get_pm(self, x, p_len, *args, **kwargs):
+ f0 = parselmouth.Sound(x, self.sr).to_pitch_ac(
+ time_step=160 / 16000,
+ voicing_threshold=0.6,
+ pitch_floor=kwargs.get('f0_min'),
+ pitch_ceiling=kwargs.get('f0_max'),
+ ).selected_array["frequency"]
+
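+ # parselmouth can return slightly fewer frames than p_len, so center-pad
+ # the track with zeros (unvoiced) to exactly p_len frames.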
+ return np.pad(
+ f0,
+ [[max(0, (p_len - len(f0) + 1) // 2), max(0, p_len - len(f0) - (p_len - len(f0) + 1) // 2)]],
+ mode="constant"
+ )
+
+ def get_harvest(self, x, *args, **kwargs):
+ f0_spectral = pyworld.harvest(
+ x.astype(np.double),
+ fs=self.sr,
+ f0_ceil=kwargs.get('f0_max'),
+ f0_floor=kwargs.get('f0_min'),
+ frame_period=1000 * kwargs.get('hop_length', 160) / self.sr,
+ )
+ return pyworld.stonemask(x.astype(np.double), *f0_spectral, self.sr)
+
+ def get_dio(self, x, *args, **kwargs):
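+ # dio is a faster but rougher estimator than harvest; in both cases
+ # stonemask refines the raw track by re-estimating each frame's f0
+ # against the waveform.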
+ f0_spectral = pyworld.dio(
+ x.astype(np.double),
+ fs=self.sr,
+ f0_ceil=kwargs.get('f0_max'),
+ f0_floor=kwargs.get('f0_min'),
+ frame_period=1000 * kwargs.get('hop_length', 160) / self.sr,
+ )
+ return pyworld.stonemask(x.astype(np.double), *f0_spectral, self.sr)
+
+ def get_rmvpe(self, x, *args, **kwargs):
+ return self.model_rmvpe.infer_from_audio(x, thred=0.03)
+
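+ # Variant of rmvpe that, per its signature, restricts detection to the
+ # caller-supplied [f0_min, f0_max] range; mapped as "rmvpe+" above.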
+ def get_pitch_dependant_rmvpe(self, x, f0_min=1, f0_max=40000, *args, **kwargs):
+ return self.model_rmvpe.infer_from_audio_with_pitch(x, thred=0.03, f0_min=f0_min, f0_max=f0_max)
+
+ # Fork Feature: Acquire median hybrid f0 estimation calculation
+ def get_f0_hybrid_computation(
+ self,
+ methods_str,
+ input_audio_path,
+ x,
+ f0_min,
+ f0_max,
+ p_len,
+ filter_radius,
+ crepe_hop_length,
+ time_step
+ ):
+ # Get various f0 methods from input to use in the computation stack
+ params = {'x': x, 'p_len': p_len, 'f0_min': f0_min,
+ 'f0_max': f0_max, 'time_step': time_step, 'filter_radius': filter_radius,
+ 'crepe_hop_length': crepe_hop_length, 'model': "full"
+ }
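+ # e.g. "hybrid[rmvpe+harvest]" parses to ["rmvpe", "harvest"]; each method
+ # contributes one f0 track and np.nanmedian later takes the per-frame
+ # median across them.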
+ methods_str = re.search(r'hybrid\[(.+)\]', methods_str)
+ # Fall back to an empty method list if the pattern did not match
+ methods = [method.strip() for method in methods_str.group(1).split('+')] if methods_str else []
+ f0_computation_stack = []
+
+ print(f"Calculating f0 pitch estimations for methods: {str(methods)}")
+ x = x.astype(np.float32)
+ x /= np.quantile(np.abs(x), 0.999)
+ # Get f0 calculations for all methods specified
+
+ for method in methods:
+ if method not in self.f0_method_dict:
+ print(f"Method {method} not found.")
+ continue
+ f0 = self.f0_method_dict[method](**params)
+ if method == 'harvest' and filter_radius > 2:
+ f0 = signal.medfilt(f0, 3)
+ f0 = f0[1:] # Get rid of first frame.
+ f0_computation_stack.append(f0)
+
+ print(f"Stack frame counts: {[len(fc) for fc in f0_computation_stack]}")
+
+ print(f"Calculating hybrid median f0 from the stack of: {str(methods)}")
+ f0_median_hybrid = np.nanmedian(f0_computation_stack, axis=0)
+ return f0_median_hybrid
+
+ def get_f0(
+ self,
+ input_audio_path,
+ x,
+ p_len,
+ f0_up_key,
+ f0_method,
+ filter_radius,
+ crepe_hop_length,
+ inp_f0=None,
+ f0_min=50,
+ f0_max=1100,
+ ):
+ global input_audio_path2wav
+ time_step = self.window / self.sr * 1000
+ f0_mel_min = 1127 * np.log(1 + f0_min / 700)
+ f0_mel_max = 1127 * np.log(1 + f0_max / 700)
+ params = {'x': x, 'p_len': p_len, 'f0_up_key': f0_up_key, 'f0_min': f0_min,
+ 'f0_max': f0_max, 'time_step': time_step, 'filter_radius': filter_radius,
+ 'crepe_hop_length': crepe_hop_length, 'model': "full"
+ }
+ if "hybrid" in f0_method:
+ # Perform hybrid median pitch estimation
+ input_audio_path2wav[input_audio_path] = x.astype(np.double)
+ f0 = self.get_f0_hybrid_computation(
+ f0_method,
+ input_audio_path,
+ x,
+ f0_min,
+ f0_max,
+ p_len,
+ filter_radius,
+ crepe_hop_length,
+ time_step,
+ )
+ else:
+ f0 = self.f0_method_dict[f0_method](**params)
+
+ f0 *= pow(2, f0_up_key / 12)
+ # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
+ tf0 = self.sr // self.window # number of f0 frames per second
+ if inp_f0 is not None:
+ delta_t = np.round(
+ (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1
+ ).astype("int16")
+ replace_f0 = np.interp(
+ list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1]
+ )
+ shape = f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)].shape[0]
+ f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)] = replace_f0[
+ :shape
+ ]
+
+ f0bak = f0.copy()
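+ # Coarse quantization: map f0 onto 255 mel-spaced bins via
+ # f0_mel = 1127 * ln(1 + f0 / 700). With the defaults f0_min=50 and
+ # f0_max=1100 this gives f0_mel_min ≈ 77.8 and f0_mel_max ≈ 1064.4, so
+ # voiced frames land in [1, 255] and unvoiced frames (f0 = 0) stay at 1.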
+ f0_mel = 1127 * np.log(1 + f0 / 700)
+ f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
+ f0_mel_max - f0_mel_min
+ ) + 1
+ f0_mel[f0_mel <= 1] = 1
+ f0_mel[f0_mel > 255] = 255
+ f0_coarse = np.rint(f0_mel).astype(np.int64) # np.int was removed in NumPy 1.24
+
+ return f0_coarse, f0bak # 1-0
+
+ def vc(
+ self,
+ model,
+ net_g,
+ sid,
+ audio0,
+ pitch,
+ pitchf,
+ times,
+ index,
+ big_npy,
+ index_rate,
+ version,
+ protect,
+ ): # ,file_index,file_big_npy
+ feats = torch.from_numpy(audio0)
+ if self.is_half:
+ feats = feats.half()
+ else:
+ feats = feats.float()
+ if feats.dim() == 2: # double channels
+ feats = feats.mean(-1)
+ assert feats.dim() == 1, feats.dim()
+ feats = feats.view(1, -1)
+ padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False)
+
+ inputs = {
+ "source": feats.to(self.device),
+ "padding_mask": padding_mask,
+ "output_layer": 9 if version == "v1" else 12,
+ }
+ t0 = ttime()
+ with torch.no_grad():
+ logits = model.extract_features(**inputs)
+ feats = model.final_proj(logits[0]) if version == "v1" else logits[0]
+ if protect < 0.5 and pitch is not None and pitchf is not None:
+ feats0 = feats.clone()
+ if (
+ index is not None
+ and big_npy is not None
+ and index_rate != 0
+ ):
+ npy = feats[0].cpu().numpy()
+ if self.is_half:
+ npy = npy.astype("float32")
+
+ # _, I = index.search(npy, 1)
+ # npy = big_npy[I.squeeze()]
+
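+ # Retrieval blending: for every frame, fetch the 8 nearest training
+ # features from the faiss index, weight them by inverse squared distance,
+ # and mix the weighted average into the extracted content features at
+ # the given index_rate.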
+ score, ix = index.search(npy, k=8)
+ weight = np.square(1 / score)
+ weight /= weight.sum(axis=1, keepdims=True)
+ npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)
+
+ if self.is_half:
+ npy = npy.astype("float16")
+ feats = (
+ torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate
+ + (1 - index_rate) * feats
+ )
+
+ feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
+ if protect < 0.5 and pitch is not None and pitchf is not None:
+ feats0 = F.interpolate(feats0.permute(0, 2, 1), scale_factor=2).permute(
+ 0, 2, 1
+ )
+ t1 = ttime()
+ p_len = audio0.shape[0] // self.window
+ if feats.shape[1] < p_len:
+ p_len = feats.shape[1]
+ if pitch is not None and pitchf is not None:
+ pitch = pitch[:, :p_len]
+ pitchf = pitchf[:, :p_len]
+
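+ # "protect" guards voiceless consonants: voiced frames (pitchf > 0) keep
+ # the index-blended features (mask 1), unvoiced frames are pulled back
+ # toward the pre-retrieval features feats0 by a factor of protect.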
+ if protect < 0.5 and pitch is not None and pitchf is not None:
+ pitchff = pitchf.clone()
+ pitchff[pitchf > 0] = 1
+ pitchff[pitchf < 1] = protect
+ pitchff = pitchff.unsqueeze(-1)
+ feats = feats * pitchff + feats0 * (1 - pitchff)
+ feats = feats.to(feats0.dtype)
+ p_len = torch.tensor([p_len], device=self.device).long()
+ with torch.no_grad():
+ if pitch is not None and pitchf is not None:
+ audio1 = (
+ (net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0])
+ .data.cpu()
+ .float()
+ .numpy()
+ )
+ else:
+ audio1 = (
+ (net_g.infer(feats, p_len, sid)[0][0, 0]).data.cpu().float().numpy()
+ )
+ del feats, p_len, padding_mask
+ if torch.cuda.is_available():
+ torch.cuda.empty_cache()
+ t2 = ttime()
+ times[0] += t1 - t0
+ times[2] += t2 - t1
+ return audio1
+
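+ # Per-chunk helper mirroring the slicing done inline in pipeline(): align t
+ # to a window boundary, run vc() on the padded slice, and trim t_pad_tgt
+ # samples of padding from both ends of the result.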
+ def process_t(self, t, s, window, audio_pad, pitch, pitchf, times, index, big_npy, index_rate, version, protect, t_pad_tgt, if_f0, sid, model, net_g):
+ t = t // window * window
+ if if_f0 == 1:
+ return self.vc(
+ model,
+ net_g,
+ sid,
+ audio_pad[s : t + t_pad_tgt + window],
+ pitch[:, s // window : (t + t_pad_tgt) // window],
+ pitchf[:, s // window : (t + t_pad_tgt) // window],
+ times,
+ index,
+ big_npy,
+ index_rate,
+ version,
+ protect,
+ )[t_pad_tgt : -t_pad_tgt]
+ else:
+ return self.vc(
+ model,
+ net_g,
+ sid,
+ audio_pad[s : t + t_pad_tgt + window],
+ None,
+ None,
+ times,
+ index,
+ big_npy,
+ index_rate,
+ version,
+ protect,
+ )[t_pad_tgt : -t_pad_tgt]
+
+ def pipeline(self, model, net_g, sid, audio, input_audio_path, times, f0_up_key, f0_method,
+ file_index, index_rate, if_f0, filter_radius, tgt_sr, resample_sr, rms_mix_rate,
+ version, protect, crepe_hop_length, f0_file=None, f0_min=50, f0_max=1100):
+
+ try:
+ if file_index == "":
+ print("File index was empty.")
+ index = None
+ big_npy = None
+ else:
+ if not os.path.exists(file_index):
+ print(f"Warning: index file {file_index} does not exist.")
+ print(f"Attempting to load {file_index}....")
+ index = faiss.read_index(file_index)
+ big_npy = index.reconstruct_n(0, index.ntotal)
+ except Exception:
+ print("Could not open Faiss index file for reading.")
+ index = None
+ big_npy = None
+
+ audio = signal.filtfilt(bh, ah, audio)
+ audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect")
+ opt_ts = []
+
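+ # For inputs longer than t_max, choose split points at quiet spots:
+ # audio_sum is a sliding-window sum of the signal, and around every
+ # multiple of t_center the sample with the smallest |audio_sum| within
+ # ±t_query becomes a cut point, so chunks break where the audio is quietest.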
+ if audio_pad.shape[0] > self.t_max:
+ audio_sum = np.zeros_like(audio)
+ for i in range(self.window):
+ audio_sum += audio_pad[i : i - self.window]
+
+ for t in range(self.t_center, audio.shape[0], self.t_center):
+ abs_audio_sum = np.abs(audio_sum[t - self.t_query : t + self.t_query])
+ min_abs_audio_sum = abs_audio_sum.min()
+ opt_ts.append(t - self.t_query + np.where(abs_audio_sum == min_abs_audio_sum)[0][0])
+
+ s = 0
+ audio_opt = []
+ t = None
+ t1 = ttime()
+ audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect")
+ p_len = audio_pad.shape[0] // self.window
+ inp_f0 = None
+
+ if f0_file is not None:
+ try:
+ with open(f0_file.name, "r") as f:
+ inp_f0 = np.array([list(map(float, line.split(","))) for line in f.read().strip("\n").split("\n")], dtype="float32")
+ except Exception:
+ traceback.print_exc()
+
+ sid = torch.tensor(sid, device=self.device).unsqueeze(0).long()
+ pitch, pitchf = None, None
+
+ if if_f0:
+ pitch, pitchf = self.get_f0(
+ input_audio_path, audio_pad, p_len, f0_up_key, f0_method,
+ filter_radius, crepe_hop_length, inp_f0, f0_min, f0_max)
+
+ pitch = pitch[:p_len].astype(np.int64 if self.device != 'mps' else np.float32)
+ pitchf = pitchf[:p_len].astype(np.float32)
+ pitch = torch.from_numpy(pitch).to(self.device).unsqueeze(0)
+ pitchf = torch.from_numpy(pitchf).to(self.device).unsqueeze(0)
+
+ t2 = ttime()
+ times[1] += t2 - t1
+
+ with tqdm(total=len(opt_ts), desc="Processing", unit="window") as pbar:
+ for i, t in enumerate(opt_ts):
+ t = t // self.window * self.window
+ start = s
+ end = t + self.t_pad2 + self.window
+ audio_slice = audio_pad[start:end]
+ pitch_slice = pitch[:, start // self.window:end // self.window] if if_f0 else None
+ pitchf_slice = pitchf[:, start // self.window:end // self.window] if if_f0 else None
+ audio_opt.append(self.vc(model, net_g, sid, audio_slice, pitch_slice, pitchf_slice, times, index, big_npy, index_rate, version, protect)[self.t_pad_tgt : -self.t_pad_tgt])
+ s = t
+ pbar.update(1)
+ pbar.refresh()
+
+ audio_slice = audio_pad[t:]
+ pitch_slice = pitch[:, t // self.window:] if if_f0 and t is not None else pitch
+ pitchf_slice = pitchf[:, t // self.window:] if if_f0 and t is not None else pitchf
+ audio_opt.append(self.vc(model, net_g, sid, audio_slice, pitch_slice, pitchf_slice, times, index, big_npy, index_rate, version, protect)[self.t_pad_tgt : -self.t_pad_tgt])
+
+ audio_opt = np.concatenate(audio_opt)
+ if rms_mix_rate != 1:
+ audio_opt = change_rms(audio, 16000, audio_opt, tgt_sr, rms_mix_rate)
+ if resample_sr >= 16000 and tgt_sr != resample_sr:
+ audio_opt = librosa.resample(audio_opt, orig_sr=tgt_sr, target_sr=resample_sr)
+
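+ # Convert to int16 with ~1% headroom: audio_max never drops below 1, so
+ # quiet audio is not amplified and peaks stay just under full scale.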
+ max_int16 = 32768
+ audio_max = max(np.abs(audio_opt).max() / 0.99, 1)
+ audio_opt = (audio_opt * max_int16 / audio_max).astype(np.int16)
+
+ if torch.cuda.is_available():
+ torch.cuda.empty_cache()
+
+ print("Returning completed audio...")
+ print("-------------------")
+
+ output_folder = "audio-outputs"
+ output_filename = "generated_audio_{}.wav"
+ output_count = 1
+ while True:
+ current_output_path = os.path.join(output_folder, output_filename.format(output_count))
+ if not os.path.exists(current_output_path):
+ break
+ output_count += 1
+
+ # Save the generated audio as a WAV file
+ wavfile.write(current_output_path, tgt_sr, audio_opt)
+
+ print(f"Generated audio saved to: {current_output_path}")
+
+ return audio_opt
diff --git a/venv.sh b/venv.sh
new file mode 100644
index 000000000..17f58bf2a
--- /dev/null
+++ b/venv.sh
@@ -0,0 +1 @@
+python3 -m venv .venv
diff --git a/weights/.gitignore b/weights/.gitignore
new file mode 100644
index 000000000..d6b7ef32c
--- /dev/null
+++ b/weights/.gitignore
@@ -0,0 +1,2 @@
+*
+!.gitignore