diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 4e622170b..000000000 --- a/.gitignore +++ /dev/null @@ -1,42 +0,0 @@ -.DS_Store -__pycache__ -/TEMP -/DATASETS -/RUNTIME -*.pyd -hubert_base.pt -.venv -alexforkINSTALL.bat -Changelog_CN.md -Changelog_EN.md -Changelog_KO.md -difdep.py -EasierGUI.py -envfilescheck.bat -export_onnx.py -export_onnx_old.py -ffmpeg.exe -ffprobe.exe -Fixes/Launch_Tensorboard.bat -Fixes/LOCAL_CREPE_FIX.bat -Fixes/local_fixes.py -Fixes/tensor-launch.py -gui.py -infer-web — backup.py -infer-webbackup.py -install_easy_dependencies.py -install_easyGUI.bat -installstft.bat -Launch_Tensorboard.bat -listdepend.bat -LOCAL_CREPE_FIX.bat -local_fixes.py -oldinfer.py -onnx_inference_demo.py -Praat.exe -requirementsNEW.txt -rmvpe.pt -run_easiergui.bat -tensor-launch.py -values1.json -使用需遵守的协议-LICENSE.txt \ No newline at end of file diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index 49f62d5f9..000000000 --- a/Dockerfile +++ /dev/null @@ -1,13 +0,0 @@ -# syntax=docker/dockerfile:1 - -FROM python:3.10-bullseye - -EXPOSE 7865 - -WORKDIR /app - -COPY . . - -RUN pip3 install -r requirements.txt - -CMD ["python3", "infer-web.py"] \ No newline at end of file diff --git a/Fixes/local_fixes.py b/Fixes/local_fixes.py deleted file mode 100644 index e29d06efc..000000000 --- a/Fixes/local_fixes.py +++ /dev/null @@ -1,144 +0,0 @@ -import os -import sys -import time -import shutil -import requests -import zipfile - -file_name2 = 'go-web.bat' -text_to_insert2 = """python infer-web.py --pycmd runtime\python.exe --port 7897 -pause""" - -with open(file_name2, 'w') as archivo: - archivo.write(text_to_insert2) -print(f"Se ha modificado el contenido de '{file_name2}'.") - -def insert_new_line(file_name, line_to_find, text_to_insert): - lines = [] - with open(file_name, 'r', encoding='utf-8') as read_obj: - lines = read_obj.readlines() - already_exists = False - with open(file_name + '.tmp', 'w', encoding='utf-8') as write_obj: - for i in range(len(lines)): - write_obj.write(lines[i]) - if lines[i].strip() == line_to_find: - # If next line exists and starts with sys.path.append, skip - if i+1 < len(lines) and lines[i+1].strip().startswith("sys.path.append"): - print('¡Ya estaba arreglado! 
Se salta añadir una línea...') - already_exists = True - break - else: - write_obj.write(text_to_insert + '\n') - # If no existing sys.path.append line was found, replace the original file - if not already_exists: - os.replace(file_name + '.tmp', file_name) - return True - else: - # If existing line was found, delete temporary file - os.remove(file_name + '.tmp') - return False - -def replace_in_file(file_name, old_text, new_text): - with open(file_name, 'r', encoding='utf-8') as file: - file_contents = file.read() - - if old_text in file_contents: - file_contents = file_contents.replace(old_text, new_text) - with open(file_name, 'w', encoding='utf-8') as file: - file.write(file_contents) - return True - - return False - -if __name__ == "__main__": - current_path = os.getcwd() - file_name = 'extract_f0_print.py' - line_to_find = 'import numpy as np, logging' - text_to_insert = "sys.path.append(r'" + current_path + "')" - - - success_1 = insert_new_line(file_name, line_to_find, text_to_insert) - if success_1: - print('¡La primera operación fue un éxito!') - else: - print('¡Se saltó la primera operación porque ya estaba arreglada!') - - file_name = 'infer-web.py' - old_text = 'with gr.Blocks(theme=gr.themes.Soft()) as app:' - new_text = 'with gr.Blocks() as app:' - - success_2 = replace_in_file(file_name, old_text, new_text) - if success_2: - print('¡La segunda operación fue un éxito!') - else: - print('¡La segunda operación se omitió porque ya estaba arreglada!') - - print('¡Correcciones locales exitosas! Ahora debería poder inferir y entrenar localmente en Applio RVC Fork.') - - time.sleep(5) - -def find_torchcrepe_directory(directory): - """ - Busca recursivamente la carpeta de mayor jerarquía denominada 'torchcrepe' dentro de un directorio. - Devuelve la ruta del directorio encontrado o Ninguno si no se encuentra. - """ - for root, dirs, files in os.walk(directory): - if 'torchcrepe' in dirs: - return os.path.join(root, 'torchcrepe') - return None - -def download_and_extract_torchcrepe(): - url = 'https://github.com/maxrmorrison/torchcrepe/archive/refs/heads/master.zip' - temp_dir = 'temp_torchcrepe' - destination_dir = os.getcwd() - - try: - torchcrepe_dir_path = os.path.join(destination_dir, 'torchcrepe') - - if os.path.exists(torchcrepe_dir_path): - print("Saltando la descarga de torchcrepe. 
La carpeta ya existe.") - return - - # Download the file - print("Iniciando la descarga de torchcrepe...") - response = requests.get(url) - - # Raise an error if the GET request was unsuccessful - response.raise_for_status() - print("Descarga finalizada.") - - # Save the downloaded file - zip_file_path = os.path.join(temp_dir, 'master.zip') - os.makedirs(temp_dir, exist_ok=True) - with open(zip_file_path, 'wb') as file: - file.write(response.content) - print(f"Archivo zip guardado en {zip_file_path}") - - # Extract the zip file - print("Extrayendo contenidos...") - with zipfile.ZipFile(zip_file_path, 'r') as zip_file: - zip_file.extractall(temp_dir) - print("Extracción finalizada.") - - # Locate the torchcrepe folder and move it to the destination directory - torchcrepe_dir = find_torchcrepe_directory(temp_dir) - if torchcrepe_dir: - shutil.move(torchcrepe_dir, destination_dir) - print(f"Se movió el directorio torchcrepe a {destination_dir}!") - else: - print("No se pudo localizar el directorio de torchcrepe.") - - except Exception as e: - print("Torchcrepe no descargado con éxito?", e) - - # Clean up temporary directory - if os.path.exists(temp_dir): - shutil.rmtree(temp_dir) - -# Run the function -download_and_extract_torchcrepe() - -temp_dir = 'temp_torchcrepe' - -if os.path.exists(temp_dir): - shutil.rmtree(temp_dir) diff --git a/Fixes/tensor-launch.py b/Fixes/tensor-launch.py deleted file mode 100644 index 23f6107f8..000000000 --- a/Fixes/tensor-launch.py +++ /dev/null @@ -1,15 +0,0 @@ -import threading -import time -from tensorboard import program -import os - -log_path = "logs" - -if __name__ == "__main__": - tb = program.TensorBoard() - tb.configure(argv=[None, '--logdir', log_path]) - url = tb.launch() - print(f'Tensorboard can be accessed at: {url}') - - while True: - time.sleep(600) # Keep the main thread running \ No newline at end of file diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 2b8d8ce41..000000000 --- a/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -MIT License - -Copyright (c) 2023 liujing04 -Copyright (c) 2023 源文雨 - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/LazyImport.py b/LazyImport.py deleted file mode 100644 index 5bdb05ddd..000000000 --- a/LazyImport.py +++ /dev/null @@ -1,13 +0,0 @@ -from importlib.util import find_spec, LazyLoader, module_from_spec -from sys import modules - -def lazyload(name): - if name in modules: - return modules[name] - else: - spec = find_spec(name) - loader = LazyLoader(spec.loader) - module = module_from_spec(spec) - modules[name] = module - loader.exec_module(module) - return module \ No newline at end of file diff --git a/MDXNet.py b/MDXNet.py deleted file mode 100644 index 9b7eb4384..000000000 --- a/MDXNet.py +++ /dev/null @@ -1,272 +0,0 @@ -import soundfile as sf -import torch, pdb, os, warnings, librosa -import numpy as np -import onnxruntime as ort -from tqdm import tqdm -import torch - -dim_c = 4 - - -class Conv_TDF_net_trim: - def __init__( - self, device, model_name, target_name, L, dim_f, dim_t, n_fft, hop=1024 - ): - super(Conv_TDF_net_trim, self).__init__() - - self.dim_f = dim_f - self.dim_t = 2**dim_t - self.n_fft = n_fft - self.hop = hop - self.n_bins = self.n_fft // 2 + 1 - self.chunk_size = hop * (self.dim_t - 1) - self.window = torch.hann_window(window_length=self.n_fft, periodic=True).to( - device - ) - self.target_name = target_name - self.blender = "blender" in model_name - - out_c = dim_c * 4 if target_name == "*" else dim_c - self.freq_pad = torch.zeros( - [1, out_c, self.n_bins - self.dim_f, self.dim_t] - ).to(device) - - self.n = L // 2 - - def stft(self, x): - x = x.reshape([-1, self.chunk_size]) - x = torch.stft( - x, - n_fft=self.n_fft, - hop_length=self.hop, - window=self.window, - center=True, - return_complex=True, - ) - x = torch.view_as_real(x) - x = x.permute([0, 3, 1, 2]) - x = x.reshape([-1, 2, 2, self.n_bins, self.dim_t]).reshape( - [-1, dim_c, self.n_bins, self.dim_t] - ) - return x[:, :, : self.dim_f] - - def istft(self, x, freq_pad=None): - freq_pad = ( - self.freq_pad.repeat([x.shape[0], 1, 1, 1]) - if freq_pad is None - else freq_pad - ) - x = torch.cat([x, freq_pad], -2) - c = 4 * 2 if self.target_name == "*" else 2 - x = x.reshape([-1, c, 2, self.n_bins, self.dim_t]).reshape( - [-1, 2, self.n_bins, self.dim_t] - ) - x = x.permute([0, 2, 3, 1]) - x = x.contiguous() - x = torch.view_as_complex(x) - x = torch.istft( - x, n_fft=self.n_fft, hop_length=self.hop, window=self.window, center=True - ) - return x.reshape([-1, c, self.chunk_size]) - - -def get_models(device, dim_f, dim_t, n_fft): - return Conv_TDF_net_trim( - device=device, - model_name="Conv-TDF", - target_name="vocals", - L=11, - dim_f=dim_f, - dim_t=dim_t, - n_fft=n_fft, - ) - - -warnings.filterwarnings("ignore") -cpu = torch.device("cpu") -if torch.cuda.is_available(): - device = torch.device("cuda:0") -elif torch.backends.mps.is_available(): - device = torch.device("mps") -else: - device = torch.device("cpu") - - -class Predictor: - def __init__(self, args): - self.args = args - self.model_ = get_models( - device=cpu, dim_f=args.dim_f, dim_t=args.dim_t, n_fft=args.n_fft - ) - self.model = ort.InferenceSession( - os.path.join(args.onnx, self.model_.target_name + ".onnx"), - providers=["CUDAExecutionProvider", "CPUExecutionProvider"], - ) - print("onnx load done") - - def demix(self, mix): - samples = mix.shape[-1] - margin = self.args.margin - chunk_size = self.args.chunks * 44100 - assert not margin == 0, "margin cannot be zero!" 
- if margin > chunk_size: - margin = chunk_size - - segmented_mix = {} - - if self.args.chunks == 0 or samples < chunk_size: - chunk_size = samples - - counter = -1 - for skip in range(0, samples, chunk_size): - counter += 1 - - s_margin = 0 if counter == 0 else margin - end = min(skip + chunk_size + margin, samples) - - start = skip - s_margin - - segmented_mix[skip] = mix[:, start:end].copy() - if end == samples: - break - - sources = self.demix_base(segmented_mix, margin_size=margin) - """ - mix:(2,big_sample) - segmented_mix:offset->(2,small_sample) - sources:(1,2,big_sample) - """ - return sources - - def demix_base(self, mixes, margin_size): - chunked_sources = [] - progress_bar = tqdm(total=len(mixes)) - progress_bar.set_description("Processing") - for mix in mixes: - cmix = mixes[mix] - sources = [] - n_sample = cmix.shape[1] - model = self.model_ - trim = model.n_fft // 2 - gen_size = model.chunk_size - 2 * trim - pad = gen_size - n_sample % gen_size - mix_p = np.concatenate( - (np.zeros((2, trim)), cmix, np.zeros((2, pad)), np.zeros((2, trim))), 1 - ) - mix_waves = [] - i = 0 - while i < n_sample + pad: - waves = np.array(mix_p[:, i : i + model.chunk_size]) - mix_waves.append(waves) - i += gen_size - mix_waves = torch.tensor(mix_waves, dtype=torch.float32).to(cpu) - with torch.no_grad(): - _ort = self.model - spek = model.stft(mix_waves) - if self.args.denoise: - spec_pred = ( - -_ort.run(None, {"input": -spek.cpu().numpy()})[0] * 0.5 - + _ort.run(None, {"input": spek.cpu().numpy()})[0] * 0.5 - ) - tar_waves = model.istft(torch.tensor(spec_pred)) - else: - tar_waves = model.istft( - torch.tensor(_ort.run(None, {"input": spek.cpu().numpy()})[0]) - ) - tar_signal = ( - tar_waves[:, :, trim:-trim] - .transpose(0, 1) - .reshape(2, -1) - .numpy()[:, :-pad] - ) - - start = 0 if mix == 0 else margin_size - end = None if mix == list(mixes.keys())[::-1][0] else -margin_size - if margin_size == 0: - end = None - sources.append(tar_signal[:, start:end]) - - progress_bar.update(1) - - chunked_sources.append(sources) - _sources = np.concatenate(chunked_sources, axis=-1) - # del self.model - progress_bar.close() - return _sources - - def prediction(self, m, vocal_root, others_root, format): - os.makedirs(vocal_root, exist_ok=True) - os.makedirs(others_root, exist_ok=True) - basename = os.path.basename(m) - mix, rate = librosa.load(m, mono=False, sr=44100) - if mix.ndim == 1: - mix = np.asfortranarray([mix, mix]) - mix = mix.T - sources = self.demix(mix.T) - opt = sources[0].T - if format in ["wav", "flac"]: - sf.write( - "%s/%s_main_vocal.%s" % (vocal_root, basename, format), mix - opt, rate - ) - sf.write("%s/%s_others.%s" % (others_root, basename, format), opt, rate) - else: - path_vocal = "%s/%s_main_vocal.wav" % (vocal_root, basename) - path_other = "%s/%s_others.wav" % (others_root, basename) - sf.write(path_vocal, mix - opt, rate) - sf.write(path_other, opt, rate) - if os.path.exists(path_vocal): - os.system( - "ffmpeg -i %s -vn %s -q:a 2 -y" - % (path_vocal, path_vocal[:-4] + ".%s" % format) - ) - if os.path.exists(path_other): - os.system( - "ffmpeg -i %s -vn %s -q:a 2 -y" - % (path_other, path_other[:-4] + ".%s" % format) - ) - - -class MDXNetDereverb: - def __init__(self, chunks): - self.onnx = "uvr5_weights/onnx_dereverb_By_FoxJoy" - self.shifts = 10 #'Predict with randomised equivariant stabilisation' - self.mixing = "min_mag" # ['default','min_mag','max_mag'] - self.chunks = chunks - self.margin = 44100 - self.dim_t = 9 - self.dim_f = 3072 - self.n_fft = 6144 - self.denoise = True 
- self.pred = Predictor(self) - - def _path_audio_(self, input, vocal_root, others_root, format): - self.pred.prediction(input, vocal_root, others_root, format) - - -if __name__ == "__main__": - dereverb = MDXNetDereverb(15) - from time import time as ttime - - t0 = ttime() - dereverb._path_audio_( - "雪雪伴奏对消HP5.wav", - "vocal", - "others", - ) - t1 = ttime() - print(t1 - t0) - - -""" - -runtime\python.exe MDXNet.py - -6G: -15/9:0.8G->6.8G -14:0.8G->6.5G -25:炸 - -half15:0.7G->6.6G,22.69s -fp32-15:0.7G->6.6G,20.85s - -""" diff --git "a/MIT\345\215\217\350\256\256\346\232\250\347\233\270\345\205\263\345\274\225\347\224\250\345\272\223\345\215\217\350\256\256" "b/MIT\345\215\217\350\256\256\346\232\250\347\233\270\345\205\263\345\274\225\347\224\250\345\272\223\345\215\217\350\256\256" deleted file mode 100644 index dbb6c6d51..000000000 --- "a/MIT\345\215\217\350\256\256\346\232\250\347\233\270\345\205\263\345\274\225\347\224\250\345\272\223\345\215\217\350\256\256" +++ /dev/null @@ -1,45 +0,0 @@ -本软件及其相关代码以MIT协议开源,作者不对软件具备任何控制力,使用软件者、传播软件导出的声音者自负全责。 -如不认可该条款,则不能使用或引用软件包内任何代码和文件。 - -特此授予任何获得本软件和相关文档文件(以下简称“软件”)副本的人免费使用、复制、修改、合并、出版、分发、再授权和/或销售本软件的权利,以及授予本软件所提供的人使用本软件的权利,但须符合以下条件: -上述版权声明和本许可声明应包含在软件的所有副本或实质部分中。 -软件是“按原样”提供的,没有任何明示或暗示的保证,包括但不限于适销性、适用于特定目的和不侵权的保证。在任何情况下,作者或版权持有人均不承担因软件或软件的使用或其他交易而产生、产生或与之相关的任何索赔、损害赔偿或其他责任,无论是在合同诉讼、侵权诉讼还是其他诉讼中。 - - -The LICENCEs for related libraries are as follows. -相关引用库协议如下: - -ContentVec -https://github.com/auspicious3000/contentvec/blob/main/LICENSE -MIT License - -VITS -https://github.com/jaywalnut310/vits/blob/main/LICENSE -MIT License - -HIFIGAN -https://github.com/jik876/hifi-gan/blob/master/LICENSE -MIT License - -gradio -https://github.com/gradio-app/gradio/blob/main/LICENSE -Apache License 2.0 - -ffmpeg -https://github.com/FFmpeg/FFmpeg/blob/master/COPYING.LGPLv3 -https://github.com/BtbN/FFmpeg-Builds/releases/download/autobuild-2021-02-28-12-32/ffmpeg-n4.3.2-160-gfbb9368226-win64-lgpl-4.3.zip -LPGLv3 License -MIT License - -ultimatevocalremovergui -https://github.com/Anjok07/ultimatevocalremovergui/blob/master/LICENSE -https://github.com/yang123qwe/vocal_separation_by_uvr5 -MIT License - -audio-slicer -https://github.com/openvpi/audio-slicer/blob/main/LICENSE -MIT License - -PySimpleGUI -https://github.com/PySimpleGUI/PySimpleGUI/blob/master/license.txt -LPGLv3 License diff --git a/Makefile b/Makefile deleted file mode 100644 index 44de020e6..000000000 --- a/Makefile +++ /dev/null @@ -1,63 +0,0 @@ -.PHONY: -.ONESHELL: - -help: ## Show this help and exit - @grep -hE '^[A-Za-z0-9_ \-]*?:.*##.*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' - -install: ## Install dependencies (Do everytime you start up a paperspace machine) - apt-get -y install build-essential python3-dev ffmpeg - pip install --upgrade setuptools wheel - pip install --upgrade pip - pip install faiss-gpu fairseq gradio ffmpeg ffmpeg-python praat-parselmouth pyworld numpy==1.23.5 numba==0.56.4 librosa==0.9.1 - pip install -r requirements.txt - pip install --upgrade lxml - apt-get update - apt -y install -qq aria2 - -basev1: ## Download version 1 pre-trained models (Do only once after cloning the fork) - mkdir -p pretrained uvr5_weights - git pull - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D32k.pth -d pretrained -o D32k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M 
https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D40k.pth -d pretrained -o D40k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D48k.pth -d pretrained -o D48k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G32k.pth -d pretrained -o G32k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G40k.pth -d pretrained -o G40k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G48k.pth -d pretrained -o G48k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D32k.pth -d pretrained -o f0D32k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D40k.pth -d pretrained -o f0D40k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D48k.pth -d pretrained -o f0D48k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G32k.pth -d pretrained -o f0G32k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G40k.pth -d pretrained -o f0G40k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G48k.pth -d pretrained -o f0G48k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP2-人声vocals+非人声instrumentals.pth -d uvr5_weights -o HP2-人声vocals+非人声instrumentals.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP5-主旋律人声vocals+其他instrumentals.pth -d uvr5_weights -o HP5-主旋律人声vocals+其他instrumentals.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/hubert_base.pt -d ./ -o hubert_base.pt - -basev2: ## Download version 2 pre-trained models (Do only once after cloning the fork) - mkdir -p pretrained_v2 uvr5_weights - git pull - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/D32k.pth -d pretrained_v2 -o D32k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/D40k.pth -d pretrained_v2 -o D40k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/D48k.pth -d pretrained_v2 -o D48k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/G32k.pth -d pretrained_v2 -o G32k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/G40k.pth -d pretrained_v2 -o G40k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/G48k.pth -d 
pretrained_v2 -o G48k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0D32k.pth -d pretrained_v2 -o f0D32k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0D40k.pth -d pretrained_v2 -o f0D40k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0D48k.pth -d pretrained_v2 -o f0D48k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0G32k.pth -d pretrained_v2 -o f0G32k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0G40k.pth -d pretrained_v2 -o f0G40k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0G48k.pth -d pretrained_v2 -o f0G48k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP2-人声vocals+非人声instrumentals.pth -d uvr5_weights -o HP2-人声vocals+非人声instrumentals.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP5-主旋律人声vocals+其他instrumentals.pth -d uvr5_weights -o HP5-主旋律人声vocals+其他instrumentals.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/hubert_base.pt -d ./ -o hubert_base.pt - -run-ui: ## Run the python GUI - python infer-web.py --paperspace --pycmd python - -run-cli: ## Run the python CLI - python infer-web.py --pycmd python --is_cli - -tensorboard: ## Start the tensorboard (Run on separate terminal) - echo https://tensorboard-$$(hostname).clg07azjl.paperspacegradient.com - tensorboard --logdir logs --bind_all \ No newline at end of file diff --git a/README.md b/README.md deleted file mode 100644 index 6c16d3c96..000000000 --- a/README.md +++ /dev/null @@ -1,53 +0,0 @@ -
-# 🍏 Applio (Mangio-RVC-Fork)
-
-Applio is a user-friendly fork of Mangio, designed to provide an intuitive interface, especially for newcomers.
-
-## Links
-
-- Repository
-- External Code
-- Google Colab
-
-## Credits
-
-- RVC
-- Mangio-RVC-Fork
-- ContentVec
-- VITS
-- HIFIGAN
-- Gradio
-- FFmpeg
-- Ultimate Vocal Remover
-- audio-slicer
-
-## Contributors
diff --git a/Retrieval_based_Voice_Conversion_WebUI.ipynb b/Retrieval_based_Voice_Conversion_WebUI.ipynb deleted file mode 100644 index 4890daf77..000000000 --- a/Retrieval_based_Voice_Conversion_WebUI.ipynb +++ /dev/null @@ -1,381 +0,0 @@ -{ - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { - "colab": { - "private_outputs": true, - "provenance": [] - }, - "kernelspec": { - "name": "python3", - "display_name": "Python 3" - }, - "language_info": { - "name": "python" - }, - "accelerator": "GPU", - "gpuClass": "standard" - }, - "cells": [ - { - "cell_type": "markdown", - "source": [ - "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/blob/main/Retrieval_based_Voice_Conversion_WebUI.ipynb)" - ], - "metadata": { - "id": "ZFFCx5J80SGa" - } - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "GmFP6bN9dvOq" - }, - "outputs": [], - "source": [ - "#@title 查看显卡\n", - "!nvidia-smi" - ] - }, - { - "cell_type": "code", - "source": [ - "#@title 安装依赖\n", - "!apt-get -y install build-essential python3-dev ffmpeg\n", - "!pip3 install --upgrade setuptools wheel\n", - "!pip3 install --upgrade pip\n", - "!pip3 install faiss-cpu==1.7.2 fairseq gradio==3.14.0 ffmpeg ffmpeg-python praat-parselmouth pyworld numpy==1.23.5 numba==0.56.4 librosa==0.9.2" - ], - "metadata": { - "id": "wjddIFr1oS3W" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "#@title 克隆仓库\n", - "\n", - "!git clone --depth=1 -b stable https://github.com/fumiama/Retrieval-based-Voice-Conversion-WebUI\n", - "%cd /content/Retrieval-based-Voice-Conversion-WebUI\n", - "!mkdir -p pretrained uvr5_weights" - ], - "metadata": { - "id": "ge_97mfpgqTm" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "#@title 更新仓库(一般无需执行)\n", - "!git pull" - ], - "metadata": { - "id": "BLDEZADkvlw1" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "#@title 安装aria2\n", - "!apt -y install -qq aria2" - ], - "metadata": { - "id": "pqE0PrnuRqI2" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "#@title 下载底模\n", - "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D32k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o D32k.pth\n", - "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D40k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o D40k.pth\n", - "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D48k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o D48k.pth\n", - "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G32k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o G32k.pth\n", - "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G40k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o G40k.pth\n", - "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G48k.pth -d 
/content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o G48k.pth\n", - "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D32k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o f0D32k.pth\n", - "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D40k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o f0D40k.pth\n", - "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D48k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o f0D48k.pth\n", - "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G32k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o f0G32k.pth\n", - "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G40k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o f0G40k.pth\n", - "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G48k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o f0G48k.pth" - ], - "metadata": { - "id": "UG3XpUwEomUz" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "#@title 下载人声分离模型\n", - "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP2-人声vocals+非人声instrumentals.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/uvr5_weights -o HP2-人声vocals+非人声instrumentals.pth\n", - "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP5-主旋律人声vocals+其他instrumentals.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/uvr5_weights -o HP5-主旋律人声vocals+其他instrumentals.pth" - ], - "metadata": { - "id": "HugjmZqZRuiF" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "#@title 下载hubert_base\n", - "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/hubert_base.pt -d /content/Retrieval-based-Voice-Conversion-WebUI -o hubert_base.pt" - ], - "metadata": { - "id": "2RCaT9FTR0ej" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "#@title 挂载谷歌云盘\n", - "\n", - "from google.colab import drive\n", - "drive.mount('/content/drive')" - ], - "metadata": { - "id": "jwu07JgqoFON" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "#@title 从谷歌云盘加载打包好的数据集到/content/dataset\n", - "\n", - "#@markdown 数据集位置\n", - "DATASET = \"/content/drive/MyDrive/dataset/lulu20230327_32k.zip\" #@param {type:\"string\"}\n", - "\n", - "!mkdir -p /content/dataset\n", - "!unzip -d /content/dataset -B {DATASET}" - ], - "metadata": { - "id": "Mwk7Q0Loqzjx" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "#@title 重命名数据集中的重名文件\n", - "!ls -a /content/dataset/\n", - "!rename 's/(\\w+)\\.(\\w+)~(\\d*)/$1_$3.$2/' /content/dataset/*.*~*" - ], - "metadata": { - "id": "PDlFxWHWEynD" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - 
"#@title 启动web\n", - "%cd /content/Retrieval-based-Voice-Conversion-WebUI\n", - "# %load_ext tensorboard\n", - "# %tensorboard --logdir /content/Retrieval-based-Voice-Conversion-WebUI/logs\n", - "!python3 infer-web.py --colab --pycmd python3" - ], - "metadata": { - "id": "7vh6vphDwO0b" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "#@title 手动将训练后的模型文件备份到谷歌云盘\n", - "#@markdown 需要自己查看logs文件夹下模型的文件名,手动修改下方命令末尾的文件名\n", - "\n", - "#@markdown 模型名\n", - "MODELNAME = \"lulu\" #@param {type:\"string\"}\n", - "#@markdown 模型epoch\n", - "MODELEPOCH = 9600 #@param {type:\"integer\"}\n", - "\n", - "!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/G_{MODELEPOCH}.pth /content/drive/MyDrive/{MODELNAME}_D_{MODELEPOCH}.pth\n", - "!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/D_{MODELEPOCH}.pth /content/drive/MyDrive/{MODELNAME}_G_{MODELEPOCH}.pth\n", - "!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/added_*.index /content/drive/MyDrive/\n", - "!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/total_*.npy /content/drive/MyDrive/\n", - "\n", - "!cp /content/Retrieval-based-Voice-Conversion-WebUI/weights/{MODELNAME}.pth /content/drive/MyDrive/{MODELNAME}{MODELEPOCH}.pth" - ], - "metadata": { - "id": "FgJuNeAwx5Y_" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "#@title 从谷歌云盘恢复pth\n", - "#@markdown 需要自己查看logs文件夹下模型的文件名,手动修改下方命令末尾的文件名\n", - "\n", - "#@markdown 模型名\n", - "MODELNAME = \"lulu\" #@param {type:\"string\"}\n", - "#@markdown 模型epoch\n", - "MODELEPOCH = 7500 #@param {type:\"integer\"}\n", - "\n", - "!mkdir -p /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}\n", - "\n", - "!cp /content/drive/MyDrive/{MODELNAME}_D_{MODELEPOCH}.pth /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/G_{MODELEPOCH}.pth\n", - "!cp /content/drive/MyDrive/{MODELNAME}_G_{MODELEPOCH}.pth /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/D_{MODELEPOCH}.pth\n", - "!cp /content/drive/MyDrive/*.index /content/\n", - "!cp /content/drive/MyDrive/*.npy /content/\n", - "!cp /content/drive/MyDrive/{MODELNAME}{MODELEPOCH}.pth /content/Retrieval-based-Voice-Conversion-WebUI/weights/{MODELNAME}.pth" - ], - "metadata": { - "id": "OVQoLQJXS7WX" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "#@title 手动预处理(不推荐)\n", - "#@markdown 模型名\n", - "MODELNAME = \"lulu\" #@param {type:\"string\"}\n", - "#@markdown 采样率\n", - "BITRATE = 48000 #@param {type:\"integer\"}\n", - "#@markdown 使用的进程数\n", - "THREADCOUNT = 8 #@param {type:\"integer\"}\n", - "\n", - "!python3 trainset_preprocess_pipeline_print.py /content/dataset {BITRATE} {THREADCOUNT} logs/{MODELNAME} True\n" - ], - "metadata": { - "id": "ZKAyuKb9J6dz" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "#@title 手动提取特征(不推荐)\n", - "#@markdown 模型名\n", - "MODELNAME = \"lulu\" #@param {type:\"string\"}\n", - "#@markdown 使用的进程数\n", - "THREADCOUNT = 8 #@param {type:\"integer\"}\n", - "#@markdown 音高提取算法\n", - "ALGO = \"harvest\" #@param {type:\"string\"}\n", - "\n", - "!python3 extract_f0_print.py logs/{MODELNAME} {THREADCOUNT} {ALGO}\n", - "\n", - "!python3 extract_feature_print.py cpu 1 0 0 logs/{MODELNAME}\n" - ], - "metadata": { - "id": "CrxJqzAUKmPJ" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "#@title 手动训练(不推荐)\n", - "#@markdown 
模型名\n", - "MODELNAME = \"lulu\" #@param {type:\"string\"}\n", - "#@markdown 使用的GPU\n", - "USEGPU = \"0\" #@param {type:\"string\"}\n", - "#@markdown 批大小\n", - "BATCHSIZE = 32 #@param {type:\"integer\"}\n", - "#@markdown 停止的epoch\n", - "MODELEPOCH = 3200 #@param {type:\"integer\"}\n", - "#@markdown 保存epoch间隔\n", - "EPOCHSAVE = 100 #@param {type:\"integer\"}\n", - "#@markdown 采样率\n", - "MODELSAMPLE = \"48k\" #@param {type:\"string\"}\n", - "#@markdown 是否缓存训练集\n", - "CACHEDATA = 1 #@param {type:\"integer\"}\n", - "#@markdown 是否仅保存最新的ckpt文件\n", - "ONLYLATEST = 0 #@param {type:\"integer\"}\n", - "\n", - "!python3 train_nsf_sim_cache_sid_load_pretrain.py -e lulu -sr {MODELSAMPLE} -f0 1 -bs {BATCHSIZE} -g {USEGPU} -te {MODELEPOCH} -se {EPOCHSAVE} -pg pretrained/f0G{MODELSAMPLE}.pth -pd pretrained/f0D{MODELSAMPLE}.pth -l {ONLYLATEST} -c {CACHEDATA}\n" - ], - "metadata": { - "id": "IMLPLKOaKj58" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "#@title 删除其它pth,只留选中的(慎点,仔细看代码)\n", - "#@markdown 模型名\n", - "MODELNAME = \"lulu\" #@param {type:\"string\"}\n", - "#@markdown 选中模型epoch\n", - "MODELEPOCH = 9600 #@param {type:\"integer\"}\n", - "\n", - "!echo \"备份选中的模型。。。\"\n", - "!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/G_{MODELEPOCH}.pth /content/{MODELNAME}_D_{MODELEPOCH}.pth\n", - "!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/D_{MODELEPOCH}.pth /content/{MODELNAME}_G_{MODELEPOCH}.pth\n", - "\n", - "!echo \"正在删除。。。\"\n", - "!ls /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}\n", - "!rm /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/*.pth\n", - "\n", - "!echo \"恢复选中的模型。。。\"\n", - "!mv /content/{MODELNAME}_D_{MODELEPOCH}.pth /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/G_{MODELEPOCH}.pth \n", - "!mv /content/{MODELNAME}_G_{MODELEPOCH}.pth /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/D_{MODELEPOCH}.pth\n", - "\n", - "!echo \"删除完成\"\n", - "!ls /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}" - ], - "metadata": { - "id": "haYA81hySuDl" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "#@title 清除项目下所有文件,只留选中的模型(慎点,仔细看代码)\n", - "#@markdown 模型名\n", - "MODELNAME = \"lulu\" #@param {type:\"string\"}\n", - "#@markdown 选中模型epoch\n", - "MODELEPOCH = 9600 #@param {type:\"integer\"}\n", - "\n", - "!echo \"备份选中的模型。。。\"\n", - "!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/G_{MODELEPOCH}.pth /content/{MODELNAME}_D_{MODELEPOCH}.pth\n", - "!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/D_{MODELEPOCH}.pth /content/{MODELNAME}_G_{MODELEPOCH}.pth\n", - "\n", - "!echo \"正在删除。。。\"\n", - "!ls /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}\n", - "!rm -rf /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/*\n", - "\n", - "!echo \"恢复选中的模型。。。\"\n", - "!mv /content/{MODELNAME}_D_{MODELEPOCH}.pth /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/G_{MODELEPOCH}.pth \n", - "!mv /content/{MODELNAME}_G_{MODELEPOCH}.pth /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/D_{MODELEPOCH}.pth\n", - "\n", - "!echo \"删除完成\"\n", - "!ls /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}" - ], - "metadata": { - "id": "QhSiPTVPoIRh" - }, - "execution_count": null, - "outputs": [] - } - ] -} diff --git a/Retrieval_based_Voice_Conversion_WebUI_v2.ipynb b/Retrieval_based_Voice_Conversion_WebUI_v2.ipynb 
deleted file mode 100644 index 9fad92cb5..000000000 --- a/Retrieval_based_Voice_Conversion_WebUI_v2.ipynb +++ /dev/null @@ -1,401 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "id": "ZFFCx5J80SGa" - }, - "source": [ - "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/blob/main/Retrieval_based_Voice_Conversion_WebUI_v2.ipynb)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "GmFP6bN9dvOq" - }, - "outputs": [], - "source": [ - "#@title 查看显卡\n", - "!nvidia-smi" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "wjddIFr1oS3W" - }, - "outputs": [], - "source": [ - "#@title 安装依赖\n", - "!apt-get -y install build-essential python3-dev ffmpeg\n", - "!pip3 install --upgrade setuptools wheel\n", - "!pip3 install --upgrade pip\n", - "!pip3 install faiss-cpu==1.7.2 fairseq gradio==3.14.0 ffmpeg ffmpeg-python praat-parselmouth pyworld numpy==1.23.5 numba==0.56.4 librosa==0.9.2" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "ge_97mfpgqTm" - }, - "outputs": [], - "source": [ - "#@title 克隆仓库\n", - "\n", - "!mkdir Retrieval-based-Voice-Conversion-WebUI\n", - "%cd /content/Retrieval-based-Voice-Conversion-WebUI\n", - "!git init\n", - "!git remote add origin https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI.git\n", - "!git fetch origin cfd984812804ddc9247d65b14c82cd32e56c1133 --depth=1 \n", - "!git reset --hard FETCH_HEAD" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "BLDEZADkvlw1" - }, - "outputs": [], - "source": [ - "#@title 更新仓库(一般无需执行)\n", - "!git pull" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "pqE0PrnuRqI2" - }, - "outputs": [], - "source": [ - "#@title 安装aria2\n", - "!apt -y install -qq aria2" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "UG3XpUwEomUz" - }, - "outputs": [], - "source": [ - "#@title 下载底模\n", - "\n", - "# v1\n", - "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D32k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o D32k.pth\n", - "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D40k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o D40k.pth\n", - "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D48k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o D48k.pth\n", - "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G32k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o G32k.pth\n", - "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G40k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o G40k.pth\n", - "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G48k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o G48k.pth\n", - "!aria2c --console-log-level=error -c -x 16 -s 16 -k 
1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D32k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o f0D32k.pth\n", - "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D40k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o f0D40k.pth\n", - "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D48k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o f0D48k.pth\n", - "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G32k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o f0G32k.pth\n", - "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G40k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o f0G40k.pth\n", - "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G48k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o f0G48k.pth\n", - "\n", - "#v2\n", - "# !aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/D32k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained_v2 -o D32k.pth\n", - "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/D40k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained_v2 -o D40k.pth\n", - "# !aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/D48k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained_v2 -o D48k.pth\n", - "# !aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/G32k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained_v2 -o G32k.pth\n", - "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/G40k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained_v2 -o G40k.pth\n", - "# !aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/G48k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained_v2 -o G48k.pth\n", - "# !aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0D32k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained_v2 -o f0D32k.pth\n", - "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0D40k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained_v2 -o f0D40k.pth\n", - "# !aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0D48k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained_v2 -o f0D48k.pth\n", - "# !aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0G32k.pth -d 
/content/Retrieval-based-Voice-Conversion-WebUI/pretrained_v2 -o f0G32k.pth\n", - "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0G40k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained_v2 -o f0G40k.pth\n", - "# !aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0G48k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained_v2 -o f0G48k.pth" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "HugjmZqZRuiF" - }, - "outputs": [], - "source": [ - "#@title 下载人声分离模型\n", - "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP2-人声vocals+非人声instrumentals.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/uvr5_weights -o HP2-人声vocals+非人声instrumentals.pth\n", - "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP5-主旋律人声vocals+其他instrumentals.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/uvr5_weights -o HP5-主旋律人声vocals+其他instrumentals.pth" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "2RCaT9FTR0ej" - }, - "outputs": [], - "source": [ - "#@title 下载hubert_base\n", - "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/hubert_base.pt -d /content/Retrieval-based-Voice-Conversion-WebUI -o hubert_base.pt" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "jwu07JgqoFON" - }, - "outputs": [], - "source": [ - "#@title 挂载谷歌云盘\n", - "\n", - "from google.colab import drive\n", - "drive.mount('/content/drive')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "Mwk7Q0Loqzjx" - }, - "outputs": [], - "source": [ - "#@title 从谷歌云盘加载打包好的数据集到/content/dataset\n", - "\n", - "#@markdown 数据集位置\n", - "DATASET = \"/content/drive/MyDrive/dataset/lulu20230327_32k.zip\" #@param {type:\"string\"}\n", - "\n", - "!mkdir -p /content/dataset\n", - "!unzip -d /content/dataset -B {DATASET}" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "PDlFxWHWEynD" - }, - "outputs": [], - "source": [ - "#@title 重命名数据集中的重名文件\n", - "!ls -a /content/dataset/\n", - "!rename 's/(\\w+)\\.(\\w+)~(\\d*)/$1_$3.$2/' /content/dataset/*.*~*" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "7vh6vphDwO0b" - }, - "outputs": [], - "source": [ - "#@title 启动web\n", - "%cd /content/Retrieval-based-Voice-Conversion-WebUI\n", - "# %load_ext tensorboard\n", - "# %tensorboard --logdir /content/Retrieval-based-Voice-Conversion-WebUI/logs\n", - "!python3 infer-web.py --colab --pycmd python3" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "FgJuNeAwx5Y_" - }, - "outputs": [], - "source": [ - "#@title 手动将训练后的模型文件备份到谷歌云盘\n", - "#@markdown 需要自己查看logs文件夹下模型的文件名,手动修改下方命令末尾的文件名\n", - "\n", - "#@markdown 模型名\n", - "MODELNAME = \"lulu\" #@param {type:\"string\"}\n", - "#@markdown 模型epoch\n", - "MODELEPOCH = 9600 #@param {type:\"integer\"}\n", - "\n", - "!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/G_{MODELEPOCH}.pth /content/drive/MyDrive/{MODELNAME}_D_{MODELEPOCH}.pth\n", - "!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/D_{MODELEPOCH}.pth 
/content/drive/MyDrive/{MODELNAME}_G_{MODELEPOCH}.pth\n", - "!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/added_*.index /content/drive/MyDrive/\n", - "!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/total_*.npy /content/drive/MyDrive/\n", - "\n", - "!cp /content/Retrieval-based-Voice-Conversion-WebUI/weights/{MODELNAME}.pth /content/drive/MyDrive/{MODELNAME}{MODELEPOCH}.pth" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "OVQoLQJXS7WX" - }, - "outputs": [], - "source": [ - "#@title 从谷歌云盘恢复pth\n", - "#@markdown 需要自己查看logs文件夹下模型的文件名,手动修改下方命令末尾的文件名\n", - "\n", - "#@markdown 模型名\n", - "MODELNAME = \"lulu\" #@param {type:\"string\"}\n", - "#@markdown 模型epoch\n", - "MODELEPOCH = 7500 #@param {type:\"integer\"}\n", - "\n", - "!mkdir -p /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}\n", - "\n", - "!cp /content/drive/MyDrive/{MODELNAME}_D_{MODELEPOCH}.pth /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/G_{MODELEPOCH}.pth\n", - "!cp /content/drive/MyDrive/{MODELNAME}_G_{MODELEPOCH}.pth /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/D_{MODELEPOCH}.pth\n", - "!cp /content/drive/MyDrive/*.index /content/\n", - "!cp /content/drive/MyDrive/*.npy /content/\n", - "!cp /content/drive/MyDrive/{MODELNAME}{MODELEPOCH}.pth /content/Retrieval-based-Voice-Conversion-WebUI/weights/{MODELNAME}.pth" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "ZKAyuKb9J6dz" - }, - "outputs": [], - "source": [ - "#@title 手动预处理(不推荐)\n", - "#@markdown 模型名\n", - "MODELNAME = \"lulu\" #@param {type:\"string\"}\n", - "#@markdown 采样率\n", - "BITRATE = 48000 #@param {type:\"integer\"}\n", - "#@markdown 使用的进程数\n", - "THREADCOUNT = 8 #@param {type:\"integer\"}\n", - "\n", - "!python3 trainset_preprocess_pipeline_print.py /content/dataset {BITRATE} {THREADCOUNT} logs/{MODELNAME} True\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "CrxJqzAUKmPJ" - }, - "outputs": [], - "source": [ - "#@title 手动提取特征(不推荐)\n", - "#@markdown 模型名\n", - "MODELNAME = \"lulu\" #@param {type:\"string\"}\n", - "#@markdown 使用的进程数\n", - "THREADCOUNT = 8 #@param {type:\"integer\"}\n", - "#@markdown 音高提取算法\n", - "ALGO = \"harvest\" #@param {type:\"string\"}\n", - "\n", - "!python3 extract_f0_print.py logs/{MODELNAME} {THREADCOUNT} {ALGO}\n", - "\n", - "!python3 extract_feature_print.py cpu 1 0 0 logs/{MODELNAME}\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "IMLPLKOaKj58" - }, - "outputs": [], - "source": [ - "#@title 手动训练(不推荐)\n", - "#@markdown 模型名\n", - "MODELNAME = \"lulu\" #@param {type:\"string\"}\n", - "#@markdown 使用的GPU\n", - "USEGPU = \"0\" #@param {type:\"string\"}\n", - "#@markdown 批大小\n", - "BATCHSIZE = 32 #@param {type:\"integer\"}\n", - "#@markdown 停止的epoch\n", - "MODELEPOCH = 3200 #@param {type:\"integer\"}\n", - "#@markdown 保存epoch间隔\n", - "EPOCHSAVE = 100 #@param {type:\"integer\"}\n", - "#@markdown 采样率\n", - "MODELSAMPLE = \"48k\" #@param {type:\"string\"}\n", - "#@markdown 是否缓存训练集\n", - "CACHEDATA = 1 #@param {type:\"integer\"}\n", - "#@markdown 是否仅保存最新的ckpt文件\n", - "ONLYLATEST = 0 #@param {type:\"integer\"}\n", - "\n", - "!python3 train_nsf_sim_cache_sid_load_pretrain.py -e lulu -sr {MODELSAMPLE} -f0 1 -bs {BATCHSIZE} -g {USEGPU} -te {MODELEPOCH} -se {EPOCHSAVE} -pg pretrained/f0G{MODELSAMPLE}.pth -pd pretrained/f0D{MODELSAMPLE}.pth -l {ONLYLATEST} -c {CACHEDATA}\n" - ] - }, - { - "cell_type": 
"code", - "execution_count": null, - "metadata": { - "id": "haYA81hySuDl" - }, - "outputs": [], - "source": [ - "#@title 删除其它pth,只留选中的(慎点,仔细看代码)\n", - "#@markdown 模型名\n", - "MODELNAME = \"lulu\" #@param {type:\"string\"}\n", - "#@markdown 选中模型epoch\n", - "MODELEPOCH = 9600 #@param {type:\"integer\"}\n", - "\n", - "!echo \"备份选中的模型。。。\"\n", - "!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/G_{MODELEPOCH}.pth /content/{MODELNAME}_D_{MODELEPOCH}.pth\n", - "!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/D_{MODELEPOCH}.pth /content/{MODELNAME}_G_{MODELEPOCH}.pth\n", - "\n", - "!echo \"正在删除。。。\"\n", - "!ls /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}\n", - "!rm /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/*.pth\n", - "\n", - "!echo \"恢复选中的模型。。。\"\n", - "!mv /content/{MODELNAME}_D_{MODELEPOCH}.pth /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/G_{MODELEPOCH}.pth \n", - "!mv /content/{MODELNAME}_G_{MODELEPOCH}.pth /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/D_{MODELEPOCH}.pth\n", - "\n", - "!echo \"删除完成\"\n", - "!ls /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "QhSiPTVPoIRh" - }, - "outputs": [], - "source": [ - "#@title 清除项目下所有文件,只留选中的模型(慎点,仔细看代码)\n", - "#@markdown 模型名\n", - "MODELNAME = \"lulu\" #@param {type:\"string\"}\n", - "#@markdown 选中模型epoch\n", - "MODELEPOCH = 9600 #@param {type:\"integer\"}\n", - "\n", - "!echo \"备份选中的模型。。。\"\n", - "!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/G_{MODELEPOCH}.pth /content/{MODELNAME}_D_{MODELEPOCH}.pth\n", - "!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/D_{MODELEPOCH}.pth /content/{MODELNAME}_G_{MODELEPOCH}.pth\n", - "\n", - "!echo \"正在删除。。。\"\n", - "!ls /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}\n", - "!rm -rf /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/*\n", - "\n", - "!echo \"恢复选中的模型。。。\"\n", - "!mv /content/{MODELNAME}_D_{MODELEPOCH}.pth /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/G_{MODELEPOCH}.pth \n", - "!mv /content/{MODELNAME}_G_{MODELEPOCH}.pth /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/D_{MODELEPOCH}.pth\n", - "\n", - "!echo \"删除完成\"\n", - "!ls /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}" - ] - } - ], - "metadata": { - "accelerator": "GPU", - "colab": { - "private_outputs": true, - "provenance": [] - }, - "gpuClass": "standard", - "kernelspec": { - "display_name": "Python 3", - "name": "python3" - }, - "language_info": { - "name": "python" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} diff --git a/app.py b/app.py deleted file mode 100644 index d8264b895..000000000 --- a/app.py +++ /dev/null @@ -1,319 +0,0 @@ -import os -import torch - -# os.system("wget -P cvec/ https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/hubert_base.pt") -import gradio as gr -import librosa -import numpy as np -import logging -from fairseq import checkpoint_utils -from vc_infer_pipeline import VC -import traceback -from config import Config -from lib.infer_pack.models import ( - SynthesizerTrnMs256NSFsid, - SynthesizerTrnMs256NSFsid_nono, - SynthesizerTrnMs768NSFsid, - SynthesizerTrnMs768NSFsid_nono, -) -from i18n import I18nAuto - -logging.getLogger("numba").setLevel(logging.WARNING) -logging.getLogger("markdown_it").setLevel(logging.WARNING) 
-logging.getLogger("urllib3").setLevel(logging.WARNING) -logging.getLogger("matplotlib").setLevel(logging.WARNING) - -i18n = I18nAuto() -i18n.print() - -config = Config() - -weight_root = "weights" -weight_uvr5_root = "uvr5_weights" -index_root = "logs" -names = [] -hubert_model = None -for name in os.listdir(weight_root): - if name.endswith(".pth"): - names.append(name) -index_paths = [] -for root, dirs, files in os.walk(index_root, topdown=False): - for name in files: - if name.endswith(".index") and "trained" not in name: - index_paths.append("%s/%s" % (root, name)) - - -def get_vc(sid): - global n_spk, tgt_sr, net_g, vc, cpt, version - if sid == "" or sid == []: - global hubert_model - if hubert_model != None: # 考虑到轮询, 需要加个判断看是否 sid 是由有模型切换到无模型的 - print("clean_empty_cache") - del net_g, n_spk, vc, hubert_model, tgt_sr # ,cpt - hubert_model = net_g = n_spk = vc = hubert_model = tgt_sr = None - if torch.cuda.is_available(): - torch.cuda.empty_cache() - ###楼下不这么折腾清理不干净 - if_f0 = cpt.get("f0", 1) - version = cpt.get("version", "v1") - if version == "v1": - if if_f0 == 1: - net_g = SynthesizerTrnMs256NSFsid( - *cpt["config"], is_half=config.is_half - ) - else: - net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"]) - elif version == "v2": - if if_f0 == 1: - net_g = SynthesizerTrnMs768NSFsid( - *cpt["config"], is_half=config.is_half - ) - else: - net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"]) - del net_g, cpt - if torch.cuda.is_available(): - torch.cuda.empty_cache() - cpt = None - return {"visible": False, "__type__": "update"} - person = "%s/%s" % (weight_root, sid) - print("loading %s" % person) - cpt = torch.load(person, map_location="cpu") - tgt_sr = cpt["config"][-1] - cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] # n_spk - if_f0 = cpt.get("f0", 1) - version = cpt.get("version", "v1") - if version == "v1": - if if_f0 == 1: - net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half) - else: - net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"]) - elif version == "v2": - if if_f0 == 1: - net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=config.is_half) - else: - net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"]) - del net_g.enc_q - print(net_g.load_state_dict(cpt["weight"], strict=False)) - net_g.eval().to(config.device) - if config.is_half: - net_g = net_g.half() - else: - net_g = net_g.float() - vc = VC(tgt_sr, config) - n_spk = cpt["config"][-3] - return {"visible": True, "maximum": n_spk, "__type__": "update"} - - -def load_hubert(): - global hubert_model - models, _, _ = checkpoint_utils.load_model_ensemble_and_task( - ["hubert_base.pt"], - suffix="", - ) - hubert_model = models[0] - hubert_model = hubert_model.to(config.device) - if config.is_half: - hubert_model = hubert_model.half() - else: - hubert_model = hubert_model.float() - hubert_model.eval() - - -def vc_single( - sid, - input_audio_path, - f0_up_key, - f0_file, - f0_method, - file_index, - file_index2, - # file_big_npy, - index_rate, - filter_radius, - resample_sr, - rms_mix_rate, - protect, -): # spk_item, input_audio0, vc_transform0,f0_file,f0method0 - global tgt_sr, net_g, vc, hubert_model, version - if input_audio_path is None: - return "You need to upload an audio", None - f0_up_key = int(f0_up_key) - try: - audio = input_audio_path[1] / 32768.0 - if len(audio.shape) == 2: - audio = np.mean(audio, -1) - audio = librosa.resample(audio, orig_sr=input_audio_path[0], target_sr=16000) - audio_max = np.abs(audio).max() / 0.95 - if audio_max > 1: - audio /= audio_max - 
times = [0, 0, 0] - if hubert_model == None: - load_hubert() - if_f0 = cpt.get("f0", 1) - file_index = ( - ( - file_index.strip(" ") - .strip('"') - .strip("\n") - .strip('"') - .strip(" ") - .replace("trained", "added") - ) - if file_index != "" - else file_index2 - ) # 防止小白写错,自动帮他替换掉 - # file_big_npy = ( - # file_big_npy.strip(" ").strip('"').strip("\n").strip('"').strip(" ") - # ) - audio_opt = vc.pipeline( - hubert_model, - net_g, - sid, - audio, - input_audio_path, - times, - f0_up_key, - f0_method, - file_index, - # file_big_npy, - index_rate, - if_f0, - filter_radius, - tgt_sr, - resample_sr, - rms_mix_rate, - version, - protect, - f0_file=f0_file, - ) - if resample_sr >= 16000 and tgt_sr != resample_sr: - tgt_sr = resample_sr - index_info = ( - "Using index:%s." % file_index - if os.path.exists(file_index) - else "Index not used." - ) - return "Success.\n %s\nTime:\n npy:%ss, f0:%ss, infer:%ss" % ( - index_info, - times[0], - times[1], - times[2], - ), (tgt_sr, audio_opt) - except: - info = traceback.format_exc() - print(info) - return info, (None, None) - - -app = gr.Blocks() -with app: - with gr.Tabs(): - with gr.TabItem("在线demo"): - gr.Markdown( - value=""" - RVC 在线demo - """ - ) - sid = gr.Dropdown(label=i18n("推理音色"), choices=sorted(names)) - with gr.Column(): - spk_item = gr.Slider( - minimum=0, - maximum=2333, - step=1, - label=i18n("请选择说话人id"), - value=0, - visible=False, - interactive=True, - ) - sid.change( - fn=get_vc, - inputs=[sid], - outputs=[spk_item], - ) - gr.Markdown( - value=i18n("男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ") - ) - vc_input3 = gr.Audio(label="上传音频(长度小于90秒)") - vc_transform0 = gr.Number(label=i18n("变调(整数, 半音数量, 升八度12降八度-12)"), value=0) - f0method0 = gr.Radio( - label=i18n("选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU"), - choices=["pm", "harvest", "crepe"], - value="pm", - interactive=True, - ) - filter_radius0 = gr.Slider( - minimum=0, - maximum=7, - label=i18n(">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音"), - value=3, - step=1, - interactive=True, - ) - with gr.Column(): - file_index1 = gr.Textbox( - label=i18n("特征检索库文件路径,为空则使用下拉的选择结果"), - value="", - interactive=False, - visible=False, - ) - file_index2 = gr.Dropdown( - label=i18n("自动检测index路径,下拉式选择(dropdown)"), - choices=sorted(index_paths), - interactive=True, - ) - index_rate1 = gr.Slider( - minimum=0, - maximum=1, - label=i18n("检索特征占比"), - value=0.88, - interactive=True, - ) - resample_sr0 = gr.Slider( - minimum=0, - maximum=48000, - label=i18n("后处理重采样至最终采样率,0为不进行重采样"), - value=0, - step=1, - interactive=True, - ) - rms_mix_rate0 = gr.Slider( - minimum=0, - maximum=1, - label=i18n("输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络"), - value=1, - interactive=True, - ) - protect0 = gr.Slider( - minimum=0, - maximum=0.5, - label=i18n("保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果"), - value=0.33, - step=0.01, - interactive=True, - ) - f0_file = gr.File(label=i18n("F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调")) - but0 = gr.Button(i18n("转换"), variant="primary") - vc_output1 = gr.Textbox(label=i18n("输出信息")) - vc_output2 = gr.Audio(label=i18n("输出音频(右下角三个点,点了可以下载)")) - but0.click( - vc_single, - [ - spk_item, - vc_input3, - vc_transform0, - f0_file, - f0method0, - file_index1, - file_index2, - # file_big_npy1, - index_rate1, - filter_radius0, - resample_sr0, - rms_mix_rate0, - protect0, - ], - [vc_output1, vc_output2], - ) - - -app.launch() diff --git a/audio-outputs/readme.txt b/audio-outputs/readme.txt deleted file mode 100644 index 516cf6fa3..000000000 --- a/audio-outputs/readme.txt +++ /dev/null @@ -1 
+0,0 @@ -All audio output files if inferred with the CLI will save here. (Mangio-RVC-Fork Feature) \ No newline at end of file diff --git a/audios/.gitignore b/audios/.gitignore deleted file mode 100644 index e69de29bb..000000000 diff --git a/avoid-shutdown.ipynb b/avoid-shutdown.ipynb deleted file mode 100644 index 1190b3d60..000000000 --- a/avoid-shutdown.ipynb +++ /dev/null @@ -1,60 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Mangio-RVC-Fork" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Prevent Interactivity Automatic Shutdown" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import time\n", - "time.sleep(1000 * 60 * 60 * 24)" - ] - } - ], - "metadata": { - "accelerator": "GPU", - "colab": { - "provenance": [ - { - "file_id": "https://github.com/34j/so-vits-svc-fork/blob/feat%2Fmain-feat/so-vits-svc-fork-4.0.ipynb", - "timestamp": 1678970434570 - } - ] - }, - "gpuClass": "standard", - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/config.py b/config.py deleted file mode 100644 index f1144b187..000000000 --- a/config.py +++ /dev/null @@ -1,213 +0,0 @@ -import argparse -import sys -import torch -import json -from multiprocessing import cpu_count - -global usefp16 -usefp16 = False - -def decide_fp_config(): - global usefp16 - usefp16 = False - device_capability = 0 - if torch.cuda.is_available(): - device = torch.device("cuda:0") # Assuming you have only one GPU (index 0). - device_capability = torch.cuda.get_device_capability(device)[0] - if device_capability >= 7: - usefp16 = True - for config_file in ["32k.json", "40k.json", "48k.json"]: - with open(f"configs/{config_file}", "r") as d: - data = json.load(d) - - if "train" in data and "fp16_run" in data["train"]: - data["train"]["fp16_run"] = True - - with open(f"configs/{config_file}", "w") as d: - json.dump(data, d, indent=4) - - - - with open( - "trainset_preprocess_pipeline_print.py", "r", encoding="utf-8" - ) as f: - strr = f.read() - - strr = strr.replace("3.0", "3.7") - - with open( - "trainset_preprocess_pipeline_print.py", "w", encoding="utf-8" - ) as f: - f.write(strr) - else: - for config_file in ["32k.json", "40k.json", "48k.json"]: - with open(f"configs/{config_file}", "r") as f: - data = json.load(f) - - if "train" in data and "fp16_run" in data["train"]: - data["train"]["fp16_run"] = False - - with open(f"configs/{config_file}", "w") as d: - json.dump(data, d, indent=4) - - print(f"Set fp16_run to false in {config_file}") - - with open( - "trainset_preprocess_pipeline_print.py", "r", encoding="utf-8" - ) as f: - strr = f.read() - - strr = strr.replace("3.7", "3.0") - - with open( - "trainset_preprocess_pipeline_print.py", "w", encoding="utf-8" - ) as f: - f.write(strr) - else: - print( - "CUDA is not available. Make sure you have an NVIDIA GPU and CUDA installed." 
- ) - return (usefp16, device_capability) - -class Config: - def __init__(self): - self.device = "cuda:0" - self.is_half = True - self.n_cpu = 0 - self.gpu_name = None - self.gpu_mem = None - ( - self.python_cmd, - self.listen_port, - self.iscolab, - self.noparallel, - self.noautoopen, - self.paperspace, - self.is_cli, - self.grtheme, - ) = self.arg_parse() - - self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config() - - @staticmethod - def arg_parse() -> tuple: - exe = sys.executable or "python" - parser = argparse.ArgumentParser() - parser.add_argument("--port", type=int, default=7865, help="Listen port") - parser.add_argument("--pycmd", type=str, default=exe, help="Python command") - parser.add_argument("--colab", action="store_true", help="Launch in colab") - parser.add_argument( - "--noparallel", action="store_true", help="Disable parallel processing" - ) - parser.add_argument( - "--noautoopen", - action="store_true", - help="Do not open in browser automatically", - ) - parser.add_argument( # Fork Feature. Paperspace integration for web UI - "--paperspace", - action="store_true", - help="Note that this argument just shares a gradio link for the web UI. Thus can be used on other non-local CLI systems.", - ) - parser.add_argument( # Fork Feature. Embed a CLI into the infer-web.py - "--is_cli", - action="store_true", - help="Use the CLI instead of setting up a gradio UI. This flag will launch an RVC text interface where you can execute functions from infer-web.py!", - ) - - parser.add_argument( - "-t", - "--theme", - help = "Theme for Gradio. Format - `JohnSmith9982/small_and_pretty` (no backticks)", - default = "gradio/soft", - type = str - ) - - cmd_opts = parser.parse_args() - - cmd_opts.port = cmd_opts.port if 0 <= cmd_opts.port <= 65535 else 7865 - - return ( - cmd_opts.pycmd, - cmd_opts.port, - cmd_opts.colab, - cmd_opts.noparallel, - cmd_opts.noautoopen, - cmd_opts.paperspace, - cmd_opts.is_cli, - cmd_opts.theme, - ) - - # has_mps is only available in nightly pytorch (for now) and MasOS 12.3+. 
- # check `getattr` and try it for compatibility - @staticmethod - def has_mps() -> bool: - if not torch.backends.mps.is_available(): - return False - try: - torch.zeros(1).to(torch.device("mps")) - return True - except Exception: - return False - - def device_config(self) -> tuple: - if torch.cuda.is_available(): - i_device = int(self.device.split(":")[-1]) - self.gpu_name = torch.cuda.get_device_name(i_device) - if ( - ("16" in self.gpu_name and "V100" not in self.gpu_name.upper()) - or "P40" in self.gpu_name.upper() - or "1060" in self.gpu_name - or "1070" in self.gpu_name - # or "1080" in self.gpu_name / This is commented out because fp16 apparently runs fast on 1080 - ): - print("Found GPU", self.gpu_name, ", force to fp32") - self.is_half = False - else: - decide_fp_config() - self.gpu_mem = int( - torch.cuda.get_device_properties(i_device).total_memory - / 1024 - / 1024 - / 1024 - + 0.4 - ) - if self.gpu_mem <= 4: - with open("trainset_preprocess_pipeline_print.py", "r") as f: - strr = f.read().replace("3.7", "3.0") - with open("trainset_preprocess_pipeline_print.py", "w") as f: - f.write(strr) - elif self.has_mps(): - print("No supported Nvidia GPU found, using MPS instead") - self.device = "mps" - self.is_half = False - decide_fp_config() - else: - print("No supported Nvidia GPU found, using CPU instead") - self.device = "cpu" - self.is_half = False - decide_fp_config() - - if self.n_cpu == 0: - self.n_cpu = cpu_count() - - if self.is_half: - # 6G显存配置 - x_pad = 3 - x_query = 10 - x_center = 60 - x_max = 65 - else: - # 5G显存配置 - x_pad = 1 - x_query = 6 - x_center = 38 - x_max = 41 - - if self.gpu_mem != None and self.gpu_mem <= 4: - x_pad = 1 - x_query = 5 - x_center = 30 - x_max = 32 - - return x_pad, x_query, x_center, x_max diff --git a/configs/32k.json b/configs/32k.json deleted file mode 100644 index 400b6be80..000000000 --- a/configs/32k.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "train": { - "log_interval": 200, - "seed": 1234, - "epochs": 20000, - "learning_rate": 1e-4, - "betas": [0.8, 0.99], - "eps": 1e-9, - "batch_size": 4, - "fp16_run": false, - "lr_decay": 0.999875, - "segment_size": 12800, - "init_lr_ratio": 1, - "warmup_epochs": 0, - "c_mel": 45, - "c_kl": 1.0 - }, - "data": { - "max_wav_value": 32768.0, - "sampling_rate": 32000, - "filter_length": 1024, - "hop_length": 320, - "win_length": 1024, - "n_mel_channels": 80, - "mel_fmin": 0.0, - "mel_fmax": null - }, - "model": { - "inter_channels": 192, - "hidden_channels": 192, - "filter_channels": 768, - "n_heads": 2, - "n_layers": 6, - "kernel_size": 3, - "p_dropout": 0, - "resblock": "1", - "resblock_kernel_sizes": [3,7,11], - "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]], - "upsample_rates": [10,4,2,2,2], - "upsample_initial_channel": 512, - "upsample_kernel_sizes": [16,16,4,4,4], - "use_spectral_norm": false, - "gin_channels": 256, - "spk_embed_dim": 109 - } -} diff --git a/configs/32k_v2.json b/configs/32k_v2.json deleted file mode 100644 index 70e534f4c..000000000 --- a/configs/32k_v2.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "train": { - "log_interval": 200, - "seed": 1234, - "epochs": 20000, - "learning_rate": 1e-4, - "betas": [0.8, 0.99], - "eps": 1e-9, - "batch_size": 4, - "fp16_run": true, - "lr_decay": 0.999875, - "segment_size": 12800, - "init_lr_ratio": 1, - "warmup_epochs": 0, - "c_mel": 45, - "c_kl": 1.0 - }, - "data": { - "max_wav_value": 32768.0, - "sampling_rate": 32000, - "filter_length": 1024, - "hop_length": 320, - "win_length": 1024, - "n_mel_channels": 80, - "mel_fmin": 0.0, - "mel_fmax": 
null - }, - "model": { - "inter_channels": 192, - "hidden_channels": 192, - "filter_channels": 768, - "n_heads": 2, - "n_layers": 6, - "kernel_size": 3, - "p_dropout": 0, - "resblock": "1", - "resblock_kernel_sizes": [3,7,11], - "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]], - "upsample_rates": [10,8,2,2], - "upsample_initial_channel": 512, - "upsample_kernel_sizes": [20,16,4,4], - "use_spectral_norm": false, - "gin_channels": 256, - "spk_embed_dim": 109 - } -} diff --git a/configs/40k.json b/configs/40k.json deleted file mode 100644 index cb30b8be4..000000000 --- a/configs/40k.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "train": { - "log_interval": 200, - "seed": 1234, - "epochs": 20000, - "learning_rate": 1e-4, - "betas": [0.8, 0.99], - "eps": 1e-9, - "batch_size": 4, - "fp16_run": false, - "lr_decay": 0.999875, - "segment_size": 12800, - "init_lr_ratio": 1, - "warmup_epochs": 0, - "c_mel": 45, - "c_kl": 1.0 - }, - "data": { - "max_wav_value": 32768.0, - "sampling_rate": 40000, - "filter_length": 2048, - "hop_length": 400, - "win_length": 2048, - "n_mel_channels": 125, - "mel_fmin": 0.0, - "mel_fmax": null - }, - "model": { - "inter_channels": 192, - "hidden_channels": 192, - "filter_channels": 768, - "n_heads": 2, - "n_layers": 6, - "kernel_size": 3, - "p_dropout": 0, - "resblock": "1", - "resblock_kernel_sizes": [3,7,11], - "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]], - "upsample_rates": [10,10,2,2], - "upsample_initial_channel": 512, - "upsample_kernel_sizes": [16,16,4,4], - "use_spectral_norm": false, - "gin_channels": 256, - "spk_embed_dim": 109 - } -} diff --git a/configs/48k.json b/configs/48k.json deleted file mode 100644 index 687599100..000000000 --- a/configs/48k.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "train": { - "log_interval": 200, - "seed": 1234, - "epochs": 20000, - "learning_rate": 1e-4, - "betas": [0.8, 0.99], - "eps": 1e-9, - "batch_size": 4, - "fp16_run": false, - "lr_decay": 0.999875, - "segment_size": 11520, - "init_lr_ratio": 1, - "warmup_epochs": 0, - "c_mel": 45, - "c_kl": 1.0 - }, - "data": { - "max_wav_value": 32768.0, - "sampling_rate": 48000, - "filter_length": 2048, - "hop_length": 480, - "win_length": 2048, - "n_mel_channels": 128, - "mel_fmin": 0.0, - "mel_fmax": null - }, - "model": { - "inter_channels": 192, - "hidden_channels": 192, - "filter_channels": 768, - "n_heads": 2, - "n_layers": 6, - "kernel_size": 3, - "p_dropout": 0, - "resblock": "1", - "resblock_kernel_sizes": [3,7,11], - "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]], - "upsample_rates": [10,6,2,2,2], - "upsample_initial_channel": 512, - "upsample_kernel_sizes": [16,16,4,4,4], - "use_spectral_norm": false, - "gin_channels": 256, - "spk_embed_dim": 109 - } -} diff --git a/configs/48k_v2.json b/configs/48k_v2.json deleted file mode 100644 index 75f770cda..000000000 --- a/configs/48k_v2.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "train": { - "log_interval": 200, - "seed": 1234, - "epochs": 20000, - "learning_rate": 1e-4, - "betas": [0.8, 0.99], - "eps": 1e-9, - "batch_size": 4, - "fp16_run": true, - "lr_decay": 0.999875, - "segment_size": 17280, - "init_lr_ratio": 1, - "warmup_epochs": 0, - "c_mel": 45, - "c_kl": 1.0 - }, - "data": { - "max_wav_value": 32768.0, - "sampling_rate": 48000, - "filter_length": 2048, - "hop_length": 480, - "win_length": 2048, - "n_mel_channels": 128, - "mel_fmin": 0.0, - "mel_fmax": null - }, - "model": { - "inter_channels": 192, - "hidden_channels": 192, - "filter_channels": 768, - "n_heads": 2, - "n_layers": 6, - "kernel_size": 3, - 
"p_dropout": 0, - "resblock": "1", - "resblock_kernel_sizes": [3,7,11], - "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]], - "upsample_rates": [12,10,2,2], - "upsample_initial_channel": 512, - "upsample_kernel_sizes": [24,20,4,4], - "use_spectral_norm": false, - "gin_channels": 256, - "spk_embed_dim": 109 - } -} diff --git a/csvdb/stop.csv b/csvdb/stop.csv deleted file mode 100644 index bc59c12aa..000000000 --- a/csvdb/stop.csv +++ /dev/null @@ -1 +0,0 @@ -False diff --git a/docs/README.en.md b/docs/README.en.md deleted file mode 100644 index 40b357e1e..000000000 --- a/docs/README.en.md +++ /dev/null @@ -1,110 +0,0 @@ -
- -

Retrieval-based-Voice-Conversion-WebUI

-An easy-to-use Voice Conversion framework based on VITS.

- -[![madewithlove](https://forthebadge.com/images/badges/built-with-love.svg)](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI) - -
- -[![Open In Colab](https://img.shields.io/badge/Colab-F9AB00?style=for-the-badge&logo=googlecolab&color=525252)](https://colab.research.google.com/github/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/blob/main/Retrieval_based_Voice_Conversion_WebUI.ipynb) -[![Licence](https://img.shields.io/github/license/RVC-Project/Retrieval-based-Voice-Conversion-WebUI?style=for-the-badge)](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/blob/main/LICENSE) -[![Huggingface](https://img.shields.io/badge/🤗%20-Spaces-yellow.svg?style=for-the-badge)](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/) - -[![Discord](https://img.shields.io/badge/RVC%20Developers-Discord-7289DA?style=for-the-badge&logo=discord&logoColor=white)](https://discord.gg/HcsmBBGyVk) - -
- ------- -[**Changelog**](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/blob/main/docs/Changelog_EN.md) | [**FAQ (Frequently Asked Questions)**](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/wiki/FAQ-(Frequently-Asked-Questions)) - -[**English**](./README.en.md) | [**中文简体**](../README.md) | [**日本語**](./README.ja.md) | [**한국어**](./README.ko.md) ([**韓國語**](./README.ko.han.md)) - - -Check our [Demo Video](https://www.bilibili.com/video/BV1pm4y1z7Gm/) here! - -Realtime Voice Conversion Software using RVC : [w-okada/voice-changer](https://github.com/w-okada/voice-changer) - -> A online demo using RVC that convert Vocal to Acoustic Guitar audio:https://huggingface.co/spaces/lj1995/vocal2guitar - -> Vocal2Guitar demo video:https://www.bilibili.com/video/BV19W4y1D7tT/ - -> The dataset for the pre-training model uses nearly 50 hours of high quality VCTK open source dataset. - -> High quality licensed song datasets will be added to training-set one after another for your use, without worrying about copyright infringement. - -## Summary -This repository has the following features: -+ Reduce tone leakage by replacing source feature to training-set feature using top1 retrieval; -+ Easy and fast training, even on relatively poor graphics cards; -+ Training with a small amount of data also obtains relatively good results (>=10min low noise speech recommended); -+ Supporting model fusion to change timbres (using ckpt processing tab->ckpt merge); -+ Easy-to-use Webui interface; -+ Use the UVR5 model to quickly separate vocals and instruments. -## Preparing the environment -We recommend you install the dependencies through poetry. - -The following commands need to be executed in the environment of Python version 3.8 or higher: -```bash -# Install PyTorch-related core dependencies, skip if installed -# Reference: https://pytorch.org/get-started/locally/ -pip install torch torchvision torchaudio - -#For Windows + Nvidia Ampere Architecture(RTX30xx), you need to specify the cuda version corresponding to pytorch according to the experience of https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/issues/21 -#pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu117 - -# Install the Poetry dependency management tool, skip if installed -# Reference: https://python-poetry.org/docs/#installation -curl -sSL https://install.python-poetry.org | python3 - - -# Install the project dependencies -poetry install -``` -You can also use pip to install the dependencies - -```bash -pip install -r requirements.txt -``` - -## Preparation of other Pre-models -RVC requires other pre-models to infer and train. - -You need to download them from our [Huggingface space](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/). 
- -Here's a list of Pre-models and other files that RVC needs: -```bash -hubert_base.pt - -./pretrained - -./uvr5_weights - -If you want to test the v2 version model (the v2 version model has changed the input from the 256 dimensional feature of 9-layer Hubert+final_proj to the 768 dimensional feature of 12-layer Hubert, and has added 3 period discriminators), you will need to download additional features - -./pretrained_v2 - -#If you are using Windows, you may also need this dictionary, skip if FFmpeg is installed -ffmpeg.exe -``` -Then use this command to start Webui: -```bash -python infer-web.py -``` -If you are using Windows, you can download and extract `RVC-beta.7z` to use RVC directly and use `go-web.bat` to start Webui. - -There's also a tutorial on RVC in Chinese and you can check it out if needed. - -## Credits -+ [ContentVec](https://github.com/auspicious3000/contentvec/) -+ [VITS](https://github.com/jaywalnut310/vits) -+ [HIFIGAN](https://github.com/jik876/hifi-gan) -+ [Gradio](https://github.com/gradio-app/gradio) -+ [FFmpeg](https://github.com/FFmpeg/FFmpeg) -+ [Ultimate Vocal Remover](https://github.com/Anjok07/ultimatevocalremovergui) -+ [audio-slicer](https://github.com/openvpi/audio-slicer) -## Thanks to all contributors for their efforts - - - - - diff --git a/docs/README.ja.md b/docs/README.ja.md deleted file mode 100644 index 26ce3af19..000000000 --- a/docs/README.ja.md +++ /dev/null @@ -1,104 +0,0 @@ -
- -

Retrieval-based-Voice-Conversion-WebUI

-VITSに基づく使いやすい音声変換(voice changer)framework

- -[![madewithlove](https://forthebadge.com/images/badges/built-with-love.svg)](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI) - -
- -[![Open In Colab](https://img.shields.io/badge/Colab-F9AB00?style=for-the-badge&logo=googlecolab&color=525252)](https://colab.research.google.com/github/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/blob/main/Retrieval_based_Voice_Conversion_WebUI.ipynb) -[![Licence](https://img.shields.io/github/license/RVC-Project/Retrieval-based-Voice-Conversion-WebUI?style=for-the-badge)](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/blob/main/LICENSE) -[![Huggingface](https://img.shields.io/badge/🤗%20-Spaces-yellow.svg?style=for-the-badge)](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/) - -[![Discord](https://img.shields.io/badge/RVC%20Developers-Discord-7289DA?style=for-the-badge&logo=discord&logoColor=white)](https://discord.gg/HcsmBBGyVk) - -
- ------- - -[**更新日誌**](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/blob/main/docs/Changelog_CN.md) - -[**English**](./README.en.md) | [**中文简体**](../README.md) | [**日本語**](./README.ja.md) | [**한국어**](./README.ko.md) ([**韓國語**](./README.ko.han.md)) - -> デモ動画は[こちら](https://www.bilibili.com/video/BV1pm4y1z7Gm/)でご覧ください。 - -> RVCによるリアルタイム音声変換: [w-okada/voice-changer](https://github.com/w-okada/voice-changer) - -> 著作権侵害を心配することなく使用できるように、基底モデルは約50時間の高品質なオープンソースデータセットで訓練されています。 - -> 今後も、次々と使用許可のある高品質な歌声の資料集を追加し、基底モデルを訓練する予定です。 - -## はじめに -本リポジトリには下記の特徴があります。 - -+ Top1検索を用いることで、生の特徴量を訓練用データセット特徴量に変換し、トーンリーケージを削減します。 -+ 比較的貧弱なGPUでも、高速かつ簡単に訓練できます。 -+ 少量のデータセットからでも、比較的良い結果を得ることができます。(10分以上のノイズの少ない音声を推奨します。) -+ モデルを融合することで、音声を混ぜることができます。(ckpt processingタブの、ckpt mergeを使用します。) -+ 使いやすいWebUI。 -+ UVR5 Modelも含んでいるため、人の声とBGMを素早く分離できます。 - -## 環境構築 -Poetryで依存関係をインストールすることをお勧めします。 - -下記のコマンドは、Python3.8以上の環境で実行する必要があります: -```bash -# PyTorch関連の依存関係をインストール。インストール済の場合は省略。 -# 参照先: https://pytorch.org/get-started/locally/ -pip install torch torchvision torchaudio - -#Windows+ Nvidia Ampere Architecture(RTX30xx)の場合、 #21 に従い、pytorchに対応するcuda versionを指定する必要があります。 -#pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu117 - -# PyTorch関連の依存関係をインストール。インストール済の場合は省略。 -# 参照先: https://python-poetry.org/docs/#installation -curl -sSL https://install.python-poetry.org | python3 - - -# Poetry経由で依存関係をインストール -poetry install -``` - -pipでも依存関係のインストールが可能です: - -```bash -pip install -r requirements.txt -``` - -## 基底modelsを準備 -RVCは推論/訓練のために、様々な事前訓練を行った基底モデルを必要とします。 - -modelsは[Hugging Face space](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/)からダウンロードできます。 - -以下は、RVCに必要な基底モデルやその他のファイルの一覧です。 -```bash -hubert_base.pt - -./pretrained - -./uvr5_weights - -# ffmpegがすでにinstallされている場合は省略 -./ffmpeg -``` -その後、下記のコマンドでWebUIを起動します。 -```bash -python infer-web.py -``` -Windowsをお使いの方は、直接`RVC-beta.7z`をダウンロード後に展開し、`go-web.bat`をクリックすることで、WebUIを起動することができます。(7zipが必要です。) - -また、リポジトリに[小白简易教程.doc](./小白简易教程.doc)がありますので、参考にしてください(中国語版のみ)。 - -## 参考プロジェクト -+ [ContentVec](https://github.com/auspicious3000/contentvec/) -+ [VITS](https://github.com/jaywalnut310/vits) -+ [HIFIGAN](https://github.com/jik876/hifi-gan) -+ [Gradio](https://github.com/gradio-app/gradio) -+ [FFmpeg](https://github.com/FFmpeg/FFmpeg) -+ [Ultimate Vocal Remover](https://github.com/Anjok07/ultimatevocalremovergui) -+ [audio-slicer](https://github.com/openvpi/audio-slicer) - -## 貢献者(contributor)の皆様の尽力に感謝します - - - diff --git a/docs/README.ko.han.md b/docs/README.ko.han.md deleted file mode 100644 index cac9d70c4..000000000 --- a/docs/README.ko.han.md +++ /dev/null @@ -1,100 +0,0 @@ -
- -

Retrieval-based-Voice-Conversion-WebUI

-VITS基盤의 簡單하고使用하기 쉬운音聲變換틀

- -[![madewithlove](https://forthebadge.com/images/badges/built-with-love.svg)](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI) - -
- -[![Open In Colab](https://img.shields.io/badge/Colab-F9AB00?style=for-the-badge&logo=googlecolab&color=525252)](https://colab.research.google.com/github/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/blob/main/Retrieval_based_Voice_Conversion_WebUI.ipynb) -[![Licence](https://img.shields.io/github/license/RVC-Project/Retrieval-based-Voice-Conversion-WebUI?style=for-the-badge)](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/blob/main/LICENSE) -[![Huggingface](https://img.shields.io/badge/🤗%20-Spaces-yellow.svg?style=for-the-badge)](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/) - -[![Discord](https://img.shields.io/badge/RVC%20Developers-Discord-7289DA?style=for-the-badge&logo=discord&logoColor=white)](https://discord.gg/HcsmBBGyVk) - -
- ------- -[**更新日誌**](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/blob/main/docs/Changelog_KO.md) - -[**English**](./README.en.md) | [**中文简体**](../README.md) | [**日本語**](./README.ja.md) | [**한국어**](./README.ko.md) ([**韓國語**](./README.ko.han.md)) - -> [示範映像](https://www.bilibili.com/video/BV1pm4y1z7Gm/)을 確認해 보세요! - -> RVC를活用한實時間音聲變換: [w-okada/voice-changer](https://github.com/w-okada/voice-changer) - -> 基本모델은 50時間假量의 高品質 오픈 소스 VCTK 데이터셋을 使用하였으므로, 著作權上의 念慮가 없으니 安心하고 使用하시기 바랍니다. - -> 著作權問題가 없는 高品質의 노래를 以後에도 繼續해서 訓練할 豫定입니다. - -## 紹介 -本Repo는 다음과 같은 特徵을 가지고 있습니다: -+ top1檢索을利用하여 入力音色特徵을 訓練세트音色特徵으로 代替하여 音色의漏出을 防止; -+ 相對的으로 낮은性能의 GPU에서도 빠른訓練可能; -+ 적은量의 데이터로 訓練해도 좋은 結果를 얻을 수 있음 (最小10分以上의 低雜음音聲데이터를 使用하는 것을 勸獎); -+ 모델融合을通한 音色의 變調可能 (ckpt處理탭->ckpt混合選擇); -+ 使用하기 쉬운 WebUI (웹 使用者인터페이스); -+ UVR5 모델을 利用하여 목소리와 背景音樂의 빠른 分離; - -## 環境의準備 -poetry를通해 依存를設置하는 것을 勸獎합니다. - -다음命令은 Python 버전3.8以上의環境에서 實行되어야 합니다: -```bash -# PyTorch 關聯主要依存設置, 이미設置되어 있는 境遇 건너뛰기 可能 -# 參照: https://pytorch.org/get-started/locally/ -pip install torch torchvision torchaudio - -# Windows + Nvidia Ampere Architecture(RTX30xx)를 使用하고 있다面, #21 에서 명시된 것과 같이 PyTorch에 맞는 CUDA 버전을 指定해야 합니다. -#pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu117 - -# Poetry 設置, 이미設置되어 있는 境遇 건너뛰기 可能 -# Reference: https://python-poetry.org/docs/#installation -curl -sSL https://install.python-poetry.org | python3 - - -# 依存設置 -poetry install -``` -pip를 活用하여依存를 設置하여도 無妨합니다. - -```bash -pip install -r requirements.txt -``` - -## 其他預備모델準備 -RVC 모델은 推論과訓練을 依하여 다른 預備모델이 必要합니다. - -[Huggingface space](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/)를 通해서 다운로드 할 수 있습니다. - -다음은 RVC에 必要한 預備모델 및 其他 파일 目錄입니다: -```bash -hubert_base.pt - -./pretrained - -./uvr5_weights - -# Windows를 使用하는境遇 이 사전도 必要할 수 있습니다. FFmpeg가 設置되어 있으면 건너뛰어도 됩니다. -ffmpeg.exe -``` -그後 以下의 命令을 使用하여 WebUI를 始作할 수 있습니다: -```bash -python infer-web.py -``` -Windows를 使用하는境遇 `RVC-beta.7z`를 다운로드 및 壓縮解除하여 RVC를 直接使用하거나 `go-web.bat`을 使用하여 WebUi를 直接할 수 있습니다. - -## 參考 -+ [ContentVec](https://github.com/auspicious3000/contentvec/) -+ [VITS](https://github.com/jaywalnut310/vits) -+ [HIFIGAN](https://github.com/jik876/hifi-gan) -+ [Gradio](https://github.com/gradio-app/gradio) -+ [FFmpeg](https://github.com/FFmpeg/FFmpeg) -+ [Ultimate Vocal Remover](https://github.com/Anjok07/ultimatevocalremovergui) -+ [audio-slicer](https://github.com/openvpi/audio-slicer) -## 모든寄與者분들의勞力에感謝드립니다 - - - - - diff --git a/docs/README.ko.md b/docs/README.ko.md deleted file mode 100644 index abea8e6a2..000000000 --- a/docs/README.ko.md +++ /dev/null @@ -1,112 +0,0 @@ -
- -

Retrieval-based-Voice-Conversion-WebUI

-VITS 기반의 간단하고 사용하기 쉬운 음성 변환 프레임워크.

- -[![madewithlove](https://forthebadge.com/images/badges/built-with-love.svg)](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI) - -
- -[![Open In Colab](https://img.shields.io/badge/Colab-F9AB00?style=for-the-badge&logo=googlecolab&color=525252)](https://colab.research.google.com/github/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/blob/main/Retrieval_based_Voice_Conversion_WebUI.ipynb) -[![Licence](https://img.shields.io/github/license/RVC-Project/Retrieval-based-Voice-Conversion-WebUI?style=for-the-badge)](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/blob/main/LICENSE) -[![Huggingface](https://img.shields.io/badge/🤗%20-Spaces-yellow.svg?style=for-the-badge)](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/) - -[![Discord](https://img.shields.io/badge/RVC%20Developers-Discord-7289DA?style=for-the-badge&logo=discord&logoColor=white)](https://discord.gg/HcsmBBGyVk) - -
- ---- - -[**업데이트 로그**](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/blob/main/docs/Changelog_KO.md) - -[**English**](./README.en.md) | [**中文简体**](../README.md) | [**日本語**](./README.ja.md) | [**한국어**](./README.ko.md) ([**韓國語**](./README.ko.han.md)) - -> [데모 영상](https://www.bilibili.com/video/BV1pm4y1z7Gm/)을 확인해 보세요! - -> RVC를 활용한 실시간 음성변환: [w-okada/voice-changer](https://github.com/w-okada/voice-changer) - -> 기본 모델은 50시간 가량의 고퀄리티 오픈 소스 VCTK 데이터셋을 사용하였으므로, 저작권상의 염려가 없으니 안심하고 사용하시기 바랍니다. - -> 저작권 문제가 없는 고퀄리티의 노래를 이후에도 계속해서 훈련할 예정입니다. - -## 소개 - -본 Repo는 다음과 같은 특징을 가지고 있습니다: - -- top1 검색을 이용하여 입력 음색 특징을 훈련 세트 음색 특징으로 대체하여 음색의 누출을 방지; -- 상대적으로 낮은 성능의 GPU에서도 빠른 훈련 가능; -- 적은 양의 데이터로 훈련해도 좋은 결과를 얻을 수 있음 (최소 10분 이상의 저잡음 음성 데이터를 사용하는 것을 권장); -- 모델 융합을 통한 음색의 변조 가능 (ckpt 처리 탭->ckpt 병합 선택); -- 사용하기 쉬운 WebUI (웹 인터페이스); -- UVR5 모델을 이용하여 목소리와 배경음악의 빠른 분리; - -## 환경의 준비 - -poetry를 통해 dependecies를 설치하는 것을 권장합니다. - -다음 명령은 Python 버전 3.8 이상의 환경에서 실행되어야 합니다: - -```bash -# PyTorch 관련 주요 dependencies 설치, 이미 설치되어 있는 경우 건너뛰기 가능 -# 참조: https://pytorch.org/get-started/locally/ -pip install torch torchvision torchaudio - -# Windows + Nvidia Ampere Architecture(RTX30xx)를 사용하고 있다면, https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/issues/21 에서 명시된 것과 같이 PyTorch에 맞는 CUDA 버전을 지정해야 합니다. -#pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu117 - -# Poetry 설치, 이미 설치되어 있는 경우 건너뛰기 가능 -# Reference: https://python-poetry.org/docs/#installation -curl -sSL https://install.python-poetry.org | python3 - - -# Dependecies 설치 -poetry install -``` - -pip를 활용하여 dependencies를 설치하여도 무방합니다. - -```bash -pip install -r requirements.txt -``` - -## 기타 사전 모델 준비 - -RVC 모델은 추론과 훈련을 위하여 다른 사전 모델이 필요합니다. - -[Huggingface space](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/)를 통해서 다운로드 할 수 있습니다. - -다음은 RVC에 필요한 사전 모델 및 기타 파일 목록입니다: - -```bash -hubert_base.pt - -./pretrained - -./uvr5_weights - -# Windows를 사용하는 경우 이 사전도 필요할 수 있습니다. FFmpeg가 설치되어 있으면 건너뛰어도 됩니다. -ffmpeg.exe -``` - -그 후 이하의 명령을 사용하여 WebUI를 시작할 수 있습니다: - -```bash -python infer-web.py -``` - -Windows를 사용하는 경우 `RVC-beta.7z`를 다운로드 및 압축 해제하여 RVC를 직접 사용하거나 `go-web.bat`을 사용하여 WebUi를 시작할 수 있습니다. - -## 참고 - -- [ContentVec](https://github.com/auspicious3000/contentvec/) -- [VITS](https://github.com/jaywalnut310/vits) -- [HIFIGAN](https://github.com/jik876/hifi-gan) -- [Gradio](https://github.com/gradio-app/gradio) -- [FFmpeg](https://github.com/FFmpeg/FFmpeg) -- [Ultimate Vocal Remover](https://github.com/Anjok07/ultimatevocalremovergui) -- [audio-slicer](https://github.com/openvpi/audio-slicer) - -## 모든 기여자 분들의 노력에 감사드립니다. - - - - diff --git a/docs/faiss_tips_en.md b/docs/faiss_tips_en.md deleted file mode 100644 index aafad6ed6..000000000 --- a/docs/faiss_tips_en.md +++ /dev/null @@ -1,102 +0,0 @@ -faiss tuning TIPS -================== -# about faiss -faiss is a library of neighborhood searches for dense vectors, developed by facebook research, which efficiently implements many approximate neighborhood search methods. -Approximate Neighbor Search finds similar vectors quickly while sacrificing some accuracy. - -## faiss in RVC -In RVC, for the embedding of features converted by HuBERT, we search for embeddings similar to the embedding generated from the training data and mix them to achieve a conversion that is closer to the original speech. However, since this search takes time if performed naively, high-speed conversion is realized by using approximate neighborhood search. 
- -# implementation overview -In '/logs/your-experiment/3_feature256' where the model is located, features extracted by HuBERT from each voice data are located. -From here we read the npy files in order sorted by filename and concatenate the vectors to create big_npy. (This vector has shape [N, 256].) -After saving big_npy as /logs/your-experiment/total_fea.npy, train it with faiss. - -In this article, I will explain the meaning of these parameters. - -# Explanation of the method -## index factory -An index factory is a unique faiss notation that expresses a pipeline that connects multiple approximate neighborhood search methods as a string. -This allows you to try various approximate neighborhood search methods simply by changing the index factory string. -In RVC it is used like this: - -```python -index = faiss.index_factory(256, "IVF%s,Flat" % n_ivf) -``` -Among the arguments of index_factory, the first is the number of dimensions of the vector, the second is the index factory string, and the third is the distance to use. - -For more detailed notation -https://github.com/facebookresearch/faiss/wiki/The-index-factory - -## index for distance -There are two typical indexes used as similarity of embedding as follows. - -- Euclidean distance (METRIC_L2) -- inner product (METRIC_INNER_PRODUCT) - -Euclidean distance takes the squared difference in each dimension, sums the differences in all dimensions, and then takes the square root. This is the same as the distance in 2D and 3D that we use on a daily basis. -The inner product is not used as an index of similarity as it is, and the cosine similarity that takes the inner product after being normalized by the L2 norm is generally used. - -Which is better depends on the case, but cosine similarity is often used in embedding obtained by word2vec and similar image retrieval models learned by ArcFace. If you want to do l2 normalization on vector X with numpy, you can do it with the following code with eps small enough to avoid 0 division. - -```python -X_normed = X / np.maximum(eps, np.linalg.norm(X, ord=2, axis=-1, keepdims=True)) -``` - -Also, for the index factory, you can change the distance index used for calculation by choosing the value to pass as the third argument. - -```python -index = faiss.index_factory(dimention, text, faiss.METRIC_INNER_PRODUCT) -``` - -## IVF -IVF (Inverted file indexes) is an algorithm similar to the inverted index in full-text search. -During learning, the search target is clustered with kmeans, and Voronoi partitioning is performed using the cluster center. Each data point is assigned a cluster, so we create a dictionary that looks up the data points from the clusters. - -For example, if clusters are assigned as follows -|index|Cluster| -|-----|-------| -|1|A| -|2|B| -|3|A| -|4|C| -|5|B| - -The resulting inverted index looks like this: - -|cluster|index| -|-------|-----| -|A|1, 3| -|B|2, 5| -|C|4| - -When searching, we first search n_probe clusters from the clusters, and then calculate the distances for the data points belonging to each cluster. - -# recommend parameter -There are official guidelines on how to choose an index, so I will explain accordingly. -https://github.com/facebookresearch/faiss/wiki/Guidelines-to-choose-an-index - -For datasets below 1M, 4bit-PQ is the most efficient method available in faiss as of April 2023. 
-Combining this with IVF, narrowing down the candidates with 4bit-PQ, and finally recalculating the distance with an accurate index can be described by using the following index factory. - -```python -index = faiss.index_factory(256, "IVF1024,PQ128x4fs,RFlat") -``` - -## Recommended parameters for IVF -Consider the case of too many IVFs. For example, if coarse quantization by IVF is performed for the number of data, this is the same as a naive exhaustive search and is inefficient. -For 1M or less, IVF values are recommended between 4*sqrt(N) ~ 16*sqrt(N) for N number of data points. - -Since the calculation time increases in proportion to the number of n_probes, please consult with the accuracy and choose appropriately. Personally, I don't think RVC needs that much accuracy, so n_probe = 1 is fine. - -## FastScan -FastScan is a method that enables high-speed approximation of distances by Cartesian product quantization by performing them in registers. -Cartesian product quantization performs clustering independently for each d dimension (usually d = 2) during learning, calculates the distance between clusters in advance, and creates a lookup table. At the time of prediction, the distance of each dimension can be calculated in O(1) by looking at the lookup table. -So the number you specify after PQ usually specifies half the dimension of the vector. - -For a more detailed description of FastScan, please refer to the official documentation. -https://github.com/facebookresearch/faiss/wiki/Fast-accumulation-of-PQ-and-AQ-codes-(FastScan) - -## RFlat -RFlat is an instruction to recalculate the rough distance calculated by FastScan with the exact distance specified by the third argument of index factory. -When getting k neighbors, k*k_factor points are recalculated. 
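As a compact illustration of the pipeline described above — concatenate the HuBERT features into big_npy, train an IVF index, then query it with a small n_probe — here is a minimal sketch. It assumes the `logs/your-experiment/3_feature256` layout and the 4*sqrt(N) guideline from this document; it is not the repository's own train_index code.

```python
import glob
import numpy as np
import faiss

# Read the per-utterance HuBERT features (each [n_frames, 256]) sorted by filename
feature_dir = "logs/your-experiment/3_feature256"
npys = [np.load(p) for p in sorted(glob.glob(f"{feature_dir}/*.npy"))]
big_npy = np.concatenate(npys, axis=0).astype("float32")      # shape [N, 256]
np.save("logs/your-experiment/total_fea.npy", big_npy)

# IVF size chosen per the 4*sqrt(N) ~ 16*sqrt(N) recommendation above
n_ivf = max(1, int(4 * np.sqrt(big_npy.shape[0])))
index = faiss.index_factory(256, "IVF%s,Flat" % n_ivf)        # METRIC_L2 by default
index.train(big_npy)
index.add(big_npy)

# n_probe = 1 is usually enough for RVC, as argued above
index.nprobe = 1
distances, neighbors = index.search(big_npy[:1], 8)           # 8 nearest training vectors
print(distances, neighbors)
```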
diff --git a/docs/faiss_tips_ja.md b/docs/faiss_tips_ja.md deleted file mode 100644 index 89cf5ba56..000000000 --- a/docs/faiss_tips_ja.md +++ /dev/null @@ -1,101 +0,0 @@ -faiss tuning TIPS -================== -# about faiss -faissはfacebook researchの開発する、密なベクトルに対する近傍探索をまとめたライブラリで、多くの近似近傍探索の手法を効率的に実装しています。 -近似近傍探索はある程度精度を犠牲にしながら高速に類似するベクトルを探します。 - -## faiss in RVC -RVCではHuBERTで変換した特徴量のEmbeddingに対し、学習データから生成されたEmbeddingと類似するものを検索し、混ぜることでより元の音声に近い変換を実現しています。ただ、この検索は愚直に行うと時間がかかるため、近似近傍探索を用いることで高速な変換を実現しています。 - -# 実装のoverview -モデルが配置されている '/logs/your-experiment/3_feature256'には各音声データからHuBERTで抽出された特徴量が配置されています。 -ここからnpyファイルをファイル名でソートした順番で読み込み、ベクトルを連結してbig_npyを作成しfaissを学習させます。(このベクトルのshapeは[N, 256]です。) - -本Tipsではまずこれらのパラメータの意味を解説します。 - -# 手法の解説 -## index factory -index factoryは複数の近似近傍探索の手法を繋げるパイプラインをstringで表記するfaiss独自の記法です。 -これにより、index factoryの文字列を変更するだけで様々な近似近傍探索の手法を試せます。 -RVCでは以下のように使われています。 - -```python -index = faiss.index_factory(256, "IVF%s,Flat" % n_ivf) -``` -index_factoryの引数のうち、1つ目はベクトルの次元数、2つ目はindex factoryの文字列で、3つ目には用いる距離を指定することができます。 - -より詳細な記法については -https://github.com/facebookresearch/faiss/wiki/The-index-factory - -## 距離指標 -embeddingの類似度として用いられる代表的な指標として以下の二つがあります。 - -- ユークリッド距離(METRIC_L2) -- 内積(METRIC_INNER_PRODUCT) - -ユークリッド距離では各次元において二乗の差をとり、全次元の差を足してから平方根をとります。これは日常的に用いる2次元、3次元での距離と同じです。 -内積はこのままでは類似度の指標として用いず、一般的にはL2ノルムで正規化してから内積をとるコサイン類似度を用います。 - -どちらがよいかは場合によりますが、word2vec等で得られるembeddingやArcFace等で学習した類似画像検索のモデルではコサイン類似度が用いられることが多いです。ベクトルXに対してl2正規化をnumpyで行う場合は、0 divisionを避けるために十分に小さな値をepsとして以下のコードで可能です。 - -```python -X_normed = X / np.maximum(eps, np.linalg.norm(X, ord=2, axis=-1, keepdims=True)) -``` - -また、index factoryには第3引数に渡す値を選ぶことで計算に用いる距離指標を変更できます。 - -```python -index = faiss.index_factory(dimention, text, faiss.METRIC_INNER_PRODUCT) -``` - -## IVF -IVF(Inverted file indexes)は全文検索における転置インデックスと似たようなアルゴリズムです。 -学習時には検索対象に対してkmeansでクラスタリングを行い、クラスタ中心を用いてボロノイ分割を行います。各データ点には一つずつクラスタが割り当てられるので、クラスタからデータ点を逆引きする辞書を作成します。 - -例えば以下のようにクラスタが割り当てられた場合 -|index|クラスタ| -|-----|-------| -|1|A| -|2|B| -|3|A| -|4|C| -|5|B| - -作成される転置インデックスは以下のようになります。 - -|クラスタ|index| -|-------|-----| -|A|1, 3| -|B|2, 5| -|C|4| - -検索時にはまずクラスタからn_probe個のクラスタを検索し、次にそれぞれのクラスタに属するデータ点について距離を計算します。 - -# 推奨されるパラメータ -indexの選び方については公式にガイドラインがあるので、それに準じて説明します。 -https://github.com/facebookresearch/faiss/wiki/Guidelines-to-choose-an-index - -1M以下のデータセットにおいては4bit-PQが2023年4月時点ではfaissで利用できる最も効率的な手法です。 -これをIVFと組み合わせ、4bit-PQで候補を絞り、最後に正確な指標で距離を再計算するには以下のindex factoryを用いることで記載できます。 - -```python -index = faiss.index_factory(256, "IVF1024,PQ128x4fs,RFlat") -``` - -## IVFの推奨パラメータ -IVFの数が多すぎる場合、たとえばデータ数の数だけIVFによる粗量子化を行うと、これは愚直な全探索と同じになり効率が悪いです。 -1M以下の場合ではIVFの値はデータ点の数Nに対して4*sqrt(N) ~ 16*sqrt(N)に推奨しています。 - -n_probeはn_probeの数に比例して計算時間が増えるので、精度と相談して適切に選んでください。個人的にはRVCにおいてそこまで精度は必要ないと思うのでn_probe = 1で良いと思います。 - -## FastScan -FastScanは直積量子化で大まかに距離を近似するのを、レジスタ内で行うことにより高速に行うようにした手法です。 -直積量子化は学習時にd次元ごと(通常はd=2)に独立してクラスタリングを行い、クラスタ同士の距離を事前計算してlookup tableを作成します。予測時はlookup tableを見ることで各次元の距離をO(1)で計算できます。 -そのため、PQの次に指定する数字は通常ベクトルの半分の次元を指定します。 - -FastScanに関するより詳細な説明は公式のドキュメントを参照してください。 -https://github.com/facebookresearch/faiss/wiki/Fast-accumulation-of-PQ-and-AQ-codes-(FastScan) - -## RFlat -RFlatはFastScanで計算した大まかな距離を、index factoryの第三引数で指定した正確な距離で再計算する指示です。 -k個の近傍を取得する際は、k*k_factor個の点について再計算が行われます。 diff --git a/docs/faiss_tips_ko.md b/docs/faiss_tips_ko.md deleted file mode 100644 index ecd518ca2..000000000 --- a/docs/faiss_tips_ko.md +++ /dev/null @@ -1,132 +0,0 @@ -Facebook AI Similarity Search (Faiss) 팁 -================== -# Faiss에 대하여 -Faiss 
는 Facebook Research가 개발하는, 고밀도 벡터 이웃 검색 라이브러리입니다. 근사 근접 탐색법 (Approximate Neigbor Search)은 약간의 정확성을 희생하여 유사 벡터를 고속으로 찾습니다. - -## RVC에 있어서 Faiss -RVC에서는 HuBERT로 변환한 feature의 embedding을 위해 훈련 데이터에서 생성된 embedding과 유사한 embadding을 검색하고 혼합하여 원래의 음성에 더욱 가까운 변환을 달성합니다. 그러나, 이 탐색법은 단순히 수행하면 시간이 다소 소모되므로, 근사 근접 탐색법을 통해 고속 변환을 가능케 하고 있습니다. - -# 구현 개요 -모델이 위치한 `/logs/your-experiment/3_feature256`에는 각 음성 데이터에서 HuBERT가 추출한 feature들이 있습니다. 여기에서 파일 이름별로 정렬된 npy 파일을 읽고, 벡터를 연결하여 big_npy ([N, 256] 모양의 벡터) 를 만듭니다. big_npy를 `/logs/your-experiment/total_fea.npy`로 저장한 후, Faiss로 학습시킵니다. - -2023/04/18 기준으로, Faiss의 Index Factory 기능을 이용해, L2 거리에 근거하는 IVF를 이용하고 있습니다. IVF의 분할수(n_ivf)는 N//39로, n_probe는 int(np.power(n_ivf, 0.3))가 사용되고 있습니다. (infer-web.py의 train_index 주위를 찾으십시오.) - -이 팁에서는 먼저 이러한 매개 변수의 의미를 설명하고, 개발자가 추후 더 나은 index를 작성할 수 있도록 하는 조언을 작성합니다. - -# 방법의 설명 -## Index factory -index factory는 여러 근사 근접 탐색법을 문자열로 연결하는 pipeline을 문자열로 표기하는 Faiss만의 독자적인 기법입니다. 이를 통해 index factory의 문자열을 변경하는 것만으로 다양한 근사 근접 탐색을 시도해 볼 수 있습니다. RVC에서는 다음과 같이 사용됩니다: - -```python -index = Faiss.index_factory(256, "IVF%s,Flat" % n_ivf) -``` -`index_factory`의 인수들 중 첫 번째는 벡터의 차원 수이고, 두번째는 index factory 문자열이며, 세번째에는 사용할 거리를 지정할 수 있습니다. - -기법의 보다 자세한 설명은 https://github.com/facebookresearch/Faiss/wiki/The-index-factory 를 확인해 주십시오. - -## 거리에 대한 index -embedding의 유사도로서 사용되는 대표적인 지표로서 이하의 2개가 있습니다. - -- 유클리드 거리 (METRIC_L2) -- 내적(内積) (METRIC_INNER_PRODUCT) - -유클리드 거리에서는 각 차원에서 제곱의 차를 구하고, 각 차원에서 구한 차를 모두 더한 후 제곱근을 취합니다. 이것은 일상적으로 사용되는 2차원, 3차원에서의 거리의 연산법과 같습니다. 내적은 그 값을 그대로 유사도 지표로 사용하지 않고, L2 정규화를 한 이후 내적을 취하는 코사인 유사도를 사용합니다. - -어느 쪽이 더 좋은지는 경우에 따라 다르지만, word2vec에서 얻은 embedding 및 ArcFace를 활용한 이미지 검색 모델은 코사인 유사성이 이용되는 경우가 많습니다. numpy를 사용하여 벡터 X에 대해 L2 정규화를 하고자 하는 경우, 0 division을 피하기 위해 충분히 작은 값을 eps로 한 뒤 이하에 코드를 활용하면 됩니다. - -```python -X_normed = X / np.maximum(eps, np.linalg.norm(X, ord=2, axis=-1, keepdims=True)) -``` - -또한, `index factory`의 3번째 인수에 건네주는 값을 선택하는 것을 통해 계산에 사용하는 거리 index를 변경할 수 있습니다. - -```python -index = Faiss.index_factory(dimention, text, Faiss.METRIC_INNER_PRODUCT) -``` - -## IVF -IVF (Inverted file indexes)는 역색인 탐색법과 유사한 알고리즘입니다. 학습시에는 검색 대상에 대해 k-평균 군집법을 실시하고 클러스터 중심을 이용해 보로노이 분할을 실시합니다. 각 데이터 포인트에는 클러스터가 할당되므로, 클러스터에서 데이터 포인트를 조회하는 dictionary를 만듭니다. - -예를 들어, 클러스터가 다음과 같이 할당된 경우 -|index|Cluster| -|-----|-------| -|1|A| -|2|B| -|3|A| -|4|C| -|5|B| - -IVF 이후의 결과는 다음과 같습니다: - -|cluster|index| -|-------|-----| -|A|1, 3| -|B|2, 5| -|C|4| - -탐색 시, 우선 클러스터에서 `n_probe`개의 클러스터를 탐색한 다음, 각 클러스터에 속한 데이터 포인트의 거리를 계산합니다. - -# 권장 매개변수 -index의 선택 방법에 대해서는 공식적으로 가이드 라인이 있으므로, 거기에 준해 설명합니다. -https://github.com/facebookresearch/Faiss/wiki/Guidelines-to-choose-an-index - -1M 이하의 데이터 세트에 있어서는 4bit-PQ가 2023년 4월 시점에서는 Faiss로 이용할 수 있는 가장 효율적인 수법입니다. 이것을 IVF와 조합해, 4bit-PQ로 후보를 추려내고, 마지막으로 이하의 index factory를 이용하여 정확한 지표로 거리를 재계산하면 됩니다. - -```python -index = Faiss.index_factory(256, "IVF1024,PQ128x4fs,RFlat") -``` - -## IVF 권장 매개변수 -IVF의 수가 너무 많으면, 가령 데이터 수의 수만큼 IVF로 양자화(Quantization)를 수행하면, 이것은 완전탐색과 같아져 효율이 나빠지게 됩니다. 1M 이하의 경우 IVF 값은 데이터 포인트 수 N에 대해 4sqrt(N) ~ 16sqrt(N)를 사용하는 것을 권장합니다. - -n_probe는 n_probe의 수에 비례하여 계산 시간이 늘어나므로 정확도와 시간을 적절히 균형을 맞추어 주십시오. 개인적으로 RVC에 있어서 그렇게까지 정확도는 필요 없다고 생각하기 때문에 n_probe = 1이면 된다고 생각합니다. - -## FastScan -FastScan은 직적 양자화를 레지스터에서 수행함으로써 거리의 고속 근사를 가능하게 하는 방법입니다.직적 양자화는 학습시에 d차원마다(보통 d=2)에 독립적으로 클러스터링을 실시해, 클러스터끼리의 거리를 사전 계산해 lookup table를 작성합니다. 예측시는 lookup table을 보면 각 차원의 거리를 O(1)로 계산할 수 있습니다. 따라서 PQ 다음에 지정하는 숫자는 일반적으로 벡터의 절반 차원을 지정합니다. - -FastScan에 대한 자세한 설명은 공식 문서를 참조하십시오. 
-https://github.com/facebookresearch/Faiss/wiki/Fast-accumulation-of-PQ-and-AQ-codes-(FastScan) - -## RFlat -RFlat은 FastScan이 계산한 대략적인 거리를 index factory의 3번째 인수로 지정한 정확한 거리로 다시 계산하라는 인스트럭션입니다. k개의 근접 변수를 가져올 때 k*k_factor개의 점에 대해 재계산이 이루어집니다. - -# Embedding 테크닉 -## Alpha 쿼리 확장 -퀴리 확장이란 탐색에서 사용되는 기술로, 예를 들어 전문 탐색 시, 입력된 검색문에 단어를 몇 개를 추가함으로써 검색 정확도를 올리는 방법입니다. 백터 탐색을 위해서도 몇가지 방법이 제안되었는데, 그 중 α-쿼리 확장은 추가 학습이 필요 없는 매우 효과적인 방법으로 알려져 있습니다. [Attention-Based Query Expansion Learning](https://arxiv.org/abs/2007.08019)와 [2nd place solution of kaggle shopee competition](https://www.kaggle.com/code/lyakaap/2nd-place-solution/notebook) 논문에서 소개된 바 있습니다.. - -α-쿼리 확장은 한 벡터에 인접한 벡터를 유사도의 α곱한 가중치로 더해주면 됩니다. 코드로 예시를 들어 보겠습니다. big_npy를 α query expansion로 대체합니다. - -```python -alpha = 3. -index = Faiss.index_factory(256, "IVF512,PQ128x4fs,RFlat") -original_norm = np.maximum(np.linalg.norm(big_npy, ord=2, axis=1, keepdims=True), 1e-9) -big_npy /= original_norm -index.train(big_npy) -index.add(big_npy) -dist, neighbor = index.search(big_npy, num_expand) - -expand_arrays = [] -ixs = np.arange(big_npy.shape[0]) -for i in range(-(-big_npy.shape[0]//batch_size)): - ix = ixs[i*batch_size:(i+1)*batch_size] - weight = np.power(np.einsum("nd,nmd->nm", big_npy[ix], big_npy[neighbor[ix]]), alpha) - expand_arrays.append(np.sum(big_npy[neighbor[ix]] * np.expand_dims(weight, axis=2),axis=1)) -big_npy = np.concatenate(expand_arrays, axis=0) - -# index version 정규화 -big_npy = big_npy / np.maximum(np.linalg.norm(big_npy, ord=2, axis=1, keepdims=True), 1e-9) -``` - -위 테크닉은 탐색을 수행하는 쿼리에도, 탐색 대상 DB에도 적응 가능한 테크닉입니다. - -## MiniBatch KMeans에 의한 embedding 압축 - -total_fea.npy가 너무 클 경우 K-means를 이용하여 벡터를 작게 만드는 것이 가능합니다. 이하 코드로 embedding의 압축이 가능합니다. n_clusters에 압축하고자 하는 크기를 지정하고 batch_size에 256 * CPU의 코어 수를 지정함으로써 CPU 병렬화의 혜택을 충분히 얻을 수 있습니다. - -```python -import multiprocessing -from sklearn.cluster import MiniBatchKMeans -kmeans = MiniBatchKMeans(n_clusters=10000, batch_size=256 * multiprocessing.cpu_count(), init="random") -kmeans.fit(big_npy) -sample_npy = kmeans.cluster_centers_ -``` \ No newline at end of file diff --git a/docs/faq.md b/docs/faq.md deleted file mode 100644 index 74eff82d9..000000000 --- a/docs/faq.md +++ /dev/null @@ -1,89 +0,0 @@ -## Q1:ffmpeg error/utf8 error. - -大概率不是ffmpeg问题,而是音频路径问题;
-ffmpeg读取路径带空格、()等特殊符号,可能出现ffmpeg error;训练集音频带中文路径,在写入filelist.txt的时候可能出现utf8 error;
- -## Q2:一键训练结束没有索引 - -显示"Training is done. The program is closed."则模型训练成功,后续紧邻的报错是假的;
- -一键训练结束完成没有added开头的索引文件,可能是因为训练集太大卡住了添加索引的步骤;已通过批处理add索引解决内存add索引对内存需求过大的问题。临时可尝试再次点击"训练索引"按钮。
- -## Q3:训练结束推理没看到训练集的音色 -点刷新音色再看看,如果还没有看看训练有没有报错,控制台和webui的截图,logs/实验名下的log,都可以发给开发者看看。
- -## Q4:如何分享模型 -  rvc_root/logs/实验名 下面存储的pth不是用来分享模型用来推理的,而是为了存储实验状态供复现,以及继续训练用的。用来分享的模型应该是weights文件夹下大小为60+MB的pth文件;
-  后续将把weights/exp_name.pth和logs/exp_name/added_xxx.index合并打包成weights/exp_name.zip省去填写index的步骤,那么zip文件用来分享,不要分享pth文件,除非是想换机器继续训练;
-  如果你把logs文件夹下的几百MB的pth文件复制/分享到weights文件夹下强行用于推理,可能会出现f0,tgt_sr等各种key不存在的报错。你需要用ckpt选项卡最下面,手工或自动(本地logs下如果能找到相关信息则会自动)选择是否携带音高、目标音频采样率的选项后进行ckpt小模型提取(输入路径填G开头的那个),提取完在weights文件夹下会出现60+MB的pth文件,刷新音色后可以选择使用。
- -## Q5:Connection Error. -也许你关闭了控制台(黑色窗口)。
- -## Q6:WebUI弹出Expecting value: line 1 column 1 (char 0). -请关闭系统局域网代理/全局代理。
- -这个不仅是客户端的代理,也包括服务端的代理(例如你使用autodl设置了http_proxy和https_proxy学术加速,使用时也需要unset关掉)
- -## Q7:不用WebUI如何通过命令训练推理 -训练脚本:
-可先跑通WebUI,消息窗内会显示数据集处理和训练用命令行;
- -推理脚本:
-https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/myinfer.py
- -例子:
- -runtime\python.exe myinfer.py 0 "E:\codes\py39\RVC-beta\todo-songs\1111.wav" "E:\codes\py39\logs\mi-test\added_IVF677_Flat_nprobe_7.index" harvest "test.wav" "weights/mi-test.pth" 0.6 cuda:0 True
- -f0up_key=sys.argv[1]
-input_path=sys.argv[2]
-index_path=sys.argv[3]
-f0method=sys.argv[4]#harvest or pm
-opt_path=sys.argv[5]
-model_path=sys.argv[6]
-index_rate=float(sys.argv[7])
-device=sys.argv[8]
-is_half=bool(sys.argv[9])
- -## Q8:Cuda error/Cuda out of memory. -小概率是cuda配置问题、设备不支持;大概率是显存不够(out of memory);
- -训练的话缩小batch size(如果缩小到1还不够只能更换显卡训练),推理的话酌情缩小config.py结尾的x_pad,x_query,x_center,x_max。4G以下显存(例如1060(3G)和各种2G显卡)可以直接放弃,4G显存显卡还有救。
- -## Q9:total_epoch调多少比较好 - -如果训练集音质差底噪大,20~30足够了,调太高,底模音质无法带高你的低音质训练集
-如果训练集音质高底噪低时长多,可以调高,200是ok的(训练速度很快,既然你有条件准备高音质训练集,显卡想必条件也不错,肯定不在乎多一些训练时间)
- -## Q10:需要多少训练集时长 -  推荐10min至50min
-  保证音质高底噪低的情况下,如果有个人特色的音色统一,则多多益善
-  高水平的训练集(精简+音色有特色),5min至10min也是ok的,仓库作者本人就经常这么玩
-  也有人拿1min至2min的数据来训练并且训练成功的,但是成功经验是其他人不可复现的,不太具备参考价值。这要求训练集音色特色非常明显(比如说高频气声较明显的萝莉少女音),且音质高;
-  1min以下时长数据目前没见有人尝试(成功)过。不建议进行这种鬼畜行为。
- -## Q11:index rate干嘛用的,怎么调(科普) -  如果底模和推理源的音质高于训练集的音质,他们可以带高推理结果的音质,但代价可能是音色往底模/推理源的音色靠,这种现象叫做"音色泄露";
-  index rate用来削减/解决音色泄露问题。调到1,则理论上不存在推理源的音色泄露问题,但音质更倾向于训练集。如果训练集音质比推理源低,则index rate调高可能降低音质。调到0,则不具备利用检索混合来保护训练集音色的效果;
-  如果训练集优质时长多,可调高total_epoch,此时模型本身不太会引用推理源和底模的音色,很少存在"音色泄露"问题,此时index_rate不重要,你甚至可以不建立/分享index索引文件。
- -## Q12:推理怎么选gpu -config.py文件里device cuda:后面选择卡号;
-卡号和显卡的映射关系,在训练选项卡的显卡信息栏里能看到。
- -## Q13:如何推理训练中间保存的pth -通过ckpt选项卡最下面提取小模型。
- - -## Q14:如何中断和继续训练 -现阶段只能关闭WebUI控制台双击go-web.bat重启程序。网页参数也要刷新重新填写;
-继续训练:相同网页参数点训练模型,就会接着上次的checkpoint继续训练。
- -## Q15:训练时出现文件页面/内存error -进程开太多了,内存炸了。你可能可以通过如下方式解决
-1、"提取音高和处理数据使用的CPU进程数" 酌情拉低;
-2、训练集音频手工切一下,不要太长。
- - - diff --git a/docs/faq_en.md b/docs/faq_en.md deleted file mode 100644 index 05f03ec04..000000000 --- a/docs/faq_en.md +++ /dev/null @@ -1,95 +0,0 @@ -## Q1:ffmpeg error/utf8 error. -It is most likely not a FFmpeg issue, but rather an audio path issue; - -FFmpeg may encounter an error when reading paths containing special characters like spaces and (), which may cause an FFmpeg error; and when the training set's audio contains Chinese paths, writing it into filelist.txt may cause a utf8 error.
- -## Q2:Cannot find index file after "One-click Training". -If it displays "Training is done. The program is closed," then the model has been trained successfully, and the subsequent errors are fake; - -The lack of an 'added' index file after One-click training may be due to the training set being too large, causing the addition of the index to get stuck; this has been resolved by using batch processing to add the index, which solves the problem of memory overload when adding the index. As a temporary solution, try clicking the "Train Index" button again.
- -## Q3:Cannot find the model in “Inferencing timbre” after training -Click “Refresh timbre list” and check again; if still not visible, check if there are any errors during training and send screenshots of the console, web UI, and logs/experiment_name/*.log to the developers for further analysis.
- -## Q4:How to share a model/How to use others' models? -The pth files stored in rvc_root/logs/experiment_name are not meant for sharing or inference, but for storing the experiment checkpoints for reproducibility and further training. The model to be shared should be the 60+MB pth file in the weights folder; - -In the future, weights/exp_name.pth and logs/exp_name/added_xxx.index will be merged into a single weights/exp_name.zip file to eliminate the need for manual index input; so share the zip file, not the pth file, unless you want to continue training on a different machine; - -Copying/sharing the several-hundred-MB pth files from the logs folder to the weights folder for forced inference may result in errors such as missing f0, tgt_sr, or other keys. You need to use the ckpt tab at the bottom to select, manually or automatically (automatic if the information is found in logs/exp_name), whether to include pitch information and the target audio sampling rate, and then extract the smaller model. After extraction, there will be a 60+MB pth file in the weights folder, and you can refresh the voices to use it.
- -## Q5:Connection Error. -You may have closed the console (black command line window).
- -## Q6:WebUI popup 'Expecting value: line 1 column 1 (char 0)'. -Please disable system LAN proxy/global proxy and then refresh.
- -## Q7:How to train and infer without the WebUI? -Training script:
-You can run training in WebUI first, and the command-line versions of dataset preprocessing and training will be displayed in the message window.
- -Inference script:
-https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/myinfer.py
- - -e.g.
- -runtime\python.exe myinfer.py 0 "E:\codes\py39\RVC-beta\todo-songs\1111.wav" "E:\codes\py39\logs\mi-test\added_IVF677_Flat_nprobe_7.index" harvest "test.wav" "weights/mi-test.pth" 0.6 cuda:0 True
- - -f0up_key=sys.argv[1]
-input_path=sys.argv[2]
-index_path=sys.argv[3]
-f0method=sys.argv[4]#harvest or pm
-opt_path=sys.argv[5]
-model_path=sys.argv[6]
-index_rate=float(sys.argv[7])
-device=sys.argv[8]
-is_half=bool(sys.argv[9])
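If you prefer to launch the same inference from Python rather than typing the command by hand, a hedged wrapper might look like the sketch below; it only rebuilds the example command shown above (the paths are the same illustrative ones, not real files).

```python
import subprocess

# Positional arguments in the exact order myinfer.py reads them (sys.argv[1]..[9])
args = [
    r"runtime\python.exe", "myinfer.py",
    "0",                                                             # f0up_key
    r"E:\codes\py39\RVC-beta\todo-songs\1111.wav",                   # input_path
    r"E:\codes\py39\logs\mi-test\added_IVF677_Flat_nprobe_7.index",  # index_path
    "harvest",                                                       # f0method (harvest or pm)
    "test.wav",                                                      # opt_path
    "weights/mi-test.pth",                                           # model_path
    "0.6",                                                           # index_rate
    "cuda:0",                                                        # device
    "True",                                                          # is_half
]
subprocess.run(args, check=True)
```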
- -## Q8:Cuda error/Cuda out of memory. -There is a small chance that there is a problem with the CUDA configuration or the device is not supported; more likely, there is not enough memory (out of memory).
- -For training, reduce the batch size (if reducing it to 1 is still not enough, you may need a different graphics card); for inference, lower the x_pad, x_query, x_center, and x_max settings at the end of config.py as needed. Cards with less than 4GB of VRAM (e.g. the 1060 3GB and various 2GB cards) are essentially a lost cause, while 4GB cards still have a chance.
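For reference, the sketch below restates the x_pad/x_query/x_center/x_max profiles from the config.py removed earlier in this diff; the `is_half` and `gpu_mem` values are only illustrative.

```python
is_half = False   # fp16 disabled, e.g. on older cards
gpu_mem = 4       # total VRAM in GB (illustrative)

if is_half:
    x_pad, x_query, x_center, x_max = 3, 10, 60, 65   # "6G VRAM" profile
else:
    x_pad, x_query, x_center, x_max = 1, 6, 38, 41    # "5G VRAM" profile
if gpu_mem is not None and gpu_mem <= 4:
    x_pad, x_query, x_center, x_max = 1, 5, 30, 32    # extra clamp for <=4GB cards
print(x_pad, x_query, x_center, x_max)
```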
- -## Q9:How many total_epoch are optimal? -If the training dataset's audio quality is poor and the noise floor is high, 20-30 epochs are sufficient. Setting it too high won't improve the audio quality of your low-quality training set.
- -If the training set audio quality is high, the noise floor is low, and there is sufficient duration, you can increase it. 200 is acceptable (since training is fast, and if you're able to prepare a high-quality training set, your GPU likely can handle a longer training duration without issue).
- -## Q10:How much training set duration is needed? - -A dataset of around 10min to 50min is recommended.
- -As long as the sound quality is high and the noise floor is low, more data can be added if the dataset's timbre is uniform.
- -For a high-quality training set (carefully trimmed, with a distinctive timbre), 5min to 10min is fine.
- -There are some people who have trained successfully with 1min to 2min data, but the success is not reproducible by others and is not very informative.
This requires that the training set has a very distinctive timbre (e.g. a high-frequency airy anime girl sound) and the quality of the audio is high; -Data of less than 1min duration has not been successfully attempted so far. This is not recommended.
- - -## Q11:What is the index rate for and how to adjust it? -If the tone quality of the pre-trained model and inference source is higher than that of the training set, they can bring up the tone quality of the inference result, but at the cost of a possible tone bias towards the tone of the underlying model/inference source rather than the tone of the training set, which is generally referred to as "tone leakage".
- -The index rate is used to reduce/resolve the timbre leakage problem. If the index rate is set to 1, theoretically there is no timbre leakage from the inference source and the timbre is biased entirely towards the training set. If the training set has lower sound quality than the inference source, a higher index rate may reduce the sound quality. Turning it down to 0 means retrieval blending is not used at all to protect the training-set timbre.
- -If the training set has good audio quality and sufficient duration, turn up total_epoch; the trained model then relies less on the inference source and the pretrained base model, there is little "timbre leakage", the index_rate matters less, and you can even skip creating/sharing the index file.
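Conceptually, the index rate is a linear blend between the HuBERT features of the inference source and the features retrieved from the training-set index. A simplified sketch of that idea (not the exact pipeline code):

```python
import numpy as np

def blend_features(source_feats: np.ndarray,
                   retrieved_feats: np.ndarray,
                   index_rate: float) -> np.ndarray:
    """index_rate = 1.0 -> rely only on features retrieved from the training set;
    index_rate = 0.0 -> rely only on the inference source's own features."""
    return index_rate * retrieved_feats + (1.0 - index_rate) * source_feats
```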
- -## Q12:How to choose the GPU when inferring? -In the config.py file, set the card number after "device cuda:".
- -The mapping between card number and graphics card can be seen in the graphics card information section of the training tab.
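For example, to run inference on the second card, the relevant setting in config.py would be changed along these lines (the exact variable form depends on your config.py version, so treat this as a sketch):

```python
# "cuda:0" is the first card, "cuda:1" the second, and so on; the mapping between
# these indices and physical GPUs is listed in the training tab's GPU information.
device = "cuda:1"
```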
- -## Q13:How to use the model saved in the middle of training? -Save via model extraction at the bottom of the ckpt processing tab. - -## Q14:File/memory error(when training)? -Too many processes and your memory is not enough. You may fix it by: - -1、decrease the input in field "Threads of CPU". - -2、pre-cut trainset to shorter audio files. - - - diff --git a/docs/training_tips_en.md b/docs/training_tips_en.md deleted file mode 100644 index ab9b1f876..000000000 --- a/docs/training_tips_en.md +++ /dev/null @@ -1,65 +0,0 @@ -Instructions and tips for RVC training -====================================== -This TIPS explains how data training is done. - -# Training flow -I will explain along the steps in the training tab of the GUI. - -## step1 -Set the experiment name here. - -You can also set here whether the model should take pitch into account. -If the model doesn't consider pitch, the model will be lighter, but not suitable for singing. - -Data for each experiment is placed in `/logs/your-experiment-name/`. - -## step2a -Loads and preprocesses audio. - -### load audio -If you specify a folder with audio, the audio files in that folder will be read automatically. -For example, if you specify `C:Users\hoge\voices`, `C:Users\hoge\voices\voice.mp3` will be loaded, but `C:Users\hoge\voices\dir\voice.mp3` will Not loaded. - -Since ffmpeg is used internally for reading audio, if the extension is supported by ffmpeg, it will be read automatically. -After converting to int16 with ffmpeg, convert to float32 and normalize between -1 to 1. - -### denoising -The audio is smoothed by scipy's filtfilt. - -### Audio Split -First, the input audio is divided by detecting parts of silence that last longer than a certain period (max_sil_kept=5 seconds?). After splitting the audio on silence, split the audio every 4 seconds with an overlap of 0.3 seconds. For audio separated within 4 seconds, after normalizing the volume, convert the wav file to `/logs/your-experiment-name/0_gt_wavs` and then convert it to 16k sampling rate to `/logs/your-experiment-name/1_16k_wavs ` as a wav file. - -## step2b -### Extract pitch -Extract pitch information from wav files. Extract the pitch information (=f0) using the method built into parselmouth or pyworld and save it in `/logs/your-experiment-name/2a_f0`. Then logarithmically convert the pitch information to an integer between 1 and 255 and save it in `/logs/your-experiment-name/2b-f0nsf`. - -### Extract feature_print -Convert the wav file to embedding in advance using HuBERT. Read the wav file saved in `/logs/your-experiment-name/1_16k_wavs`, convert the wav file to 256-dimensional features with HuBERT, and save in npy format in `/logs/your-experiment-name/3_feature256`. - -## step3 -train the model. -### Glossary for Beginners -In deep learning, the data set is divided and the learning proceeds little by little. In one model update (step), batch_size data are retrieved and predictions and error corrections are performed. Doing this once for a dataset counts as one epoch. - -Therefore, the learning time is the learning time per step x (the number of data in the dataset / batch size) x the number of epochs. In general, the larger the batch size, the more stable the learning becomes (learning time per step ÷ batch size) becomes smaller, but it uses more GPU memory. GPU RAM can be checked with the nvidia-smi command. Learning can be done in a short time by increasing the batch size as much as possible according to the machine of the execution environment. 
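As a concrete illustration of the training-time formula above (all numbers below are invented for the example; measure the per-step time on your own machine):

```python
# Training time ≈ time per step × (dataset size ÷ batch size) × number of epochs
seconds_per_step = 0.5   # measured on your own GPU
dataset_items = 1200     # training segments after preprocessing
batch_size = 8
epochs = 200

steps_per_epoch = dataset_items / batch_size                      # 150 steps
total_hours = seconds_per_step * steps_per_epoch * epochs / 3600
print(f"Estimated training time: {total_hours:.1f} hours")        # ~4.2 hours
```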
- -### Specify pretrained model -RVC starts training the model from pretrained weights instead of from 0, so it can be trained with a small dataset. - -By default - -- If you consider pitch, it loads `rvc-location/pretrained/f0G40k.pth` and `rvc-location/pretrained/f0D40k.pth`. -- If you don't consider pitch, it loads `rvc-location/pretrained/f0G40k.pth` and `rvc-location/pretrained/f0D40k.pth`. - -When learning, model parameters are saved in `logs/your-experiment-name/G_{}.pth` and `logs/your-experiment-name/D_{}.pth` for each save_every_epoch, but by specifying this path, you can start learning. You can restart or start training from model weights learned in a different experiment. - -### learning index -RVC saves the HuBERT feature values used during training, and during inference, searches for feature values that are similar to the feature values used during learning to perform inference. In order to perform this search at high speed, the index is learned in advance. -For index learning, we use the approximate neighborhood search library faiss. Read the feature value of `logs/your-experiment-name/3_feature256` and use it to learn the index, and save it as `logs/your-experiment-name/add_XXX.index`. - -(From the 20230428update version, it is read from the index, and saving / specifying is no longer necessary.) - -### Button description -- Train model: After executing step2b, press this button to train the model. -- Train feature index: After training the model, perform index learning. -- One-click training: step2b, model training and feature index training all at once. \ No newline at end of file diff --git a/docs/training_tips_ja.md b/docs/training_tips_ja.md deleted file mode 100644 index c5b06f2fd..000000000 --- a/docs/training_tips_ja.md +++ /dev/null @@ -1,64 +0,0 @@ -RVCの訓練における説明、およびTIPS -=============================== -本TIPSではどのようにデータの訓練が行われているかを説明します。 - -# 訓練の流れ -GUIの訓練タブのstepに沿って説明します。 - -## step1 -実験名の設定を行います。 - -また、モデルに音高ガイド(ピッチ)を考慮させるかもここで設定できます。考慮させない場合はモデルは軽量になりますが、歌唱には向かなくなります。 - -各実験のデータは`/logs/実験名/`に配置されます。 - -## step2a -音声の読み込みと前処理を行います。 - -### load audio -音声のあるフォルダを指定すると、そのフォルダ内にある音声ファイルを自動で読み込みます。 -例えば`C:Users\hoge\voices`を指定した場合、`C:Users\hoge\voices\voice.mp3`は読み込まれますが、`C:Users\hoge\voices\dir\voice.mp3`は読み込まれません。 - -音声の読み込みには内部でffmpegを利用しているので、ffmpegで対応している拡張子であれば自動的に読み込まれます。 -ffmpegでint16に変換した後、float32に変換し、-1 ~ 1の間に正規化されます。 - -### denoising -音声についてscipyのfiltfiltによる平滑化を行います。 - -### 音声の分割 -入力した音声はまず、一定期間(max_sil_kept=5秒?)より長く無音が続く部分を検知して音声を分割します。無音で音声を分割した後は、0.3秒のoverlapを含む4秒ごとに音声を分割します。4秒以内に区切られた音声は、音量の正規化を行った後wavファイルを`/logs/実験名/0_gt_wavs`に、そこから16kのサンプリングレートに変換して`/logs/実験名/1_16k_wavs`にwavファイルで保存します。 - -## step2b -### ピッチの抽出 -wavファイルからピッチ(音の高低)の情報を抽出します。parselmouthやpyworldに内蔵されている手法でピッチ情報(=f0)を抽出し、`/logs/実験名/2a_f0`に保存します。その後、ピッチ情報を対数で変換して1~255の整数に変換し、`/logs/実験名/2b-f0nsf`に保存します。 - -### feature_printの抽出 -HuBERTを用いてwavファイルを事前にembeddingに変換します。`/logs/実験名/1_16k_wavs`に保存したwavファイルを読み込み、HuBERTでwavファイルを256次元の特徴量に変換し、npy形式で`/logs/実験名/3_feature256`に保存します。 - -## step3 -モデルのトレーニングを行います。 -### 初心者向け用語解説 -深層学習ではデータセットを分割し、少しずつ学習を進めていきます。一回のモデルの更新(step)では、batch_size個のデータを取り出し予測と誤差の修正を行います。これをデータセットに対して一通り行うと一epochと数えます。 - -そのため、学習時間は 1step当たりの学習時間 x (データセット内のデータ数 ÷ バッチサイズ) x epoch数 かかります。一般にバッチサイズを大きくするほど学習は安定し、(1step当たりの学習時間÷バッチサイズ)は小さくなりますが、その分GPUのメモリを多く使用します。GPUのRAMはnvidia-smiコマンド等で確認できます。実行環境のマシンに合わせてバッチサイズをできるだけ大きくするとより短時間で学習が可能です。 - -### pretrained modelの指定 -RVCではモデルの訓練を0からではなく、事前学習済みの重みから開始するため、少ないデータセットで学習を行えます。 - -デフォルトでは - -- 
音高ガイドを考慮する場合、`RVCのある場所/pretrained/f0G40k.pth`と`RVCのある場所/pretrained/f0D40k.pth`を読み込みます。 -- 音高ガイドを考慮しない場合、`RVCのある場所/pretrained/G40k.pth`と`RVCのある場所/pretrained/D40k.pth`を読み込みます。 - -学習時はsave_every_epochごとにモデルのパラメータが`logs/実験名/G_{}.pth`と`logs/実験名/D_{}.pth`に保存されますが、このパスを指定することで学習を再開したり、もしくは違う実験で学習したモデルの重みから学習を開始できます。 - -### indexの学習 -RVCでは学習時に使われたHuBERTの特徴量を保存し、推論時は学習時の特徴量から近い特徴量を探してきて推論を行います。この検索を高速に行うために事前にindexの学習を行います。 -indexの学習には近似近傍探索ライブラリのfaissを用います。`/logs/実験名/3_feature256`の特徴量を読み込み、それを用いて学習したindexを`/logs/実験名/add_XXX.index`として保存します。 -(20230428updateよりtotal_fea.npyはindexから読み込むので不要になりました。) - -### ボタンの説明 -- モデルのトレーニング: step2bまでを実行した後、このボタンを押すとモデルの学習を行います。 -- 特徴インデックスのトレーニング: モデルのトレーニング後、indexの学習を行います。 -- ワンクリックトレーニング: step2bまでとモデルのトレーニング、特徴インデックスのトレーニングを一括で行います。 - diff --git a/docs/training_tips_ko.md b/docs/training_tips_ko.md deleted file mode 100644 index 8b3b62458..000000000 --- a/docs/training_tips_ko.md +++ /dev/null @@ -1,53 +0,0 @@ -RVC 훈련에 대한 설명과 팁들 -====================================== -본 팁에서는 어떻게 데이터 훈련이 이루어지고 있는지 설명합니다. - -# 훈련의 흐름 -GUI의 훈련 탭의 단계를 따라 설명합니다. - -## step1 -실험 이름을 지정합니다. 또한, 모델이 피치(소리의 높낮이)를 고려해야 하는지 여부를 여기에서 설정할 수도 있습니다.. -각 실험을 위한 데이터는 `/logs/experiment name/`에 배치됩니다.. - -## step2a -음성 파일을 불러오고 전처리합니다. - -### 음성 파일 불러오기 -음성 파일이 있는 폴더를 지정하면 해당 폴더에 있는 음성 파일이 자동으로 가져와집니다. -예를 들어 `C:Users\hoge\voices`를 지정하면 `C:Users\hoge\voices\voice.mp3`가 읽히지만 `C:Users\hoge\voices\dir\voice.mp3`는 읽히지 않습니다. - -음성 로드에는 내부적으로 ffmpeg를 이용하고 있으므로, ffmpeg로 대응하고 있는 확장자라면 자동적으로 읽힙니다. -ffmpeg에서 int16으로 변환한 후 float32로 변환하고 -1과 1 사이에 정규화됩니다. - -### 잡음 제거 -음성 파일에 대해 scipy의 filtfilt를 이용하여 잡음을 처리합니다. - -### 음성 분할 -입력한 음성 파일은 먼저 일정 기간(max_sil_kept=5초?)보다 길게 무음이 지속되는 부분을 감지하여 음성을 분할합니다.무음으로 음성을 분할한 후에는 0.3초의 overlap을 포함하여 4초마다 음성을 분할합니다.4초 이내에 구분된 음성은 음량의 정규화를 실시한 후 wav 파일을 `/logs/실험명/0_gt_wavs`로, 거기에서 16k의 샘플링 레이트로 변환해 `/logs/실험명/1_16k_wavs`에 wav 파일로 저장합니다. - -## step2b -### 피치 추출 -wav 파일에서 피치(소리의 높낮이) 정보를 추출합니다. parselmouth나 pyworld에 내장되어 있는 메서드으로 피치 정보(=f0)를 추출해, `/logs/실험명/2a_f0`에 저장합니다. 그 후 피치 정보를 로그로 변환하여 1~255 정수로 변환하고 `/logs/실험명/2b-f0nsf`에 저장합니다. - -### feature_print 추출 -HuBERT를 이용하여 wav 파일을 미리 embedding으로 변환합니다. `/logs/실험명/1_16k_wavs`에 저장한 wav 파일을 읽고 HuBERT에서 wav 파일을 256차원 feature들로 변환한 후 npy 형식으로 `/logs/실험명/3_feature256`에 저장합니다. - -## step3 -모델의 훈련을 진행합니다. - -### 초보자용 용어 해설 -심층학습(딥러닝)에서는 데이터셋을 분할하여 조금씩 학습을 진행합니다.한 번의 모델 업데이트(step) 단계 당 batch_size개의 데이터를 탐색하여 예측과 오차를 수정합니다. 데이터셋 전부에 대해 이 작업을 한 번 수행하는 이를 하나의 epoch라고 계산합니다. - -따라서 학습 시간은 단계당 학습 시간 x (데이터셋 내 데이터의 수 / batch size) x epoch 수가 소요됩니다. 일반적으로 batch size가 클수록 학습이 안정적이게 됩니다. (step당 학습 시간 ÷ batch size)는 작아지지만 GPU 메모리를 더 많이 사용합니다. GPU RAM은 nvidia-smi 명령어를 통해 확인할 수 있습니다. 실행 환경에 따라 배치 크기를 최대한 늘리면 짧은 시간 내에 학습이 가능합니다. - -### 사전 학습된 모델 지정 -RVC는 적은 데이터셋으로도 훈련이 가능하도록 사전 훈련된 가중치에서 모델 훈련을 시작합니다. 기본적으로 `rvc-location/pretrained/f0G40k.pth` 및 `rvc-location/pretrained/f0D40k.pth`를 불러옵니다. 학습을 할 시에, 모델 파라미터는 각 save_every_epoch별로 `logs/experiment name/G_{}.pth` 와 `logs/experiment name/D_{}.pth`로 저장이 되는데, 이 경로를 지정함으로써 학습을 재개하거나, 다른 실험에서 학습한 모델의 가중치에서 학습을 시작할 수 있습니다. - -### index의 학습 -RVC에서는 학습시에 사용된 HuBERT의 feature값을 저장하고, 추론 시에는 학습 시 사용한 feature값과 유사한 feature 값을 탐색해 추론을 진행합니다. 이 탐색을 고속으로 수행하기 위해 사전에 index을 학습하게 됩니다. -Index 학습에는 근사 근접 탐색법 라이브러리인 Faiss를 사용하게 됩니다. `/logs/실험명/3_feature256`의 feature값을 불러와, 이를 모두 결합시킨 feature값을 `/logs/실험명/total_fea.npy`로서 저장, 그것을 사용해 학습한 index를`/logs/실험명/add_XXX.index`로 저장합니다. - -### 버튼 설명 -- モデルのトレーニング (모델 학습): step2b까지 실행한 후, 이 버튼을 눌러 모델을 학습합니다. -- 特徴インデックスのトレーニング (특징 지수 훈련): 모델의 훈련 후, index를 학습합니다. 
-- ワンクリックトレーニング (원클릭 트레이닝): step2b까지의 모델 훈련, feature index 훈련을 일괄로 실시합니다. \ No newline at end of file diff --git "a/docs/\345\260\217\347\231\275\347\256\200\346\230\223\346\225\231\347\250\213.doc" "b/docs/\345\260\217\347\231\275\347\256\200\346\230\223\346\225\231\347\250\213.doc" deleted file mode 100644 index 2e2918913..000000000 Binary files "a/docs/\345\260\217\347\231\275\347\256\200\346\230\223\346\225\231\347\250\213.doc" and /dev/null differ diff --git a/easy_infer.py b/easy_infer.py deleted file mode 100644 index 9f5f94f29..000000000 --- a/easy_infer.py +++ /dev/null @@ -1,1503 +0,0 @@ -import subprocess -import os -import sys -import errno -import shutil -import yt_dlp -from mega import Mega -import datetime -import unicodedata -import torch -import glob -import gradio as gr -import gdown -import zipfile -import traceback -import json -import requests -import wget -import ffmpeg -import hashlib -now_dir = os.getcwd() -sys.path.append(now_dir) -from unidecode import unidecode -import re -import time -from lib.infer_pack.models_onnx import SynthesizerTrnMsNSFsidM -from vc_infer_pipeline import VC -from lib.infer_pack.models import ( - SynthesizerTrnMs256NSFsid, - SynthesizerTrnMs256NSFsid_nono, - SynthesizerTrnMs768NSFsid, - SynthesizerTrnMs768NSFsid_nono, -) -from MDXNet import MDXNetDereverb -from config import Config -from infer_uvr5 import _audio_pre_, _audio_pre_new -from huggingface_hub import HfApi, list_models -from huggingface_hub import login -from i18n import I18nAuto -i18n = I18nAuto() -from bs4 import BeautifulSoup -from sklearn.cluster import MiniBatchKMeans - -config = Config() -tmp = os.path.join(now_dir, "TEMP") -shutil.rmtree(tmp, ignore_errors=True) -os.environ["TEMP"] = tmp -weight_root = "weights" -weight_uvr5_root = "uvr5_weights" -index_root = "./logs/" -audio_root = "audios" -names = [] -for name in os.listdir(weight_root): - if name.endswith(".pth"): - names.append(name) -index_paths = [] - -global indexes_list -indexes_list = [] - -audio_paths = [] -for root, dirs, files in os.walk(index_root, topdown=False): - for name in files: - if name.endswith(".index") and "trained" not in name: - index_paths.append("%s\\%s" % (root, name)) - -for root, dirs, files in os.walk(audio_root, topdown=False): - for name in files: - audio_paths.append("%s/%s" % (root, name)) - -uvr5_names = [] -for name in os.listdir(weight_uvr5_root): - if name.endswith(".pth") or "onnx" in name: - uvr5_names.append(name.replace(".pth", "")) - -def calculate_md5(file_path): - hash_md5 = hashlib.md5() - with open(file_path, "rb") as f: - for chunk in iter(lambda: f.read(4096), b""): - hash_md5.update(chunk) - return hash_md5.hexdigest() - -def silentremove(filename): - try: - os.remove(filename) - except OSError as e: # this would be "except OSError, e:" before Python 2.6 - if e.errno != errno.ENOENT: # errno.ENOENT = no such file or directory - raise # re-raise exception if a different error occurred -def get_md5(temp_folder): - for root, subfolders, files in os.walk(temp_folder): - for file in files: - if not file.startswith("G_") and not file.startswith("D_") and file.endswith(".pth") and not "_G_" in file and not "_D_" in file: - md5_hash = calculate_md5(os.path.join(root, file)) - return md5_hash - - return None - -def find_parent(search_dir, file_name): - for dirpath, dirnames, filenames in os.walk(search_dir): - if file_name in filenames: - return os.path.abspath(dirpath) - return None - -def find_folder_parent(search_dir, folder_name): - for dirpath, dirnames, filenames in 
os.walk(search_dir): - if folder_name in dirnames: - return os.path.abspath(dirpath) - return None - -def get_drive_folder_id(url): - if "drive.google.com" in url: - if "file/d/" in url: - file_id = url.split("file/d/")[1].split("/")[0] - elif "id=" in url: - file_id = url.split("id=")[1].split("&")[0] - else: - return None - -def download_from_url(url): - parent_path = find_folder_parent(".", "pretrained_v2") - zips_path = os.path.join(parent_path, 'zips') - - if url != '': - print(i18n("下载文件:") + f"{url}") - if "drive.google.com" in url: - if "file/d/" in url: - file_id = url.split("file/d/")[1].split("/")[0] - elif "id=" in url: - file_id = url.split("id=")[1].split("&")[0] - else: - return None - - if file_id: - os.chdir('./zips') - result = subprocess.run(["gdown", f"https://drive.google.com/uc?id={file_id}", "--fuzzy"], capture_output=True, text=True, encoding='utf-8') - if "Too many users have viewed or downloaded this file recently" in str(result.stderr): - return "demasiado uso" - if "Cannot retrieve the public link of the file." in str(result.stderr): - return "link privado" - print(result.stderr) - - elif "/blob/" in url: - os.chdir('./zips') - url = url.replace("blob", "resolve") - # print("Resolved URL:", url) # Print the resolved URL - wget.download(url) - elif "mega.nz" in url: - if "#!" in url: - file_id = url.split("#!")[1].split("!")[0] - elif "file/" in url: - file_id = url.split("file/")[1].split("/")[0] - else: - return None - if file_id: - m = Mega() - m.download_url(url, zips_path) - elif "/tree/main" in url: - response = requests.get(url) - soup = BeautifulSoup(response.content, 'html.parser') - temp_url = '' - for link in soup.find_all('a', href=True): - if link['href'].endswith('.zip'): - temp_url = link['href'] - break - if temp_url: - url = temp_url - # print("Updated URL:", url) # Print the updated URL - url = url.replace("blob", "resolve") - # print("Resolved URL:", url) # Print the resolved URL - - if "huggingface.co" not in url: - url = "https://huggingface.co" + url - - wget.download(url) - else: - print("No .zip file found on the page.") - # Handle the case when no .zip file is found - else: - os.chdir('./zips') - wget.download(url) - - os.chdir(parent_path) - print(i18n("完整下载")) - return "downloaded" - else: - return None - -class error_message(Exception): - def __init__(self, mensaje): - self.mensaje = mensaje - super().__init__(mensaje) - -# 一个选项卡全局只能有一个音色 -def get_vc(sid, to_return_protect0, to_return_protect1): - global n_spk, tgt_sr, net_g, vc, cpt, version - if sid == "" or sid == []: - global hubert_model - if hubert_model is not None: # 考虑到轮询, 需要加个判断看是否 sid 是由有模型切换到无模型的 - print("clean_empty_cache") - del net_g, n_spk, vc, hubert_model, tgt_sr # ,cpt - hubert_model = net_g = n_spk = vc = hubert_model = tgt_sr = None - if torch.cuda.is_available(): - torch.cuda.empty_cache() - ###楼下不这么折腾清理不干净 - if_f0 = cpt.get("f0", 1) - version = cpt.get("version", "v1") - if version == "v1": - if if_f0 == 1: - net_g = SynthesizerTrnMs256NSFsid( - *cpt["config"], is_half=config.is_half - ) - else: - net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"]) - elif version == "v2": - if if_f0 == 1: - net_g = SynthesizerTrnMs768NSFsid( - *cpt["config"], is_half=config.is_half - ) - else: - net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"]) - del net_g, cpt - if torch.cuda.is_available(): - torch.cuda.empty_cache() - cpt = None - return ( - {"visible": False, "__type__": "update"}, - {"visible": False, "__type__": "update"}, - {"visible": False, "__type__": "update"}, 
- ) - person = "%s/%s" % (weight_root, sid) - print("loading %s" % person) - cpt = torch.load(person, map_location="cpu") - tgt_sr = cpt["config"][-1] - cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] # n_spk - if_f0 = cpt.get("f0", 1) - if if_f0 == 0: - to_return_protect0 = to_return_protect1 = { - "visible": False, - "value": 0.5, - "__type__": "update", - } - else: - to_return_protect0 = { - "visible": True, - "value": to_return_protect0, - "__type__": "update", - } - to_return_protect1 = { - "visible": True, - "value": to_return_protect1, - "__type__": "update", - } - version = cpt.get("version", "v1") - if version == "v1": - if if_f0 == 1: - net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half) - else: - net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"]) - elif version == "v2": - if if_f0 == 1: - net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=config.is_half) - else: - net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"]) - del net_g.enc_q - print(net_g.load_state_dict(cpt["weight"], strict=False)) - net_g.eval().to(config.device) - if config.is_half: - net_g = net_g.half() - else: - net_g = net_g.float() - vc = VC(tgt_sr, config) - n_spk = cpt["config"][-3] - return ( - {"visible": True, "maximum": n_spk, "__type__": "update"}, - to_return_protect0, - to_return_protect1, - ) - -def load_downloaded_model(url): - parent_path = find_folder_parent(".", "pretrained_v2") - try: - infos = [] - logs_folders = ['0_gt_wavs','1_16k_wavs','2a_f0','2b-f0nsf','3_feature256','3_feature768'] - zips_path = os.path.join(parent_path, 'zips') - unzips_path = os.path.join(parent_path, 'unzips') - weights_path = os.path.join(parent_path, 'weights') - logs_dir = "" - - if os.path.exists(zips_path): - shutil.rmtree(zips_path) - if os.path.exists(unzips_path): - shutil.rmtree(unzips_path) - - os.mkdir(zips_path) - os.mkdir(unzips_path) - - download_file = download_from_url(url) - if not download_file: - print(i18n("无法下载模型。")) - infos.append(i18n("无法下载模型。")) - yield "\n".join(infos) - elif download_file == "downloaded": - print(i18n("模型下载成功。")) - infos.append(i18n("模型下载成功。")) - yield "\n".join(infos) - elif download_file == "demasiado uso": - raise Exception(i18n("最近查看或下载此文件的用户过多")) - elif download_file == "link privado": - raise Exception(i18n("无法从该私人链接获取文件")) - - # Descomprimir archivos descargados - for filename in os.listdir(zips_path): - if filename.endswith(".zip"): - zipfile_path = os.path.join(zips_path,filename) - print(i18n("继续提取...")) - infos.append(i18n("继续提取...")) - shutil.unpack_archive(zipfile_path, unzips_path, 'zip') - model_name = os.path.basename(zipfile_path) - logs_dir = os.path.join(parent_path,'logs', os.path.normpath(str(model_name).replace(".zip",""))) - yield "\n".join(infos) - else: - print(i18n("解压缩出错。")) - infos.append(i18n("解压缩出错。")) - yield "\n".join(infos) - - index_file = False - model_file = False - D_file = False - G_file = False - - # Copiar archivo pth - for path, subdirs, files in os.walk(unzips_path): - for item in files: - item_path = os.path.join(path, item) - if not 'G_' in item and not 'D_' in item and item.endswith('.pth'): - model_file = True - model_name = item.replace(".pth","") - logs_dir = os.path.join(parent_path,'logs', model_name) - if os.path.exists(logs_dir): - shutil.rmtree(logs_dir) - os.mkdir(logs_dir) - if not os.path.exists(weights_path): - os.mkdir(weights_path) - if os.path.exists(os.path.join(weights_path, item)): - os.remove(os.path.join(weights_path, item)) - if os.path.exists(item_path): - 
shutil.move(item_path, weights_path) - - if not model_file and not os.path.exists(logs_dir): - os.mkdir(logs_dir) - # Copiar index - for path, subdirs, files in os.walk(unzips_path): - for item in files: - item_path = os.path.join(path, item) - if item.startswith('added_') and item.endswith('.index'): - index_file = True - if os.path.exists(item_path): - if os.path.exists(os.path.join(logs_dir, item)): - os.remove(os.path.join(logs_dir, item)) - shutil.move(item_path, logs_dir) - if item.startswith('total_fea.npy') or item.startswith('events.'): - if os.path.exists(item_path): - if os.path.exists(os.path.join(logs_dir, item)): - os.remove(os.path.join(logs_dir, item)) - shutil.move(item_path, logs_dir) - - - result = "" - if model_file: - if index_file: - print(i18n("该模型可用于推理,并有 .index 文件。")) - infos.append("\n" + i18n("该模型可用于推理,并有 .index 文件。")) - yield "\n".join(infos) - else: - print(i18n("该模型可用于推理,但没有 .index 文件。")) - infos.append("\n" + i18n("该模型可用于推理,但没有 .index 文件。")) - yield "\n".join(infos) - - if not index_file and not model_file: - print(i18n("未找到可上传的相关文件")) - infos.append(i18n("未找到可上传的相关文件")) - yield "\n".join(infos) - - if os.path.exists(zips_path): - shutil.rmtree(zips_path) - if os.path.exists(unzips_path): - shutil.rmtree(unzips_path) - os.chdir(parent_path) - return result - except Exception as e: - os.chdir(parent_path) - if "demasiado uso" in str(e): - print(i18n("最近查看或下载此文件的用户过多")) - yield i18n("最近查看或下载此文件的用户过多") - elif "link privado" in str(e): - print(i18n("无法从该私人链接获取文件")) - yield i18n("无法从该私人链接获取文件") - else: - print(e) - yield i18n("下载模型时发生错误。") - finally: - os.chdir(parent_path) - -def load_dowloaded_dataset(url): - parent_path = find_folder_parent(".", "pretrained_v2") - infos = [] - try: - zips_path = os.path.join(parent_path, 'zips') - unzips_path = os.path.join(parent_path, 'unzips') - datasets_path = os.path.join(parent_path, 'datasets') - audio_extenions =["flac","wav"] - - if os.path.exists(zips_path): - shutil.rmtree(zips_path) - if os.path.exists(unzips_path): - shutil.rmtree(unzips_path) - - if not os.path.exists(datasets_path): - os.mkdir(datasets_path) - - os.mkdir(zips_path) - os.mkdir(unzips_path) - - download_file = download_from_url(url) - - if not download_file: - print(i18n("下载模型时发生错误。")) - infos.append(i18n("下载模型时发生错误。")) - yield "\n".join(infos) - raise Exception(i18n("下载模型时发生错误。")) - elif download_file == "downloaded": - print(i18n("模型下载成功。")) - infos.append(i18n("模型下载成功。")) - yield "\n".join(infos) - elif download_file == "demasiado uso": - raise Exception(i18n("最近查看或下载此文件的用户过多")) - elif download_file == "link privado": - raise Exception(i18n("无法从该私人链接获取文件")) - - zip_path = os.listdir(zips_path) - foldername = "" - for file in zip_path: - if file.endswith('.zip'): - file_path = os.path.join(zips_path, file) - print("....") - foldername = file.replace(".zip","").replace(" ","").replace("-","_") - dataset_path = os.path.join(datasets_path, foldername) - print(i18n("继续提取...")) - infos.append(i18n("继续提取...")) - yield "\n".join(infos) - shutil.unpack_archive(file_path, unzips_path, 'zip') - if os.path.exists(dataset_path): - shutil.rmtree(dataset_path) - - os.mkdir(dataset_path) - - for root, subfolders, songs in os.walk(unzips_path): - for song in songs: - song_path = os.path.join(root, song) - if song.endswith(tuple(audio_extenions)): - shutil.move(song_path, dataset_path) - else: - print(i18n("解压缩出错。")) - infos.append(i18n("解压缩出错。")) - yield "\n".join(infos) - - - - if os.path.exists(zips_path): - shutil.rmtree(zips_path) - if 
os.path.exists(unzips_path): - shutil.rmtree(unzips_path) - - print(i18n("数据集加载成功。")) - infos.append(i18n("数据集加载成功。")) - yield "\n".join(infos) - except Exception as e: - os.chdir(parent_path) - if "demasiado uso" in str(e): - print(i18n("最近查看或下载此文件的用户过多")) - yield i18n("最近查看或下载此文件的用户过多") - elif "link privado" in str(e): - print(i18n("无法从该私人链接获取文件")) - yield i18n("无法从该私人链接获取文件") - else: - print(e) - yield i18n("下载模型时发生错误。") - finally: - os.chdir(parent_path) - -def save_model(modelname, save_action): - - parent_path = find_folder_parent(".", "pretrained_v2") - zips_path = os.path.join(parent_path, 'zips') - dst = os.path.join(zips_path,modelname) - logs_path = os.path.join(parent_path, 'logs', modelname) - weights_path = os.path.join(parent_path, 'weights', f"{modelname}.pth") - save_folder = parent_path - infos = [] - - try: - if not os.path.exists(logs_path): - raise Exception("No model found.") - - if not 'content' in parent_path: - save_folder = os.path.join(parent_path, 'RVC_Backup') - else: - save_folder = '/content/drive/MyDrive/RVC_Backup' - - infos.append(i18n("保存模型...")) - yield "\n".join(infos) - - # Si no existe el folder RVC para guardar los modelos - if not os.path.exists(save_folder): - os.mkdir(save_folder) - if not os.path.exists(os.path.join(save_folder, 'ManualTrainingBackup')): - os.mkdir(os.path.join(save_folder, 'ManualTrainingBackup')) - if not os.path.exists(os.path.join(save_folder, 'Finished')): - os.mkdir(os.path.join(save_folder, 'Finished')) - - # Si ya existe el folders zips borro su contenido por si acaso - if os.path.exists(zips_path): - shutil.rmtree(zips_path) - - os.mkdir(zips_path) - added_file = glob.glob(os.path.join(logs_path, "added_*.index")) - d_file = glob.glob(os.path.join(logs_path, "D_*.pth")) - g_file = glob.glob(os.path.join(logs_path, "G_*.pth")) - - if save_action == i18n("选择模型保存方法"): - raise Exception("No method choosen.") - - if save_action == i18n("保存所有"): - print(i18n("保存所有")) - save_folder = os.path.join(save_folder, 'ManualTrainingBackup') - shutil.copytree(logs_path, dst) - else: - # Si no existe el folder donde se va a comprimir el modelo - if not os.path.exists(dst): - os.mkdir(dst) - - if save_action == i18n("保存 D 和 G"): - print(i18n("保存 D 和 G")) - save_folder = os.path.join(save_folder, 'ManualTrainingBackup') - if len(d_file) > 0: - shutil.copy(d_file[0], dst) - if len(g_file) > 0: - shutil.copy(g_file[0], dst) - - if len(added_file) > 0: - shutil.copy(added_file[0], dst) - else: - infos.append(i18n("保存时未编制索引...")) - - if save_action == i18n("保存声音"): - print(i18n("保存声音")) - save_folder = os.path.join(save_folder, 'Finished') - if len(added_file) > 0: - shutil.copy(added_file[0], dst) - else: - infos.append(i18n("保存时未编制索引...")) - #raise gr.Error("¡No ha generado el archivo added_*.index!") - - yield "\n".join(infos) - # Si no existe el archivo del modelo no copiarlo - if not os.path.exists(weights_path): - infos.append(i18n("无模型保存(PTH)")) - #raise gr.Error("¡No ha generado el modelo pequeño!") - else: - shutil.copy(weights_path, dst) - - yield "\n".join(infos) - infos.append("\n" + i18n("这可能需要几分钟时间,请稍候...")) - yield "\n".join(infos) - - shutil.make_archive(os.path.join(zips_path,f"{modelname}"), 'zip', zips_path) - shutil.move(os.path.join(zips_path,f"{modelname}.zip"), os.path.join(save_folder, f'{modelname}.zip')) - - shutil.rmtree(zips_path) - #shutil.rmtree(zips_path) - - infos.append("\n" + i18n("正确存储模型")) - yield "\n".join(infos) - - except Exception as e: - print(e) - if "No model found." 
in str(e): - infos.append(i18n("您要保存的模型不存在,请确保输入的名称正确。")) - else: - infos.append(i18n("保存模型时发生错误")) - - yield "\n".join(infos) - -def load_downloaded_backup(url): - parent_path = find_folder_parent(".", "pretrained_v2") - try: - infos = [] - logs_folders = ['0_gt_wavs','1_16k_wavs','2a_f0','2b-f0nsf','3_feature256','3_feature768'] - zips_path = os.path.join(parent_path, 'zips') - unzips_path = os.path.join(parent_path, 'unzips') - weights_path = os.path.join(parent_path, 'weights') - logs_dir = os.path.join(parent_path, 'logs') - - if os.path.exists(zips_path): - shutil.rmtree(zips_path) - if os.path.exists(unzips_path): - shutil.rmtree(unzips_path) - - os.mkdir(zips_path) - os.mkdir(unzips_path) - - download_file = download_from_url(url) - if not download_file: - print(i18n("无法下载模型。")) - infos.append(i18n("无法下载模型。")) - yield "\n".join(infos) - elif download_file == "downloaded": - print(i18n("模型下载成功。")) - infos.append(i18n("模型下载成功。")) - yield "\n".join(infos) - elif download_file == "demasiado uso": - raise Exception(i18n("最近查看或下载此文件的用户过多")) - elif download_file == "link privado": - raise Exception(i18n("无法从该私人链接获取文件")) - - # Descomprimir archivos descargados - for filename in os.listdir(zips_path): - if filename.endswith(".zip"): - zipfile_path = os.path.join(zips_path,filename) - zip_dir_name = os.path.splitext(filename)[0] - unzip_dir = unzips_path - print(i18n("继续提取...")) - infos.append(i18n("继续提取...")) - shutil.unpack_archive(zipfile_path, unzip_dir, 'zip') - - if os.path.exists(os.path.join(unzip_dir, zip_dir_name)): - # Move the inner directory with the same name - shutil.move(os.path.join(unzip_dir, zip_dir_name), logs_dir) - else: - # Create a folder with the same name and move files - new_folder_path = os.path.join(logs_dir, zip_dir_name) - os.mkdir(new_folder_path) - for item_name in os.listdir(unzip_dir): - item_path = os.path.join(unzip_dir, item_name) - if os.path.isfile(item_path): - shutil.move(item_path, new_folder_path) - elif os.path.isdir(item_path): - shutil.move(item_path, new_folder_path) - - yield "\n".join(infos) - else: - print(i18n("解压缩出错。")) - infos.append(i18n("解压缩出错。")) - yield "\n".join(infos) - - result = "" - - for filename in os.listdir(unzips_path): - if filename.endswith(".zip"): - silentremove(filename) - - if os.path.exists(zips_path): - shutil.rmtree(zips_path) - if os.path.exists(os.path.join(parent_path, 'unzips')): - shutil.rmtree(os.path.join(parent_path, 'unzips')) - print(i18n("备份已成功上传。")) - infos.append("\n" + i18n("备份已成功上传。")) - yield "\n".join(infos) - os.chdir(parent_path) - return result - except Exception as e: - os.chdir(parent_path) - if "demasiado uso" in str(e): - print(i18n("最近查看或下载此文件的用户过多")) - yield i18n("最近查看或下载此文件的用户过多") - elif "link privado" in str(e): - print(i18n("无法从该私人链接获取文件")) - yield i18n("无法从该私人链接获取文件") - else: - print(e) - yield i18n("下载模型时发生错误。") - finally: - os.chdir(parent_path) - -def save_to_wav(record_button): - if record_button is None: - pass - else: - path_to_file=record_button - new_name = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")+'.wav' - new_path='./audios/'+new_name - shutil.move(path_to_file,new_path) - return new_name - -def save_to_wav2(dropbox): - file_path = dropbox.name - target_path = os.path.join('./audios', os.path.basename(file_path)) - - if os.path.exists(target_path): - os.remove(target_path) - # print('Replacing old dropdown file...') - - shutil.move(file_path, target_path) - return target_path - -def change_choices2(): - audio_paths=[] - for filename in os.listdir("./audios"): - if 
filename.endswith(('wav', 'mp3', 'flac', 'ogg', 'opus', - 'm4a', 'mp4', 'aac', 'alac', 'wma', - 'aiff', 'webm', 'ac3')): - audio_paths.append(os.path.join('./audios',filename).replace('\\', '/')) - return {"choices": sorted(audio_paths), "__type__": "update"}, {"__type__": "update"} - -def get_models_by_name(modelname): - url = "https://script.google.com/macros/s/AKfycbzyrdLZzUww9qbjxnbnI08budD4yxbmRPHkWbp3UEJ9h3Id5cnNNVg0UtfFAnqqX5Rr/exec" - - response = requests.post(url, json={ - 'type': 'search_by_filename', - 'filename': unidecode(modelname.strip().lower()) - }) - - response_json = response.json() - models = response_json['ocurrences'] - - result = [] - message = "Busqueda realizada" - if len(models) == 0: - message = "No se han encontrado resultados." - else: - message = f"Se han encontrado {len(models)} resultados para {modelname}" - - for i in range(20): - if i < len(models): - urls = models[i].get('url') - url = eval(urls)[0] - name = str(models[i].get('name')) - filename = str(models[i].get('filename')) if not name or name.strip() == "" else name - # Nombre - result.append( - { - "visible": True, - "value": str("### ") + filename, - "__type__": "update", - }) - # Url - result.append( - { - "visible": False, - "value": url, - "__type__": "update", - }) - # Boton - result.append({ - "visible": True, - "__type__": "update", - }) - - # Linea separadora - if i == len(models) - 1: - result.append({ - "visible": False, - "__type__": "update", - }) - else: - result.append({ - "visible": True, - "__type__": "update", - }) - - # Row - result.append( - { - "visible": True, - "__type__": "update", - }) - else: - # Nombre - result.append( - { - "visible": False, - "__type__": "update", - }) - # Url - result.append( - { - "visible": False, - "value": False, - "__type__": "update", - }) - # Boton - result.append({ - "visible": False, - "__type__": "update", - }) - # Linea - result.append({ - "visible": False, - "__type__": "update", - }) - # Row - result.append( - { - "visible": False, - "__type__": "update", - }) - # Result - result.append( - { - "value": message, - "__type__": "update", - } - ) - - return result - -def search_model(): - gr.Markdown(value="# Buscar un modelo") - with gr.Row(): - model_name = gr.inputs.Textbox(lines=1, label="Término de búsqueda") - search_model_button=gr.Button("Buscar modelo") - - models = [] - results = gr.Textbox(label="Resultado", value="", max_lines=20) - with gr.Row(visible=False) as row1: - l1 = gr.Markdown(value="", visible=False) - l1_url = gr.Textbox("Label 1", visible=False) - b1 = gr.Button("Cargar modelo", visible=False) - - mk1 = gr.Markdown(value="---", visible=False) - b1.click(fn=load_downloaded_model, inputs=l1_url, outputs=results) - - with gr.Row(visible=False) as row2: - l2 = gr.Markdown(value="", visible=False) - l2_url = gr.Textbox("Label 1", visible=False) - b2 = gr.Button("Cargar modelo", visible=False) - - mk2 = gr.Markdown(value="---", visible=False) - b2.click(fn=load_downloaded_model, inputs=l2_url, outputs=results) - - with gr.Row(visible=False) as row3: - l3 = gr.Markdown(value="", visible=False) - l3_url = gr.Textbox("Label 1", visible=False) - b3 = gr.Button("Cargar modelo", visible=False) - - mk3 = gr.Markdown(value="---", visible=False) - b3.click(fn=load_downloaded_model, inputs=l3_url, outputs=results) - - with gr.Row(visible=False) as row4: - l4 = gr.Markdown(value="", visible=False) - l4_url = gr.Textbox("Label 1", visible=False) - b4 = gr.Button("Cargar modelo", visible=False) - mk4 = gr.Markdown(value="---", 
visible=False) - b4.click(fn=load_downloaded_model, inputs=l4_url, outputs=results) - - with gr.Row(visible=False) as row5: - l5 = gr.Markdown(value="", visible=False) - l5_url = gr.Textbox("Label 1", visible=False) - b5 = gr.Button("Cargar modelo", visible=False) - - mk5 = gr.Markdown(value="---", visible=False) - b5.click(fn=load_downloaded_model, inputs=l5_url, outputs=results) - - with gr.Row(visible=False) as row6: - l6 = gr.Markdown(value="", visible=False) - l6_url = gr.Textbox("Label 1", visible=False) - b6 = gr.Button("Cargar modelo", visible=False) - - mk6 = gr.Markdown(value="---", visible=False) - b6.click(fn=load_downloaded_model, inputs=l6_url, outputs=results) - - with gr.Row(visible=False) as row7: - l7 = gr.Markdown(value="", visible=False) - l7_url = gr.Textbox("Label 1", visible=False) - b7 = gr.Button("Cargar modelo", visible=False) - - mk7 = gr.Markdown(value="---", visible=False) - b7.click(fn=load_downloaded_model, inputs=l7_url, outputs=results) - - with gr.Row(visible=False) as row8: - l8 = gr.Markdown(value="", visible=False) - l8_url = gr.Textbox("Label 1", visible=False) - b8 = gr.Button("Cargar modelo", visible=False) - - mk8 = gr.Markdown(value="---", visible=False) - b8.click(fn=load_downloaded_model, inputs=l8_url, outputs=results) - - with gr.Row(visible=False) as row9: - l9 = gr.Markdown(value="", visible=False) - l9_url = gr.Textbox("Label 1", visible=False) - b9 = gr.Button("Cargar modelo", visible=False) - - mk9 = gr.Markdown(value="---", visible=False) - b9.click(fn=load_downloaded_model, inputs=l9_url, outputs=results) - - with gr.Row(visible=False) as row10: - l10 = gr.Markdown(value="", visible=False) - l10_url = gr.Textbox("Label 1", visible=False) - b10 = gr.Button("Cargar modelo", visible=False) - - mk10 = gr.Markdown(value="---", visible=False) - b10.click(fn=load_downloaded_model, inputs=l10_url, outputs=results) - - with gr.Row(visible=False) as row11: - l11 = gr.Markdown(value="", visible=False) - l11_url = gr.Textbox("Label 1", visible=False) - b11 = gr.Button("Cargar modelo", visible=False) - - mk11 = gr.Markdown(value="---", visible=False) - b11.click(fn=load_downloaded_model, inputs=l11_url, outputs=results) - - with gr.Row(visible=False) as row12: - l12 = gr.Markdown(value="", visible=False) - l12_url = gr.Textbox("Label 1", visible=False) - b12 = gr.Button("Cargar modelo", visible=False) - - mk12 = gr.Markdown(value="---", visible=False) - b12.click(fn=load_downloaded_model, inputs=l12_url, outputs=results) - - with gr.Row(visible=False) as row13: - l13 = gr.Markdown(value="", visible=False) - l13_url = gr.Textbox("Label 1", visible=False) - b13 = gr.Button("Cargar modelo", visible=False) - - mk13 = gr.Markdown(value="---", visible=False) - b13.click(fn=load_downloaded_model, inputs=l13_url, outputs=results) - - with gr.Row(visible=False) as row14: - l14 = gr.Markdown(value="", visible=False) - l14_url = gr.Textbox("Label 1", visible=False) - b14 = gr.Button("Cargar modelo", visible=False) - - mk14 = gr.Markdown(value="---", visible=False) - b14.click(fn=load_downloaded_model, inputs=l14_url, outputs=results) - - with gr.Row(visible=False) as row15: - l15 = gr.Markdown(value="", visible=False) - l15_url = gr.Textbox("Label 1", visible=False) - b15 = gr.Button("Cargar modelo", visible=False) - - mk15 = gr.Markdown(value="---", visible=False) - b15.click(fn=load_downloaded_model, inputs=l15_url, outputs=results) - - with gr.Row(visible=False) as row16: - l16 = gr.Markdown(value="", visible=False) - l16_url = gr.Textbox("Label 1", 
visible=False) - b16 = gr.Button("Cargar modelo", visible=False) - - mk16 = gr.Markdown(value="---", visible=False) - b16.click(fn=load_downloaded_model, inputs=l16_url, outputs=results) - - with gr.Row(visible=False) as row17: - l17 = gr.Markdown(value="", visible=False) - l17_url = gr.Textbox("Label 1", visible=False) - b17 = gr.Button("Cargar modelo", visible=False) - - mk17 = gr.Markdown(value="---", visible=False) - b17.click(fn=load_downloaded_model, inputs=l17_url, outputs=results) - - with gr.Row(visible=False) as row18: - l18 = gr.Markdown(value="", visible=False) - l18_url = gr.Textbox("Label 1", visible=False) - b18 = gr.Button("Cargar modelo", visible=False) - - mk18 = gr.Markdown(value="---", visible=False) - b18.click(fn=load_downloaded_model, inputs=l18_url, outputs=results) - - with gr.Row(visible=False) as row19: - l19 = gr.Markdown(value="", visible=False) - l19_url = gr.Textbox("Label 1", visible=False) - b19 = gr.Button("Cargar modelo", visible=False) - - mk19 = gr.Markdown(value="---", visible=False) - b19.click(fn=load_downloaded_model, inputs=l19_url, outputs=results) - - with gr.Row(visible=False) as row20: - l20 = gr.Markdown(value="", visible=False) - l20_url = gr.Textbox("Label 1", visible=False) - b20 = gr.Button("Cargar modelo", visible=False) - - mk20 = gr.Markdown(value="---", visible=False) - b20.click(fn=load_downloaded_model, inputs=l20_url, outputs=results) - - # to_return_protect1 = - - search_model_button.click(fn=get_models_by_name, inputs=model_name, outputs=[l1,l1_url, b1, mk1, row1, - l2,l2_url, b2, mk2, row2, - l3,l3_url, b3, mk3, row3, - l4,l4_url, b4, mk4, row4, - l5,l5_url, b5, mk5, row5, - l6,l6_url, b6, mk6, row6, - l7,l7_url, b7, mk7, row7, - l8,l8_url, b8, mk8, row8, - l9,l9_url, b9, mk9, row9, - l10,l10_url, b10, mk10, row10, - l11,l11_url, b11, mk11, row11, - l12,l12_url, b12, mk12, row12, - l13,l13_url, b13, mk13, row13, - l14,l14_url, b14, mk14, row14, - l15,l15_url, b15, mk15, row15, - l16,l16_url, b16, mk16, row16, - l17,l17_url, b17, mk17, row17, - l18,l18_url, b18, mk18, row18, - l19,l19_url, b19, mk19, row19, - l20,l20_url, b20, mk20, row20, - results - ]) - - -def descargar_desde_drive(url, name, output_file): - - print(f"Descargando {name} de drive") - - try: - downloaded_file = gdown.download(url, output=output_file, fuzzy=True) - return downloaded_file - except: - print("El intento de descargar con drive no funcionó") - return None - -def descargar_desde_mega(url, name): - response = False - try: - file_id = None - - if "#!" 
in url: - file_id = url.split("#!")[1].split("!")[0] - elif "file/" in url: - file_id = url.split("file/")[1].split("/")[0] - else: - file_id = None - - if file_id: - mega = Mega() - m = mega.login() - - print(f"Descargando {name} de mega") - downloaded_file = m.download_url(url) - - return downloaded_file - else: - return None - - except Exception as e: - print("Error**") - print(e) - return None - -def descargar_desde_url_basica(url, name, output_file): - try: - print(f"Descargando {name} de URL BASICA") - filename = wget.download(url=url, out=output_file) - return filename - except Exception as e: - print(f"Error al descargar el archivo: {str(e)}") - -def is_valid_model(name): - parent_path = find_folder_parent(".", "pretrained_v2") - unzips_path = os.path.join(parent_path, 'unzips') - - response = [] - file_path = os.path.join(unzips_path, name) - - has_model = False - has_index = False - - for root, subfolders, files in os.walk(file_path): - for file in files: - current_file_path = os.path.join(root, file) - if not file.startswith("G_") and not file.startswith("D_") and file.endswith(".pth") and not "_G_" in file and not "_D_" in file: - has_model = True - if file.startswith('added_') and file.endswith('.index'): - has_index = True - - #if has_model and has_index: - if has_index: - response.append(".index") - - if has_model: - response.append(".pth") - - return response - - -def create_zip(new_name): - - parent_path = find_folder_parent(".", "pretrained_v2") - temp_folder_path = os.path.join(parent_path, 'temp_models') - unzips_path = os.path.join(parent_path, 'unzips') - zips_path = os.path.join(parent_path, 'zips') - - file_path = os.path.join(unzips_path, new_name) - file_name = os.path.join(temp_folder_path, new_name) - - if not os.path.exists(zips_path): - os.mkdir(zips_path) - - if os.path.exists(file_name): - shutil.rmtree(file_name) - - os.mkdir(file_name) - - while not os.path.exists(file_name): - time.sleep(1) - - for root, subfolders, files in os.walk(file_path): - for file in files: - current_file_path = os.path.join(root, file) - if not file.startswith("G_") and not file.startswith("D_") and file.endswith(".pth") and not "_G_" in file and not "_D_" in file: - print(f'Copiando {current_file_path} a {os.path.join(temp_folder_path, new_name)}') - shutil.copy(current_file_path, file_name) - if file.startswith('added_') and file.endswith('.index'): - print(f'Copiando {current_file_path} a {os.path.join(temp_folder_path, new_name)}') - shutil.copy(current_file_path, file_name) - - print("Comprimiendo modelo") - zip_path = os.path.join(zips_path, new_name) - - print(f"Comprimiendo {file_name} en {zip_path}") - shutil.make_archive(zip_path, 'zip', file_name) - -def upload_to_huggingface(file_path, new_filename): - api = HfApi() - login(token="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") - api.upload_file( - path_or_fileobj=file_path, - path_in_repo=new_filename, - repo_id="juuxn/RVCModels", - repo_type="model", - ) - return f"https://huggingface.co/juuxn/RVCModels/resolve/main/{new_filename}" - - -def publish_model_clicked(model_name, model_url, model_version, model_creator): - - web_service_url = "https://script.google.com/macros/s/AKfycbzyrdLZzUww9qbjxnbnI08budD4yxbmRPHkWbp3UEJ9h3Id5cnNNVg0UtfFAnqqX5Rr/exec" - name = unidecode(model_name) - new_name = unidecode(name.strip().replace(" ","_").replace("'","")) - - downloaded_path = "" - url = model_url - version = model_version - creator = model_creator - parent_path = find_folder_parent(".", "pretrained_v2") - output_folder = 
os.path.join(parent_path, 'archivos_descargados') - output_file = os.path.join(output_folder, f'{new_name}.zip') - unzips_path = os.path.join(parent_path, 'unzips') - zips_path = os.path.join(parent_path, 'zips') - temp_folder_path = os.path.join(parent_path, 'temp_models') - - if os.path.exists(output_folder): - shutil.rmtree(output_folder) - os.mkdir(output_folder) - - if os.path.exists(temp_folder_path): - shutil.rmtree(temp_folder_path) - os.mkdir(temp_folder_path) - - - if url and 'drive.google.com' in url: - # Descargar el elemento si la URL es de Google Drive - downloaded_path = descargar_desde_drive(url, new_name, output_file) - elif url and 'mega.nz' in url: - downloaded_path = descargar_desde_mega(url, new_name, output_file) - elif url and 'pixeldrain' in url: - print("No se puede descargar de pixeldrain") - else: - downloaded_path = descargar_desde_url_basica(url, new_name, output_file) - - if not downloaded_path: - print(f"No se pudo descargar: {name}") - else: - filename = name.strip().replace(" ","_") - dst =f'{filename}.zip' - shutil.unpack_archive(downloaded_path, os.path.join(unzips_path, filename)) - md5_hash = get_md5(os.path.join(unzips_path, filename)) - - if not md5_hash: - print("No tiene modelo pequeño") - return - - md5_response_raw = requests.post(web_service_url, json={ - 'type': 'check_md5', - 'md5_hash': md5_hash - }) - - md5_response = md5_response_raw.json() - ok = md5_response["ok"] - exists = md5_response["exists"] - message = md5_response["message"] - - is_valid = is_valid_model(filename) - - if md5_hash and exists: - print(f"El archivo ya se ha publicado en spreadsheet con md5: {md5_hash}") - return f"El archivo ya se ha publicado con md5: {md5_hash}" - - if ".pth" in is_valid and not exists: - - create_zip(filename) - huggingface_url = upload_to_huggingface(os.path.join(zips_path,dst), dst) - - response = requests.post(web_service_url, json={ - 'type': 'save_model', - 'elements': [{ - 'name': name, - 'filename': filename, - 'url': [huggingface_url], - 'version': version, - 'creator': creator, - 'md5_hash': md5_hash, - 'content': is_valid - }]}) - - response_data = response.json() - ok = response_data["ok"] - message = response_data["message"] - - print({ - 'name': name, - 'filename': filename, - 'url': [huggingface_url], - 'version': version, - 'creator': creator, - 'md5_hash': md5_hash, - 'content': is_valid - }) - - if ok: - return f"El archivo se ha publicado con md5: {md5_hash}" - else: - print(message) - return message - - # Eliminar folder donde se decarga el modelo zip - if os.path.exists(output_folder): - shutil.rmtree(output_folder) - - # Eliminar folder de zips, donde se descomprimio el modelo descargado - if os.path.exists(unzips_path): - shutil.rmtree(unzips_path) - - # Eliminar folder donde se copiaron los archivos indispensables del modelo - if os.path.exists(temp_folder_path): - shutil.rmtree(temp_folder_path) - - # Eliminar folder donde se comprimio el modelo para enviarse a huggingface - if os.path.exists(zips_path): - shutil.rmtree(zips_path) - -def uvr(input_url, output_path, model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format0): - carpeta_a_eliminar = "yt_downloads" - if os.path.exists(carpeta_a_eliminar) and os.path.isdir(carpeta_a_eliminar): - # Eliminar todos los archivos en la carpeta - for archivo in os.listdir(carpeta_a_eliminar): - ruta_archivo = os.path.join(carpeta_a_eliminar, archivo) - if os.path.isfile(ruta_archivo): - os.remove(ruta_archivo) - elif os.path.isdir(ruta_archivo): - 
shutil.rmtree(ruta_archivo) # Eliminar subcarpetas recursivamente - - def format_title(title): - # Eliminar caracteres no alfanuméricos excepto espacios y guiones bajos - formatted_title = re.sub(r'[^\w\s-]', '', title) - # Reemplazar espacios por guiones bajos - formatted_title = formatted_title.replace(" ", "_") - return formatted_title - - ydl_opts = { - 'no-windows-filenames': True, - 'restrict-filenames': True, - 'extract_audio': True, - 'format': 'bestaudio', - 'quiet': True, - 'no-warnings': True, - } - - try: - print("Descargando audio del video...") - with yt_dlp.YoutubeDL(ydl_opts) as ydl: - info_dict = ydl.extract_info(input_url, download=False) - formatted_title = format_title(info_dict.get('title', 'default_title')) - formatted_outtmpl = output_path + '/' + formatted_title + '.wav' - ydl_opts['outtmpl'] = formatted_outtmpl - ydl = yt_dlp.YoutubeDL(ydl_opts) - ydl.download([input_url]) - print("Audio descargado!") - except Exception as error: - print("Ocurrió un error:", error) - - # Variables necesarias hechas para ser editables - actual_directory = os.path.dirname(__file__) # Ruta de destino del instrumental y su ruta definitiva - instrumental_source_directory = os.path.join(actual_directory, "wav") - instrumental_directory = os.path.join(actual_directory, "audio-others") - instrumental_formatted = f"instrument_{formatted_title}.wav.reformatted.wav_10.wav" # Resultado del instrumental al hacer la separación - instrumental_audio_path = os.path.join(instrumental_directory, instrumental_formatted) # Ruta de destino del instrumental y su ruta definitiva - old_instrumental_audio_path = os.path.join(instrumental_source_directory, instrumental_formatted) # Ruta anerior de destino del instrumental - format0 = "wav" - - infos = [] - pre_fun = None - try: - print("Separando audio...") - inp_root, save_root_vocal, save_root_ins = [x.strip(" ").strip('"').strip("\n").strip('"').strip(" ") if isinstance(x, str) else x for x in [inp_root, save_root_vocal, save_root_ins]] - if model_name == "onnx_dereverb_By_FoxJoy": - pre_fun = MDXNetDereverb(15) - else: - func = _audio_pre_ if "DeEcho" not in model_name else _audio_pre_new - pre_fun = func( - agg=10, - model_path=os.path.join(weight_uvr5_root, model_name + ".pth"), - device=config.device, - is_half=config.is_half, - ) - if inp_root != "": - paths = [os.path.join(inp_root, name) for name in os.listdir(inp_root)] - else: - paths = [path.name for path in paths] - for path in paths: - inp_path = os.path.join(inp_root, path) - need_reformat = 1 - done = 0 - try: - info = ffmpeg.probe(inp_path, cmd="ffprobe") - if ( - info["streams"][0]["channels"] == 2 - and info["streams"][0]["sample_rate"] == "44100" - ): - need_reformat = 0 - pre_fun._path_audio_( - inp_path, save_root_ins, save_root_vocal, format0 - ) - done = 1 - except: - need_reformat = 1 - traceback.print_exc() - if need_reformat == 1: - tmp_path = "%s/%s.reformatted.wav" % (tmp, os.path.basename(inp_path)) - os.system( - "ffmpeg -loglevel fatal -i %s -vn -acodec pcm_s16le -ac 2 -ar 44100 %s -y" - % (inp_path, tmp_path) - ) - inp_path = tmp_path - try: - if done == 0: - pre_fun._path_audio_( - inp_path, save_root_ins, save_root_vocal, format0 - ) - infos.append("%s->Success" % (os.path.basename(inp_path))) - yield "\n".join(infos) - except: - infos.append( - "%s->%s" % (os.path.basename(inp_path), traceback.format_exc()) - ) - yield "\n".join(infos) - except: - infos.append(traceback.format_exc()) - yield "\n".join(infos) - finally: - try: - if pre_fun is not None: # Verificar si 
pre_fun existe antes de eliminarlo - if model_name == "onnx_dereverb_By_FoxJoy": - del pre_fun.pred.model - del pre_fun.pred.model_ - else: - del pre_fun.model - del pre_fun - except: - traceback.print_exc() - print("clean_empty_cache") - if torch.cuda.is_available(): - torch.cuda.empty_cache() - yield "\n".join(infos) - - try: - # Verificar si el archivo de origen existe - if os.path.exists(old_instrumental_audio_path): - # Verificar si el directorio de destino existe, si no, crearlo - if not os.path.exists(instrumental_directory): - os.makedirs(instrumental_directory) - - # Mover el archivo al directorio destino - shutil.move(old_instrumental_audio_path, instrumental_audio_path) - print("Archivo movido exitosamente.") - print("Terminado!") - else: - print("El archivo de origen no existe.") - except Exception as e: - print("Error al mover el archivo:", e) - - - -def publish_models(): - with gr.Column(): - gr.Markdown("# Publicar un modelo en la comunidad") - gr.Markdown("El modelo se va a verificar antes de publicarse. Importante que contenga el archivo **.pth** del modelo para que no sea rechazado.") - - model_name = gr.inputs.Textbox(lines=1, label="Nombre descriptivo del modelo Ej: (Ben 10 [Latino] - RVC V2 - 250 Epoch)") - url = gr.inputs.Textbox(lines=1, label="Enlace del modelo") - moder_version = gr.Radio( - label="Versión", - choices=["RVC v1", "RVC v2"], - value="RVC v1", - interactive=True, - ) - model_creator = gr.inputs.Textbox(lines=1, label="ID de discord del creador del modelo Ej: <@123455656>") - publish_model_button=gr.Button("Publicar modelo") - results = gr.Textbox(label="Resultado", value="", max_lines=20) - - publish_model_button.click(fn=publish_model_clicked, inputs=[model_name, url, moder_version, model_creator], outputs=results) - -def download_model(): - gr.Markdown(value="# " + i18n("下载模型")) - gr.Markdown(value=i18n("它用于下载您的推理模型。")) - with gr.Row(): - model_url=gr.Textbox(label=i18n("网址")) - with gr.Row(): - download_model_status_bar=gr.Textbox(label=i18n("地位")) - with gr.Row(): - download_button=gr.Button(i18n("下载")) - download_button.click(fn=load_downloaded_model, inputs=[model_url], outputs=[download_model_status_bar]) - -def download_backup(): - gr.Markdown(value="# " + i18n("下载备份")) - gr.Markdown(value=i18n("它用于下载您的训练备份。")) - with gr.Row(): - model_url=gr.Textbox(label=i18n("网址")) - with gr.Row(): - download_model_status_bar=gr.Textbox(label=i18n("地位")) - with gr.Row(): - download_button=gr.Button(i18n("下载")) - download_button.click(fn=load_downloaded_backup, inputs=[model_url], outputs=[download_model_status_bar]) - -def update_dataset_list(name): - new_datasets = [] - for foldername in os.listdir("./datasets"): - if "." 
not in foldername: - new_datasets.append(os.path.join(find_folder_parent(".","pretrained"),"datasets",foldername)) - return gr.Dropdown.update(choices=new_datasets) - -def download_dataset(trainset_dir4): - gr.Markdown(value="# " + i18n("下载数据集")) - gr.Markdown(value=i18n("下载兼容格式(.wav/.flac)的音频数据集以训练模型。")) - with gr.Row(): - dataset_url=gr.Textbox(label=i18n("网址")) - with gr.Row(): - load_dataset_status_bar=gr.Textbox(label=i18n("地位")) - with gr.Row(): - load_dataset_button=gr.Button(i18n("下载")) - load_dataset_button.click(fn=load_dowloaded_dataset, inputs=[dataset_url], outputs=[load_dataset_status_bar]) - load_dataset_status_bar.change(update_dataset_list, dataset_url, trainset_dir4) - -def youtube_separator(): - gr.Markdown(value="# " + i18n("单独的 YouTube 曲目")) - gr.Markdown(value=i18n("下载 YouTube 视频的音频并自动分离声音和伴奏轨道")) - with gr.Row(): - input_url = gr.inputs.Textbox(label=i18n("粘贴 YouTube 链接")) - output_path = gr.Textbox( - label=i18n("输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)"), - value=os.path.abspath(os.getcwd()).replace('\\', '/') + "/yt_downloads", - visible=False, - ) - save_root_ins = gr.Textbox( - label=i18n("输入待处理音频文件夹路径"), - value=((os.getcwd()).replace('\\', '/') + "/yt_downloads"), - visible=False, - ) - model_choose = gr.Textbox( - value=os.path.abspath(os.getcwd()).replace('\\', '/') + "/uvr5_weights/HP5_only_main_vocal", - visible=False, - ) - save_root_vocal = gr.Textbox( - label=i18n("指定输出主人声文件夹"), value="audios", - visible=False, - ) - opt_ins_root = gr.Textbox( - label=i18n("指定输出非主人声文件夹"), value="opt", - visible=False, - ) - format0 = gr.Radio( - label=i18n("导出文件格式"), - choices=["wav", "flac", "mp3", "m4a"], - value="wav", - interactive=True, - visible=False, - ) - with gr.Row(): - vc_output4 = gr.Textbox(label=i18n("地位")) - with gr.Row(): - but2 = gr.Button(i18n("下载并分离")) - but2.click( - uvr, - [ - input_url, - output_path, - model_choose, - save_root_ins, - save_root_vocal, - opt_ins_root, - format0, - ], - [vc_output4], - ) diff --git a/environment_dml.yaml b/environment_dml.yaml deleted file mode 100644 index 0fb3f2225..000000000 --- a/environment_dml.yaml +++ /dev/null @@ -1,186 +0,0 @@ -name: pydml -channels: - - pytorch - - https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main - - https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main/ - - https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free/ - - https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/conda-forge/ - - defaults - - https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/fastai/ - - https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/pytorch/ - - https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/bioconda/ -dependencies: - - abseil-cpp=20211102.0=hd77b12b_0 - - absl-py=1.3.0=py310haa95532_0 - - aiohttp=3.8.3=py310h2bbff1b_0 - - aiosignal=1.2.0=pyhd3eb1b0_0 - - async-timeout=4.0.2=py310haa95532_0 - - attrs=22.1.0=py310haa95532_0 - - blas=1.0=mkl - - blinker=1.4=py310haa95532_0 - - bottleneck=1.3.5=py310h9128911_0 - - brotli=1.0.9=h2bbff1b_7 - - brotli-bin=1.0.9=h2bbff1b_7 - - brotlipy=0.7.0=py310h2bbff1b_1002 - - bzip2=1.0.8=he774522_0 - - c-ares=1.19.0=h2bbff1b_0 - - ca-certificates=2023.05.30=haa95532_0 - - cachetools=4.2.2=pyhd3eb1b0_0 - - certifi=2023.5.7=py310haa95532_0 - - cffi=1.15.1=py310h2bbff1b_3 - - charset-normalizer=2.0.4=pyhd3eb1b0_0 - - click=8.0.4=py310haa95532_0 - - colorama=0.4.6=py310haa95532_0 - - contourpy=1.0.5=py310h59b6b97_0 - - cryptography=39.0.1=py310h21b164f_0 - - cycler=0.11.0=pyhd3eb1b0_0 - - fonttools=4.25.0=pyhd3eb1b0_0 - - freetype=2.12.1=ha860e81_0 - - 
frozenlist=1.3.3=py310h2bbff1b_0 - - giflib=5.2.1=h8cc25b3_3 - - glib=2.69.1=h5dc1a3c_2 - - google-auth=2.6.0=pyhd3eb1b0_0 - - google-auth-oauthlib=0.4.4=pyhd3eb1b0_0 - - grpc-cpp=1.48.2=hf108199_0 - - grpcio=1.48.2=py310hf108199_0 - - gst-plugins-base=1.18.5=h9e645db_0 - - gstreamer=1.18.5=hd78058f_0 - - icu=58.2=ha925a31_3 - - idna=3.4=py310haa95532_0 - - intel-openmp=2023.1.0=h59b6b97_46319 - - jpeg=9e=h2bbff1b_1 - - kiwisolver=1.4.4=py310hd77b12b_0 - - krb5=1.19.4=h5b6d351_0 - - lerc=3.0=hd77b12b_0 - - libbrotlicommon=1.0.9=h2bbff1b_7 - - libbrotlidec=1.0.9=h2bbff1b_7 - - libbrotlienc=1.0.9=h2bbff1b_7 - - libclang=14.0.6=default_hb5a9fac_1 - - libclang13=14.0.6=default_h8e68704_1 - - libdeflate=1.17=h2bbff1b_0 - - libffi=3.4.4=hd77b12b_0 - - libiconv=1.16=h2bbff1b_2 - - libogg=1.3.5=h2bbff1b_1 - - libpng=1.6.39=h8cc25b3_0 - - libprotobuf=3.20.3=h23ce68f_0 - - libtiff=4.5.0=h6c2663c_2 - - libuv=1.44.2=h2bbff1b_0 - - libvorbis=1.3.7=he774522_0 - - libwebp=1.2.4=hbc33d0d_1 - - libwebp-base=1.2.4=h2bbff1b_1 - - libxml2=2.10.3=h0ad7f3c_0 - - libxslt=1.1.37=h2bbff1b_0 - - lz4-c=1.9.4=h2bbff1b_0 - - markdown=3.4.1=py310haa95532_0 - - markupsafe=2.1.1=py310h2bbff1b_0 - - matplotlib=3.7.1=py310haa95532_1 - - matplotlib-base=3.7.1=py310h4ed8f06_1 - - mkl=2023.1.0=h8bd8f75_46356 - - mkl-service=2.4.0=py310h2bbff1b_1 - - mkl_fft=1.3.6=py310h4ed8f06_1 - - mkl_random=1.2.2=py310h4ed8f06_1 - - multidict=6.0.2=py310h2bbff1b_0 - - munkres=1.1.4=py_0 - - numexpr=2.8.4=py310h2cd9be0_1 - - numpy=1.24.3=py310h055cbcc_1 - - numpy-base=1.24.3=py310h65a83cf_1 - - oauthlib=3.2.2=py310haa95532_0 - - openssl=1.1.1t=h2bbff1b_0 - - packaging=23.0=py310haa95532_0 - - pandas=1.5.3=py310h4ed8f06_0 - - pcre=8.45=hd77b12b_0 - - pillow=9.4.0=py310hd77b12b_0 - - pip=23.0.1=py310haa95532_0 - - ply=3.11=py310haa95532_0 - - protobuf=3.20.3=py310hd77b12b_0 - - pyasn1=0.4.8=pyhd3eb1b0_0 - - pyasn1-modules=0.2.8=py_0 - - pycparser=2.21=pyhd3eb1b0_0 - - pyjwt=2.4.0=py310haa95532_0 - - pyopenssl=23.0.0=py310haa95532_0 - - pyparsing=3.0.9=py310haa95532_0 - - pyqt=5.15.7=py310hd77b12b_0 - - pyqt5-sip=12.11.0=py310hd77b12b_0 - - pysocks=1.7.1=py310haa95532_0 - - python=3.10.11=h966fe2a_2 - - python-dateutil=2.8.2=pyhd3eb1b0_0 - - pytorch-mutex=1.0=cpu - - pytz=2022.7=py310haa95532_0 - - pyyaml=6.0=py310h2bbff1b_1 - - qt-main=5.15.2=he8e5bd7_8 - - qt-webengine=5.15.9=hb9a9bb5_5 - - qtwebkit=5.212=h2bbfb41_5 - - re2=2022.04.01=hd77b12b_0 - - requests=2.29.0=py310haa95532_0 - - requests-oauthlib=1.3.0=py_0 - - rsa=4.7.2=pyhd3eb1b0_1 - - setuptools=67.8.0=py310haa95532_0 - - sip=6.6.2=py310hd77b12b_0 - - six=1.16.0=pyhd3eb1b0_1 - - sqlite=3.41.2=h2bbff1b_0 - - tbb=2021.8.0=h59b6b97_0 - - tensorboard=2.10.0=py310haa95532_0 - - tensorboard-data-server=0.6.1=py310haa95532_0 - - tensorboard-plugin-wit=1.8.1=py310haa95532_0 - - tk=8.6.12=h2bbff1b_0 - - toml=0.10.2=pyhd3eb1b0_0 - - tornado=6.2=py310h2bbff1b_0 - - tqdm=4.65.0=py310h9909e9c_0 - - typing_extensions=4.5.0=py310haa95532_0 - - tzdata=2023c=h04d1e81_0 - - urllib3=1.26.16=py310haa95532_0 - - vc=14.2=h21ff451_1 - - vs2015_runtime=14.27.29016=h5e58377_2 - - werkzeug=2.2.3=py310haa95532_0 - - wheel=0.38.4=py310haa95532_0 - - win_inet_pton=1.1.0=py310haa95532_0 - - xz=5.4.2=h8cc25b3_0 - - yaml=0.2.5=he774522_0 - - yarl=1.8.1=py310h2bbff1b_0 - - zlib=1.2.13=h8cc25b3_0 - - zstd=1.5.5=hd43e919_0 - - pip: - - antlr4-python3-runtime==4.8 - - appdirs==1.4.4 - - audioread==3.0.0 - - bitarray==2.7.4 - - cython==0.29.35 - - decorator==5.1.1 - - fairseq==0.12.2 - - faiss-cpu==1.7.4 - - 
filelock==3.12.0 - - hydra-core==1.0.7 - - jinja2==3.1.2 - - joblib==1.2.0 - - lazy-loader==0.2 - - librosa==0.10.0.post2 - - llvmlite==0.40.0 - - lxml==4.9.2 - - mpmath==1.3.0 - - msgpack==1.0.5 - - networkx==3.1 - - noisereduce==2.0.1 - - numba==0.57.0 - - omegaconf==2.0.6 - - opencv-python==4.7.0.72 - - pooch==1.6.0 - - portalocker==2.7.0 - - pysimplegui==4.60.5 - - pywin32==306 - - pyworld==0.3.3 - - regex==2023.5.5 - - sacrebleu==2.3.1 - - scikit-learn==1.2.2 - - scipy==1.10.1 - - sounddevice==0.4.6 - - soundfile==0.12.1 - - soxr==0.3.5 - - sympy==1.12 - - tabulate==0.9.0 - - threadpoolctl==3.1.0 - - torch==2.0.0 - - torch-directml==0.2.0.dev230426 - - torchaudio==2.0.1 - - torchvision==0.15.1 - - wget==3.2 -prefix: D:\ProgramData\anaconda3_\envs\pydml diff --git a/extract_f0_print.py b/extract_f0_print.py deleted file mode 100644 index aab2762e4..000000000 --- a/extract_f0_print.py +++ /dev/null @@ -1,294 +0,0 @@ -import os -import traceback -import sys -import parselmouth - -now_dir = os.getcwd() -sys.path.append(now_dir) -from LazyImport import lazyload -from my_utils import load_audio -import pyworld -import numpy as np, logging -torchcrepe = lazyload("torchcrepe") # Fork Feature. Crepe algo for training and preprocess -torch = lazyload("torch") -#from torch import Tensor # Fork Feature. Used for pitch prediction for torch crepe. -tqdm = lazyload("tqdm") - -logging.getLogger("numba").setLevel(logging.WARNING) - -import multiprocessing - -exp_dir = sys.argv[1] -f = open(f"{exp_dir}/extract_f0_feature.log", "a+") - -DoFormant = False -Quefrency = 1.0 -Timbre = 1.0 - -def printt(strr): - print(strr) - f.write(f"{strr}\n") - f.flush() - - -n_p = int(sys.argv[2]) -f0method = sys.argv[3] -extraction_crepe_hop_length = 0 -try: - extraction_crepe_hop_length = int(sys.argv[4]) -except: - print("Temp Issue. echl is not being passed with argument!") - extraction_crepe_hop_length = 128 - -# print("EXTRACTION CREPE HOP LENGTH: " + str(extraction_crepe_hop_length)) -# print("EXTRACTION CREPE HOP LENGTH TYPE: " + str(type(extraction_crepe_hop_length))) - - -class FeatureInput(object): - def __init__(self, samplerate=16000, hop_size=160): - self.fs = samplerate - self.hop = hop_size - - self.f0_method_dict = self.get_f0_method_dict() - - self.f0_bin = 256 - self.f0_max = 1100.0 - self.f0_min = 50.0 - self.f0_mel_min = 1127 * np.log(1 + self.f0_min / 700) - self.f0_mel_max = 1127 * np.log(1 + self.f0_max / 700) - - # EXPERIMENTAL. 
PROBABLY BUGGY - def mncrepe(self, method, x, p_len, crepe_hop_length): - f0 = None - torch_device_index = 0 - torch_device = torch.device( - f"cuda:{torch_device_index % torch.cuda.device_count()}" - ) if torch.cuda.is_available() \ - else torch.device("mps") if torch.backends.mps.is_available() \ - else torch.device("cpu") - - audio = torch.from_numpy(x.astype(np.float32)).to(torch_device, copy=True) - audio /= torch.quantile(torch.abs(audio), 0.999) - audio = torch.unsqueeze(audio, dim=0) - if audio.ndim == 2 and audio.shape[0] > 1: - audio = torch.mean(audio, dim=0, keepdim=True).detach() - audio = audio.detach() - - if method == 'mangio-crepe': - pitch: torch.Tensor = torchcrepe.predict( - audio, - self.fs, - crepe_hop_length, - self.f0_min, - self.f0_max, - "full", - batch_size=crepe_hop_length * 2, - device=torch_device, - pad=True, - ) - p_len = p_len or x.shape[0] // crepe_hop_length - # Resize the pitch - source = np.array(pitch.squeeze(0).cpu().float().numpy()) - source[source < 0.001] = np.nan - target = np.interp( - np.arange(0, len(source) * p_len, len(source)) / p_len, - np.arange(0, len(source)), - source, - ) - f0 = np.nan_to_num(target) - - elif method == 'crepe': - batch_size = 512 - audio = torch.tensor(np.copy(x))[None].float() - f0, pd = torchcrepe.predict( - audio, - self.fs, - 160, - self.f0_min, - self.f0_max, - "full", - batch_size=batch_size, - device=torch_device, - return_periodicity=True, - ) - pd = torchcrepe.filter.median(pd, 3) - f0 = torchcrepe.filter.mean(f0, 3) - f0[pd < 0.1] = 0 - f0 = f0[0].cpu().numpy() - f0 = f0[1:] # Get rid of extra first frame - - return f0 - - def get_pm(self, x, p_len): - f0 = parselmouth.Sound(x, self.fs).to_pitch_ac( - time_step=160 / 16000, - voicing_threshold=0.6, - pitch_floor=self.f0_min, - pitch_ceiling=self.f0_max, - ).selected_array["frequency"] - - return np.pad( - f0, - [[max(0, (p_len - len(f0) + 1) // 2), max(0, p_len - len(f0) - (p_len - len(f0) + 1) // 2)]], - mode="constant" - ) - - def get_harvest(self, x): - f0_spectral = pyworld.harvest( - x.astype(np.double), - fs=self.fs, - f0_ceil=self.f0_max, - f0_floor=self.f0_min, - frame_period=1000 * self.hop / self.fs, - ) - return pyworld.stonemask(x.astype(np.double), *f0_spectral, self.fs) - - def get_dio(self, x): - f0_spectral = pyworld.dio( - x.astype(np.double), - fs=self.fs, - f0_ceil=self.f0_max, - f0_floor=self.f0_min, - frame_period=1000 * self.hop / self.fs, - ) - return pyworld.stonemask(x.astype(np.double), *f0_spectral, self.fs) - - def get_rmvpe(self, x): - if not hasattr(self, "model_rmvpe"): - from rmvpe import RMVPE - self.model_rmvpe = RMVPE("rmvpe.pt", is_half=False, device="cuda:0") - - return self.model_rmvpe.infer_from_audio(x, thred=0.03) - - def get_f0_method_dict(self): - return { - "pm": self.get_pm, - "harvest": self.get_harvest, - "dio": self.get_dio, - "rmvpe": self.get_rmvpe - } - - def get_f0_hybrid_computation( - self, - methods_str, - x, - p_len, - crepe_hop_length, - ): - # Get various f0 methods from input to use in the computation stack - s = methods_str - s = s.split("hybrid")[1] - s = s.replace("[", "").replace("]", "") - methods = s.split("+") - f0_computation_stack = [] - - for method in methods: - if method in self.f0_method_dict: - f0 = self.f0_method_dict[method](x, p_len) if method == 'pm' else self.f0_method_dict[method](x) - f0_computation_stack.append(f0) - elif method == 'crepe' or method == 'mangio-crepe': - self.the_other_complex_function(x, method, crepe_hop_length) - - if len(f0_computation_stack) != 0: - 
f0_median_hybrid = np.nanmedian(f0_computation_stack, axis=0) if len(f0_computation_stack)>1 else f0_computation_stack[0] - return f0_median_hybrid - else: - raise ValueError("No valid methods were provided") - - def compute_f0(self, path, f0_method, crepe_hop_length): - x = load_audio(path, self.fs, DoFormant, Quefrency, Timbre) - p_len = x.shape[0] // self.hop - - if f0_method in self.f0_method_dict: - f0 = self.f0_method_dict[f0_method](x, p_len) if f0_method == 'pm' else self.f0_method_dict[f0_method](x) - elif f0_method in ['crepe', 'mangio-crepe']: - f0 = self.mncrepe(f0_method, x, p_len, crepe_hop_length) - elif "hybrid" in f0_method: # EXPERIMENTAL - # Perform hybrid median pitch estimation - f0 = self.get_f0_hybrid_computation( - f0_method, - x, - p_len, - crepe_hop_length, - ) - # Mangio-RVC-Fork Feature: Add hybrid f0 inference to feature extraction. EXPERIMENTAL... - - return f0 - - def coarse_f0(self, f0): - f0_mel = 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - self.f0_mel_min) * ( - self.f0_bin - 2 - ) / (self.f0_mel_max - self.f0_mel_min) + 1 - - # use 0 or 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > self.f0_bin - 1] = self.f0_bin - 1 - f0_coarse = np.rint(f0_mel).astype(int) - assert f0_coarse.max() <= 255 and f0_coarse.min() >= 1, ( - f0_coarse.max(), - f0_coarse.min(), - ) - return f0_coarse - - def go(self, paths, f0_method, crepe_hop_length, thread_n): - if not paths: - printt("no-f0-todo") - return - - with tqdm.tqdm(total=len(paths), leave=True, position=thread_n) as pbar: - description = f"thread:{thread_n}, f0ing, Hop-Length:{crepe_hop_length}" - pbar.set_description(description) - - for idx, (inp_path, opt_path1, opt_path2) in enumerate(paths): - try: - if ( - os.path.exists(opt_path1 + ".npy") - and os.path.exists(opt_path2 + ".npy") - ): - pbar.update(1) - continue - - featur_pit = self.compute_f0(inp_path, f0_method, crepe_hop_length) - np.save(opt_path2, featur_pit, allow_pickle=False) # nsf - - coarse_pit = self.coarse_f0(featur_pit) - np.save(opt_path1, coarse_pit, allow_pickle=False) # ori - - pbar.update(1) - except Exception as e: - printt(f"f0fail-{idx}-{inp_path}-{traceback.format_exc()}") - - -if __name__ == "__main__": - # exp_dir=r"E:\codes\py39\dataset\mi-test" - # n_p=16 - # f = open("%s/log_extract_f0.log"%exp_dir, "w") - printt(sys.argv) - featureInput = FeatureInput() - paths = [] - inp_root = "%s/1_16k_wavs" % (exp_dir) - opt_root1 = "%s/2a_f0" % (exp_dir) - opt_root2 = "%s/2b-f0nsf" % (exp_dir) - - os.makedirs(opt_root1, exist_ok=True) - os.makedirs(opt_root2, exist_ok=True) - for name in sorted(list(os.listdir(inp_root))): - inp_path = "%s/%s" % (inp_root, name) - if "spec" in inp_path: - continue - opt_path1 = "%s/%s" % (opt_root1, name) - opt_path2 = "%s/%s" % (opt_root2, name) - paths.append([inp_path, opt_path1, opt_path2]) - - ps = [] - print("Using f0 method: " + f0method) - for i in range(n_p): - p = multiprocessing.Process( - target=featureInput.go, - args=(paths[i::n_p], f0method, extraction_crepe_hop_length, i), - ) - ps.append(p) - p.start() - for i in range(n_p): - ps[i].join() diff --git a/extract_feature_print.py b/extract_feature_print.py deleted file mode 100644 index 3aba191e8..000000000 --- a/extract_feature_print.py +++ /dev/null @@ -1,123 +0,0 @@ -import os, sys, traceback - -os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" -os.environ["PYTORCH_MPS_HIGH_WATERMARK_RATIO"] = "0.0" - -# device=sys.argv[1] -n_part = int(sys.argv[2]) -i_part = int(sys.argv[3]) -if len(sys.argv) == 6: - exp_dir = 
sys.argv[4] - version = sys.argv[5] -else: - i_gpu = sys.argv[4] - exp_dir = sys.argv[5] - os.environ["CUDA_VISIBLE_DEVICES"] = str(i_gpu) - version = sys.argv[6] -import torch -import torch.nn.functional as F -import soundfile as sf -import numpy as np -from fairseq import checkpoint_utils - -device = "cpu" -if torch.cuda.is_available(): - device = "cuda" -elif torch.backends.mps.is_available(): - device = "mps" - -f = open("%s/extract_f0_feature.log" % exp_dir, "a+") - - -def printt(strr): - print(strr) - f.write("%s\n" % strr) - f.flush() - - -printt(sys.argv) -model_path = "hubert_base.pt" - -printt(exp_dir) -wavPath = "%s/1_16k_wavs" % exp_dir -outPath = ( - "%s/3_feature256" % exp_dir if version == "v1" else "%s/3_feature768" % exp_dir -) -os.makedirs(outPath, exist_ok=True) - - -# wave must be 16k, hop_size=320 -def readwave(wav_path, normalize=False): - wav, sr = sf.read(wav_path) - assert sr == 16000 - feats = torch.from_numpy(wav).float() - if feats.dim() == 2: # double channels - feats = feats.mean(-1) - assert feats.dim() == 1, feats.dim() - if normalize: - with torch.no_grad(): - feats = F.layer_norm(feats, feats.shape) - feats = feats.view(1, -1) - return feats - - -# HuBERT model -printt("load model(s) from {}".format(model_path)) -# if hubert model is exist -if os.access(model_path, os.F_OK) == False: - printt( - "Error: Extracting is shut down because %s does not exist, you may download it from https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main" - % model_path - ) - exit(0) -models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task( - [model_path], - suffix="", -) -model = models[0] -model = model.to(device) -printt("move model to %s" % device) -if device not in ["mps", "cpu"]: - model = model.half() -model.eval() - -todo = sorted(list(os.listdir(wavPath)))[i_part::n_part] -n = max(1, len(todo) // 10) # 最多打印十条 -if len(todo) == 0: - printt("no-feature-todo") -else: - printt("all-feature-%s" % len(todo)) - for idx, file in enumerate(todo): - try: - if file.endswith(".wav"): - wav_path = "%s/%s" % (wavPath, file) - out_path = "%s/%s" % (outPath, file.replace("wav", "npy")) - - if os.path.exists(out_path): - continue - - feats = readwave(wav_path, normalize=saved_cfg.task.normalize) - padding_mask = torch.BoolTensor(feats.shape).fill_(False) - inputs = { - "source": feats.half().to(device) - if device not in ["mps", "cpu"] - else feats.to(device), - "padding_mask": padding_mask.to(device), - "output_layer": 9 if version == "v1" else 12, # layer 9 - } - with torch.no_grad(): - logits = model.extract_features(**inputs) - feats = ( - model.final_proj(logits[0]) if version == "v1" else logits[0] - ) - - feats = feats.squeeze(0).float().cpu().numpy() - if np.isnan(feats).sum() == 0: - np.save(out_path, feats, allow_pickle=False) - else: - printt("%s-contains nan" % file) - if idx % n == 0: - printt("now-%s,all-%s,%s,%s" % (idx, len(todo), file, feats.shape)) - except: - printt(traceback.format_exc()) - printt("all-feature-done") diff --git a/extract_locale.py b/extract_locale.py deleted file mode 100644 index 0f0ff82b8..000000000 --- a/extract_locale.py +++ /dev/null @@ -1,34 +0,0 @@ -import json -import re - -# Define regular expression patterns -pattern = r"""i18n\([\s\n\t]*(["'][^"']+["'])[\s\n\t]*\)""" - -# Initialize the dictionary to store key-value pairs -data = {} - - -def process(fn: str): - global data - with open(fn, "r", encoding="utf-8") as f: - contents = f.read() - matches = re.findall(pattern, contents) - for key in matches: - key = eval(key) 
- print("extract:", key) - data[key] = key - - -print("processing infer-web.py") -process("infer-web.py") - -print("processing gui_v0.py") -process("gui_v0.py") - -print("processing gui_v1.py") -process("gui_v1.py") - -# Save as a JSON file -with open("./i18n/zh_CN.json", "w", encoding="utf-8") as f: - json.dump(data, f, ensure_ascii=False, indent=4) - f.write("\n") diff --git a/formantshiftcfg/Put your formantshift presets here as a txt file b/formantshiftcfg/Put your formantshift presets here as a txt file deleted file mode 100644 index e69de29bb..000000000 diff --git a/formantshiftcfg/f2m.txt b/formantshiftcfg/f2m.txt deleted file mode 100644 index 40356a80c..000000000 --- a/formantshiftcfg/f2m.txt +++ /dev/null @@ -1,2 +0,0 @@ -1.0 -0.8 \ No newline at end of file diff --git a/formantshiftcfg/m2f.txt b/formantshiftcfg/m2f.txt deleted file mode 100644 index fa69b52dc..000000000 --- a/formantshiftcfg/m2f.txt +++ /dev/null @@ -1,2 +0,0 @@ -1.0 -1.2 \ No newline at end of file diff --git a/formantshiftcfg/random.txt b/formantshiftcfg/random.txt deleted file mode 100644 index 427be5c80..000000000 --- a/formantshiftcfg/random.txt +++ /dev/null @@ -1,2 +0,0 @@ -32.0 -9.8 \ No newline at end of file diff --git a/go-realtime-gui-v0.bat b/go-realtime-gui-v0.bat deleted file mode 100644 index ed23f3f2f..000000000 --- a/go-realtime-gui-v0.bat +++ /dev/null @@ -1,2 +0,0 @@ -runtime\python.exe gui_v0.py -pause diff --git a/go-realtime-gui-v1.bat b/go-realtime-gui-v1.bat deleted file mode 100644 index 21f0edc35..000000000 --- a/go-realtime-gui-v1.bat +++ /dev/null @@ -1,2 +0,0 @@ -runtime\python.exe gui_v1.py -pause diff --git a/go-web.bat b/go-web.bat deleted file mode 100644 index ff419757d..000000000 --- a/go-web.bat +++ /dev/null @@ -1,32 +0,0 @@ -@ECHO OFF -SETLOCAL - -:: Set the Python command. -SET PYCMD="runtime\python.exe" - -:: Set the port number. -SET PORT="7897" - -:: Set the theme of Gradio. -:: You can get more themes at https://huggingface.co/spaces/gradio/theme-gallery -:: For example if you want this one: https://huggingface.co/spaces/bethecloud/storj_theme -:: You will have to look at a line that starts like "To use this theme, set" -:: On the same line look for [" theme='[AUTHOR]/[THEME]' "]. e.g. [" theme='bethecloud/storj_theme' "] -:: Copy just the part in apostrophes: ''. e.g. bethecloud/storj_theme -:: Now modify the line below and paste that part with replacement in quotation mark. e.g. "bethecloud/storj_theme" -:: In the end you should have `SET THEME="bethecloud/storj_theme"` -SET THEME="gradio/soft" - -:: Echo the current settings. -ECHO Current Settings: -ECHO. -ECHO Python command: %PYCMD% -ECHO Port number: %PORT% -ECHO Theme: %THEME% -ECHO. - -:: Execute the Python script with the current settings. -%PYCMD% infer-web.py --pycmd %PYCMD% --port %PORT% --theme %THEME% - -:: Pause the script at the end. -pause \ No newline at end of file diff --git a/go-web.ps1 b/go-web.ps1 deleted file mode 100644 index 9a407d5fa..000000000 --- a/go-web.ps1 +++ /dev/null @@ -1,29 +0,0 @@ -# Set the theme of Gradio. -# You can get more themes at https://huggingface.co/spaces/gradio/theme-gallery -# For example if you want this one: https://huggingface.co/spaces/bethecloud/storj_theme -# You will have to look at a line that starts like "To use this theme, set" -# On the same line look for [" theme='[AUTHOR]/[THEME]' "]. e.g. [" theme='bethecloud/storj_theme' "] -# Copy just the part in apostrophes: ''. e.g. 
bethecloud/storj_theme -# Now modify the line below and paste that part with replacement in quotation mark. e.g. "bethecloud/storj_theme" -# In the end you should have THEME = "bethecloud/storj_theme" -$props = @{ - PYCMD = "runtime\python.exe" - PORT = "7897" - THEME = "gradio/soft" # Modify accordigly to change Gradio theme -} - -Write-Host "Current Settings:`n" -ForegroundColor Magenta -# Display the current settings -$props.GetEnumerator() | ForEach-Object { - Write-Host ("{0}:" -f $_.Name) -NoNewline -ForegroundColor Green - Write-Host (" {0}" -f $_.Value) -ForegroundColor Cyan -} - -Write-Host "" - -# Run Python script using properties as arguments -& $props.PYCMD infer-web.py --pycmd $props.PYCMD --port $props.PORT --theme $props.THEME - -# Pause the script at the end -Write-Host "Press any key to continue ..." -$null = $Host.UI.RawUI.ReadKey("NoEcho,IncludeKeyDown") \ No newline at end of file diff --git a/gui_v0.py b/gui_v0.py deleted file mode 100644 index 0c318446f..000000000 --- a/gui_v0.py +++ /dev/null @@ -1,787 +0,0 @@ -import os, sys, traceback, re - -import json - -now_dir = os.getcwd() -sys.path.append(now_dir) -from config import Config - -Config = Config() -import PySimpleGUI as sg -import sounddevice as sd -import noisereduce as nr -import numpy as np -from fairseq import checkpoint_utils -import librosa, torch, pyworld, faiss, time, threading -import torch.nn.functional as F -import torchaudio.transforms as tat -import scipy.signal as signal -import torchcrepe - -# import matplotlib.pyplot as plt -from lib.infer_pack.models import ( - SynthesizerTrnMs256NSFsid, - SynthesizerTrnMs256NSFsid_nono, - SynthesizerTrnMs768NSFsid, - SynthesizerTrnMs768NSFsid_nono, -) -from i18n import I18nAuto - -i18n = I18nAuto() -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") -current_dir = os.getcwd() - - -class RVC: - def __init__( - self, key, f0_method, hubert_path, pth_path, index_path, npy_path, index_rate - ) -> None: - """ - 初始化 - """ - try: - self.f0_up_key = key - self.time_step = 160 / 16000 * 1000 - self.f0_min = 50 - self.f0_max = 1100 - self.f0_mel_min = 1127 * np.log(1 + self.f0_min / 700) - self.f0_mel_max = 1127 * np.log(1 + self.f0_max / 700) - self.f0_method = f0_method - self.sr = 16000 - self.window = 160 - - # Get Torch Device - if torch.cuda.is_available(): - self.torch_device = torch.device( - f"cuda:{0 % torch.cuda.device_count()}" - ) - elif torch.backends.mps.is_available(): - self.torch_device = torch.device("mps") - else: - self.torch_device = torch.device("cpu") - - if index_rate != 0: - self.index = faiss.read_index(index_path) - # self.big_npy = np.load(npy_path) - self.big_npy = self.index.reconstruct_n(0, self.index.ntotal) - print("index search enabled") - self.index_rate = index_rate - model_path = hubert_path - print("load model(s) from {}".format(model_path)) - models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task( - [model_path], - suffix="", - ) - self.model = models[0] - self.model = self.model.to(device) - if Config.is_half: - self.model = self.model.half() - else: - self.model = self.model.float() - self.model.eval() - cpt = torch.load(pth_path, map_location="cpu") - self.tgt_sr = cpt["config"][-1] - cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] # n_spk - self.if_f0 = cpt.get("f0", 1) - self.version = cpt.get("version", "v1") - if self.version == "v1": - if self.if_f0 == 1: - self.net_g = SynthesizerTrnMs256NSFsid( - *cpt["config"], is_half=Config.is_half - ) - else: - self.net_g = 
SynthesizerTrnMs256NSFsid_nono(*cpt["config"]) - elif self.version == "v2": - if self.if_f0 == 1: - self.net_g = SynthesizerTrnMs768NSFsid( - *cpt["config"], is_half=Config.is_half - ) - else: - self.net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"]) - del self.net_g.enc_q - print(self.net_g.load_state_dict(cpt["weight"], strict=False)) - self.net_g.eval().to(device) - if Config.is_half: - self.net_g = self.net_g.half() - else: - self.net_g = self.net_g.float() - except: - print(traceback.format_exc()) - - def get_regular_crepe_computation(self, x, f0_min, f0_max, model="full"): - batch_size = 512 - # Compute pitch using first gpu - audio = torch.tensor(np.copy(x))[None].float() - f0, pd = torchcrepe.predict( - audio, - self.sr, - self.window, - f0_min, - f0_max, - model, - batch_size=batch_size, - device=self.torch_device, - return_periodicity=True, - ) - pd = torchcrepe.filter.median(pd, 3) - f0 = torchcrepe.filter.mean(f0, 3) - f0[pd < 0.1] = 0 - f0 = f0[0].cpu().numpy() - return f0 - - def get_harvest_computation(self, x, f0_min, f0_max): - f0, t = pyworld.harvest( - x.astype(np.double), - fs=self.sr, - f0_ceil=f0_max, - f0_floor=f0_min, - frame_period=10, - ) - f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr) - f0 = signal.medfilt(f0, 3) - return f0 - - def get_f0(self, x, f0_up_key, inp_f0=None): - # Calculate Padding and f0 details here - p_len = x.shape[0] // 512 # For Now This probs doesn't work - x_pad = 1 - f0_min = 50 - f0_max = 1100 - f0_mel_min = 1127 * np.log(1 + f0_min / 700) - f0_mel_max = 1127 * np.log(1 + f0_max / 700) - - f0 = 0 - # Here, check f0_methods and get their computations - if self.f0_method == "harvest": - f0 = self.get_harvest_computation(x, f0_min, f0_max) - elif self.f0_method == "reg-crepe": - f0 = self.get_regular_crepe_computation(x, f0_min, f0_max) - elif self.f0_method == "reg-crepe-tiny": - f0 = self.get_regular_crepe_computation(x, f0_min, f0_max, "tiny") - - # Calculate f0_course and f0_bak here - f0 *= pow(2, f0_up_key / 12) - # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) - tf0 = self.sr // self.window # 每秒f0点数 - if inp_f0 is not None: - delta_t = np.round( - (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1 - ).astype("int16") - replace_f0 = np.interp( - list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1] - ) - shape = f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)].shape[0] - f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)] = replace_f0[:shape] - # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) - f0bak = f0.copy() - f0_mel = 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / ( - f0_mel_max - f0_mel_min - ) + 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > 255] = 255 - f0_coarse = np.rint(f0_mel).astype(np.int) - return f0_coarse, f0bak # 1-0 - - def infer(self, feats: torch.Tensor) -> np.ndarray: - """ - 推理函数 - """ - audio = feats.clone().cpu().numpy() - assert feats.dim() == 1, feats.dim() - feats = feats.view(1, -1) - padding_mask = torch.BoolTensor(feats.shape).fill_(False) - if Config.is_half: - feats = feats.half() - else: - feats = feats.float() - inputs = { - "source": feats.to(device), - "padding_mask": padding_mask.to(device), - "output_layer": 9 if self.version == "v1" else 12, - } - torch.cuda.synchronize() - with torch.no_grad(): - logits = self.model.extract_features(**inputs) - feats = ( - self.model.final_proj(logits[0]) if self.version == "v1" else logits[0] - ) - - ####索引优化 - try: - if ( - 
hasattr(self, "index") - and hasattr(self, "big_npy") - and self.index_rate != 0 - ): - npy = feats[0].cpu().numpy().astype("float32") - score, ix = self.index.search(npy, k=8) - weight = np.square(1 / score) - weight /= weight.sum(axis=1, keepdims=True) - npy = np.sum(self.big_npy[ix] * np.expand_dims(weight, axis=2), axis=1) - if Config.is_half: - npy = npy.astype("float16") - feats = ( - torch.from_numpy(npy).unsqueeze(0).to(device) * self.index_rate - + (1 - self.index_rate) * feats - ) - else: - print("index search FAIL or disabled") - except: - traceback.print_exc() - print("index search FAIL") - feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1) - torch.cuda.synchronize() - print(feats.shape) - if self.if_f0 == 1: - pitch, pitchf = self.get_f0(audio, self.f0_up_key) - p_len = min(feats.shape[1], 13000, pitch.shape[0]) # 太大了爆显存 - else: - pitch, pitchf = None, None - p_len = min(feats.shape[1], 13000) # 太大了爆显存 - torch.cuda.synchronize() - # print(feats.shape,pitch.shape) - feats = feats[:, :p_len, :] - if self.if_f0 == 1: - pitch = pitch[:p_len] - pitchf = pitchf[:p_len] - pitch = torch.LongTensor(pitch).unsqueeze(0).to(device) - pitchf = torch.FloatTensor(pitchf).unsqueeze(0).to(device) - p_len = torch.LongTensor([p_len]).to(device) - ii = 0 # sid - sid = torch.LongTensor([ii]).to(device) - with torch.no_grad(): - if self.if_f0 == 1: - infered_audio = ( - self.net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0] - .data.cpu() - .float() - ) - else: - infered_audio = ( - self.net_g.infer(feats, p_len, sid)[0][0, 0].data.cpu().float() - ) - torch.cuda.synchronize() - return infered_audio - - -class GUIConfig: - def __init__(self) -> None: - self.hubert_path: str = "" - self.pth_path: str = "" - self.index_path: str = "" - self.npy_path: str = "" - self.f0_method: str = "" - self.pitch: int = 12 - self.samplerate: int = 44100 - self.block_time: float = 1.0 # s - self.buffer_num: int = 1 - self.threhold: int = -30 - self.crossfade_time: float = 0.08 - self.extra_time: float = 0.04 - self.I_noise_reduce = False - self.O_noise_reduce = False - self.index_rate = 0.3 - - -class GUI: - def __init__(self) -> None: - self.config = GUIConfig() - self.flag_vc = False - - self.launcher() - - def load(self): - ( - input_devices, - output_devices, - input_devices_indices, - output_devices_indices, - ) = self.get_devices() - try: - with open("values1.json", "r") as j: - data = json.load(j) - except: - # Injecting f0_method into the json data - with open("values1.json", "w") as j: - data = { - "pth_path": "", - "index_path": "", - "sg_input_device": input_devices[ - input_devices_indices.index(sd.default.device[0]) - ], - "sg_output_device": output_devices[ - output_devices_indices.index(sd.default.device[1]) - ], - "threhold": "-45", - "pitch": "0", - "index_rate": "0", - "block_time": "1", - "crossfade_length": "0.04", - "extra_time": "1", - } - return data - - def launcher(self): - data = self.load() - sg.theme("DarkTeal12") - input_devices, output_devices, _, _ = self.get_devices() - layout = [ - [ - sg.Frame( - title="Proudly forked by Mangio621", - layout=[[sg.Image("./mangio_utils/lol.png")]], - ), - sg.Frame( - title=i18n("加载模型"), - layout=[ - [ - sg.Input( - default_text="hubert_base.pt", - key="hubert_path", - disabled=True, - ), - sg.FileBrowse( - i18n("Hubert模型"), - initial_folder=os.path.join(os.getcwd()), - file_types=(("pt files", "*.pt"),), - ), - ], - [ - sg.Input( - default_text=data.get("pth_path", ""), - key="pth_path", - ), - sg.FileBrowse( - 
i18n("选择.pth文件"), - initial_folder=os.path.join(os.getcwd(), "weights"), - file_types=(("weight files", "*.pth"),), - ), - ], - [ - sg.Input( - default_text=data.get("index_path", ""), - key="index_path", - ), - sg.FileBrowse( - i18n("选择.index文件"), - initial_folder=os.path.join(os.getcwd(), "logs"), - file_types=(("index files", "*.index"),), - ), - ], - [ - sg.Input( - default_text="你不需要填写这个You don't need write this.", - key="npy_path", - disabled=True, - ), - sg.FileBrowse( - i18n("选择.npy文件"), - initial_folder=os.path.join(os.getcwd(), "logs"), - file_types=(("feature files", "*.npy"),), - ), - ], - ], - ), - ], - [ - # Mangio f0 Selection frame Here - sg.Frame( - layout=[ - [ - sg.Radio( - "Harvest", "f0_method", key="harvest", default=True - ), - sg.Radio("Crepe", "f0_method", key="reg-crepe"), - sg.Radio("Crepe Tiny", "f0_method", key="reg-crepe-tiny"), - ] - ], - title="Select an f0 Method", - ) - ], - [ - sg.Frame( - layout=[ - [ - sg.Text(i18n("输入设备")), - sg.Combo( - input_devices, - key="sg_input_device", - default_value=data.get("sg_input_device", ""), - ), - ], - [ - sg.Text(i18n("输出设备")), - sg.Combo( - output_devices, - key="sg_output_device", - default_value=data.get("sg_output_device", ""), - ), - ], - ], - title=i18n("音频设备(请使用同种类驱动)"), - ) - ], - [ - sg.Frame( - layout=[ - [ - sg.Text(i18n("响应阈值")), - sg.Slider( - range=(-60, 0), - key="threhold", - resolution=1, - orientation="h", - default_value=data.get("threhold", ""), - ), - ], - [ - sg.Text(i18n("音调设置")), - sg.Slider( - range=(-24, 24), - key="pitch", - resolution=1, - orientation="h", - default_value=data.get("pitch", ""), - ), - ], - [ - sg.Text(i18n("Index Rate")), - sg.Slider( - range=(0.0, 1.0), - key="index_rate", - resolution=0.01, - orientation="h", - default_value=data.get("index_rate", ""), - ), - ], - ], - title=i18n("常规设置"), - ), - sg.Frame( - layout=[ - [ - sg.Text(i18n("采样长度")), - sg.Slider( - range=(0.1, 3.0), - key="block_time", - resolution=0.1, - orientation="h", - default_value=data.get("block_time", ""), - ), - ], - [ - sg.Text(i18n("淡入淡出长度")), - sg.Slider( - range=(0.01, 0.15), - key="crossfade_length", - resolution=0.01, - orientation="h", - default_value=data.get("crossfade_length", ""), - ), - ], - [ - sg.Text(i18n("额外推理时长")), - sg.Slider( - range=(0.05, 3.00), - key="extra_time", - resolution=0.01, - orientation="h", - default_value=data.get("extra_time", ""), - ), - ], - [ - sg.Checkbox(i18n("输入降噪"), key="I_noise_reduce"), - sg.Checkbox(i18n("输出降噪"), key="O_noise_reduce"), - ], - ], - title=i18n("性能设置"), - ), - ], - [ - sg.Button(i18n("开始音频转换"), key="start_vc"), - sg.Button(i18n("停止音频转换"), key="stop_vc"), - sg.Text(i18n("推理时间(ms):")), - sg.Text("0", key="infer_time"), - ], - ] - self.window = sg.Window("RVC - GUI", layout=layout) - self.event_handler() - - def event_handler(self): - while True: - event, values = self.window.read() - if event == sg.WINDOW_CLOSED: - self.flag_vc = False - exit() - if event == "start_vc" and self.flag_vc == False: - if self.set_values(values) == True: - print("using_cuda:" + str(torch.cuda.is_available())) - self.start_vc() - settings = { - "pth_path": values["pth_path"], - "index_path": values["index_path"], - "f0_method": self.get_f0_method_from_radios(values), - "sg_input_device": values["sg_input_device"], - "sg_output_device": values["sg_output_device"], - "threhold": values["threhold"], - "pitch": values["pitch"], - "index_rate": values["index_rate"], - "block_time": values["block_time"], - "crossfade_length": values["crossfade_length"], - "extra_time": 
values["extra_time"], - } - with open("values1.json", "w") as j: - json.dump(settings, j) - if event == "stop_vc" and self.flag_vc == True: - self.flag_vc = False - - # Function that returns the used f0 method in string format "harvest" - def get_f0_method_from_radios(self, values): - f0_array = [ - {"name": "harvest", "val": values["harvest"]}, - {"name": "reg-crepe", "val": values["reg-crepe"]}, - {"name": "reg-crepe-tiny", "val": values["reg-crepe-tiny"]}, - ] - # Filter through to find a true value - used_f0 = "" - for f0 in f0_array: - if f0["val"] == True: - used_f0 = f0["name"] - break - if used_f0 == "": - used_f0 = "harvest" # Default Harvest if used_f0 is empty somehow - return used_f0 - - def set_values(self, values): - if len(values["pth_path"].strip()) == 0: - sg.popup(i18n("请选择pth文件")) - return False - if len(values["index_path"].strip()) == 0: - sg.popup(i18n("请选择index文件")) - return False - pattern = re.compile("[^\x00-\x7F]+") - if pattern.findall(values["hubert_path"]): - sg.popup(i18n("hubert模型路径不可包含中文")) - return False - if pattern.findall(values["pth_path"]): - sg.popup(i18n("pth文件路径不可包含中文")) - return False - if pattern.findall(values["index_path"]): - sg.popup(i18n("index文件路径不可包含中文")) - return False - self.set_devices(values["sg_input_device"], values["sg_output_device"]) - self.config.hubert_path = os.path.join(current_dir, "hubert_base.pt") - self.config.pth_path = values["pth_path"] - self.config.index_path = values["index_path"] - self.config.npy_path = values["npy_path"] - self.config.f0_method = self.get_f0_method_from_radios(values) - self.config.threhold = values["threhold"] - self.config.pitch = values["pitch"] - self.config.block_time = values["block_time"] - self.config.crossfade_time = values["crossfade_length"] - self.config.extra_time = values["extra_time"] - self.config.I_noise_reduce = values["I_noise_reduce"] - self.config.O_noise_reduce = values["O_noise_reduce"] - self.config.index_rate = values["index_rate"] - return True - - def start_vc(self): - torch.cuda.empty_cache() - self.flag_vc = True - self.block_frame = int(self.config.block_time * self.config.samplerate) - self.crossfade_frame = int(self.config.crossfade_time * self.config.samplerate) - self.sola_search_frame = int(0.012 * self.config.samplerate) - self.delay_frame = int(0.01 * self.config.samplerate) # 往前预留0.02s - self.extra_frame = int(self.config.extra_time * self.config.samplerate) - self.rvc = None - self.rvc = RVC( - self.config.pitch, - self.config.f0_method, - self.config.hubert_path, - self.config.pth_path, - self.config.index_path, - self.config.npy_path, - self.config.index_rate, - ) - self.input_wav: np.ndarray = np.zeros( - self.extra_frame - + self.crossfade_frame - + self.sola_search_frame - + self.block_frame, - dtype="float32", - ) - self.output_wav: torch.Tensor = torch.zeros( - self.block_frame, device=device, dtype=torch.float32 - ) - self.sola_buffer: torch.Tensor = torch.zeros( - self.crossfade_frame, device=device, dtype=torch.float32 - ) - self.fade_in_window: torch.Tensor = torch.linspace( - 0.0, 1.0, steps=self.crossfade_frame, device=device, dtype=torch.float32 - ) - self.fade_out_window: torch.Tensor = 1 - self.fade_in_window - self.resampler1 = tat.Resample( - orig_freq=self.config.samplerate, new_freq=16000, dtype=torch.float32 - ) - self.resampler2 = tat.Resample( - orig_freq=self.rvc.tgt_sr, - new_freq=self.config.samplerate, - dtype=torch.float32, - ) - thread_vc = threading.Thread(target=self.soundinput) - thread_vc.start() - - def soundinput(self): - 
""" - 接受音频输入 - """ - with sd.Stream( - channels=2, - callback=self.audio_callback, - blocksize=self.block_frame, - samplerate=self.config.samplerate, - dtype="float32", - ): - while self.flag_vc: - time.sleep(self.config.block_time) - print("Audio block passed.") - print("ENDing VC") - - def audio_callback( - self, indata: np.ndarray, outdata: np.ndarray, frames, times, status - ): - """ - 音频处理 - """ - start_time = time.perf_counter() - indata = librosa.to_mono(indata.T) - if self.config.I_noise_reduce: - indata[:] = nr.reduce_noise(y=indata, sr=self.config.samplerate) - - """noise gate""" - frame_length = 2048 - hop_length = 1024 - rms = librosa.feature.rms( - y=indata, frame_length=frame_length, hop_length=hop_length - ) - db_threhold = librosa.amplitude_to_db(rms, ref=1.0)[0] < self.config.threhold - # print(rms.shape,db.shape,db) - for i in range(db_threhold.shape[0]): - if db_threhold[i]: - indata[i * hop_length : (i + 1) * hop_length] = 0 - self.input_wav[:] = np.append(self.input_wav[self.block_frame :], indata) - - # infer - print("input_wav:" + str(self.input_wav.shape)) - # print('infered_wav:'+str(infer_wav.shape)) - infer_wav: torch.Tensor = self.resampler2( - self.rvc.infer(self.resampler1(torch.from_numpy(self.input_wav))) - )[-self.crossfade_frame - self.sola_search_frame - self.block_frame :].to( - device - ) - print("infer_wav:" + str(infer_wav.shape)) - - # SOLA algorithm from https://github.com/yxlllc/DDSP-SVC - cor_nom = F.conv1d( - infer_wav[None, None, : self.crossfade_frame + self.sola_search_frame], - self.sola_buffer[None, None, :], - ) - cor_den = torch.sqrt( - F.conv1d( - infer_wav[None, None, : self.crossfade_frame + self.sola_search_frame] - ** 2, - torch.ones(1, 1, self.crossfade_frame, device=device), - ) - + 1e-8 - ) - sola_offset = torch.argmax(cor_nom[0, 0] / cor_den[0, 0]) - print("sola offset: " + str(int(sola_offset))) - - # crossfade - self.output_wav[:] = infer_wav[sola_offset : sola_offset + self.block_frame] - self.output_wav[: self.crossfade_frame] *= self.fade_in_window - self.output_wav[: self.crossfade_frame] += self.sola_buffer[:] - if sola_offset < self.sola_search_frame: - self.sola_buffer[:] = ( - infer_wav[ - -self.sola_search_frame - - self.crossfade_frame - + sola_offset : -self.sola_search_frame - + sola_offset - ] - * self.fade_out_window - ) - else: - self.sola_buffer[:] = ( - infer_wav[-self.crossfade_frame :] * self.fade_out_window - ) - - if self.config.O_noise_reduce: - outdata[:] = np.tile( - nr.reduce_noise( - y=self.output_wav[:].cpu().numpy(), sr=self.config.samplerate - ), - (2, 1), - ).T - else: - outdata[:] = self.output_wav[:].repeat(2, 1).t().cpu().numpy() - total_time = time.perf_counter() - start_time - self.window["infer_time"].update(int(total_time * 1000)) - print("infer time:" + str(total_time)) - print("f0_method: " + str(self.config.f0_method)) - - def get_devices(self, update: bool = True): - """获取设备列表""" - if update: - sd._terminate() - sd._initialize() - devices = sd.query_devices() - hostapis = sd.query_hostapis() - for hostapi in hostapis: - for device_idx in hostapi["devices"]: - devices[device_idx]["hostapi_name"] = hostapi["name"] - input_devices = [ - f"{d['name']} ({d['hostapi_name']})" - for d in devices - if d["max_input_channels"] > 0 - ] - output_devices = [ - f"{d['name']} ({d['hostapi_name']})" - for d in devices - if d["max_output_channels"] > 0 - ] - input_devices_indices = [ - d["index"] if "index" in d else d["name"] - for d in devices - if d["max_input_channels"] > 0 - ] - 
output_devices_indices = [ - d["index"] if "index" in d else d["name"] - for d in devices - if d["max_output_channels"] > 0 - ] - return ( - input_devices, - output_devices, - input_devices_indices, - output_devices_indices, - ) - - def set_devices(self, input_device, output_device): - """设置输出设备""" - ( - input_devices, - output_devices, - input_device_indices, - output_device_indices, - ) = self.get_devices() - sd.default.device[0] = input_device_indices[input_devices.index(input_device)] - sd.default.device[1] = output_device_indices[ - output_devices.index(output_device) - ] - print("input device:" + str(sd.default.device[0]) + ":" + str(input_device)) - print("output device:" + str(sd.default.device[1]) + ":" + str(output_device)) - - -gui = GUI() diff --git a/gui_v1.py b/gui_v1.py deleted file mode 100644 index 07ff3c9b8..000000000 --- a/gui_v1.py +++ /dev/null @@ -1,637 +0,0 @@ -import os, sys - -if sys.platform == "darwin": - os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" - -now_dir = os.getcwd() -sys.path.append(now_dir) -import multiprocessing - - -class Harvest(multiprocessing.Process): - def __init__(self, inp_q, opt_q): - multiprocessing.Process.__init__(self) - self.inp_q = inp_q - self.opt_q = opt_q - - def run(self): - import numpy as np, pyworld - - while 1: - idx, x, res_f0, n_cpu, ts = self.inp_q.get() - f0, t = pyworld.harvest( - x.astype(np.double), - fs=16000, - f0_ceil=1100, - f0_floor=50, - frame_period=10, - ) - res_f0[idx] = f0 - if len(res_f0.keys()) >= n_cpu: - self.opt_q.put(ts) - - -if __name__ == "__main__": - from multiprocessing import Queue - from queue import Empty - import numpy as np - import multiprocessing - import traceback, re - import json - import PySimpleGUI as sg - import sounddevice as sd - import noisereduce as nr - from multiprocessing import cpu_count - import librosa, torch, time, threading - import torch.nn.functional as F - import torchaudio.transforms as tat - from i18n import I18nAuto - - i18n = I18nAuto() - device = torch.device( - "cuda" - if torch.cuda.is_available() - else ("mps" if torch.backends.mps.is_available() else "cpu") - ) - current_dir = os.getcwd() - inp_q = Queue() - opt_q = Queue() - n_cpu = min(cpu_count(), 8) - for _ in range(n_cpu): - Harvest(inp_q, opt_q).start() - from rvc_for_realtime import RVC - - class GUIConfig: - def __init__(self) -> None: - self.pth_path: str = "" - self.index_path: str = "" - self.pitch: int = 12 - self.samplerate: int = 40000 - self.block_time: float = 1.0 # s - self.buffer_num: int = 1 - self.threhold: int = -30 - self.crossfade_time: float = 0.08 - self.extra_time: float = 0.04 - self.I_noise_reduce = False - self.O_noise_reduce = False - self.index_rate = 0.3 - self.n_cpu = min(n_cpu, 8) - self.f0method = "harvest" - - class GUI: - def __init__(self) -> None: - self.config = GUIConfig() - self.flag_vc = False - - self.launcher() - - def load(self): - input_devices, output_devices, _, _ = self.get_devices() - try: - with open("values1.json", "r") as j: - data = json.load(j) - data["pm"] = data["f0method"] == "pm" - data["harvest"] = data["f0method"] == "harvest" - data["crepe"] = data["f0method"] == "crepe" - data["rmvpe"] = data["f0method"] == "rmvpe" - except: - with open("values1.json", "w") as j: - data = { - "pth_path": " ", - "index_path": " ", - "sg_input_device": input_devices[sd.default.device[0]], - "sg_output_device": output_devices[sd.default.device[1]], - "threhold": "-45", - "pitch": "0", - "index_rate": "0", - "block_time": "1", - "crossfade_length": "0.04", - 
"extra_time": "1", - "f0method": "rmvpe", - } - return data - - def launcher(self): - data = self.load() - sg.theme("LightBlue3") - input_devices, output_devices, _, _ = self.get_devices() - layout = [ - [ - sg.Frame( - title=i18n("加载模型"), - layout=[ - [ - sg.Input( - default_text=data.get("pth_path", ""), - key="pth_path", - ), - sg.FileBrowse( - i18n("选择.pth文件"), - initial_folder=os.path.join(os.getcwd(), "weights"), - file_types=((". pth"),), - ), - ], - [ - sg.Input( - default_text=data.get("index_path", ""), - key="index_path", - ), - sg.FileBrowse( - i18n("选择.index文件"), - initial_folder=os.path.join(os.getcwd(), "logs"), - file_types=((". index"),), - ), - ], - ], - ) - ], - [ - sg.Frame( - layout=[ - [ - sg.Text(i18n("输入设备")), - sg.Combo( - input_devices, - key="sg_input_device", - default_value=data.get("sg_input_device", ""), - ), - ], - [ - sg.Text(i18n("输出设备")), - sg.Combo( - output_devices, - key="sg_output_device", - default_value=data.get("sg_output_device", ""), - ), - ], - ], - title=i18n("音频设备(请使用同种类驱动)"), - ) - ], - [ - sg.Frame( - layout=[ - [ - sg.Text(i18n("响应阈值")), - sg.Slider( - range=(-60, 0), - key="threhold", - resolution=1, - orientation="h", - default_value=data.get("threhold", ""), - ), - ], - [ - sg.Text(i18n("音调设置")), - sg.Slider( - range=(-24, 24), - key="pitch", - resolution=1, - orientation="h", - default_value=data.get("pitch", ""), - ), - ], - [ - sg.Text(i18n("Index Rate")), - sg.Slider( - range=(0.0, 1.0), - key="index_rate", - resolution=0.01, - orientation="h", - default_value=data.get("index_rate", ""), - ), - ], - [ - sg.Text(i18n("音高算法")), - sg.Radio( - "pm", - "f0method", - key="pm", - default=data.get("pm", "") == True, - ), - sg.Radio( - "harvest", - "f0method", - key="harvest", - default=data.get("harvest", "") == True, - ), - sg.Radio( - "crepe", - "f0method", - key="crepe", - default=data.get("crepe", "") == True, - ), - sg.Radio( - "rmvpe", - "f0method", - key="rmvpe", - default=data.get("rmvpe", "") == True, - ), - ], - ], - title=i18n("常规设置"), - ), - sg.Frame( - layout=[ - [ - sg.Text(i18n("采样长度")), - sg.Slider( - range=(0.12, 2.4), - key="block_time", - resolution=0.03, - orientation="h", - default_value=data.get("block_time", ""), - ), - ], - [ - sg.Text(i18n("harvest进程数")), - sg.Slider( - range=(1, n_cpu), - key="n_cpu", - resolution=1, - orientation="h", - default_value=data.get( - "n_cpu", min(self.config.n_cpu, n_cpu) - ), - ), - ], - [ - sg.Text(i18n("淡入淡出长度")), - sg.Slider( - range=(0.01, 0.15), - key="crossfade_length", - resolution=0.01, - orientation="h", - default_value=data.get("crossfade_length", ""), - ), - ], - [ - sg.Text(i18n("额外推理时长")), - sg.Slider( - range=(0.05, 3.00), - key="extra_time", - resolution=0.01, - orientation="h", - default_value=data.get("extra_time", ""), - ), - ], - [ - sg.Checkbox(i18n("输入降噪"), key="I_noise_reduce"), - sg.Checkbox(i18n("输出降噪"), key="O_noise_reduce"), - ], - ], - title=i18n("性能设置"), - ), - ], - [ - sg.Button(i18n("开始音频转换"), key="start_vc"), - sg.Button(i18n("停止音频转换"), key="stop_vc"), - sg.Text(i18n("推理时间(ms):")), - sg.Text("0", key="infer_time"), - ], - ] - self.window = sg.Window("RVC - GUI", layout=layout) - self.event_handler() - - def event_handler(self): - while True: - event, values = self.window.read() - if event == sg.WINDOW_CLOSED: - self.flag_vc = False - exit() - if event == "start_vc" and self.flag_vc == False: - if self.set_values(values) == True: - print("using_cuda:" + str(torch.cuda.is_available())) - self.start_vc() - settings = { - "pth_path": values["pth_path"], - 
"index_path": values["index_path"], - "sg_input_device": values["sg_input_device"], - "sg_output_device": values["sg_output_device"], - "threhold": values["threhold"], - "pitch": values["pitch"], - "index_rate": values["index_rate"], - "block_time": values["block_time"], - "crossfade_length": values["crossfade_length"], - "extra_time": values["extra_time"], - "n_cpu": values["n_cpu"], - "f0method": ["pm", "harvest", "crepe", "rmvpe"][ - [ - values["pm"], - values["harvest"], - values["crepe"], - values["rmvpe"], - ].index(True) - ], - } - with open("values1.json", "w") as j: - json.dump(settings, j) - if event == "stop_vc" and self.flag_vc == True: - self.flag_vc = False - - def set_values(self, values): - if len(values["pth_path"].strip()) == 0: - sg.popup(i18n("请选择pth文件")) - return False - if len(values["index_path"].strip()) == 0: - sg.popup(i18n("请选择index文件")) - return False - pattern = re.compile("[^\x00-\x7F]+") - if pattern.findall(values["pth_path"]): - sg.popup(i18n("pth文件路径不可包含中文")) - return False - if pattern.findall(values["index_path"]): - sg.popup(i18n("index文件路径不可包含中文")) - return False - self.set_devices(values["sg_input_device"], values["sg_output_device"]) - self.config.pth_path = values["pth_path"] - self.config.index_path = values["index_path"] - self.config.threhold = values["threhold"] - self.config.pitch = values["pitch"] - self.config.block_time = values["block_time"] - self.config.crossfade_time = values["crossfade_length"] - self.config.extra_time = values["extra_time"] - self.config.I_noise_reduce = values["I_noise_reduce"] - self.config.O_noise_reduce = values["O_noise_reduce"] - self.config.index_rate = values["index_rate"] - self.config.n_cpu = values["n_cpu"] - self.config.f0method = ["pm", "harvest", "crepe", "rmvpe"][ - [ - values["pm"], - values["harvest"], - values["crepe"], - values["rmvpe"], - ].index(True) - ] - return True - - def start_vc(self): - torch.cuda.empty_cache() - self.flag_vc = True - self.rvc = RVC( - self.config.pitch, - self.config.pth_path, - self.config.index_path, - self.config.index_rate, - self.config.n_cpu, - inp_q, - opt_q, - device, - ) - self.config.samplerate = self.rvc.tgt_sr - self.config.crossfade_time = min( - self.config.crossfade_time, self.config.block_time - ) - self.block_frame = int(self.config.block_time * self.config.samplerate) - self.crossfade_frame = int( - self.config.crossfade_time * self.config.samplerate - ) - self.sola_search_frame = int(0.01 * self.config.samplerate) - self.extra_frame = int(self.config.extra_time * self.config.samplerate) - self.zc = self.rvc.tgt_sr // 100 - self.input_wav: np.ndarray = np.zeros( - int( - np.ceil( - ( - self.extra_frame - + self.crossfade_frame - + self.sola_search_frame - + self.block_frame - ) - / self.zc - ) - * self.zc - ), - dtype="float32", - ) - self.output_wav_cache: torch.Tensor = torch.zeros( - int( - np.ceil( - ( - self.extra_frame - + self.crossfade_frame - + self.sola_search_frame - + self.block_frame - ) - / self.zc - ) - * self.zc - ), - device=device, - dtype=torch.float32, - ) - self.pitch: np.ndarray = np.zeros( - self.input_wav.shape[0] // self.zc, - dtype="int32", - ) - self.pitchf: np.ndarray = np.zeros( - self.input_wav.shape[0] // self.zc, - dtype="float64", - ) - self.output_wav: torch.Tensor = torch.zeros( - self.block_frame, device=device, dtype=torch.float32 - ) - self.sola_buffer: torch.Tensor = torch.zeros( - self.crossfade_frame, device=device, dtype=torch.float32 - ) - self.fade_in_window: torch.Tensor = torch.linspace( - 0.0, 1.0, 
steps=self.crossfade_frame, device=device, dtype=torch.float32 - ) - self.fade_out_window: torch.Tensor = 1 - self.fade_in_window - self.resampler = tat.Resample( - orig_freq=self.config.samplerate, new_freq=16000, dtype=torch.float32 - ).to(device) - thread_vc = threading.Thread(target=self.soundinput) - thread_vc.start() - - def soundinput(self): - """ - 接受音频输入 - """ - channels = 1 if sys.platform == "darwin" else 2 - with sd.Stream( - channels=channels, - callback=self.audio_callback, - blocksize=self.block_frame, - samplerate=self.config.samplerate, - dtype="float32", - ): - while self.flag_vc: - time.sleep(self.config.block_time) - print("Audio block passed.") - print("ENDing VC") - - def audio_callback( - self, indata: np.ndarray, outdata: np.ndarray, frames, times, status - ): - """ - 音频处理 - """ - start_time = time.perf_counter() - indata = librosa.to_mono(indata.T) - if self.config.I_noise_reduce: - indata[:] = nr.reduce_noise(y=indata, sr=self.config.samplerate) - """noise gate""" - frame_length = 2048 - hop_length = 1024 - rms = librosa.feature.rms( - y=indata, frame_length=frame_length, hop_length=hop_length - ) - if self.config.threhold > -60: - db_threhold = ( - librosa.amplitude_to_db(rms, ref=1.0)[0] < self.config.threhold - ) - for i in range(db_threhold.shape[0]): - if db_threhold[i]: - indata[i * hop_length : (i + 1) * hop_length] = 0 - self.input_wav[:] = np.append(self.input_wav[self.block_frame :], indata) - # infer - inp = torch.from_numpy(self.input_wav).to(device) - ##0 - res1 = self.resampler(inp) - ###55% - rate1 = self.block_frame / ( - self.extra_frame - + self.crossfade_frame - + self.sola_search_frame - + self.block_frame - ) - rate2 = ( - self.crossfade_frame + self.sola_search_frame + self.block_frame - ) / ( - self.extra_frame - + self.crossfade_frame - + self.sola_search_frame - + self.block_frame - ) - res2 = self.rvc.infer( - res1, - res1[-self.block_frame :].cpu().numpy(), - rate1, - rate2, - self.pitch, - self.pitchf, - self.config.f0method, - ) - self.output_wav_cache[-res2.shape[0] :] = res2 - infer_wav = self.output_wav_cache[ - -self.crossfade_frame - self.sola_search_frame - self.block_frame : - ] - # SOLA algorithm from https://github.com/yxlllc/DDSP-SVC - cor_nom = F.conv1d( - infer_wav[None, None, : self.crossfade_frame + self.sola_search_frame], - self.sola_buffer[None, None, :], - ) - cor_den = torch.sqrt( - F.conv1d( - infer_wav[ - None, None, : self.crossfade_frame + self.sola_search_frame - ] - ** 2, - torch.ones(1, 1, self.crossfade_frame, device=device), - ) - + 1e-8 - ) - if sys.platform == "darwin": - cor_nom = cor_nom.cpu() - cor_den = cor_den.cpu() - sola_offset = torch.argmax(cor_nom[0, 0] / cor_den[0, 0]) - print("sola offset: " + str(int(sola_offset))) - self.output_wav[:] = infer_wav[sola_offset : sola_offset + self.block_frame] - self.output_wav[: self.crossfade_frame] *= self.fade_in_window - self.output_wav[: self.crossfade_frame] += self.sola_buffer[:] - # crossfade - if sola_offset < self.sola_search_frame: - self.sola_buffer[:] = ( - infer_wav[ - -self.sola_search_frame - - self.crossfade_frame - + sola_offset : -self.sola_search_frame - + sola_offset - ] - * self.fade_out_window - ) - else: - self.sola_buffer[:] = ( - infer_wav[-self.crossfade_frame :] * self.fade_out_window - ) - if self.config.O_noise_reduce: - if sys.platform == "darwin": - noise_reduced_signal = nr.reduce_noise( - y=self.output_wav[:].cpu().numpy(), sr=self.config.samplerate - ) - outdata[:] = noise_reduced_signal[:, np.newaxis] - else: - outdata[:] = 
np.tile( - nr.reduce_noise( - y=self.output_wav[:].cpu().numpy(), - sr=self.config.samplerate, - ), - (2, 1), - ).T - else: - if sys.platform == "darwin": - outdata[:] = self.output_wav[:].cpu().numpy()[:, np.newaxis] - else: - outdata[:] = self.output_wav[:].repeat(2, 1).t().cpu().numpy() - total_time = time.perf_counter() - start_time - self.window["infer_time"].update(int(total_time * 1000)) - print("infer time:" + str(total_time)) - - def get_devices(self, update: bool = True): - """获取设备列表""" - if update: - sd._terminate() - sd._initialize() - devices = sd.query_devices() - hostapis = sd.query_hostapis() - for hostapi in hostapis: - for device_idx in hostapi["devices"]: - devices[device_idx]["hostapi_name"] = hostapi["name"] - input_devices = [ - f"{d['name']} ({d['hostapi_name']})" - for d in devices - if d["max_input_channels"] > 0 - ] - output_devices = [ - f"{d['name']} ({d['hostapi_name']})" - for d in devices - if d["max_output_channels"] > 0 - ] - input_devices_indices = [ - d["index"] if "index" in d else d["name"] - for d in devices - if d["max_input_channels"] > 0 - ] - output_devices_indices = [ - d["index"] if "index" in d else d["name"] - for d in devices - if d["max_output_channels"] > 0 - ] - return ( - input_devices, - output_devices, - input_devices_indices, - output_devices_indices, - ) - - def set_devices(self, input_device, output_device): - """设置输出设备""" - ( - input_devices, - output_devices, - input_device_indices, - output_device_indices, - ) = self.get_devices() - sd.default.device[0] = input_device_indices[ - input_devices.index(input_device) - ] - sd.default.device[1] = output_device_indices[ - output_devices.index(output_device) - ] - print("input device:" + str(sd.default.device[0]) + ":" + str(input_device)) - print( - "output device:" + str(sd.default.device[1]) + ":" + str(output_device) - ) - - gui = GUI() diff --git a/guidml.py b/guidml.py deleted file mode 100644 index aadf22d69..000000000 --- a/guidml.py +++ /dev/null @@ -1,710 +0,0 @@ -""" -0416后的更新: - 引入config中half - 重建npy而不用填写 - v2支持 - 无f0模型支持 - 修复 - - int16: - 增加无索引支持 - f0算法改harvest(怎么看就只有这个会影响CPU占用),但是不这么改效果不好 -""" -import os, sys, traceback, re - -import json - -now_dir = os.getcwd() -sys.path.append(now_dir) -from config import Config - -Config = Config() - -import torch_directml -import PySimpleGUI as sg -import sounddevice as sd -import noisereduce as nr -import numpy as np -from fairseq import checkpoint_utils -import librosa, torch, pyworld, faiss, time, threading -import torch.nn.functional as F -import torchaudio.transforms as tat -import scipy.signal as signal - - -# import matplotlib.pyplot as plt -from lib.infer_pack.models import ( - SynthesizerTrnMs256NSFsid, - SynthesizerTrnMs256NSFsid_nono, - SynthesizerTrnMs768NSFsid, - SynthesizerTrnMs768NSFsid_nono, -) -from i18n import I18nAuto - -i18n = I18nAuto() -device = torch_directml.device(torch_directml.default_device()) -current_dir = os.getcwd() - - -class RVC: - def __init__( - self, key, hubert_path, pth_path, index_path, npy_path, index_rate - ) -> None: - """ - 初始化 - """ - try: - self.f0_up_key = key - self.time_step = 160 / 16000 * 1000 - self.f0_min = 50 - self.f0_max = 1100 - self.f0_mel_min = 1127 * np.log(1 + self.f0_min / 700) - self.f0_mel_max = 1127 * np.log(1 + self.f0_max / 700) - self.sr = 16000 - self.window = 160 - if index_rate != 0: - self.index = faiss.read_index(index_path) - # self.big_npy = np.load(npy_path) - self.big_npy = self.index.reconstruct_n(0, self.index.ntotal) - print("index search enabled") - 
self.index_rate = index_rate - model_path = hubert_path - print("load model(s) from {}".format(model_path)) - models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task( - [model_path], - suffix="", - ) - self.model = models[0] - self.model = self.model.to(device) - if Config.is_half: - self.model = self.model.half() - else: - self.model = self.model.float() - self.model.eval() - cpt = torch.load(pth_path, map_location="cpu") - self.tgt_sr = cpt["config"][-1] - cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] # n_spk - self.if_f0 = cpt.get("f0", 1) - self.version = cpt.get("version", "v1") - if self.version == "v1": - if self.if_f0 == 1: - self.net_g = SynthesizerTrnMs256NSFsid( - *cpt["config"], is_half=Config.is_half - ) - else: - self.net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"]) - elif self.version == "v2": - if self.if_f0 == 1: - self.net_g = SynthesizerTrnMs768NSFsid( - *cpt["config"], is_half=Config.is_half - ) - else: - self.net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"]) - del self.net_g.enc_q - print(self.net_g.load_state_dict(cpt["weight"], strict=False)) - self.net_g.eval().to(device) - if Config.is_half: - self.net_g = self.net_g.half() - else: - self.net_g = self.net_g.float() - except: - print(traceback.format_exc()) - - def get_f0(self, x, f0_up_key, inp_f0=None): - x_pad = 1 - f0_min = 50 - f0_max = 1100 - f0_mel_min = 1127 * np.log(1 + f0_min / 700) - f0_mel_max = 1127 * np.log(1 + f0_max / 700) - f0, t = pyworld.harvest( - x.astype(np.double), - fs=self.sr, - f0_ceil=f0_max, - f0_floor=f0_min, - frame_period=10, - ) - f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr) - f0 = signal.medfilt(f0, 3) - f0 *= pow(2, f0_up_key / 12) - # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) - tf0 = self.sr // self.window # 每秒f0点数 - if inp_f0 is not None: - delta_t = np.round( - (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1 - ).astype("int16") - replace_f0 = np.interp( - list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1] - ) - shape = f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)].shape[0] - f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)] = replace_f0[:shape] - # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) - f0bak = f0.copy() - f0_mel = 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / ( - f0_mel_max - f0_mel_min - ) + 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > 255] = 255 - f0_coarse = np.rint(f0_mel).astype(np.int) - return f0_coarse, f0bak # 1-0 - - def infer(self, feats: torch.Tensor) -> np.ndarray: - """ - 推理函数 - """ - audio = feats.clone().cpu().numpy() - assert feats.dim() == 1, feats.dim() - feats = feats.view(1, -1) - padding_mask = torch.BoolTensor(feats.shape).fill_(False) - if Config.is_half: - feats = feats.half() - else: - feats = feats.float() - inputs = { - "source": feats.to(device), - "padding_mask": padding_mask.to(device), - "output_layer": 9 if self.version == "v1" else 12, - } - torch.cuda.synchronize() - with torch.no_grad(): - logits = self.model.extract_features(**inputs) - feats = ( - self.model.final_proj(logits[0]) if self.version == "v1" else logits[0] - ) - - ####索引优化 - try: - if ( - hasattr(self, "index") - and hasattr(self, "big_npy") - and self.index_rate != 0 - ): - npy = feats[0].cpu().numpy().astype("float32") - score, ix = self.index.search(npy, k=8) - weight = np.square(1 / score) - weight /= weight.sum(axis=1, keepdims=True) - npy = np.sum(self.big_npy[ix] * 
np.expand_dims(weight, axis=2), axis=1) - if Config.is_half: - npy = npy.astype("float16") - feats = ( - torch.from_numpy(npy).unsqueeze(0).to(device) * self.index_rate - + (1 - self.index_rate) * feats - ) - else: - print("index search FAIL or disabled") - except: - traceback.print_exc() - print("index search FAIL") - feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1) - torch.cuda.synchronize() - print(feats.shape) - if self.if_f0 == 1: - pitch, pitchf = self.get_f0(audio, self.f0_up_key) - p_len = min(feats.shape[1], 13000, pitch.shape[0]) # 太大了爆显存 - else: - pitch, pitchf = None, None - p_len = min(feats.shape[1], 13000) # 太大了爆显存 - torch.cuda.synchronize() - # print(feats.shape,pitch.shape) - feats = feats[:, :p_len, :] - if self.if_f0 == 1: - pitch = pitch[:p_len] - pitchf = pitchf[:p_len] - pitch = torch.LongTensor(pitch).unsqueeze(0).to(device) - pitchf = torch.FloatTensor(pitchf).unsqueeze(0).to(device) - p_len = torch.LongTensor([p_len]).to(device) - ii = 0 # sid - sid = torch.LongTensor([ii]).to(device) - with torch.no_grad(): - if self.if_f0 == 1: - infered_audio = ( - self.net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0] - .data.cpu() - .float() - ) - else: - infered_audio = ( - self.net_g.infer(feats, p_len, sid)[0][0, 0].data.cpu().float() - ) - torch.cuda.synchronize() - return infered_audio - - -class GUIConfig: - def __init__(self) -> None: - self.hubert_path: str = "" - self.pth_path: str = "" - self.index_path: str = "" - self.npy_path: str = "" - self.pitch: int = 12 - self.samplerate: int = 44100 - self.block_time: float = 1.0 # s - self.buffer_num: int = 1 - self.threhold: int = -30 - self.crossfade_time: float = 0.08 - self.extra_time: float = 0.04 - self.I_noise_reduce = False - self.O_noise_reduce = False - self.index_rate = 0.3 - - -class GUI: - def __init__(self) -> None: - self.config = GUIConfig() - self.flag_vc = False - - self.launcher() - - def load(self): - ( - input_devices, - output_devices, - input_devices_indices, - output_devices_indices, - ) = self.get_devices() - try: - with open("values1.json", "r") as j: - data = json.load(j) - except: - with open("values1.json", "w") as j: - data = { - "pth_path": "", - "index_path": "", - "sg_input_device": input_devices[ - input_devices_indices.index(sd.default.device[0]) - ], - "sg_output_device": output_devices[ - output_devices_indices.index(sd.default.device[1]) - ], - "threhold": "-45", - "pitch": "0", - "index_rate": "0", - "block_time": "1", - "crossfade_length": "0.04", - "extra_time": "1", - } - return data - - def launcher(self): - data = self.load() - sg.theme("LightBlue3") - input_devices, output_devices, _, _ = self.get_devices() - layout = [ - [ - sg.Frame( - title=i18n("加载模型"), - layout=[ - [ - sg.Input( - default_text="hubert_base.pt", - key="hubert_path", - disabled=True, - ), - sg.FileBrowse( - i18n("Hubert模型"), - initial_folder=os.path.join(os.getcwd()), - file_types=(("pt files", "*.pt"),), - ), - ], - [ - sg.Input( - default_text=data.get("pth_path", ""), - key="pth_path", - ), - sg.FileBrowse( - i18n("选择.pth文件"), - initial_folder=os.path.join(os.getcwd(), "weights"), - file_types=(("weight files", "*.pth"),), - ), - ], - [ - sg.Input( - default_text=data.get("index_path", ""), - key="index_path", - ), - sg.FileBrowse( - i18n("选择.index文件"), - initial_folder=os.path.join(os.getcwd(), "logs"), - file_types=(("index files", "*.index"),), - ), - ], - [ - sg.Input( - default_text="你不需要填写这个You don't need write this.", - key="npy_path", - disabled=True, - ), - 
sg.FileBrowse( - i18n("选择.npy文件"), - initial_folder=os.path.join(os.getcwd(), "logs"), - file_types=(("feature files", "*.npy"),), - ), - ], - ], - ) - ], - [ - sg.Frame( - layout=[ - [ - sg.Text(i18n("输入设备")), - sg.Combo( - input_devices, - key="sg_input_device", - default_value=data.get("sg_input_device", ""), - ), - ], - [ - sg.Text(i18n("输出设备")), - sg.Combo( - output_devices, - key="sg_output_device", - default_value=data.get("sg_output_device", ""), - ), - ], - ], - title=i18n("音频设备(请使用同种类驱动)"), - ) - ], - [ - sg.Frame( - layout=[ - [ - sg.Text(i18n("响应阈值")), - sg.Slider( - range=(-60, 0), - key="threhold", - resolution=1, - orientation="h", - default_value=data.get("threhold", ""), - ), - ], - [ - sg.Text(i18n("音调设置")), - sg.Slider( - range=(-24, 24), - key="pitch", - resolution=1, - orientation="h", - default_value=data.get("pitch", ""), - ), - ], - [ - sg.Text(i18n("Index Rate")), - sg.Slider( - range=(0.0, 1.0), - key="index_rate", - resolution=0.01, - orientation="h", - default_value=data.get("index_rate", ""), - ), - ], - ], - title=i18n("常规设置"), - ), - sg.Frame( - layout=[ - [ - sg.Text(i18n("采样长度")), - sg.Slider( - range=(0.1, 3.0), - key="block_time", - resolution=0.1, - orientation="h", - default_value=data.get("block_time", ""), - ), - ], - [ - sg.Text(i18n("淡入淡出长度")), - sg.Slider( - range=(0.01, 0.15), - key="crossfade_length", - resolution=0.01, - orientation="h", - default_value=data.get("crossfade_length", ""), - ), - ], - [ - sg.Text(i18n("额外推理时长")), - sg.Slider( - range=(0.05, 3.00), - key="extra_time", - resolution=0.01, - orientation="h", - default_value=data.get("extra_time", ""), - ), - ], - [ - sg.Checkbox(i18n("输入降噪"), key="I_noise_reduce"), - sg.Checkbox(i18n("输出降噪"), key="O_noise_reduce"), - ], - ], - title=i18n("性能设置"), - ), - ], - [ - sg.Button(i18n("开始音频转换"), key="start_vc"), - sg.Button(i18n("停止音频转换"), key="stop_vc"), - sg.Text(i18n("推理时间(ms):")), - sg.Text("0", key="infer_time"), - ], - ] - self.window = sg.Window("RVC - GUI", layout=layout) - self.event_handler() - - def event_handler(self): - while True: - event, values = self.window.read() - if event == sg.WINDOW_CLOSED: - self.flag_vc = False - exit() - if event == "start_vc" and self.flag_vc == False: - if self.set_values(values) == True: - print("using_cuda:" + str(torch.cuda.is_available())) - self.start_vc() - settings = { - "pth_path": values["pth_path"], - "index_path": values["index_path"], - "sg_input_device": values["sg_input_device"], - "sg_output_device": values["sg_output_device"], - "threhold": values["threhold"], - "pitch": values["pitch"], - "index_rate": values["index_rate"], - "block_time": values["block_time"], - "crossfade_length": values["crossfade_length"], - "extra_time": values["extra_time"], - } - with open("values1.json", "w") as j: - json.dump(settings, j) - if event == "stop_vc" and self.flag_vc == True: - self.flag_vc = False - - def set_values(self, values): - if len(values["pth_path"].strip()) == 0: - sg.popup(i18n("请选择pth文件")) - return False - if len(values["index_path"].strip()) == 0: - sg.popup(i18n("请选择index文件")) - return False - pattern = re.compile("[^\x00-\x7F]+") - if pattern.findall(values["hubert_path"]): - sg.popup(i18n("hubert模型路径不可包含中文")) - return False - if pattern.findall(values["pth_path"]): - sg.popup(i18n("pth文件路径不可包含中文")) - return False - if pattern.findall(values["index_path"]): - sg.popup(i18n("index文件路径不可包含中文")) - return False - self.set_devices(values["sg_input_device"], values["sg_output_device"]) - self.config.hubert_path = 
os.path.join(current_dir, "hubert_base.pt") - self.config.pth_path = values["pth_path"] - self.config.index_path = values["index_path"] - self.config.npy_path = values["npy_path"] - self.config.threhold = values["threhold"] - self.config.pitch = values["pitch"] - self.config.block_time = values["block_time"] - self.config.crossfade_time = values["crossfade_length"] - self.config.extra_time = values["extra_time"] - self.config.I_noise_reduce = values["I_noise_reduce"] - self.config.O_noise_reduce = values["O_noise_reduce"] - self.config.index_rate = values["index_rate"] - return True - - def start_vc(self): - torch.cuda.empty_cache() - self.flag_vc = True - self.block_frame = int(self.config.block_time * self.config.samplerate) - self.crossfade_frame = int(self.config.crossfade_time * self.config.samplerate) - self.sola_search_frame = int(0.012 * self.config.samplerate) - self.delay_frame = int(0.01 * self.config.samplerate) # 往前预留0.02s - self.extra_frame = int(self.config.extra_time * self.config.samplerate) - self.rvc = None - self.rvc = RVC( - self.config.pitch, - self.config.hubert_path, - self.config.pth_path, - self.config.index_path, - self.config.npy_path, - self.config.index_rate, - ) - self.input_wav: np.ndarray = np.zeros( - self.extra_frame - + self.crossfade_frame - + self.sola_search_frame - + self.block_frame, - dtype="float32", - ) - self.output_wav: torch.Tensor = torch.zeros( - self.block_frame, device=device, dtype=torch.float32 - ) - self.sola_buffer: torch.Tensor = torch.zeros( - self.crossfade_frame, device=device, dtype=torch.float32 - ) - self.fade_in_window: torch.Tensor = torch.linspace( - 0.0, 1.0, steps=self.crossfade_frame, device=device, dtype=torch.float32 - ) - self.fade_out_window: torch.Tensor = 1 - self.fade_in_window - self.resampler1 = tat.Resample( - orig_freq=self.config.samplerate, new_freq=16000, dtype=torch.float32 - ) - self.resampler2 = tat.Resample( - orig_freq=self.rvc.tgt_sr, - new_freq=self.config.samplerate, - dtype=torch.float32, - ) - thread_vc = threading.Thread(target=self.soundinput) - thread_vc.start() - - def soundinput(self): - """ - 接受音频输入 - """ - with sd.Stream( - channels=2, - callback=self.audio_callback, - blocksize=self.block_frame, - samplerate=self.config.samplerate, - dtype="float32", - ): - while self.flag_vc: - time.sleep(self.config.block_time) - print("Audio block passed.") - print("ENDing VC") - - def audio_callback( - self, indata: np.ndarray, outdata: np.ndarray, frames, times, status - ): - """ - 音频处理 - """ - start_time = time.perf_counter() - indata = librosa.to_mono(indata.T) - if self.config.I_noise_reduce: - indata[:] = nr.reduce_noise(y=indata, sr=self.config.samplerate) - - """noise gate""" - frame_length = 2048 - hop_length = 1024 - rms = librosa.feature.rms( - y=indata, frame_length=frame_length, hop_length=hop_length - ) - db_threhold = librosa.amplitude_to_db(rms, ref=1.0)[0] < self.config.threhold - # print(rms.shape,db.shape,db) - for i in range(db_threhold.shape[0]): - if db_threhold[i]: - indata[i * hop_length : (i + 1) * hop_length] = 0 - self.input_wav[:] = np.append(self.input_wav[self.block_frame :], indata) - - # infer - print("input_wav:" + str(self.input_wav.shape)) - # print('infered_wav:'+str(infer_wav.shape)) - infer_wav: torch.Tensor = self.resampler2( - self.rvc.infer(self.resampler1(torch.from_numpy(self.input_wav))) - )[-self.crossfade_frame - self.sola_search_frame - self.block_frame :].to( - device - ) - print("infer_wav:" + str(infer_wav.shape)) - - # SOLA algorithm from 
https://github.com/yxlllc/DDSP-SVC - cor_nom = F.conv1d( - infer_wav[None, None, : self.crossfade_frame + self.sola_search_frame], - self.sola_buffer[None, None, :], - ) - cor_den = torch.sqrt( - F.conv1d( - infer_wav[None, None, : self.crossfade_frame + self.sola_search_frame] - ** 2, - torch.ones(1, 1, self.crossfade_frame, device=device), - ) - + 1e-8 - ) - sola_offset = torch.argmax(cor_nom[0, 0] / cor_den[0, 0]) - print("sola offset: " + str(int(sola_offset))) - - # crossfade - self.output_wav[:] = infer_wav[sola_offset : sola_offset + self.block_frame] - self.output_wav[: self.crossfade_frame] *= self.fade_in_window - self.output_wav[: self.crossfade_frame] += self.sola_buffer[:] - if sola_offset < self.sola_search_frame: - self.sola_buffer[:] = ( - infer_wav[ - -self.sola_search_frame - - self.crossfade_frame - + sola_offset : -self.sola_search_frame - + sola_offset - ] - * self.fade_out_window - ) - else: - self.sola_buffer[:] = ( - infer_wav[-self.crossfade_frame :] * self.fade_out_window - ) - - if self.config.O_noise_reduce: - outdata[:] = np.tile( - nr.reduce_noise( - y=self.output_wav[:].cpu().numpy(), sr=self.config.samplerate - ), - (2, 1), - ).T - else: - outdata[:] = self.output_wav[:].repeat(2, 1).t().cpu().numpy() - total_time = time.perf_counter() - start_time - self.window["infer_time"].update(int(total_time * 1000)) - print("infer time:" + str(total_time)) - - def get_devices(self, update: bool = True): - """获取设备列表""" - if update: - sd._terminate() - sd._initialize() - devices = sd.query_devices() - hostapis = sd.query_hostapis() - for hostapi in hostapis: - for device_idx in hostapi["devices"]: - devices[device_idx]["hostapi_name"] = hostapi["name"] - input_devices = [ - f"{d['name']} ({d['hostapi_name']})" - for d in devices - if d["max_input_channels"] > 0 - ] - output_devices = [ - f"{d['name']} ({d['hostapi_name']})" - for d in devices - if d["max_output_channels"] > 0 - ] - input_devices_indices = [ - d["index"] if "index" in d else d["name"] - for d in devices - if d["max_input_channels"] > 0 - ] - output_devices_indices = [ - d["index"] if "index" in d else d["name"] - for d in devices - if d["max_output_channels"] > 0 - ] - return ( - input_devices, - output_devices, - input_devices_indices, - output_devices_indices, - ) - - def set_devices(self, input_device, output_device): - """设置输出设备""" - ( - input_devices, - output_devices, - input_device_indices, - output_device_indices, - ) = self.get_devices() - sd.default.device[0] = input_device_indices[input_devices.index(input_device)] - sd.default.device[1] = output_device_indices[ - output_devices.index(output_device) - ] - print("input device:" + str(sd.default.device[0]) + ":" + str(input_device)) - print("output device:" + str(sd.default.device[1]) + ":" + str(output_device)) - - -gui = GUI() diff --git a/i18n.py b/i18n.py deleted file mode 100644 index 4e3536bb5..000000000 --- a/i18n.py +++ /dev/null @@ -1,43 +0,0 @@ -import json - -def load_language_list(language): - try: - with open(f"./i18n/{language}.json", "r", encoding="utf-8") as f: - return json.load(f) - except FileNotFoundError: - raise FileNotFoundError( - f"Failed to load language file for {language}. Check if the correct .json file exists." - ) - - -class I18nAuto: - """ - A class used for internationalization using JSON language files. 
- - Examples - -------- - >>> i18n = I18nAuto('en_US') - >>> i18n.print() - Using Language: en_US - """ - def __init__(self, language=None): - from locale import getdefaultlocale - language = language or getdefaultlocale()[0] - if not self._language_exists(language): - language = "en_US" - - self.language_map = load_language_list(language) - self.language = language - - @staticmethod - def _language_exists(language): - from os.path import exists - return exists(f"./i18n/{language}.json") - - def __call__(self, key): - """Returns the translation of the given key if it exists, else returns the key itself.""" - return self.language_map.get(key, key) - - def print(self): - """Prints the language currently in use.""" - print(f"Using Language: {self.language}") \ No newline at end of file diff --git a/i18n/en_US.json b/i18n/en_US.json deleted file mode 100644 index 21689e107..000000000 --- a/i18n/en_US.json +++ /dev/null @@ -1,227 +0,0 @@ -{ - "很遗憾您这没有能用的显卡来支持您训练": "Unfortunately, there is no compatible GPU available to support your training.", - "是": "Yes", - "选择你的数据集。": "Select your dataset.", - "更新清单。": "Update list.", - "下载模型": "Download Model", - "下载备份": "Download Backup", - "下载数据集": "Download Dataset", - "下载": "Download", - "网址": "Url:", - "保存前构建索引。": "Build the index before saving.", - "训练结束后保存您的模型。": "Save your model once the training ends.", - "存储类型": "Save type", - "保存模型": "Save model", - "选择模型保存方法": "Choose the method", - "保存所有": "Save all", - "保存 D 和 G": "Save D and G", - "数据集名称": "dataset_name", - "保存声音": "Save voice", - "下载文件:": "Downloading the file: ", - "停止培训": "Stop training", - "最近查看或下载此文件的用户过多": "Too many users have recently viewed or downloaded this file", - "无法从该私人链接获取文件": "Cannot get file from this private link", - "完整下载": "Full download", - "下载模型时发生错误。": "An error occurred downloading", - "正确存储模型": "model saved successfully", - "保存模型...": "Saving the model...", - "保存时未编制索引...": "Saved without index...", - "宓模型": "model_name", - "无模型保存(PTH)": "Saved without inference model...", - "保存模型时发生错误": "An error occurred saving the model", - "您要保存的模型不存在,请确保输入的名称正确。": "The model you want to save does not exist, be sure to enter the correct name.", - "无法下载模型。": "The file could not be downloaded.", - "解压缩出错。": "Unzip error.", - "added.index 文件的路径(如果它没有自动找到该文件)": "Path to your added.index file (if it didn't automatically find it)", - "模型下载成功。": "It has been downloaded successfully.", - "继续提取...": "Proceeding with the extraction...", - "备份已成功上传。": "The Backup has been uploaded successfully.", - "数据集加载成功。": "The Dataset has been loaded successfully.", - "模型已正确加载。": "The Model has been loaded successfully.", - "它用于下载您的推理模型。": "It is used to download your inference models.", - "它用于下载您的训练备份。": "It is used to download your training backups.", - "下载兼容格式(.wav/.flac)的音频数据集以训练模型。": "Download the dataset with the audios in a compatible format (.wav/.flac) to train your model.", - "未找到可上传的相关文件": "No relevant file was found to upload.", - "该模型可用于推理,并有 .index 文件。": "The model works for inference, and has the .index file.", - "该模型可用于推理,但没有 .index 文件。": "The model works for inference, but it doesn't have the .index file.", - "这可能需要几分钟时间,请稍候...": "This may take a few minutes, please wait...", - "资源": "Resources", - "step1:正在处理数据": "Step 1: Processing data", - "step2a:无需提取音高": "Step 2a: Skipping pitch extraction", - "step2b:正在提取特征": "Step 2b: Extracting features", - "step3a:正在训练模型": "Step 3a: Model training started", - "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "Training complete. 
You can check the training logs in the console or the 'train.log' file under the experiment folder.", - "全流程结束!": "All processes have been completed!", - "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录使用需遵守的协议-LICENSE.txt.": "This software is open source under the MIT license. The author does not have any control over the software. Users who use the software and distribute the sounds exported by the software are solely responsible.
If you do not agree with this clause, you cannot use or reference any codes and files within the software package. See the root directory Agreement-LICENSE.txt for details.", - "模型推理": "Model Inference", - "推理音色": "Inferencing voice:", - "模型名称": "Model_Name", - "数据集名": "Dataset_Name", - "模型是否具有俯仰引导功能": "Whether the model has pitch guidance.", - "是否只保存最新的 .ckpt 文件以节省硬盘空间": "Whether to save only the latest .ckpt file to save hard drive space", - "将所有训练集缓存到 GPU 内存中。缓存小型数据集(少于 10 分钟)可以加快训练速度,但缓存大型数据集会消耗大量 GPU 内存,可能无法显著提高速度": "Cache all training sets to GPU memory. Caching small datasets (less than 10 minutes) can speed up training, but caching large datasets will consume a lot of GPU memory and may not provide much speed improvement", - "在每个保存点将一个小的最终模型保存到 权重 文件夹中": "Save a small final model to the 'weights' folder at each save point", - "刷新音色列表和索引路径": "Refresh voice list, index path and audio files", - "卸载音色省显存": "Unload voice to save GPU memory:", - "请选择说话人id": "Select Speaker/Singer ID:", - "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "Recommended +12 key for male to female conversion, and -12 key for female to male conversion. If the sound range goes too far and the voice is distorted, you can also adjust it to the appropriate range by yourself.", - "变调(整数, 半音数量, 升八度12降八度-12)": "Transpose (integer, number of semitones, raise by an octave: 12, lower by an octave: -12):", - "输入待处理音频文件路径(默认是正确格式示例)": "Enter the path of the audio file to be processed (default is the correct format example):", - "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "Select the pitch extraction algorithm ('pm': faster extraction but lower-quality speech; 'harvest': better bass but extremely slow; 'crepe': better quality but GPU intensive):", - "crepe_hop_length": "Mangio-Crepe Hop Length (Only applies to mangio-crepe): Hop length refers to the time it takes for the speaker to jump to a dramatic pitch. Lower hop lengths take more time to infer but are more pitch accurate.", - "特征检索库文件路径": "Feature search database file path", - ">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": "If >=3: apply median filtering to the harvested pitch results. The value represents the filter radius and can reduce breathiness.", - "特征检索库文件路径,为空则使用下拉的选择结果": "Path to the feature index file. Leave blank to use the selected result from the dropdown:", - "自动检测index路径,下拉式选择(dropdown)": "Auto-detect index path and select from the dropdown", - "特征文件路径": "Path to feature file:", - "检索特征占比": "Search feature ratio:", - "后处理重采样至最终采样率,0为不进行重采样": "Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling:", - "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "Use the volume envelope of the input to replace or mix with the volume envelope of the output. The closer the ratio is to 1, the more the output envelope is used:", - "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy:", - "F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "F0 curve file (optional). One pitch per line. Replaces the default F0 and pitch modulation:", - "转换": "Convert", - "输出信息": "Output information", - "输出音频(右下角三个点,点了可以下载)": "Export audio (click on the three dots in the lower right corner to download)", - "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "Batch conversion. 
Enter the folder containing the audio files to be converted or upload multiple audio files. The converted audio will be output in the specified folder (default: 'opt').", - "指定输出文件夹": "Specify output folder:", - "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "Enter the path of the audio folder to be processed (copy it from the address bar of the file manager):", - "也可批量输入音频文件, 二选一, 优先读文件夹": "You can also input audio files in batches. Choose one of the two options. Priority is given to reading from the folder.", - "导出文件格式": "Export file format", - "伴奏人声分离&去混响&去回声": "UVR5", - "人声伴奏分离批量处理, 使用UVR5模型。
合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。
模型分为三类:
1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;
2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;
3、去混响、去延迟模型(by FoxJoy):
  (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;
 (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。
去混响/去延迟,附:
1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;
2、MDX-Net-Dereverb模型挺慢的;
3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": " ", - "输入待处理音频文件夹路径": "Enter the path of the audio folder to be processed:", - "模型": "Model", - "指定输出主人声文件夹": "Specify the output folder for vocals:", - "指定输出非主人声文件夹": "Specify the output folder for accompaniment:", - "训练": "Train", - "step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "Step 1: Fill in the experimental configuration. Experimental data is stored in the 'logs' folder, with each experiment having a separate folder. Manually enter the experiment name path, which contains the experimental configuration, logs, and trained model files.", - "输入实验名": "Enter the experiment name:", - "目标采样率": "Target sample rate:", - "模型是否带音高指导(唱歌一定要, 语音可以不要)": "Whether the model has pitch guidance (required for singing, optional for speech):", - "版本": "Version", - "提取音高和处理数据使用的CPU进程数": "Number of CPU processes used for pitch extraction and data processing:", - "step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "Step 2a: Automatically traverse all files in the training folder that can be decoded into audio and perform slice normalization. Generates 2 wav folders in the experiment directory. Currently, only single-singer/speaker training is supported.", - "输入训练文件夹路径": "Enter the path of the training folder:", - "请指定说话人id": "Please specify the speaker/singer ID:", - "自动检测音频路径并从下拉菜单中选择:": "Auto detect audio path and select from the dropdown:", - "在要处理的音频文件路径中添加音频名称(默认为正确格式示例),从下拉列表中移除使用音频的路径:": "Add audio's name to the path to the audio file to be processed (default is the correct format example) Remove the path to use an audio from the dropdown list:", - "高级设置": "Advanced Settings", - "设置": "Settings", - "高级设置[批量]": "Advanced Settings [Batch]", - "地位": "Status", - "处理数据": "Process data", - "将音频拖到此处,然后点击刷新按钮": "Drag your audio here and hit the refresh button", - "或录制音频": "Or record an audio.", - "共振声移动推理音频": "Formant shift inference audio", - "用于将男性转换为女性,反之亦然": "Used for male to female and vice-versa conversions", - "step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "Step 2b: Use CPU to extract pitch (if the model has pitch), use GPU to extract features (select GPU index):", - "以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "Enter the GPU index(es) separated by '-', e.g., 0-1-2 to use GPU 0, 1, and 2:", - "显卡信息": "GPU Information", - "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢": "Select the pitch extraction algorithm ('pm': faster extraction but lower-quality speech; 'dio': improved speech but slower extraction; 'harvest': better quality but slower extraction):", - "特征提取": "Feature extraction", - "step3: 填写训练设置, 开始训练模型和索引": "Step 3: Fill in the training settings and start training the model and index", - "步骤4:单击模型的导出最低点后,在模型图上的导出最低点,新文件将位于logs/[yourmodelname]/lowestvals/folder中": "Step 4: Export lowest points on a graph of the model After clicking on Export lowest points of a model, The new files will be located in logs/[yourmodelname]/lowestvals/ folder", - "保存频率save_every_epoch": "Save frequency (save_every_epoch):", - "总训练轮数total_epoch": "Total training epochs (total_epoch):", - "每张显卡的batch_size": "Batch size per GPU:", - "是否仅保存最新的ckpt文件以节省硬盘空间": "Save only the latest '.ckpt' file to save disk space:", - "否": "No", - "是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "Cache all training sets to GPU memory. 
Caching small datasets (less than 10 minutes) can speed up training, but caching large datasets will consume a lot of GPU memory and may not provide much speed improvement:", - "是否在每次保存时间点将最终小模型保存至weights文件夹": "Save a small final model to the 'weights' folder at each save point:", - "加载预训练底模G路径": "Load pre-trained base model G path:", - "加载预训练底模D路径": "Load pre-trained base model D path:", - "训练模型": "Train model", - "训练特征索引": "Train feature index", - "一键训练": "One-click training", - "ckpt处理": "ckpt Processing", - "模型融合, 可用于测试音色融合": "Model fusion, can be used to test timbre fusion", - "A模型路径": "Path to Model A:", - "B模型路径": "Path to Model B:", - "A模型权重": "Weight (w) for Model A:", - "模型是否带音高指导": "Whether the model has pitch guidance:", - "要置入的模型信息": "Model information to be placed:", - "保存的模型名不带后缀": "Saved model name (without extension):", - "模型版本型号": "Model architecture version:", - "融合": "Fusion", - "修改模型信息(仅支持weights文件夹下提取的小模型文件)": "Modify model information (only supported for small model files extracted from the 'weights' folder)", - "模型路径": "Path to Model:", - "要改的模型信息": "Model information to be modified:", - "保存的文件名, 默认空为和源文件同名": "Save file name (default: same as the source file):", - "修改": "Modify", - "查看模型信息(仅支持weights文件夹下提取的小模型文件)": "View model information (only supported for small model files extracted from the 'weights' folder)", - "查看": "View", - "模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "Model extraction (enter the path of the large file model under the 'logs' folder). This is useful if you want to stop training halfway and manually extract and save a small model file, or if you want to test an intermediate model:", - "保存名": "Save name:", - "模型是否带音高指导,1是0否": "Whether the model has pitch guidance (1: yes, 0: no):", - "提取": "Extract", - "Onnx导出": "Export Onnx", - "RVC模型路径": "RVC Model Path:", - "Onnx输出路径": "Onnx Export Path:", - "MoeVS模型": "MoeVS Model", - "导出Onnx模型": "Export Onnx Model", - "常见问题解答": "FAQ (Frequently Asked Questions)", - "招募音高曲线前端编辑器": "Recruiting front-end editors for pitch curves", - "加开发群联系我xxxxx": "Join the development group and contact me at xxxxx", - "点击查看交流、问题反馈群号": "Click to view the communication and problem feedback group number", - "xxxxx": "xxxxx", - "加载模型": "Load model", - "Hubert模型": "Hubert Model", - "选择.pth文件": "Select the .pth file", - "选择.index文件": "Select the .index file", - "选择.npy文件": "Select the .npy file", - "输入设备": "Input device", - "输出设备": "Output device", - "音频设备(请使用同种类驱动)": "Audio device (please use the same type of driver)", - "响应阈值": "Response threshold", - "音调设置": "Pitch settings", - "是否使用音符名称而不是它们的赫兹值。例如,使用[C5,D6]代替[523.25,1174.66]赫兹。": "Whether to use note names instead of their hertz value. E.G. 
[C5, D6] instead of [523.25, 1174.66]Hz", - "Index Rate": "Index Rate", - "常规设置": "General settings", - "采样长度": "Sample length", - "淡入淡出长度": "Fade length", - "额外推理时长": "Extra inference time", - "输入降噪": "Input noise reduction", - "输出降噪": "Output noise reduction", - "性能设置": "Performance settings", - "开始音频转换": "Start audio conversion", - "停止音频转换": "Stop audio conversion", - "推理时间(ms):": "Inference time (ms):", - "请选择pth文件": "Select the pth file", - "请选择index文件": "Select the index file", - "hubert模型路径不可包含中文": "The hubert model path must not contain Chinese characters", - "pth文件路径不可包含中文": "The pth file path must not contain Chinese characters.", - "index文件路径不可包含中文": "The index file path must not contain Chinese characters.", - "音高算法": "Step algorithm", - "harvest进程数": "Number of epoch processes", - "最低点导出": "Lowest points export", - "保存多少个最低点": "How many lowest points to save", - "导出模型的最低点": "Export lowest points of a model", - "输出型号": "Output models", - "所选模型的统计数据": "Stats of selected models", - "自定义 f0 [根音] 文件": "Custom f0 [Root pitch] File", - "音高最小值": "Min pitch", - "指定推断的最小音高 [HZ]": "Specify minimal pitch for inference [HZ]", - "为推断指定最小音高 [音符][八度]": "Specify minimal pitch for inference [NOTE][OCTAVE]", - "音高最大值": "Max pitch", - "指定推断的最大音高 [HZ]": "Specify max pitch for inference [HZ]", - "为推断指定最大音高 [音符][八度]": "Specify max pitch for inference [NOTE][OCTAVE]", - "浏览共振峰预设": "Browse presets for formanting", - "预设位于 formantshiftcfg/ 文件夹中": "Presets are located in formantshiftcfg/ folder", - "默认值为 1.0": "Default value is 1.0", - "用于共振峰变换的 Quefrency": "Quefrency for formant shifting", - "用于共振峰变换的音色": "Timbre for formant shifting", - "应用": "Apply", - "单个": "Single", - "批处理": "Batch", - "单独的 YouTube 曲目": "Separate YouTube tracks", - "下载 YouTube 视频的音频并自动分离声音和伴奏轨道": "Download audio from a YouTube video and automatically separate the vocal and instrumental tracks", - "额外": "Extra", - "将生成的音频与其他音频(伴奏)合并,还可以用它来创建视频": "Merge your generated audios with other audio (instrumental) [Beta]", - "选择您的伴奏": "Choose your instrumental", - "选择生成的音频": "Choose the generated audio", - "合并": "Combine", - "下载并分离": "Download and Separate", - "粘贴 YouTube 链接": "Enter the youtube link", - "此部分包含一些额外的实用工具,通常可能处于实验阶段": "This section contains some extra utilities that often may be in experimental phases", - "合并音频": "Merge Audios" -} \ No newline at end of file diff --git a/i18n/es_ES.json b/i18n/es_ES.json deleted file mode 100644 index 16e61f57f..000000000 --- a/i18n/es_ES.json +++ /dev/null @@ -1,226 +0,0 @@ -{ - "很遗憾您这没有能用的显卡来支持您训练": "Lamentablemente, no tiene una tarjeta gráfica adecuada para soportar su entrenamiento", - "是": "Sí", - "选择你的数据集。": "Selecciona tu conjunto de datos.", - "更新清单。": "Actualizar lista.", - "下载模型": "Descargar modelo", - "下载备份": "Descargar backup", - "下载数据集": "Descargar dataset", - "下载": "Descargar", - "网址": "Introduce el enlace:", - "保存前构建索引。": "Genere el índice antes de guardar.", - "训练结束后保存您的模型。": "Guarde su modelo una vez el entrenamiento termina.", - "存储类型": "Tipo de guardado", - "保存模型": "Guardar modelo", - "选择模型保存方法": "Elige el metodo", - "保存所有": "Guardar todo", - "保存 D 和 G": "Guardar D y G", - "保存声音": "Guardar voz", - "下载文件:": "Descargando el archivo: ", -"数据集名称": "nombre_dataset", - "最近查看或下载此文件的用户过多": "Demasiados usuarios han visto o descargado recientemente este archivo", - "无法从该私人链接获取文件": "No se puede obtener el archivo de este enlace privado", - "完整下载": "Descarga completa", - "下载模型时发生错误。": "Ocurrio un error descargando", - "正确存储模型": "Modelo guardado correctamente", - "保存模型...": "Guardando 
el modelo...", - "宓模型": "nombre_modelo", - "保存时未编制索引...": "Guardado sin index...", - "无模型保存(PTH)": "Guardado sin modelo de inferencia...", - "保存模型时发生错误": "Ocurrio un error guardando el modelo", - "您要保存的模型不存在,请确保输入的名称正确。": "El modelo que desea guardar no existe, asegúrese de introducir el nombre correcto.", - "无法下载模型。": "No se ha podido descargar el archivo.", - "解压缩出错。": "Error al descomprimir.", - "模型下载成功。": "Se ha descargado correctamente.", - "继续提取...": "Procediendo con la extracción...", - "备份已成功上传。": "El Backup se ha cargado correctamente.", - "数据集加载成功。": "El Dataset se ha cargado correctamente. ", - "模型已正确加载。": "El Modelo se ha cargado correctamente.", - "它用于下载您的推理模型。": "Descarga modelos para poder utilizarlos en inferencias.", - "它用于下载您的训练备份。": "Si necesitas continuar entrenando un modelo, descarga su backup.", - "下载兼容格式(.wav/.flac)的音频数据集以训练模型。": "Descarga el dataset con los audios en un formato compatible (.wav/.flac) para entrenar tu modelo.", - "未找到可上传的相关文件": "No se encontró ningún archivo relevante para cargar.", - "该模型可用于推理,并有 .index 文件。": "El modelo funciona para inferencia, y tiene el archivo .index.", - "该模型可用于推理,但没有 .index 文件。": "El modelo funciona para inferencia, pero no tiene el archivo .index.", - "这可能需要几分钟时间,请稍候...": "Esto puede tomar unos minutos, por favor espere...", - "资源": "Recursos", - "step1:正在处理数据": "Paso 1: Procesando datos", - "step2a:无需提取音高": "Paso 2a: No es necesario extraer el tono", - "step2b:正在提取特征": "Paso 2b: Extrayendo características", - "step3a:正在训练模型": "Paso 3a: Entrenando el modelo", - "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "Entrenamiento finalizado, puede ver el registro de entrenamiento en la consola o en el archivo train.log en la carpeta del experimento", - "全流程结束!": "¡Todo el proceso ha terminado!", - "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录使用需遵守的协议-LICENSE.txt.": "Este software es de código abierto bajo la licencia MIT, el autor no tiene ningún control sobre el software, y aquellos que usan el software y difunden los sonidos exportados por el software son los únicos responsables.
Si no está de acuerdo con esta cláusula, no puede utilizar ni citar ningún código ni archivo del paquete de software. Consulte el directorio raíz Agreement-LICENSE.txt para obtener más información.", - "模型推理": "Inferencia", - "推理音色": "Inferencia de voz", - "added.index 文件的路径(如果它没有自动找到该文件)": "Autodetección del archivo added.index (Si no lo ha encontrado automáticamente utiliza la casilla de arriba)", - "刷新音色列表和索引路径": "Actualizar lista de modelos, audios e índex", - "卸载音色省显存": "Descargue la voz para ahorrar memoria GPU", - "请选择说话人id": "Seleccione una identificación de altavoz", - "将音频拖到此处,然后点击刷新按钮": "Arrastra tu audio aquí y pulsa el botón de actualizar", - "或录制音频": "O graba un audio.", - "共振声移动推理音频": "Audio de inferencia de cambio de formantes", - "用于将男性转换为女性,反之亦然": "Se utiliza para conversiones de hombre a mujer y viceversa", - "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "Tecla +12 recomendada para conversión de voz de hombre a mujer, tecla -12 para conversión de voz de mujer a hombre. Si el rango de tono es demasiado amplio y causa distorsión, ajústelo usted mismo a un rango adecuado.", - "变调(整数, 半音数量, 升八度12降八度-12)": "Cambio de tono (Entero, número de semitonos, subir una octava +12 o bajar una octava -12)", - "输入待处理音频文件路径(默认是正确格式示例)": "Ingrese la ruta del archivo del audio que se procesará (el formato predeterminado es el ejemplo correcto)", - "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "Seleccione el algoritmo para la extracción de tono.", - "crepe_hop_length": "Crepe Hop Length (Sólo se aplica al crepé): Hop length se refiere al tiempo que tarda el locutor en saltar a un tono dramático. Los hop lengths más cortos tardan más en inferirse, pero son más precisos.", - "特征检索库文件路径": "Ruta del archivo de la base de datos de búsqueda de características", - ">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": "Si >=3: aplica un filtrado de mediana a los resultados de tono recogidos. El valor representa el radio del filtro y puede reducir la respiración", - "特征检索库文件路径,为空则使用下拉的选择结果": "Ruta del archivo added.index (Dejar en blanco si la casilla de abajo lo ha detectado correctamente)", - "自动检测index路径,下拉式选择(dropdown)": "Auto-detectar la ruta del índice y seleccionar desde el menú desplegable", - "特征文件路径": "Ruta del archivo de características", - "检索特征占比": "Proporción de función de búsqueda", - "后处理重采样至最终采样率,0为不进行重采样": "Remuestreo posterior al proceso a la tasa de muestreo final, 0 significa no remuestrear", - "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "Proporción de fusión para reemplazar el sobre de volumen de entrada con el sobre de volumen de salida, cuanto más cerca de 1, más se utiliza el sobre de salida", - "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "Proteger las consonantes claras y la respiración, prevenir artefactos como la distorsión de sonido electrónico, 0.5 no está activado, reducir aumentará la protección pero puede reducir el efecto del índice", - "F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "Archivo de curva F0, opcional, un tono por línea, en lugar de F0 predeterminado y cambio de tono", - "转换": "Conversión", - "地位": "Estado", - "输出信息": "Información de salida", - "输出音频(右下角三个点,点了可以下载)": "Salida de audio (Haga clic en los tres puntos en la esquina inferior derecha para descargar)", - "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "Conversión por lotes, ingrese la carpeta que contiene los archivos de audio para convertir o cargue varios archivos de audio. 
El audio convertido se emitirá en la carpeta especificada (opción predeterminada).", - "指定输出文件夹": "Especificar carpeta de salida", - "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "Ingrese la ruta a la carpeta de audio que se procesará (simplemente cópiela desde la barra de direcciones del administrador de archivos)", - "也可批量输入音频文件, 二选一, 优先读文件夹": "También se pueden ingresar múltiples archivos de audio, cualquiera de las dos opciones, con prioridad dada a la carpeta", - "导出文件格式": "Formato de archivo de exportación", - "伴奏人声分离&去混响&去回声": "UVR5", - "人声伴奏分离批量处理, 使用UVR5模型。
合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。
模型分为三类:
1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;
2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;
3、去混响、去延迟模型(by FoxJoy):
  (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;
 (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。
去混响/去延迟,附:
1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;
2、MDX-Net-Dereverb模型挺慢的;
3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": " ", - "输入待处理音频文件夹路径": "Ingrese la ruta a la carpeta de audio que se procesará", - "模型": "Modelo", - "指定输出主人声文件夹": "Especifique la carpeta de salida para la voz principal", - "指定输出非主人声文件夹": "Especifique la carpeta de salida para las voces no principales", - "训练": "Entrenamiento", -"停止培训": "Parar entrenamiento", - "step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "Paso 1: Complete la configuración del experimento. Los datos del experimento se almacenan en el directorio 'logs', con cada experimento en una carpeta separada. La ruta del nombre del experimento debe ingresarse manualmente y debe contener la configuración del experimento, los registros y los archivos del modelo entrenado.", - "输入实验名": "Ingrese el nombre del modelo", - "目标采样率": "Tasa de muestreo objetivo", - "模型名称": "Nombre_Modelo", - "数据集名": "Nombre_Dataset", - "是否只保存最新的 .ckpt 文件以节省硬盘空间": "Si desea guardar sólo el último archivo G y D para ahorrar espacio en el disco duro.", - "将所有训练集缓存到 GPU 内存中。缓存小型数据集(少于 10 分钟)可以加快训练速度,但缓存大型数据集会消耗大量 GPU 内存,可能无法显著提高速度": "Almacena en caché bloques de entrenamiento pequeños en la GPU para acelerar el proceso (menos de 10 minutos).", - "模型是否具有俯仰引导功能": "Si el modelo dispone de guía de tono.", - "模型是否带音高指导(唱歌一定要, 语音可以不要)": "Si el modelo tiene guía de tono (necesaria para cantar, pero no para hablar)", - "版本": "Versión", - "高级设置": "Ajustes avanzados", - "设置": "Ajustes", - "高级设置[批量]": "Ajustes avanzados [Batch]", - "提取音高和处理数据使用的CPU进程数": "Número de procesos de CPU utilizados para extraer el tono y procesar los datos", - "step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "Paso 2a: Recorra automáticamente la carpeta de capacitación y corte y normalice todos los archivos de audio que se pueden decodificar en audio. Se generarán dos carpetas 'wav' en el directorio del experimento. Actualmente, solo se admite la capacitación de una sola persona.", - "输入训练文件夹路径": "Introduce la ruta de la carpeta que contiene el dataset.", - "请指定说话人id": "Especifique el ID del hablante", - "处理数据": "Procesar datos", - "step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "Paso 2b: Use la CPU para extraer el tono (si el modelo tiene guía de tono) y la GPU para extraer características (seleccione el número de tarjeta).", - "以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "Separe los números de identificación de las GPUs utilizando un guión (-) al ingresarlos, en caso de contar con múltiples GPUs.", - "显卡信息": "Información de la GPU", - "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢": "Seleccione el algoritmo de extracción de tono.", - "特征提取": "Extracción de características", - "step3: 填写训练设置, 开始训练模型和索引": "Paso 3: Complete la configuración de entrenamiento y comience a entrenar el modelo y el índice.", - "步骤4:单击模型的导出最低点后,在模型图上的导出最低点,新文件将位于logs/[yourmodelname]/lowestvals/folder中": "Paso 4: Exporta los puntos más bajos en un gráfico del modelo. Después de hacer clic en Exportar puntos más bajos de un modelo, los nuevos archivos se encontrarán en la carpeta logs/[nombredetumodelo]/lowestvals.", - "保存频率save_every_epoch": "Frecuencia de guardado. (save_every_epoch)", - "总训练轮数total_epoch": "Épocas de entrenamiento. (total_epoch)", - "每张显卡的batch_size": "Tamaño del lote por tarjeta gráfica. (batch_size)", - "在每个保存点将一个小的最终模型保存到 权重 文件夹中": "Guardar un pequeño modelo final en la carpeta 'weights' en cada punto de guardado.", - "是否仅保存最新的ckpt文件以节省硬盘空间": "Si guardar solo el archivo ckpt más reciente para ahorrar espacio en disco", - "否": "No", - "是否缓存所有训练集至显存. 
10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "Almacena en caché conjuntos pequeños de entrenamiento en la memoria de la GPU puede acelerar el entrenamiento (menos de 10 minutos).", - "是否在每次保存时间点将最终小模型保存至weights文件夹": "¿Guardar el pequeño modelo final en la carpeta 'weights' en cada punto de guardado?", - "加载预训练底模G路径": "Cargue la ruta G del modelo base preentrenada.", - "加载预训练底模D路径": "Cargue la ruta del modelo D base preentrenada.", - "训练模型": "Entrenar Modelo", - "训练特征索引": "Índice de características del Entrenamiento", - "一键训练": "Formación con un solo clic. (No funciona)", - "ckpt处理": "Procesamiento de ckpt", - "模型融合, 可用于测试音色融合": "Fusión de modelos, se puede utilizar para fusionar diferentes voces", - "A模型路径": "Modelo A ruta.", - "B模型路径": "Modelo B ruta.", - "A模型权重": "Un peso modelo para el modelo A.", - "模型是否带音高指导": "Si el modelo tiene guía de tono.", - "要置入的模型信息": "Información del modelo a colocar.", - "保存的模型名不带后缀": "Nombre del modelo guardado sin extensión.", - "模型版本型号": "Versión y modelo del modelo", - "融合": "Fusión.", - "修改模型信息(仅支持weights文件夹下提取的小模型文件)": "Modificar la información del modelo (solo admite archivos de modelos pequeños extraídos en la carpeta de pesos).", - "模型路径": "Ruta del modelo", - "要改的模型信息": "Información del modelo a modificar", - "保存的文件名, 默认空为和源文件同名": "Nombre del archivo que se guardará, el valor predeterminado es el mismo que el nombre del archivo de origen", - "修改": "Modificar", - "查看模型信息(仅支持weights文件夹下提取的小模型文件)": "Ver información del modelo (solo aplicable a archivos de modelos pequeños extraídos de la carpeta 'pesos')", - "查看": "Ver", - "模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "Extracción de modelo (ingrese la ruta de un archivo de modelo grande en la carpeta 'logs'), aplicable cuando desea extraer un archivo de modelo pequeño después de entrenar a mitad de camino y no se guardó automáticamente, o cuando desea probar un modelo intermedio", - "保存名": "Guardar nombre", - "模型是否带音高指导,1是0否": "Si el modelo tiene guía de tono, 1 para sí, 0 para no", - "提取": "Extracter", - "Onnx导出": "Exportar Onnx", - "RVC模型路径": "Ruta del modelo RVC", - "Onnx输出路径": "Ruta de salida Onnx", - "导出Onnx模型": "Exportar modelo Onnx", - "常见问题解答": "Preguntas frecuentes", - "招募音高曲线前端编辑器": "Reclutar editores front-end para curvas de tono", - "加开发群联系我xxxxx": "Únase al grupo de desarrollo para contactarme en xxxxx", - "点击查看交流、问题反馈群号": "Haga clic para ver el número de grupo de comunicación y comentarios sobre problemas", - "自动检测音频路径并从下拉菜单中选择:": "Auto-detectar la ruta del audio y seleccionar desde el menú desplegable", - "在要处理的音频文件路径中添加音频名称(默认为正确格式示例),从下拉列表中移除使用音频的路径:": "Añade el nombre del audio a la ruta del archivo de audio a procesar (por defecto es el ejemplo de formato correcto) Elimina la ruta para utilizar un audio de la lista desplegable", - "xxxxx": "xxxxx", - "加载模型": "Cargar modelo", - "Hubert模型": "Modelo de Hubert ", - "选择.pth文件": "Seleccionar archivo .pth", - "选择.index文件": "Select .index file", - "选择.npy文件": "Seleccionar archivo .npy", - "输入设备": "Dispositivo de entrada", - "输出设备": "Dispositivo de salida", - "音频设备(请使用同种类驱动)": "Dispositivo de audio (utilice el mismo tipo de controlador)", - "响应阈值": "Umbral de respuesta", - "音调设置": "Ajuste de tono", - "是否使用音符名称而不是它们的赫兹值。例如,使用[C5,D6]代替[523.25,1174.66]赫兹。": "Si usar nombres de notas en lugar de sus valores en hertzios. 
Por ejemplo, [C5, D6] en lugar de [523.25, 1174.66] Hz.", - "Index Rate": "Tasa de índice", - "常规设置": "Configuración general", - "采样长度": "Longitud de muestreo", - "淡入淡出长度": "Duración del fundido de entrada/salida", - "额外推理时长": "Tiempo de inferencia adicional", - "输入降噪": "Reducción de ruido de entrada", - "输出降噪": "Reducción de ruido de salida", - "性能设置": "Configuración de rendimiento", - "开始音频转换": "Iniciar conversión de audio", - "停止音频转换": "Detener la conversión de audio", - "推理时间(ms):": "Inferir tiempo (ms):", - "请选择pth文件": "Seleccione el archivo pth", - "请选择index文件": "Seleccione el archivo de indice", - "hubert模型路径不可包含中文": "La ruta del modelo hubert no debe contener caracteres chinos", - "pth文件路径不可包含中文": "La ruta del archivo pth no debe contener caracteres chinos.", - "index文件路径不可包含中文": "La ruta del archivo de índice no debe contener caracteres chinos.", - "音高算法": "Algoritmo de paso", - "harvest进程数": "Número de procesos de epochs", - "最低点导出": "Exportación de puntos más bajos", - "保存多少个最低点": "Cuantos puntos más bajos para salvar", - "导出模型的最低点": "Exportar puntos más bajos de un modelo", - "输出型号": "Modelos de salida", - "所选模型的统计数据": "Estadísticas de modelos seleccionados", - "自定义 f0 [根音] 文件": "Archivo personalizado F0 [Root pitch]", - "音高最小值": "Tono mínimo", - "指定推断的最小音高 [HZ]": "Especificar tono mínimo para inferencia [HZ]", - "为推断指定最小音高 [音符][八度]": "Especificar tono mínimo para inferencia [NOTA][OCTAVA]", - "音高最大值": "Tono máximo", - "指定推断的最大音高 [HZ]": "Especificar tono máximo para inferencia [HZ]", - "为推断指定最大音高 [音符][八度]": "Especificar tono máximo para inferencia [NOTA][OCTAVA]", - "浏览共振峰预设": "Examinar ajustes preestablecidos para formar", - "预设位于 formantshiftcfg/ 文件夹中": "Los preajustes se encuentran en la carpeta formantshiftcfg/", - "默认值为 1.0": "El valor predeterminado es 1.0", - "用于共振峰变换的 Quefrency": "Quefrencia para cambio de formantes", - "用于共振峰变换的音色": "Timbre para cambio de formantes", - "应用": "Aplicar", - "单个": "Individual", - "批处理": "Por Lotes", - "单独的 YouTube 曲目": "Separar pistas de YouTube", - "下载 YouTube 视频的音频并自动分离声音和伴奏轨道": "Obtén el audio de un video de YouTube y automáticamente separa las pistas vocales e instrumentales.", - "额外": "Adicional", - "将生成的音频与其他音频(伴奏)合并,还可以用它来创建视频": "Combinar tus audios generados con otro audio (instrumental) [Beta]", - "选择您的伴奏": "Elige tu instrumental", - "选择生成的音频": "Elige el audio generado", - "合并": "Combinar", - "下载并分离": "Descargar y Separar", - "粘贴 YouTube 链接": "Introduce el enlace de YouTube:", - "此部分包含一些额外的实用工具,通常可能处于实验阶段": "Esta sección incluye algunas utilidades adicionales que a menudo se encuentran en etapas experimentales.", - "合并音频": "Unir Audios" -} diff --git a/i18n/it_IT.json b/i18n/it_IT.json deleted file mode 100644 index b314f312d..000000000 --- a/i18n/it_IT.json +++ /dev/null @@ -1,130 +0,0 @@ -{ - "很遗憾您这没有能用的显卡来支持您训练": "Sfortunatamente, non è disponibile alcuna GPU compatibile per supportare l'addestramento.", - "是": "SÌ", - "step1:正在处理数据": "Passaggio 1: elaborazione dei dati", - "step2a:无需提取音高": "Step 2a: Saltare l'estrazione del tono", - "step2b:正在提取特征": "Passaggio 2b: estrazione delle funzionalità", - "step3a:正在训练模型": "Passaggio 3a: è iniziato l'addestramento del modello", - "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "Addestramento completato. ", - "全流程结束!": "Tutti i processi sono stati completati!", - "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "Questo software è open source con licenza MIT.
Se non si accetta questa clausola, non è possibile utilizzare o fare riferimento a codici e file all'interno del pacchetto software. Contratto-LICENZA.txt per dettagli.", - "模型推理": "Inferenza del modello", - "推理音色": "Voce di inferenza:", - "刷新音色列表和索引路径": "Aggiorna l'elenco delle voci e il percorso dell'indice", - "卸载音色省显存": "Scarica la voce per risparmiare memoria della GPU:", - "请选择说话人id": "Seleziona ID locutore/cantante:", - "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "Tonalità +12 consigliata per la conversione da maschio a femmina e tonalità -12 per la conversione da femmina a maschio. ", - "变调(整数, 半音数量, 升八度12降八度-12)": "Trasposizione (numero intero, numero di semitoni, alza di un'ottava: 12, abbassa di un'ottava: -12):", - "输入待处理音频文件路径(默认是正确格式示例)": "Immettere il percorso del file audio da elaborare (l'impostazione predefinita è l'esempio di formato corretto):", - "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "Seleziona l'algoritmo di estrazione del tono (\"pm\": estrazione più veloce ma risultato di qualità inferiore; \"harvest\": bassi migliori ma estremamente lenti; \"crepe\": qualità migliore ma utilizzo intensivo della GPU):", - ">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": "Se >=3: applica il filtro mediano ai risultati del pitch raccolto. ", - "特征检索库文件路径,为空则使用下拉的选择结果": "Percorso del file di indice delle caratteristiche. ", - "自动检测index路径,下拉式选择(dropdown)": "Rileva automaticamente il percorso dell'indice e seleziona dal menu a tendina:", - "特征文件路径": "Percorso del file delle caratteristiche:", - "检索特征占比": "Rapporto funzionalità di ricerca (controlla la forza dell'accento, troppo alto ha artefatti):", - "后处理重采样至最终采样率,0为不进行重采样": "Ricampiona l'audio di output in post-elaborazione alla frequenza di campionamento finale. ", - "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "Regola il ridimensionamento dell'inviluppo del volume. ", - "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "Proteggi le consonanti senza voce e i suoni del respiro per evitare artefatti come il tearing nella musica elettronica. ", - "F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "File curva F0 (opzionale). ", - "转换": "Convertire", - "输出信息": "Informazioni sull'uscita", - "输出音频(右下角三个点,点了可以下载)": "Esporta audio (clicca sui tre puntini in basso a destra per scaricarlo)", - "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "Conversione massiva. Inserisci il percorso della cartella che contiene i file da convertire o carica più file audio. I file convertiti finiranno nella cartella specificata. (default: opt) ", - "指定输出文件夹": "Specifica la cartella di output:", - "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "Immettere il percorso della cartella audio da elaborare (copiarlo dalla barra degli indirizzi del file manager):", - "也可批量输入音频文件, 二选一, 优先读文件夹": "Puoi anche inserire file audio in massa. ", - "导出文件格式": "Formato file di esportazione", - "伴奏人声分离&去混响&去回声": "Separazione voce/accompagnamento", - "人声伴奏分离批量处理, 使用UVR5模型。
合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。
模型分为三类:
1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;
2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;
3、去混响、去延迟模型(by FoxJoy):
  (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;
 (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。
去混响/去延迟,附:
1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;
2、MDX-Net-Dereverb模型挺慢的;
3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "Elaborazione batch per la separazione dell'accompagnamento vocale utilizzando il modello UVR5.
Esempio di un formato di percorso di cartella valido: D:\\path\\to\\input\\folder (copialo dalla barra degli indirizzi del file manager).
Il modello è suddiviso in tre categorie:
1. Conserva la voce: scegli questa opzione per l'audio senza armonie.
2. Mantieni solo la voce principale: scegli questa opzione per l'audio con armonie.
3. Modelli di de-riverbero e de-delay (di FoxJoy):
  (1) MDX-Net: la scelta migliore per la rimozione del riverbero stereo ma non può rimuovere il riverbero mono;

Note di de-riverbero/de-delay:
1. Il tempo di elaborazione per il modello DeEcho-DeReverb è circa il doppio rispetto agli altri due modelli DeEcho.
2. Il modello MDX-Net-Dereverb è piuttosto lento.
3. La configurazione più pulita consigliata consiste nell'applicare prima MDX-Net e poi DeEcho-Aggressive.", - "输入待处理音频文件夹路径": "Immettere il percorso della cartella audio da elaborare:", - "模型": "Modello", - "指定输出主人声文件夹": "Specifica la cartella di output per le voci:", - "指定输出非主人声文件夹": "Specificare la cartella di output per l'accompagnamento:", - "训练": "Addestramento", - "step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "Passaggio 1: compilare la configurazione sperimentale. ", - "输入实验名": "Inserisci il nome dell'esperimento:", - "目标采样率": "Frequenza di campionamento target:", - "模型是否带音高指导(唱歌一定要, 语音可以不要)": "Se il modello ha una guida del tono (necessario per il canto, facoltativo per il parlato):", - "版本": "Versione", - "提取音高和处理数据使用的CPU进程数": "Numero di processi CPU utilizzati per l'estrazione del tono e l'elaborazione dei dati:", - "step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "Passaggio 2a: attraversa automaticamente tutti i file nella cartella di addestramento che possono essere decodificati in audio ed esegui la normalizzazione delle sezioni. ", - "输入训练文件夹路径": "Inserisci il percorso della cartella di addestramento:", - "请指定说话人id": "Si prega di specificare l'ID del locutore/cantante:", - "处理数据": "Processa dati", - "step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "Passaggio 2b: utilizzare la CPU per estrarre il tono (se il modello ha il tono), utilizzare la GPU per estrarre le caratteristiche (selezionare l'indice GPU):", - "以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "Inserisci gli indici GPU separati da '-', ad esempio 0-1-2 per utilizzare GPU 0, 1 e 2:", - "显卡信息": "Informazioni GPU", - "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢": "Seleziona l'algoritmo di estrazione del tono (\"pm\": estrazione più rapida ma parlato di qualità inferiore; \"dio\": parlato migliorato ma estrazione più lenta; \"harvest\": migliore qualità ma estrazione più lenta):", - "特征提取": "Estrazione delle caratteristiche", - "step3: 填写训练设置, 开始训练模型和索引": "Passaggio 3: compilare le impostazioni di addestramento e avviare l'addestramento del modello e dell'indice", - "保存频率save_every_epoch": "Frequenza di salvataggio (save_every_epoch):", - "总训练轮数total_epoch": "Epoch totali di addestramento (total_epoch):", - "每张显卡的batch_size": "Dimensione batch per GPU:", - "是否仅保存最新的ckpt文件以节省硬盘空间": "Salva solo l'ultimo file '.ckpt' per risparmiare spazio su disco:", - "否": "NO", - "是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "Memorizza nella cache tutti i set di addestramento nella memoria della GPU. 
", - "是否在每次保存时间点将最终小模型保存至weights文件夹": "Salva un piccolo modello finale nella cartella \"weights\" in ogni punto di salvataggio:", - "加载预训练底模G路径": "Carica il percorso G del modello base pre-addestrato:", - "加载预训练底模D路径": "Carica il percorso D del modello base pre-addestrato:", - "训练模型": "Addestra modello", - "训练特征索引": "Addestra indice delle caratteristiche", - "一键训练": "Addestramento con un clic", - "ckpt处理": "Elaborazione ckpt", - "模型融合, 可用于测试音色融合": "Model fusion, può essere utilizzato per testare la fusione timbrica", - "A模型路径": "Percorso per il modello A:", - "B模型路径": "Percorso per il modello B:", - "A模型权重": "Peso (w) per il modello A:", - "模型是否带音高指导": "Se il modello ha una guida del tono:", - "要置入的模型信息": "Informazioni sul modello da posizionare:", - "保存的模型名不带后缀": "Nome del modello salvato (senza estensione):", - "模型版本型号": "Versione dell'architettura del modello:", - "融合": "Fusione", - "修改模型信息(仅支持weights文件夹下提取的小模型文件)": "Modifica le informazioni sul modello (supportato solo per i file di modello di piccole dimensioni estratti dalla cartella 'weights')", - "模型路径": "Percorso al modello:", - "要改的模型信息": "Informazioni sul modello da modificare:", - "保存的文件名, 默认空为和源文件同名": "Salva il nome del file (predefinito: uguale al file di origine):", - "修改": "Modificare", - "查看模型信息(仅支持weights文件夹下提取的小模型文件)": "Visualizza le informazioni sul modello (supportato solo per file di modello piccoli estratti dalla cartella 'weights')", - "查看": "Visualizzazione", - "模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "Estrazione del modello (inserire il percorso del modello di file di grandi dimensioni nella cartella \"logs\"). ", - "保存名": "Salva nome:", - "模型是否带音高指导,1是0否": "Se il modello ha una guida del tono (1: sì, 0: no):", - "提取": "Estrai", - "Onnx导出": "Esporta Onnx", - "RVC模型路径": "Percorso modello RVC:", - "Onnx输出路径": "Percorso di esportazione Onnx:", - "导出Onnx模型": "Esporta modello Onnx", - "常见问题解答": "FAQ (Domande frequenti)", - "招募音高曲线前端编辑器": "Reclutamento di redattori front-end per curve di tono", - "加开发群联系我xxxxx": "Unisciti al gruppo di sviluppo e contattami a xxxxx", - "点击查看交流、问题反馈群号": "Fare clic per visualizzare il numero del gruppo di comunicazione e feedback sui problemi", - "xxxxx": "xxxxx", - "加载模型": "Carica modello", - "Hubert模型": "Modello Hubert", - "选择.pth文件": "Seleziona il file .pth", - "选择.index文件": "Seleziona il file .index", - "选择.npy文件": "Seleziona il file .npy", - "输入设备": "Dispositivo di input", - "输出设备": "Dispositivo di uscita", - "音频设备(请使用同种类驱动)": "Dispositivo audio (utilizzare lo stesso tipo di driver)", - "响应阈值": "Soglia di risposta", - "音调设置": "Impostazioni del tono", - "Index Rate": "Tasso di indice", - "常规设置": "Impostazioni generali", - "采样长度": "Lunghezza del campione", - "淡入淡出长度": "Lunghezza dissolvenza", - "额外推理时长": "Tempo di inferenza extra", - "输入降噪": "Riduzione del rumore in ingresso", - "输出降噪": "Riduzione del rumore in uscita", - "性能设置": "Impostazioni delle prestazioni", - "开始音频转换": "Avvia la conversione audio", - "停止音频转换": "Arresta la conversione audio", - "推理时间(ms):": "Tempo di inferenza (ms):", - "请选择pth文件": "请选择pth 文件", - "请选择index文件": "请选择index文件", - "hubert模型路径不可包含中文": "hubert 模型路径不可包含中文", - "pth文件路径不可包含中文": "pth è un'app per il futuro", - "index文件路径不可包含中文": "index文件路径不可包含中文", - "音高算法": "音高算法", - "harvest进程数": "harvest进程数" -} diff --git a/i18n/ja_JP.json b/i18n/ja_JP.json deleted file mode 100644 index b763accff..000000000 --- a/i18n/ja_JP.json +++ /dev/null @@ -1,126 +0,0 @@ -{ - "很遗憾您这没有能用的显卡来支持您训练": "トレーニングに対応したGPUが動作しないのは残念です。", - "是": "はい", - "step1:正在处理数据": 
"step1:処理中のデータ", - "step2a:无需提取音高": "step2a:ピッチの抽出は不要", - "step2b:正在提取特征": "step2b:抽出される特徴量", - "step3a:正在训练模型": "step3a:トレーニング中のモデル", - "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "トレーニング終了時に、トレーニングログやフォルダ内のtrain.logを確認することができます", - "全流程结束!": "全工程が完了!", - "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录使用需遵守的协议-LICENSE.txt.": "本ソフトウェアはMITライセンスに基づくオープンソースであり、作者は本ソフトウェアに対していかなる強制力も持ちません。本ソフトウェアの利用者および本ソフトウェアから派生した音源(成果物)を配布する者は、本ソフトウェアに対して自身で責任を負うものとします。
この条項に同意しない場合、パッケージ内のコードやファイルを使用や参照を禁じます。詳しくは使用需遵守的协议-LICENSE.txtをご覧ください.", - "模型推理": "モデル推論", - "推理音色": "音源推論", - "刷新音色列表和索引路径": "音源リストとインデックスパスの更新", - "卸载音色省显存": "音源を削除してメモリを節約", - "请选择说话人id": "話者IDを選択してください", - "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "男性から女性へは+12キーをお勧めします。女性から男性へは-12キーをお勧めします。音域が広すぎて音質が劣化した場合は、適切な音域に自分で調整することもできます。", - "变调(整数, 半音数量, 升八度12降八度-12)": "ピッチ変更(整数、半音数、上下オクターブ12-12)", - "输入待处理音频文件路径(默认是正确格式示例)": "処理対象音声ファイルのパスを入力してください(デフォルトは正しいフォーマットの例です)", - "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU", - "crepe_hop_length": "Crepe Hop Length (Only applies to crepe): Hop length refers to the time it takes for the speaker to jump to a dramatic pitch. Lower hop lengths take more time to infer but are more pitch accurate.", - "特征检索库文件路径": "特徴量検索データベースのファイルパス", - ">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": ">=3 次に、harvestピッチの認識結果に対してメディアンフィルタを使用します。値はフィルター半径で、ミュートを減衰させるために使用します。", - "特征检索库文件路径,为空则使用下拉的选择结果": "特徴検索ライブラリへのパス 空の場合はドロップダウンで選択", - "自动检测index路径,下拉式选择(dropdown)": "インデックスパスの自動検出 ドロップダウンで選択", - "特征文件路径": "特徴量ファイルのパス", - "检索特征占比": "検索特徴率", - "后处理重采样至最终采样率,0为不进行重采样": "最終的なサンプリングレートへのポストプロセッシングのリサンプリング リサンプリングしない場合は0", - "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "入力ソースの音量エンベロープと出力音量エンベロープの融合率 1に近づくほど、出力音量エンベロープの割合が高くなる", - "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果", - "F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "F0(最低共振周波数)カーブファイル(オプション、1行に1ピッチ、デフォルトのF0(最低共振周波数)とエレベーションを置き換えます。)", - "转换": "変換", - "输出信息": "出力情報", - "输出音频(右下角三个点,点了可以下载)": "出力音声(右下の三点をクリックしてダウンロードできます)", - "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "一括変換、変換する音声フォルダを入力、または複数の音声ファイルをアップロードし、指定したフォルダ(デフォルトのopt)に変換した音声を出力します。", - "指定输出文件夹": "出力フォルダを指定してください", - "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "処理対象音声フォルダーのパスを入力してください(ファイルマネージャのアドレスバーからコピーしてください)", - "也可批量输入音频文件, 二选一, 优先读文件夹": "複数の音声ファイルを一括で入力することもできますが、フォルダーを優先して読み込みます", - "导出文件格式": "导出文件格式", - "伴奏人声分离&去混响&去回声": "伴奏人声分离&去混响&去回声", - "输入待处理音频文件夹路径": "処理するオーディオファイルのフォルダパスを入力してください", - "模型": "モデル", - "指定输出主人声文件夹": "指定输出主人声文件夹", - "指定输出非主人声文件夹": "指定输出非主人声文件夹", - "训练": "トレーニング", - "step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "ステップ1:実験設定を入力します。実験データはlogsに保存され、各実験にはフォルダーがあります。実験名のパスを手動で入力する必要があり、実験設定、ログ、トレーニングされたモデルファイルが含まれます。", - "输入实验名": "モデル名", - "目标采样率": "目標サンプリングレート", - "模型是否带音高指导(唱歌一定要, 语音可以不要)": "モデルに音高ガイドがあるかどうか(歌唱には必要ですが、音声には必要ありません)", - "版本": "バージョン", - "提取音高和处理数据使用的CPU进程数": "ピッチの抽出やデータ処理に使用するCPUスレッド数", - "step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "ステップ2a: 訓練フォルダー内のすべての音声ファイルを自動的に探索し、スライスと正規化を行い、2つのwavフォルダーを実験ディレクトリに生成します。現在は一人でのトレーニングのみをサポートしています。", - "输入训练文件夹路径": "トレーニング用フォルダのパスを入力してください", - "请指定说话人id": "話者IDを指定してください", - "处理数据": "データ処理", - "step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "ステップ2b: CPUを使用して音高を抽出する(モデルに音高がある場合)、GPUを使用して特徴を抽出する(GPUの番号を選択する)", - "以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "ハイフンで区切って使用するGPUの番号を入力します。例えば0-1-2はGPU0、GPU1、GPU2を使用します", - "显卡信息": "GPU情報", - "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢": "音高抽出アルゴリズムの選択:歌声を入力する場合は、pmを使用して速度を上げることができます。CPUが低い場合はdioを使用して速度を上げることができます。harvestは品質が高く、精度が高いですが、遅いです。", - "特征提取": "特徴抽出", - "step3: 填写训练设置, 开始训练模型和索引": "ステップ3: トレーニング設定を入力して、モデルとインデックスのトレーニングを開始します", - "保存频率save_every_epoch": "エポックごとの保存頻度", - "总训练轮数total_epoch": "総エポック数", - "每张显卡的batch_size": "GPUごとのバッチサイズ", - "是否仅保存最新的ckpt文件以节省硬盘空间": "ハードディスク容量を節約するため、最新のckptファイルのみを保存するかどうか", - "否": "いいえ", - "是否缓存所有训练集至显存. 
10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "すべてのトレーニングデータをメモリにキャッシュするかどうか。10分以下の小さなデータはキャッシュしてトレーニングを高速化できますが、大きなデータをキャッシュするとメモリが破裂し、あまり速度が上がりません。", - "是否在每次保存时间点将最终小模型保存至weights文件夹": "各保存時点の小モデルを全部weightsフォルダに保存するかどうか", - "加载预训练底模G路径": "事前学習済みのGモデルのパス", - "加载预训练底模D路径": "事前学習済みのDモデルのパス", - "训练模型": "モデルのトレーニング", - "训练特征索引": "特徴インデックスのトレーニング", - "一键训练": "One-click training. (Not working on this fork)", - "ckpt处理": "ckptファイルの処理", - "模型融合, 可用于测试音色融合": "モデルのマージ、音源のマージテストに使用できます", - "A模型路径": "Aモデルのパス", - "B模型路径": "Bモデルのパス", - "A模型权重": "Aモデルの重み", - "模型是否带音高指导": "モデルに音高ガイドを付けるかどうか", - "要置入的模型信息": "挿入するモデル情報", - "保存的模型名不带后缀": "拡張子のない保存するモデル名", - "模型版本型号": "モデルのバージョン", - "融合": "フュージョン", - "修改模型信息(仅支持weights文件夹下提取的小模型文件)": "モデル情報の修正(weightsフォルダから抽出された小さなモデルファイルのみ対応)", - "模型路径": "モデルパス", - "要改的模型信息": "変更するモデル情報", - "保存的文件名, 默认空为和源文件同名": "保存するファイル名、デフォルトでは空欄で元のファイル名と同じ名前になります", - "修改": "変更", - "查看模型信息(仅支持weights文件夹下提取的小模型文件)": "モデル情報を表示する(小さいモデルファイルはweightsフォルダーからのみサポートされています)", - "查看": "表示", - "模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "モデル抽出(ログフォルダー内の大きなファイルのモデルパスを入力)、モデルを半分までトレーニングし、自動的に小さいファイルモデルを保存しなかったり、中間モデルをテストしたい場合に適用されます。", - "保存名": "保存ファイル名", - "模型是否带音高指导,1是0否": "モデルに音高ガイドを付けるかどうか、1は付ける、0は付けない", - "提取": "抽出", - "Onnx导出": "Onnx", - "RVC模型路径": "RVCモデルパス", - "Onnx输出路径": "Onnx出力パス", - "MoeVS模型": "MoeSS?", - "导出Onnx模型": "Onnxに変換", - "常见问题解答": "よくある質問", - "招募音高曲线前端编辑器": "音高曲線フロントエンドエディターを募集", - "加开发群联系我xxxxx": "開発グループに参加して私に連絡してくださいxxxxx", - "点击查看交流、问题反馈群号": "クリックして交流、問題フィードバックグループ番号を表示", - "xxxxx": "xxxxx", - "加载模型": "モデルをロード", - "Hubert模型": "Hubertモデル", - "选择.pth文件": ".pthファイルを選択", - "选择.index文件": ".indexファイルを選択", - "选择.npy文件": ".npyファイルを選択", - "输入设备": "入力デバイス", - "输出设备": "出力デバイス", - "音频设备(请使用同种类驱动)": "オーディオデバイス(同じ種類のドライバーを使用してください)", - "响应阈值": "反応閾値", - "音调设置": "音程設定", - "Index Rate": "Index Rate", - "常规设置": "一般設定", - "采样长度": "サンプル長", - "淡入淡出长度": "フェードイン/フェードアウト長", - "额外推理时长": "追加推論時間", - "输入降噪": "入力ノイズの低減", - "输出降噪": "出力ノイズの低減", - "性能设置": "パフォーマンス設定", - "开始音频转换": "音声変換を開始", - "停止音频转换": "音声変換を停止", - "推理时间(ms):": "推論時間(ms):", - "人声伴奏分离批量处理, 使用UVR5模型。
合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。
模型分为三类:
1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;
2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;
3、去混响、去延迟模型(by FoxJoy):
  (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;
 (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。
去混响/去延迟,附:
1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;
2、MDX-Net-Dereverb模型挺慢的;
3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。":"UVR5モデルを使用したボーカル伴奏の分離バッチ処理。
有効なフォルダーパスフォーマットの例: D:\\path\\to\\input\\folder (ファイルマネージャのアドレスバーからコピーします)。
モデルは三つのカテゴリに分かれています:
1. ボーカルを保持: ハーモニーのないオーディオに対してこれを選択します。HP5よりもボーカルをより良く保持します。HP2とHP3の二つの内蔵モデルが含まれています。HP3は伴奏をわずかに漏らす可能性がありますが、HP2よりもわずかにボーカルをより良く保持します。
2. 主なボーカルのみを保持: ハーモニーのあるオーディオに対してこれを選択します。主なボーカルを弱める可能性があります。HP5の一つの内蔵モデルが含まれています。
3. ディリバーブとディレイモデル (by FoxJoy):
  (1) MDX-Net: ステレオリバーブの除去に最適な選択肢ですが、モノリバーブは除去できません;
 (234) DeEcho: ディレイ効果を除去します。AggressiveモードはNormalモードよりも徹底的に除去します。DeReverbはさらにリバーブを除去し、モノリバーブを除去することができますが、高周波のリバーブが強い内容に対しては非常に効果的ではありません。
ディリバーブ/ディレイに関する注意点:
1. DeEcho-DeReverbモデルの処理時間は、他の二つのDeEchoモデルの約二倍です。
2. MDX-Net-Dereverbモデルは非常に遅いです。
3. 推奨される最もクリーンな設定は、最初にMDX-Netを適用し、その後にDeEcho-Aggressiveを適用することです。" -} diff --git a/i18n/locale_diff.py b/i18n/locale_diff.py deleted file mode 100644 index 257277965..000000000 --- a/i18n/locale_diff.py +++ /dev/null @@ -1,45 +0,0 @@ -import json -import os -from collections import OrderedDict - -# Define the standard file name -standard_file = "zh_CN.json" - -# Find all JSON files in the directory -dir_path = "./" -languages = [ - f for f in os.listdir(dir_path) if f.endswith(".json") and f != standard_file -] - -# Load the standard file -with open(standard_file, "r", encoding="utf-8") as f: - standard_data = json.load(f, object_pairs_hook=OrderedDict) - -# Loop through each language file -for lang_file in languages: - # Load the language file - with open(lang_file, "r", encoding="utf-8") as f: - lang_data = json.load(f, object_pairs_hook=OrderedDict) - - # Find the difference between the language file and the standard file - diff = set(standard_data.keys()) - set(lang_data.keys()) - - miss = set(lang_data.keys()) - set(standard_data.keys()) - - # Add any missing keys to the language file - for key in diff: - lang_data[key] = key - - # Del any extra keys to the language file - for key in miss: - del lang_data[key] - - # Sort the keys of the language file to match the order of the standard file - lang_data = OrderedDict( - sorted(lang_data.items(), key=lambda x: list(standard_data.keys()).index(x[0])) - ) - - # Save the updated language file - with open(lang_file, "w", encoding="utf-8") as f: - json.dump(lang_data, f, ensure_ascii=False, indent=4) - f.write("\n") diff --git a/i18n/ru-RU.json b/i18n/ru-RU.json deleted file mode 100644 index 5be2521cd..000000000 --- a/i18n/ru-RU.json +++ /dev/null @@ -1,130 +0,0 @@ -{ - "很遗憾您这没有能用的显卡来支持您训练": "К сожалению у вас нету видеокарты, которая поддерживает тренировку модели.", - "是": "Да", - "step1:正在处理数据": "Шаг 1: Переработка данных", - "step2a:无需提取音高": "Шаг 2а: Пропуск вытаскивания тональности", - "step2b:正在提取特征": "Шаг 2б: Вытаскивание черт", - "step3a:正在训练模型": "Шаг 3а: Тренировка модели начата", - "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "Тренировка завершена. Вы можете проверить логи тренировки в консоли или в файле 'train.log' в папке модели.", - "全流程结束!": "Все процессы завершены!", - "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.", - "模型推理": "Обработка модели", - "推理音色": "Обработка голоса:", - "刷新音色列表和索引路径": "Обновить список голосов и индексов", - "卸载音色省显存": "Выгрузить голос для сохранения памяти видеокарты:", - "请选择说话人id": "Выбери айди голоса:", - "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "Рекомендованно +12 для конвертирования мужского голоса в женский и -12 для конвертирования женского в мужской. Если диапазон голоса слищком велик и голос искажается, значение можно изменить на свой вкус.", - "变调(整数, 半音数量, 升八度12降八度-12)": "Высота голоса (число, полутоны, поднять на октаву: 12, понизить на октаву: -12):", - "输入待处理音频文件路径(默认是正确格式示例)": "Введите путь к аудиофайлу, который хотите переработать (по умолчанию введён правильный формат):", - "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "Выберите алгоритм вытаскивания тональности ('pm': быстрое извлечение но качество речи хуже; 'harvest': бассы лучше но очень медленный; 'crepe': лучшее качество но сильно использует видеокарту):", - ">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": "Если больше 3: применить медианную фильтрацию к вытащенным тональностям. Значение контролирует радиус фильтра и может уменьшить излишнее дыхание.", - "特征检索库文件路径,为空则使用下拉的选择结果": "Путь к файлу индекса черт. Оставьте пустым, чтобы использовать выбранный результат из списка:", - "自动检测index路径,下拉式选择(dropdown)": "Автоматически найти путь к индексу и выбрать его из списка:", - "特征文件路径": "Путь к файлу черт:", - "检索特征占比": "Соотношение поиска черт:", - "后处理重采样至最终采样率,0为不进行重采样": "Изменить частоту дискретизации в выходном файле на финальную. Поставьте 0, чтобы ничего не изменялось:", - "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "Использовать громкость входного файла для замены или перемешивания с громкостью выходного файла. Чем ближе соотношение к 1, тем больше используется звука из выходного файла:", - "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "Защитить глухие согласные и звуки дыхания для предотвращения артефактов, например разрывание в электронной музыке. Поставьте на 0.5, чтобы выключить. Уменьшите значение для повышения защиты, но при этом может ухудшиться аккуратность индексирования:", - "F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "Файл дуги F0 (не обязательно). Одна тональность на каждую строчку. Заменяет обычный F0 и модуляцию тональности:", - "转换": "Конвертировать", - "输出信息": "Выходная информация", - "输出音频(右下角三个点,点了可以下载)": "Экспортировать аудиофайл (нажми на три точки в правом нижнем углу для загрузки)", - "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "Конвертировать пачкой. Введите путь к папке, в которой находятся файлы для конвертирования или выложите несколько аудиофайлов. Сконвертированные файлы будут сохранены в указанной папке (по умолчанию 'opt').", - "指定输出文件夹": "Укажите выходную папку:", - "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "Введите путь к папке с аудио для переработки:", - "也可批量输入音频文件, 二选一, 优先读文件夹": "Вы также можете выложить аудиофайлы пачкой. Выберите одно из двух. Приоритет отдаётся считыванию из папки.", - "导出文件格式": "Формат выходного файла", - "伴奏人声分离&去混响&去回声": "Отделение вокала/инструментала и убирание эхо", - "人声伴奏分离批量处理, 使用UVR5模型。
合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。
模型分为三类:
1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;
2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;
3、去混响、去延迟模型(by FoxJoy):
  (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;
 (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。
去混响/去延迟,附:
1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;
2、MDX-Net-Dereverb模型挺慢的;
3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "Пакетная обработка для разделения вокального сопровождения с использованием модели UVR5.
Пример допустимого формата пути к папке: D:\\path\\to\\input\\folder
Модель разделена на три категории:
1. Сохранить вокал: выберите этот вариант для звука без гармоний. Он сохраняет вокал лучше, чем HP5. Он включает в себя две встроенные модели: HP2 и HP3. HP3 может немного пропускать инструментал, но сохраняет вокал немного лучше, чем HP2.
2. Сохранить только основной вокал: выберите этот вариант для звука с гармониями. Это может ослабить основной вокал. Он включает одну встроенную модель: HP5.
3. Модели удаления реверберации и задержки (от FoxJoy):
  (1) MDX-Net: лучший выбор для удаления стереореверберации, но он не может удалить монореверберацию;
 (234) DeEcho: удаляет эффекты задержки. Агрессивный режим удаляет более тщательно, чем Нормальный режим. DeReverb дополнительно удаляет реверберацию и может удалять монореверберацию, но не очень эффективно для сильно реверберированного высокочастотного контента.
Примечания по удалению реверберации/задержки:
1. Время обработки для модели DeEcho-DeReverb примерно в два раза больше, чем для двух других моделей DeEcho.
2. Модель MDX-Net-Dereverb довольно медленная.
3. Рекомендуемая самая чистая конфигурация — сначала применить MDX-Net, а затем DeEcho-Aggressive.", - "输入待处理音频文件夹路径": "Введите путь к папке с аудиофайлами для переработки:", - "模型": "Модели", - "指定输出主人声文件夹": "Введите путь к папке для вокала:", - "指定输出非主人声文件夹": "Введите путь к папке для инструментала:", - "训练": "Тренировка", - "step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "Шаг 1: Заполните настройки модели. Данные модели сохранены в папку 'logs' и для каждой модели создаётся отдельная папка. Введите вручную путь к настройкам для модели, в которой находятся логи и тренировочные файлы.", - "输入实验名": "Введите название модели:", - "目标采样率": "Частота дискретизации модели:", - "模型是否带音高指导(唱歌一定要, 语音可以不要)": "Наведение по тональности у модели (обязательно для пения, необязательно для речи):", - "版本": "Версия", - "提取音高和处理数据使用的CPU进程数": "Число процессов ЦП, используемое для вытаскивания тональностей и обрабротки данных:", - "step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "Шаг 2а: Автоматически пройтись по всем аудиофайлам в папке тренировки и нормализировать куски. Создаст 2 папки wav в папке модели. В данных момент поддерживается тренировка только одного голоса.", - "输入训练文件夹路径": "Введите путь к папке тренировки:", - "请指定说话人id": "Введите айди голоса:", - "处理数据": "Переработать данные", - "step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "Шаг 2б: Вытащить тональности с помошью процессора (если в модели есть тональности), вытащить черты с помощью видеокарты (выберите какой):", - "以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "Введите, какие(-ую) видеокарты(-у) хотите использовать через '-', например 0-1-2, чтобы использовать видеокарту 0, 1 и 2:", - "显卡信息": "Информация о видеокартах", - "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢": "Выберите алгоритм вытаскивания тональности ('pm': быстрое извлечение но качество речи хуже; 'harvest': бассы лучше но очень медленный; 'crepe': лучшее качество но сильно использует видеокарту):", - "特征提取": "Вытаскивание черт", - "step3: 填写训练设置, 开始训练模型和索引": "Шаг 3: Заполните остальные настройки тренировки и начните тренировать модель и индекс", - "保存频率save_every_epoch": "Частота сохранения (save_every_epoch):", - "总训练轮数total_epoch": "Полное количество эпох (total_epoch):", - "每张显卡的batch_size": "Размер пачки для видеокарты:", - "是否仅保存最新的ckpt文件以节省硬盘空间": "Сохранять только последний файл '.ckpt', чтобы сохранить место на диске:", - "否": "Нет", - "是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "Кэшировать все тренировочные сеты в видеопамять. 
Кэширование маленький датасетов (меньше 10 минут) может ускорить тренировку, но кэширование больших, наоборот, займёт много видеопамяти и не сильно ускорит тренировку:", - "是否在每次保存时间点将最终小模型保存至weights文件夹": "Сохранять маленькую финальную модель в папку 'weights' на каждой точке сохранения:", - "加载预训练底模G路径": "Путь к натренированой базовой модели G:", - "加载预训练底模D路径": "Путь к натренированой базовой модели D:", - "训练模型": "Тренировать модель", - "训练特征索引": "Тренировать индекс черт", - "一键训练": "Тренировка одним нажатием", - "ckpt处理": "Обработка ckpt", - "模型融合, 可用于测试音色融合": "Слияние моделей, может быть использовано для проверки слияния тембра", - "A模型路径": "Путь к модели А:", - "B模型路径": "Путь к модели Б:", - "A模型权重": "Вес (w) модели А::", - "模型是否带音高指导": "Есть ли у модели наведение по тональности (1: да, 0: нет):", - "要置入的模型信息": "Информация о модели:", - "保存的模型名不带后缀": "Название сохранённой модели (без расширения):", - "模型版本型号": "Версия архитектуры модели:", - "融合": "Слияние", - "修改模型信息(仅支持weights文件夹下提取的小模型文件)": "Модифицировать информацию о модели (поддерживается только для маленких моделей, взятых из папки 'weights')", - "模型路径": "Путь к папке:", - "要改的模型信息": "Информация о модели, которую нужно модифицировать:", - "保存的文件名, 默认空为和源文件同名": "Название сохранённого файла (по умолчанию такое же, как и входного):", - "修改": "Модифицировать", - "查看模型信息(仅支持weights文件夹下提取的小模型文件)": "Просмотреть информацию о модели (поддерживается только для маленких моделей, взятых из папки 'weights')", - "查看": "Просмотр", - "模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "Вытаскивание модели (введите путь к большому файлу модели в папке 'logs'). Полезно, если Вам нужно заверщить тренировку и вручную достать и сохранить маленький файл модели, или если Вам нужно проверить незаконченную модель:", - "保存名": "Имя сохранённого файла:", - "模型是否带音高指导,1是0否": "Есть ли у модели наведение по тональности (1: да, 0: нет):", - "提取": "Вытащить", - "Onnx导出": "Экспортировать Onnx", - "RVC模型路径": "Путь к модели RVC:", - "Onnx输出路径": "Путь для экспотрированного Onnx:", - "导出Onnx模型": "Экспортировать Onnx модель", - "常见问题解答": "ЧаВО (Часто задаваемые вопросы)", - "招募音高曲线前端编辑器": "Использование фронтенд редакторов для тональных дуг", - "加开发群联系我xxxxx": "Присоединитесь к группе разработки и свяжитесь со мной по xxxxx", - "点击查看交流、问题反馈群号": "Нажмите, чтобы просмотреть номер группы коммуникации и отзывах о проблемах", - "xxxxx": "xxxxx", - "加载模型": "Загрузить модель", - "Hubert模型": "Модель Hubert", - "选择.pth文件": "Выбрать файл .pth", - "选择.index文件": "Выбрать файл .index", - "选择.npy文件": "Выбрать файл .npy", - "输入设备": "Входное устройство", - "输出设备": "Выходное устройство", - "音频设备(请使用同种类驱动)": "Аудио устройство (пожалуйста используйте такой=же тип драйвера)", - "响应阈值": "Порог ответа", - "音调设置": "Настройки тональности", - "Index Rate": "Темп индекса", - "常规设置": "Основные настройки", - "采样长度": "Длина сэмпла", - "淡入淡出长度": "Длина затухания", - "额外推理时长": "Доп. 
время переработки", - "输入降噪": "Уменьшения шума во входной информации", - "输出降噪": "Уменьшения шума во выходной информации", - "性能设置": "Настройки быстроты", - "开始音频转换": "Начать конвертацию аудио", - "停止音频转换": "Закончить конвертацию аудио", - "推理时间(ms):": "Время переработки (мс):", - "请选择pth文件": "请选择pth文件", - "请选择index文件": "请选择index文件", - "hubert模型路径不可包含中文": "hubert模型路径不可包含中文", - "pth文件路径不可包含中文": "pth文件路径不可包含中文", - "index文件路径不可包含中文": "index文件路径不可包含中文", - "音高算法": "音高算法", - "harvest进程数": "harvest进程数" -} diff --git a/i18n/tr_TR.json b/i18n/tr_TR.json deleted file mode 100644 index 26daae852..000000000 --- a/i18n/tr_TR.json +++ /dev/null @@ -1,130 +0,0 @@ -{ - "很遗憾您这没有能用的显卡来支持您训练": "Maalesef, eğitiminizi desteklemek için uyumlu bir GPU bulunmamaktadır.", - "是": "Evet", - "step1:正在处理数据": "Adım 1: Veri işleme", - "step2a:无需提取音高": "Adım 2a: Pitch çıkartma adımını atlama", - "step2b:正在提取特征": "Adım 2b: Özelliklerin çıkarılması", - "step3a:正在训练模型": "Adım 3a: Model eğitimi başladı", - "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "Eğitim tamamlandı. Eğitim günlüklerini konsolda veya deney klasörü altındaki train.log dosyasında kontrol edebilirsiniz.", - "全流程结束!": "Tüm işlemler tamamlandı!", - "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "Bu yazılım, MIT lisansı altında açık kaynaklıdır. Yazarın yazılım üzerinde herhangi bir kontrolü yoktur. Yazılımı kullanan ve yazılım tarafından dışa aktarılan sesleri dağıtan kullanıcılar sorumludur.
Eğer bu maddeyle aynı fikirde değilseniz, yazılım paketi içindeki herhangi bir kod veya dosyayı kullanamaz veya referans göremezsiniz. Detaylar için kök dizindeki Agreement-LICENSE.txt dosyasına bakınız.", - "模型推理": "Model çıkartma (Inference)", - "推理音色": "Ses çıkartma (Inference):", - "刷新音色列表和索引路径": "Ses listesini ve indeks yolunu yenile", - "卸载音色省显存": "GPU bellek kullanımını azaltmak için sesi kaldır", - "请选择说话人id": "Konuşmacı/Şarkıcı No seçin:", - "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "Erkekten kadına çevirmek için +12 tuş önerilir, kadından erkeğe çevirmek için ise -12 tuş önerilir. Eğer ses aralığı çok fazla genişler ve ses bozulursa, isteğe bağlı olarak uygun aralığa kendiniz de ayarlayabilirsiniz.", - "变调(整数, 半音数量, 升八度12降八度-12)": "Transpoze et (tamsayı, yarıton sayısıyla; bir oktav yükseltmek için: 12, bir oktav düşürmek için: -12):", - "输入待处理音频文件路径(默认是正确格式示例)": "İşlenecek ses dosyasının yolunu girin (varsayılan doğru format örneğidir):", - "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "Pitch algoritmasını seçin ('pm': daha hızlı çıkarır ancak daha düşük kaliteli konuşma; 'harvest': daha iyi konuşma sesi ancak son derece yavaş; 'crepe': daha da iyi kalite ancak GPU yoğunluğu gerektirir):", - ">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": "Eğer >=3 ise, elde edilen pitch sonuçlarına median filtreleme uygula. Bu değer, filtre yarıçapını temsil eder ve nefesliliği azaltabilir.", - "特征检索库文件路径,为空则使用下拉的选择结果": "Özellik indeksi dosyasının yolunu belirtin. Seçilen sonucu kullanmak için boş bırakın veya açılır menüden seçim yapın.", - "自动检测index路径,下拉式选择(dropdown)": "İndeks yolunu otomatik olarak tespit et ve açılır menüden seçim yap.", - "特征文件路径": "Özellik dosyasının yolu:", - "检索特征占比": "Arama özelliği oranı (vurgu gücünü kontrol eder, çok yüksek olması sanal etkilere neden olur)", - "后处理重采样至最终采样率,0为不进行重采样": "Son işleme aşamasında çıktı sesini son örnekleme hızına yeniden örnekle. 0 değeri için yeniden örnekleme yapılmaz:", - "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "Sesin hacim zarfını ayarlayın. 0'a yakın değerler, sesin orijinal vokallerin hacmine benzer olmasını sağlar. Düşük bir değerle ses gürültüsünü maskeleyebilir ve hacmi daha doğal bir şekilde duyulabilir hale getirebilirsiniz. 1'e yaklaştıkça sürekli bir yüksek ses seviyesi elde edilir:", - "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "Sessiz ünsüzleri ve nefes seslerini koruyarak elektronik müzikte yırtılma gibi sanal hataların oluşmasını engeller. 0.5 olarak ayarlandığında devre dışı kalır. Değerin azaltılması korumayı artırabilir, ancak indeksleme doğruluğunu azaltabilir:", - "F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "F0 eğrisi dosyası (isteğe bağlı). Her satırda bir pitch değeri bulunur. Varsayılan F0 ve pitch modülasyonunu değiştirir:", - "转换": "Dönüştür", - "输出信息": "Çıkış bilgisi", - "输出音频(右下角三个点,点了可以下载)": "Ses dosyasını dışa aktar (indirmek için sağ alt köşedeki üç noktaya tıklayın)", - "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "Toplu dönüştür. Dönüştürülecek ses dosyalarının bulunduğu klasörü girin veya birden çok ses dosyasını yükleyin. Dönüştürülen ses dosyaları belirtilen klasöre ('opt' varsayılan olarak) dönüştürülecektir", - "指定输出文件夹": "Çıkış klasörünü belirt:", - "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "İşlenecek ses klasörünün yolunu girin (dosya yöneticisinin adres çubuğundan kopyalayın):", - "也可批量输入音频文件, 二选一, 优先读文件夹": "Toplu olarak ses dosyalarını da girebilirsiniz. İki seçenekten birini seçin. 
Öncelik klasörden okumaya verilir.", - "导出文件格式": "Dışa aktarma dosya formatı", - "伴奏人声分离&去混响&去回声": "Vokal/Müzik Ayrıştırma ve Yankı Giderme", - "人声伴奏分离批量处理, 使用UVR5模型。
合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。
模型分为三类:
1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;
2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;
3、去混响、去延迟模型(by FoxJoy):
  (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;
 (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。
去混响/去延迟,附:
1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;
2、MDX-Net-Dereverb模型挺慢的;
3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "Batch işleme kullanarak vokal eşlik ayrımı için UVR5 modeli kullanılır.
Geçerli bir klasör yol formatı örneği: D:\\path\\to\\input\\folder (dosya yöneticisi adres çubuğundan kopyalanır).
Model üç kategoriye ayrılır:
1. Vokalleri koru: Bu seçeneği, harmoni içermeyen sesler için kullanın. HP5'ten daha iyi bir şekilde vokalleri korur. İki dahili model içerir: HP2 ve HP3. HP3, eşlik sesini hafifçe sızdırabilir, ancak vokalleri HP2'den biraz daha iyi korur.
2. Sadece ana vokalleri koru: Bu seçeneği, harmoni içeren sesler için kullanın. Ana vokalleri zayıflatabilir. Bir dahili model içerir: HP5.
3. Reverb ve gecikme modelleri (FoxJoy tarafından):
  (1) MDX-Net: Stereo reverb'i kaldırmak için en iyi seçenek, ancak mono reverb'i kaldıramaz;
 (234) DeEcho: Gecikme efektlerini kaldırır. Agresif mod, Normal moda göre daha kapsamlı bir şekilde kaldırma yapar. DeReverb ayrıca reverb'i kaldırır ve mono reverb'i kaldırabilir, ancak yoğun yankılı yüksek frekanslı içerikler için çok etkili değildir.
Reverb/gecikme notları:
1. DeEcho-DeReverb modelinin işleme süresi diğer iki DeEcho modeline göre yaklaşık olarak iki kat daha uzundur.
2. MDX-Net-Dereverb modeli oldukça yavaştır.
3. Tavsiye edilen en temiz yapılandırma önce MDX-Net'i uygulamak ve ardından DeEcho-Aggressive uygulamaktır.", - "输入待处理音频文件夹路径": "İşlenecek ses klasörünün yolunu girin:", - "模型": "Model", - "指定输出主人声文件夹": "Vokal için çıkış klasörünü belirtin:", - "指定输出非主人声文件夹": "Müzik ve diğer sesler için çıkış klasörünü belirtin:", - "训练": "Eğitim", - "step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "Adım 1: Deneysel yapılandırmayı doldurun. Deneysel veriler 'logs' klasöründe saklanır ve her bir deney için ayrı bir klasör vardır. Deneysel adı yolu manuel olarak girin; bu yol, deneysel yapılandırmayı, günlükleri ve eğitilmiş model dosyalarını içerir.", - "输入实验名": "Deneysel adı girin:", - "目标采样率": "Hedef örnekleme oranı:", - "模型是否带音高指导(唱歌一定要, 语音可以不要)": "Modelin ses yüksekliği (Pitch) rehberliği içerip içermediği (şarkı söyleme için şarttır, konuşma için isteğe bağlıdır):", - "版本": "Sürüm", - "提取音高和处理数据使用的CPU进程数": "Ses yüksekliği çıkartmak (Pitch) ve verileri işlemek için kullanılacak CPU işlemci sayısı:", - "step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "Adım 2a: Eğitim klasöründe ses dosyalarını otomatik olarak gezinerek dilimleme normalizasyonu yapın. Deney dizini içinde 2 wav klasörü oluşturur. Şu anda sadece tek kişilik eğitim desteklenmektedir.", - "输入训练文件夹路径": "Eğitim klasörünün yolunu girin:", - "请指定说话人id": "Lütfen konuşmacı/sanatçı no belirtin:", - "处理数据": "Verileri işle", - "step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "Adım 2b: Ses yüksekliği (Pitch) çıkartmak için CPU kullanın (eğer model ses yüksekliği içeriyorsa), özellikleri çıkartmak için GPU kullanın (GPU indeksini seçin):", - "以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "GPU indekslerini '-' ile ayırarak girin, örneğin 0-1-2, GPU 0, 1 ve 2'yi kullanmak için:", - "显卡信息": "GPU Bilgisi", - "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢": "Ses yüksekliği (Pitch) çıkartma algoritmasını seçin ('pm': daha hızlı çıkartma, ancak düşük kaliteli konuşma; 'dio': geliştirilmiş konuşma kalitesi, ancak daha yavaş çıkartma; 'harvest': daha iyi kalite, ancak daha da yavaş çıkartma):", - "特征提取": "Özellik çıkartma", - "step3: 填写训练设置, 开始训练模型和索引": "Adım 3: Eğitim ayarlarını doldurun ve modeli ve dizini eğitmeye başlayın", - "保存频率save_every_epoch": "Kaydetme sıklığı (save_every_epoch):", - "总训练轮数total_epoch": "Toplam eğitim turu (total_epoch):", - "每张显卡的batch_size": "Her GPU için yığın boyutu (batch_size):", - "是否仅保存最新的ckpt文件以节省硬盘空间": "Sadece en son '.ckpt' dosyasını kaydet:", - "否": "Hayır", - "是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "Tüm eğitim verilerini GPU belleğine önbelleğe alıp almayacağınızı belirtin. 
Küçük veri setlerini (10 dakikadan az) önbelleğe almak eğitimi hızlandırabilir, ancak büyük veri setlerini önbelleğe almak çok fazla GPU belleği tüketir ve çok fazla hız artışı sağlamaz:", - "是否在每次保存时间点将最终小模型保存至weights文件夹": "Her kaydetme noktasında son küçük bir modeli 'weights' klasörüne kaydetmek için:", - "加载预训练底模G路径": "Önceden eğitilmiş temel G modelini yükleme yolu:", - "加载预训练底模D路径": "Önceden eğitilmiş temel D modelini yükleme yolu:", - "训练模型": "Modeli Eğit", - "训练特征索引": "Özellik Dizinini Eğit", - "一键训练": "Tek Tuşla Eğit", - "ckpt处理": "ckpt İşleme", - "模型融合, 可用于测试音色融合": "Model birleştirme, ses rengi birleştirmesi için kullanılabilir", - "A模型路径": "A Modeli Yolu:", - "B模型路径": "B Modeli Yolu:", - "A模型权重": "A Modeli Ağırlığı:", - "模型是否带音高指导": "Modelin ses yüksekliği rehberi içerip içermediği:", - "要置入的模型信息": "Eklemek için model bilgileri:", - "保存的模型名不带后缀": "Kaydedilecek model adı (uzantı olmadan):", - "模型版本型号": "Model mimari versiyonu:", - "融合": "Birleştir", - "修改模型信息(仅支持weights文件夹下提取的小模型文件)": "Model bilgilerini düzenle (sadece 'weights' klasöründen çıkarılan küçük model dosyaları desteklenir)", - "模型路径": "Model Yolu:", - "要改的模型信息": "Düzenlenecek model bilgileri:", - "保存的文件名, 默认空为和源文件同名": "Kaydedilecek dosya adı (varsayılan: kaynak dosya ile aynı):", - "修改": "Düzenle", - "查看模型信息(仅支持weights文件夹下提取的小模型文件)": "Model bilgilerini görüntüle (sadece 'weights' klasöründen çıkarılan küçük model dosyaları desteklenir)", - "查看": "Görüntüle", - "模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "Model çıkartma (büyük dosya modeli yolunu 'logs' klasöründe girin). Bu, eğitimi yarıda bırakmak istediğinizde ve manuel olarak küçük bir model dosyası çıkartmak ve kaydetmek istediğinizde veya bir ara modeli test etmek istediğinizde kullanışlıdır:", - "保存名": "Kaydetme Adı:", - "模型是否带音高指导,1是0否": "Modelin ses yüksekliği rehberi içerip içermediği (1: evet, 0: hayır):", - "提取": "Çıkart", - "Onnx导出": "Onnx Dışa Aktar", - "RVC模型路径": "RVC Model Yolu:", - "Onnx输出路径": "Onnx Dışa Aktarım Yolu:", - "导出Onnx模型": "Onnx Modeli Dışa Aktar", - "常见问题解答": "Sıkça Sorulan Sorular (SSS)", - "招募音高曲线前端编辑器": "Ses yükseklik eğrisi ön uç düzenleyicisi için işe alım", - "加开发群联系我xxxxx": "Geliştirme grubuna katılın ve benimle iletişime geçin: xxxxx", - "点击查看交流、问题反馈群号": "İletişim ve sorun geri bildirim grup numarasını görüntülemek için tıklayın", - "xxxxx": "xxxxx", - "加载模型": "Model yükle", - "Hubert模型": "Hubert Modeli", - "选择.pth文件": ".pth dosyası seç", - "选择.index文件": ".index dosyası seç", - "选择.npy文件": ".npy dosyası seç", - "输入设备": "Giriş cihazı", - "输出设备": "Çıkış cihazı", - "音频设备(请使用同种类驱动)": "Ses cihazı (aynı tür sürücüyü kullanın)", - "响应阈值": "Tepki eşiği", - "音调设置": "Pitch ayarları", - "Index Rate": "Index Oranı", - "常规设置": "Genel ayarlar", - "采样长度": "Örnekleme uzunluğu", - "淡入淡出长度": "Geçiş (Fade) uzunluğu", - "额外推理时长": "Ekstra çıkartma süresi", - "输入降噪": "Giriş gürültü azaltma", - "输出降噪": "Çıkış gürültü azaltma", - "性能设置": "Performans ayarları", - "开始音频转换": "Ses dönüştürmeyi başlat", - "停止音频转换": "Ses dönüştürmeyi durdur", - "推理时间(ms):": "Çıkarsama süresi (ms):", - "请选择pth文件": "Lütfen .pth dosyası seçin", - "请选择index文件": "Lütfen .index dosyası seçin", - "hubert模型路径不可包含中文": "hubert modeli yolu Çince karakter içeremez", - "pth文件路径不可包含中文": ".pth dosya yolu Çince karakter içeremez", - "index文件路径不可包含中文": ".index dosya yolu Çince karakter içeremez", - "音高算法": "音高算法", - "harvest进程数": "harvest进程数" -} diff --git a/i18n/zh_CN.json b/i18n/zh_CN.json deleted file mode 100644 index 31e5d8590..000000000 --- a/i18n/zh_CN.json +++ 
/dev/null @@ -1,132 +0,0 @@ -{ - "很遗憾您这没有能用的显卡来支持您训练": "很遗憾您这没有能用的显卡来支持您训练", - "是": "是", - "step1:正在处理数据": "step1:正在处理数据", - "step2a:无需提取音高": "step2a:无需提取音高", - "step2b:正在提取特征": "step2b:正在提取特征", - "step3a:正在训练模型": "step3a:正在训练模型", - "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log", - "全流程结束!": "全流程结束!", - "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.", - "模型推理": "模型推理", - "推理音色": "推理音色", - "刷新音色列表和索引路径": "刷新音色列表和索引路径", - "卸载音色省显存": "卸载音色省显存", - "请选择说话人id": "请选择说话人id", - "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ", - "变调(整数, 半音数量, 升八度12降八度-12)": "变调(整数, 半音数量, 升八度12降八度-12)", - "输入待处理音频文件路径(默认是正确格式示例)": "输入待处理音频文件路径(默认是正确格式示例)", - "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU", - "crepe_hop_length": "Crepe Hop Length (Only applies to crepe): Hop length refers to the time it takes for the speaker to jump to a dramatic pitch. Lower hop lengths take more time to infer but are more pitch accurate.", - "特征检索库文件路径": "特征检索库文件路径", - "特征检索库文件路径,为空则使用下拉的选择结果": "特征检索库文件路径,为空则使用下拉的选择结果", - ">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": ">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音", - "自动检测index路径,下拉式选择(dropdown)": "自动检测index路径,下拉式选择(dropdown)", - "特征文件路径": "特征文件路径", - "检索特征占比": "检索特征占比", - "后处理重采样至最终采样率,0为不进行重采样": "后处理重采样至最终采样率,0为不进行重采样", - "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络", - "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果", - "F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调", - "转换": "转换", - "输出信息": "输出信息", - "输出音频(右下角三个点,点了可以下载)": "输出音频(右下角三个点,点了可以下载)", - "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ", - "指定输出文件夹": "指定输出文件夹", - "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)", - "也可批量输入音频文件, 二选一, 优先读文件夹": "也可批量输入音频文件, 二选一, 优先读文件夹", - "导出文件格式": "导出文件格式", - "伴奏人声分离&去混响&去回声": "伴奏人声分离&去混响&去回声", - "人声伴奏分离批量处理, 使用UVR5模型。
合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。
模型分为三类:
1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;
2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;
3、去混响、去延迟模型(by FoxJoy):
  (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;
 (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。
去混响/去延迟,附:
1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;
2、MDX-Net-Dereverb模型挺慢的;
3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "人声伴奏分离批量处理, 使用UVR5模型。
合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。
模型分为三类:
1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;
2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;
3、去混响、去延迟模型(by FoxJoy):
  (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;
 (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。
去混响/去延迟,附:
1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;
2、MDX-Net-Dereverb模型挺慢的;
3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。", - "输入待处理音频文件夹路径": "输入待处理音频文件夹路径", - "模型": "模型", - "指定输出主人声文件夹": "指定输出主人声文件夹", - "指定输出非主人声文件夹": "指定输出非主人声文件夹", - "训练": "训练", - "step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ", - "输入实验名": "输入实验名", - "目标采样率": "目标采样率", - "模型是否带音高指导(唱歌一定要, 语音可以不要)": "模型是否带音高指导(唱歌一定要, 语音可以不要)", - "版本": "版本", - "提取音高和处理数据使用的CPU进程数": "提取音高和处理数据使用的CPU进程数", - "step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ", - "输入训练文件夹路径": "输入训练文件夹路径", - "请指定说话人id": "请指定说话人id", - "处理数据": "处理数据", - "step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)", - "以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2", - "显卡信息": "显卡信息", - "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢": "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢", - "特征提取": "特征提取", - "step3: 填写训练设置, 开始训练模型和索引": "step3: 填写训练设置, 开始训练模型和索引", - "保存频率save_every_epoch": "保存频率save_every_epoch", - "总训练轮数total_epoch": "总训练轮数total_epoch", - "每张显卡的batch_size": "每张显卡的batch_size", - "是否仅保存最新的ckpt文件以节省硬盘空间": "是否仅保存最新的ckpt文件以节省硬盘空间", - "否": "否", - "是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速", - "是否在每次保存时间点将最终小模型保存至weights文件夹": "是否在每次保存时间点将最终小模型保存至weights文件夹", - "加载预训练底模G路径": "加载预训练底模G路径", - "加载预训练底模D路径": "加载预训练底模D路径", - "训练模型": "训练模型", - "训练特征索引": "训练特征索引", - "一键训练": "一键训练", - "ckpt处理": "ckpt处理", - "模型融合, 可用于测试音色融合": "模型融合, 可用于测试音色融合", - "A模型路径": "A模型路径", - "B模型路径": "B模型路径", - "A模型权重": "A模型权重", - "模型是否带音高指导": "模型是否带音高指导", - "要置入的模型信息": "要置入的模型信息", - "保存的模型名不带后缀": "保存的模型名不带后缀", - "模型版本型号": "模型版本型号", - "融合": "融合", - "修改模型信息(仅支持weights文件夹下提取的小模型文件)": "修改模型信息(仅支持weights文件夹下提取的小模型文件)", - "模型路径": "模型路径", - "要改的模型信息": "要改的模型信息", - "保存的文件名, 默认空为和源文件同名": "保存的文件名, 默认空为和源文件同名", - "修改": "修改", - "查看模型信息(仅支持weights文件夹下提取的小模型文件)": "查看模型信息(仅支持weights文件夹下提取的小模型文件)", - "查看": "查看", - "模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况", - "保存名": "保存名", - "模型是否带音高指导,1是0否": "模型是否带音高指导,1是0否", - "提取": "提取", - "Onnx导出": "Onnx导出", - "RVC模型路径": "RVC模型路径", - "Onnx输出路径": "Onnx输出路径", - "导出Onnx模型": "导出Onnx模型", - "常见问题解答": "常见问题解答", - "招募音高曲线前端编辑器": "招募音高曲线前端编辑器", - "加开发群联系我xxxxx": "加开发群联系我xxxxx", - "点击查看交流、问题反馈群号": "点击查看交流、问题反馈群号", - "xxxxx": "xxxxx", - "加载模型": "加载模型", - "Hubert模型": "Hubert模型", - "选择.pth文件": "选择.pth文件", - "选择.index文件": "选择.index文件", - "选择.npy文件": "选择.npy文件", - "输入设备": "输入设备", - "输出设备": "输出设备", - "音频设备(请使用同种类驱动)": "音频设备(请使用同种类驱动)", - "响应阈值": "响应阈值", - "音调设置": "音调设置", - "Index Rate": "Index Rate", - "常规设置": "常规设置", - "采样长度": "采样长度", - "淡入淡出长度": "淡入淡出长度", - "额外推理时长": "额外推理时长", - "输入降噪": "输入降噪", - "输出降噪": "输出降噪", - "性能设置": "性能设置", - "开始音频转换": "开始音频转换", - "停止音频转换": "停止音频转换", - "推理时间(ms):": "推理时间(ms):", - "请选择pth文件": "请选择pth文件", - "请选择index文件": "请选择index文件", - "hubert模型路径不可包含中文": "hubert模型路径不可包含中文", - "pth文件路径不可包含中文": "pth文件路径不可包含中文", - "index文件路径不可包含中文": "index文件路径不可包含中文", - "音高算法": "音高算法", - "harvest进程数": "harvest进程数" -} diff --git a/i18n/zh_HK.json b/i18n/zh_HK.json deleted file mode 100644 index 5857c8632..000000000 --- a/i18n/zh_HK.json +++ /dev/null @@ -1,132 +0,0 @@ -{ - "很遗憾您这没有能用的显卡来支持您训练": "很遗憾您这没有能用的显卡来支持您训练", - "是": "是", - "step1:正在处理数据": "step1:正在处理数据", - "step2a:无需提取音高": "step2a:无需提取音高", - "step2b:正在提取特征": "step2b:正在提取特征", - "step3a:正在训练模型": 
"step3a:正在训练模型", - "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log", - "全流程结束!": "全流程结束!", - "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "本軟體以MIT協議開源,作者不對軟體具備任何控制力,使用軟體者、傳播軟體導出的聲音者自負全責。
如不認可該條款,則不能使用或引用軟體包內任何程式碼和檔案。詳見根目錄使用需遵守的協議-LICENSE.txt。", - "模型推理": "模型推理", - "推理音色": "推理音色", - "刷新音色列表和索引路径": "刷新音色列表和索引路徑", - "卸载音色省显存": "卸載音色節省 VRAM", - "请选择说话人id": "請選擇說話人ID", - "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "男性轉女性推薦+12key,女性轉男性推薦-12key,如果音域爆炸導致音色失真也可以自己調整到合適音域。", - "变调(整数, 半音数量, 升八度12降八度-12)": "變調(整數、半音數量、升八度12降八度-12)", - "输入待处理音频文件路径(默认是正确格式示例)": "輸入待處理音頻檔案路徑(預設是正確格式示例)", - "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "選擇音高提取演算法,輸入歌聲可用pm提速,harvest低音好但巨慢無比,crepe效果好但吃GPU", - "crepe_hop_length": "Crepe Hop Length (Only applies to crepe): Hop length refers to the time it takes for the speaker to jump to a dramatic pitch. Lower hop lengths take more time to infer but are more pitch accurate.", - "特征检索库文件路径": "特徵檢索庫檔案路徑", - "自动检测index路径,下拉式选择(dropdown)": "自動檢測index路徑,下拉式選擇(dropdown)", - ">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": ">=3則使用對harvest音高識別的結果使用中值濾波,數值為濾波半徑,使用可以削弱啞音", - "特征检索库文件路径,为空则使用下拉的选择结果": "特徵檢索庫檔路徑,為空則使用下拉的選擇結果", - "特征文件路径": "特徵檔案路徑", - "检索特征占比": "檢索特徵佔比", - "后处理重采样至最终采样率,0为不进行重采样": "後處理重採樣至最終採樣率,0為不進行重採樣", - "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "輸入源音量包絡替換輸出音量包絡融合比例,越靠近1越使用輸出包絡", - "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "保護清輔音和呼吸聲,防止電音撕裂等artifact,拉滿0.5不開啟,調低加大保護力度但可能降低索引效果", - "F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "F0曲線檔案,可選,一行一個音高,代替預設的F0及升降調", - "转换": "轉換", - "输出信息": "輸出訊息", - "输出音频(右下角三个点,点了可以下载)": "輸出音頻(右下角三個點,點了可以下載)", - "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "批量轉換,輸入待轉換音頻資料夾,或上傳多個音頻檔案,在指定資料夾(默認opt)下輸出轉換的音頻。", - "指定输出文件夹": "指定輸出資料夾", - "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "輸入待處理音頻資料夾路徑(去檔案管理器地址欄拷貝即可)", - "也可批量输入音频文件, 二选一, 优先读文件夹": "也可批量輸入音頻檔案,二選一,優先讀資料夾", - "导出文件格式": "導出檔格式", - "伴奏人声分离&去混响&去回声": "伴奏人聲分離&去混響&去回聲", - "人声伴奏分离批量处理, 使用UVR5模型。
合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。
模型分为三类:
1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;
2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;
3、去混响、去延迟模型(by FoxJoy):
  (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;
 (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。
去混响/去延迟,附:
1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;
2、MDX-Net-Dereverb模型挺慢的;
3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "使用UVR5模型進行人聲伴奏分離的批次處理。
有效資料夾路徑格式的例子:D:\\path\\to\\input\\folder(從檔案管理員地址欄複製)。
模型分為三類:
1. 保留人聲:選擇這個選項適用於沒有和聲的音訊。它比HP5更好地保留了人聲。它包括兩個內建模型:HP2和HP3。HP3可能輕微漏出伴奏,但比HP2更好地保留了人聲;
2. 僅保留主人聲:選擇這個選項適用於有和聲的音訊。它可能會削弱主人聲。它包括一個內建模型:HP5。
3. 消除混響和延遲模型(由FoxJoy提供):
  (1) MDX-Net:對於立體聲混響的移除是最好的選擇,但不能移除單聲道混響;
 (234) DeEcho:移除延遲效果。Aggressive模式比Normal模式移除得更徹底。DeReverb另外移除混響,可以移除單聲道混響,但對於高頻重的板式混響移除不乾淨。
消除混響/延遲注意事項:
1. DeEcho-DeReverb模型的處理時間是其他兩個DeEcho模型的近兩倍;
2. MDX-Net-Dereverb模型相當慢;
3. 個人推薦的最乾淨配置是先使用MDX-Net,然後使用DeEcho-Aggressive。", - "输入待处理音频文件夹路径": "輸入待處理音頻資料夾路徑", - "模型": "模型", - "指定输出主人声文件夹": "指定输出主人声文件夹", - "指定输出非主人声文件夹": "指定输出非主人声文件夹", - "训练": "訓練", - "step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "step1:填寫實驗配置。實驗數據放在logs下,每個實驗一個資料夾,需手動輸入實驗名路徑,內含實驗配置、日誌、訓練得到的模型檔案。", - "输入实验名": "輸入實驗名稱", - "目标采样率": "目標取樣率", - "模型是否带音高指导(唱歌一定要, 语音可以不要)": "模型是否帶音高指導(唱歌一定要,語音可以不要)", - "版本": "版本", - "提取音高和处理数据使用的CPU进程数": "提取音高和處理數據使用的CPU進程數", - "step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "step2a:自動遍歷訓練資料夾下所有可解碼成音頻的檔案並進行切片歸一化,在實驗目錄下生成2個wav資料夾;暫時只支援單人訓練。", - "输入训练文件夹路径": "輸入訓練檔案夾路徑", - "请指定说话人id": "請指定說話人id", - "处理数据": "處理資料", - "step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "步驟2b: 使用CPU提取音高(如果模型帶音高), 使用GPU提取特徵(選擇卡號)", - "以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "以-分隔輸入使用的卡號, 例如 0-1-2 使用卡0和卡1和卡2", - "显卡信息": "顯示卡資訊", - "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢": "選擇音高提取算法:輸入歌聲可用pm提速,高品質語音但CPU差可用dio提速,harvest品質更好但較慢", - "特征提取": "特徵提取", - "step3: 填写训练设置, 开始训练模型和索引": "步驟3: 填寫訓練設定, 開始訓練模型和索引", - "保存频率save_every_epoch": "保存頻率save_every_epoch", - "总训练轮数total_epoch": "總訓練輪數total_epoch", - "每张显卡的batch_size": "每张显卡的batch_size", - "是否仅保存最新的ckpt文件以节省硬盘空间": "是否僅保存最新的ckpt檔案以節省硬碟空間", - "否": "否", - "是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "是否緩存所有訓練集至 VRAM。小於10分鐘的小數據可緩存以加速訓練,大數據緩存會爆 VRAM 也加不了多少速度", - "是否在每次保存时间点将最终小模型保存至weights文件夹": "是否在每次保存時間點將最終小模型保存至weights檔夾", - "加载预训练底模G路径": "加載預訓練底模G路徑", - "加载预训练底模D路径": "加載預訓練底模D路徑", - "训练模型": "訓練模型", - "训练特征索引": "訓練特徵索引", - "一键训练": "一鍵訓練", - "ckpt处理": "ckpt處理", - "模型融合, 可用于测试音色融合": "模型融合,可用於測試音色融合", - "A模型路径": "A模型路徑", - "B模型路径": "B模型路徑", - "A模型权重": "A模型權重", - "模型是否带音高指导": "模型是否帶音高指導", - "要置入的模型信息": "要置入的模型資訊", - "保存的模型名不带后缀": "儲存的模型名不帶副檔名", - "模型版本型号": "模型版本型號", - "融合": "融合", - "修改模型信息(仅支持weights文件夹下提取的小模型文件)": "修改模型資訊(僅支援weights資料夾下提取的小模型檔案)", - "模型路径": "模型路徑", - "要改的模型信息": "要改的模型資訊", - "保存的文件名, 默认空为和源文件同名": "儲存的檔案名,預設空為與來源檔案同名", - "修改": "修改", - "查看模型信息(仅支持weights文件夹下提取的小模型文件)": "查看模型資訊(僅支援weights資料夾下提取的小模型檔案)", - "查看": "查看", - "模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "模型提取(輸入logs資料夾下大檔案模型路徑),適用於訓一半不想訓了模型沒有自動提取儲存小檔案模型,或者想測試中間模型的情況", - "保存名": "儲存名", - "模型是否带音高指导,1是0否": "模型是否帶音高指導,1是0否", - "提取": "提取", - "Onnx导出": "Onnx导出", - "RVC模型路径": "RVC模型路径", - "Onnx输出路径": "Onnx输出路径", - "导出Onnx模型": "导出Onnx模型", - "常见问题解答": "常見問題解答", - "招募音高曲线前端编辑器": "招募音高曲線前端編輯器", - "加开发群联系我xxxxx": "加開發群聯繫我xxxxx", - "点击查看交流、问题反馈群号": "點擊查看交流、問題反饋群號", - "xxxxx": "xxxxx", - "加载模型": "載入模型", - "Hubert模型": "Hubert 模型", - "选择.pth文件": "選擇 .pth 檔案", - "选择.index文件": "選擇 .index 檔案", - "选择.npy文件": "選擇 .npy 檔案", - "输入设备": "輸入設備", - "输出设备": "輸出設備", - "音频设备(请使用同种类驱动)": "音訊設備 (請使用同種類驅動)", - "响应阈值": "響應閾值", - "音调设置": "音調設定", - "Index Rate": "Index Rate", - "常规设置": "一般設定", - "采样长度": "取樣長度", - "淡入淡出长度": "淡入淡出長度", - "额外推理时长": "額外推理時長", - "输入降噪": "輸入降噪", - "输出降噪": "輸出降噪", - "性能设置": "效能設定", - "开始音频转换": "開始音訊轉換", - "停止音频转换": "停止音訊轉換", - "推理时间(ms):": "推理時間(ms):", - "请选择pth文件": "请选择pth文件", - "请选择index文件": "请选择index文件", - "hubert模型路径不可包含中文": "hubert模型路径不可包含中文", - "pth文件路径不可包含中文": "pth文件路径不可包含中文", - "index文件路径不可包含中文": "index文件路径不可包含中文", - "音高算法": "音高算法", - "harvest进程数": "harvest进程数" -} diff --git a/i18n/zh_SG.json b/i18n/zh_SG.json deleted file mode 100644 index 2f2a73f87..000000000 --- a/i18n/zh_SG.json +++ /dev/null @@ -1,132 +0,0 @@ -{ - "很遗憾您这没有能用的显卡来支持您训练": "很遗憾您这没有能用的显卡来支持您训练", - "是": "是", - "step1:正在处理数据": "step1:正在处理数据", - "step2a:无需提取音高": "step2a:无需提取音高", - "step2b:正在提取特征": "step2b:正在提取特征", - "step3a:正在训练模型": 
"step3a:正在训练模型", - "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log", - "全流程结束!": "全流程结束!", - "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "本軟體以MIT協議開源,作者不對軟體具備任何控制力,使用軟體者、傳播軟體導出的聲音者自負全責。
如不認可該條款,則不能使用或引用軟體包內任何程式碼和檔案。詳見根目錄使用需遵守的協議-LICENSE.txt。", - "模型推理": "模型推理", - "推理音色": "推理音色", - "刷新音色列表和索引路径": "刷新音色列表和索引路徑", - "卸载音色省显存": "卸載音色節省 VRAM", - "请选择说话人id": "請選擇說話人ID", - "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "男性轉女性推薦+12key,女性轉男性推薦-12key,如果音域爆炸導致音色失真也可以自己調整到合適音域。", - "变调(整数, 半音数量, 升八度12降八度-12)": "變調(整數、半音數量、升八度12降八度-12)", - "输入待处理音频文件路径(默认是正确格式示例)": "輸入待處理音頻檔案路徑(預設是正確格式示例)", - "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "選擇音高提取演算法,輸入歌聲可用pm提速,harvest低音好但巨慢無比,crepe效果好但吃GPU", - "crepe_hop_length": "Crepe Hop Length (Only applies to crepe): Hop length refers to the time it takes for the speaker to jump to a dramatic pitch. Lower hop lengths take more time to infer but are more pitch accurate.", - "特征检索库文件路径": "特徵檢索庫檔案路徑", - "特征文件路径": "特徵檔案路徑", - ">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": ">=3則使用對harvest音高識別的結果使用中值濾波,數值為濾波半徑,使用可以削弱啞音", - "特征检索库文件路径,为空则使用下拉的选择结果": "特徵檢索庫檔路徑,為空則使用下拉的選擇結果", - "自动检测index路径,下拉式选择(dropdown)": "自動檢測index路徑,下拉式選擇(dropdown)", - "检索特征占比": "檢索特徵佔比", - "后处理重采样至最终采样率,0为不进行重采样": "後處理重採樣至最終採樣率,0為不進行重採樣", - "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "輸入源音量包絡替換輸出音量包絡融合比例,越靠近1越使用輸出包絡", - "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "保護清輔音和呼吸聲,防止電音撕裂等artifact,拉滿0.5不開啟,調低加大保護力度但可能降低索引效果", - "F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "F0曲線檔案,可選,一行一個音高,代替預設的F0及升降調", - "转换": "轉換", - "输出信息": "輸出訊息", - "输出音频(右下角三个点,点了可以下载)": "輸出音頻(右下角三個點,點了可以下載)", - "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "批量轉換,輸入待轉換音頻資料夾,或上傳多個音頻檔案,在指定資料夾(默認opt)下輸出轉換的音頻。", - "指定输出文件夹": "指定輸出資料夾", - "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "輸入待處理音頻資料夾路徑(去檔案管理器地址欄拷貝即可)", - "也可批量输入音频文件, 二选一, 优先读文件夹": "也可批量輸入音頻檔案,二選一,優先讀資料夾", - "导出文件格式": "導出檔格式", - "伴奏人声分离&去混响&去回声": "伴奏人聲分離&去混響&去回聲", - "人声伴奏分离批量处理, 使用UVR5模型。
合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。
模型分为三类:
1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;
2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;
3、去混响、去延迟模型(by FoxJoy):
  (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;
 (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。
去混响/去延迟,附:
1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;
2、MDX-Net-Dereverb模型挺慢的;
3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "使用UVR5模型進行人聲伴奏分離的批次處理。
有效資料夾路徑格式的例子:D:\\path\\to\\input\\folder(從檔案管理員地址欄複製)。
模型分為三類:
1. 保留人聲:選擇這個選項適用於沒有和聲的音訊。它比HP5更好地保留了人聲。它包括兩個內建模型:HP2和HP3。HP3可能輕微漏出伴奏,但比HP2更好地保留了人聲;
2. 僅保留主人聲:選擇這個選項適用於有和聲的音訊。它可能會削弱主人聲。它包括一個內建模型:HP5。
3. 消除混響和延遲模型(由FoxJoy提供):
  (1) MDX-Net:對於立體聲混響的移除是最好的選擇,但不能移除單聲道混響;
 (234) DeEcho:移除延遲效果。Aggressive模式比Normal模式移除得更徹底。DeReverb另外移除混響,可以移除單聲道混響,但對於高頻重的板式混響移除不乾淨。
消除混響/延遲注意事項:
1. DeEcho-DeReverb模型的處理時間是其他兩個DeEcho模型的近兩倍;
2. MDX-Net-Dereverb模型相當慢;
3. 個人推薦的最乾淨配置是先使用MDX-Net,然後使用DeEcho-Aggressive。", - "输入待处理音频文件夹路径": "輸入待處理音頻資料夾路徑", - "模型": "模型", - "指定输出主人声文件夹": "指定输出主人声文件夹", - "指定输出非主人声文件夹": "指定输出非主人声文件夹", - "训练": "訓練", - "step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "step1:填寫實驗配置。實驗數據放在logs下,每個實驗一個資料夾,需手動輸入實驗名路徑,內含實驗配置、日誌、訓練得到的模型檔案。", - "输入实验名": "輸入實驗名稱", - "目标采样率": "目標取樣率", - "模型是否带音高指导(唱歌一定要, 语音可以不要)": "模型是否帶音高指導(唱歌一定要,語音可以不要)", - "版本": "版本", - "提取音高和处理数据使用的CPU进程数": "提取音高和處理數據使用的CPU進程數", - "step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "step2a:自動遍歷訓練資料夾下所有可解碼成音頻的檔案並進行切片歸一化,在實驗目錄下生成2個wav資料夾;暫時只支援單人訓練。", - "输入训练文件夹路径": "輸入訓練檔案夾路徑", - "请指定说话人id": "請指定說話人id", - "处理数据": "處理資料", - "step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "步驟2b: 使用CPU提取音高(如果模型帶音高), 使用GPU提取特徵(選擇卡號)", - "以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "以-分隔輸入使用的卡號, 例如 0-1-2 使用卡0和卡1和卡2", - "显卡信息": "顯示卡資訊", - "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢": "選擇音高提取算法:輸入歌聲可用pm提速,高品質語音但CPU差可用dio提速,harvest品質更好但較慢", - "特征提取": "特徵提取", - "step3: 填写训练设置, 开始训练模型和索引": "步驟3: 填寫訓練設定, 開始訓練模型和索引", - "保存频率save_every_epoch": "保存頻率save_every_epoch", - "总训练轮数total_epoch": "總訓練輪數total_epoch", - "每张显卡的batch_size": "每张显卡的batch_size", - "是否仅保存最新的ckpt文件以节省硬盘空间": "是否僅保存最新的ckpt檔案以節省硬碟空間", - "否": "否", - "是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "是否緩存所有訓練集至 VRAM。小於10分鐘的小數據可緩存以加速訓練,大數據緩存會爆 VRAM 也加不了多少速度", - "是否在每次保存时间点将最终小模型保存至weights文件夹": "是否在每次保存時間點將最終小模型保存至weights檔夾", - "加载预训练底模G路径": "加載預訓練底模G路徑", - "加载预训练底模D路径": "加載預訓練底模D路徑", - "训练模型": "訓練模型", - "训练特征索引": "訓練特徵索引", - "一键训练": "一鍵訓練", - "ckpt处理": "ckpt處理", - "模型融合, 可用于测试音色融合": "模型融合,可用於測試音色融合", - "A模型路径": "A模型路徑", - "B模型路径": "B模型路徑", - "A模型权重": "A模型權重", - "模型是否带音高指导": "模型是否帶音高指導", - "要置入的模型信息": "要置入的模型資訊", - "保存的模型名不带后缀": "儲存的模型名不帶副檔名", - "模型版本型号": "模型版本型號", - "融合": "融合", - "修改模型信息(仅支持weights文件夹下提取的小模型文件)": "修改模型資訊(僅支援weights資料夾下提取的小模型檔案)", - "模型路径": "模型路徑", - "要改的模型信息": "要改的模型資訊", - "保存的文件名, 默认空为和源文件同名": "儲存的檔案名,預設空為與來源檔案同名", - "修改": "修改", - "查看模型信息(仅支持weights文件夹下提取的小模型文件)": "查看模型資訊(僅支援weights資料夾下提取的小模型檔案)", - "查看": "查看", - "模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "模型提取(輸入logs資料夾下大檔案模型路徑),適用於訓一半不想訓了模型沒有自動提取儲存小檔案模型,或者想測試中間模型的情況", - "保存名": "儲存名", - "模型是否带音高指导,1是0否": "模型是否帶音高指導,1是0否", - "提取": "提取", - "Onnx导出": "Onnx导出", - "RVC模型路径": "RVC模型路径", - "Onnx输出路径": "Onnx输出路径", - "导出Onnx模型": "导出Onnx模型", - "常见问题解答": "常見問題解答", - "招募音高曲线前端编辑器": "招募音高曲線前端編輯器", - "加开发群联系我xxxxx": "加開發群聯繫我xxxxx", - "点击查看交流、问题反馈群号": "點擊查看交流、問題反饋群號", - "xxxxx": "xxxxx", - "加载模型": "載入模型", - "Hubert模型": "Hubert 模型", - "选择.pth文件": "選擇 .pth 檔案", - "选择.index文件": "選擇 .index 檔案", - "选择.npy文件": "選擇 .npy 檔案", - "输入设备": "輸入設備", - "输出设备": "輸出設備", - "音频设备(请使用同种类驱动)": "音訊設備 (請使用同種類驅動)", - "响应阈值": "響應閾值", - "音调设置": "音調設定", - "Index Rate": "Index Rate", - "常规设置": "一般設定", - "采样长度": "取樣長度", - "淡入淡出长度": "淡入淡出長度", - "额外推理时长": "額外推理時長", - "输入降噪": "輸入降噪", - "输出降噪": "輸出降噪", - "性能设置": "效能設定", - "开始音频转换": "開始音訊轉換", - "停止音频转换": "停止音訊轉換", - "推理时间(ms):": "推理時間(ms):", - "请选择pth文件": "请选择pth文件", - "请选择index文件": "请选择index文件", - "hubert模型路径不可包含中文": "hubert模型路径不可包含中文", - "pth文件路径不可包含中文": "pth文件路径不可包含中文", - "index文件路径不可包含中文": "index文件路径不可包含中文", - "音高算法": "音高算法", - "harvest进程数": "harvest进程数" -} diff --git a/i18n/zh_TW.json b/i18n/zh_TW.json deleted file mode 100644 index c8f3340b8..000000000 --- a/i18n/zh_TW.json +++ /dev/null @@ -1,132 +0,0 @@ -{ - "很遗憾您这没有能用的显卡来支持您训练": "很遗憾您这没有能用的显卡来支持您训练", - "是": "是", - "step1:正在处理数据": "step1:正在处理数据", - "step2a:无需提取音高": "step2a:无需提取音高", - "step2b:正在提取特征": "step2b:正在提取特征", - "step3a:正在训练模型": 
"step3a:正在训练模型", - "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log": "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log", - "全流程结束!": "全流程结束!", - "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.": "本軟體以MIT協議開源,作者不對軟體具備任何控制力,使用軟體者、傳播軟體導出的聲音者自負全責。
如不認可該條款,則不能使用或引用軟體包內任何程式碼和檔案。詳見根目錄使用需遵守的協議-LICENSE.txt。", - "模型推理": "模型推理", - "推理音色": "推理音色", - "刷新音色列表和索引路径": "刷新音色列表和索引路徑", - "卸载音色省显存": "卸載音色節省 VRAM", - "请选择说话人id": "請選擇說話人ID", - "男转女推荐+12key, 女转男推荐-12key, 如果音域爆炸导致音色失真也可以自己调整到合适音域. ": "男性轉女性推薦+12key,女性轉男性推薦-12key,如果音域爆炸導致音色失真也可以自己調整到合適音域。", - "变调(整数, 半音数量, 升八度12降八度-12)": "變調(整數、半音數量、升八度12降八度-12)", - "输入待处理音频文件路径(默认是正确格式示例)": "輸入待處理音頻檔案路徑(預設是正確格式示例)", - "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU": "選擇音高提取演算法,輸入歌聲可用pm提速,harvest低音好但巨慢無比,crepe效果好但吃GPU", - "crepe_hop_length": "Crepe Hop Length (Only applies to crepe): Hop length refers to the time it takes for the speaker to jump to a dramatic pitch. Lower hop lengths take more time to infer but are more pitch accurate.", - "特征检索库文件路径": "特徵檢索庫檔案路徑", - "特征文件路径": "特徵檔案路徑", - "自动检测index路径,下拉式选择(dropdown)": "自動檢測index路徑,下拉式選擇(dropdown)", - ">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音": ">=3則使用對harvest音高識別的結果使用中值濾波,數值為濾波半徑,使用可以削弱啞音", - "特征检索库文件路径,为空则使用下拉的选择结果": "特徵檢索庫檔路徑,為空則使用下拉的選擇結果", - "检索特征占比": "檢索特徵佔比", - "后处理重采样至最终采样率,0为不进行重采样": "後處理重採樣至最終採樣率,0為不進行重採樣", - "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "輸入源音量包絡替換輸出音量包絡融合比例,越靠近1越使用輸出包絡", - "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "保護清輔音和呼吸聲,防止電音撕裂等artifact,拉滿0.5不開啟,調低加大保護力度但可能降低索引效果", - "F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "F0曲線檔案,可選,一行一個音高,代替預設的F0及升降調", - "转换": "轉換", - "输出信息": "輸出訊息", - "输出音频(右下角三个点,点了可以下载)": "輸出音頻(右下角三個點,點了可以下載)", - "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "批量轉換,輸入待轉換音頻資料夾,或上傳多個音頻檔案,在指定資料夾(默認opt)下輸出轉換的音頻。", - "指定输出文件夹": "指定輸出資料夾", - "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "輸入待處理音頻資料夾路徑(去檔案管理器地址欄拷貝即可)", - "也可批量输入音频文件, 二选一, 优先读文件夹": "也可批量輸入音頻檔案,二選一,優先讀資料夾", - "导出文件格式": "導出檔格式", - "伴奏人声分离&去混响&去回声": "伴奏人聲分離&去混響&去回聲", - "人声伴奏分离批量处理, 使用UVR5模型。
合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。
模型分为三类:
1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;
2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;
3、去混响、去延迟模型(by FoxJoy):
  (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;
 (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。
去混响/去延迟,附:
1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;
2、MDX-Net-Dereverb模型挺慢的;
3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "使用UVR5模型進行人聲伴奏分離的批次處理。
有效資料夾路徑格式的例子:D:\\path\\to\\input\\folder(從檔案管理員地址欄複製)。
模型分為三類:
1. 保留人聲:選擇這個選項適用於沒有和聲的音訊。它比HP5更好地保留了人聲。它包括兩個內建模型:HP2和HP3。HP3可能輕微漏出伴奏,但比HP2更好地保留了人聲;
2. 僅保留主人聲:選擇這個選項適用於有和聲的音訊。它可能會削弱主人聲。它包括一個內建模型:HP5。
3. 消除混響和延遲模型(由FoxJoy提供):
  (1) MDX-Net:對於立體聲混響的移除是最好的選擇,但不能移除單聲道混響;
 (234) DeEcho:移除延遲效果。Aggressive模式比Normal模式移除得更徹底。DeReverb另外移除混響,可以移除單聲道混響,但對於高頻重的板式混響移除不乾淨。
消除混響/延遲注意事項:
1. DeEcho-DeReverb模型的處理時間是其他兩個DeEcho模型的近兩倍;
2. MDX-Net-Dereverb模型相當慢;
3. 個人推薦的最乾淨配置是先使用MDX-Net,然後使用DeEcho-Aggressive。", - "输入待处理音频文件夹路径": "輸入待處理音頻資料夾路徑", - "模型": "模型", - "指定输出主人声文件夹": "指定输出主人声文件夹", - "指定输出非主人声文件夹": "指定输出非主人声文件夹", - "训练": "訓練", - "step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "step1:填寫實驗配置。實驗數據放在logs下,每個實驗一個資料夾,需手動輸入實驗名路徑,內含實驗配置、日誌、訓練得到的模型檔案。", - "输入实验名": "輸入實驗名稱", - "目标采样率": "目標取樣率", - "模型是否带音高指导(唱歌一定要, 语音可以不要)": "模型是否帶音高指導(唱歌一定要,語音可以不要)", - "版本": "版本", - "提取音高和处理数据使用的CPU进程数": "提取音高和處理數據使用的CPU進程數", - "step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "step2a:自動遍歷訓練資料夾下所有可解碼成音頻的檔案並進行切片歸一化,在實驗目錄下生成2個wav資料夾;暫時只支援單人訓練。", - "输入训练文件夹路径": "輸入訓練檔案夾路徑", - "请指定说话人id": "請指定說話人id", - "处理数据": "處理資料", - "step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "步驟2b: 使用CPU提取音高(如果模型帶音高), 使用GPU提取特徵(選擇卡號)", - "以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "以-分隔輸入使用的卡號, 例如 0-1-2 使用卡0和卡1和卡2", - "显卡信息": "顯示卡資訊", - "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢": "選擇音高提取算法:輸入歌聲可用pm提速,高品質語音但CPU差可用dio提速,harvest品質更好但較慢", - "特征提取": "特徵提取", - "step3: 填写训练设置, 开始训练模型和索引": "步驟3: 填寫訓練設定, 開始訓練模型和索引", - "保存频率save_every_epoch": "保存頻率save_every_epoch", - "总训练轮数total_epoch": "總訓練輪數total_epoch", - "每张显卡的batch_size": "每张显卡的batch_size", - "是否仅保存最新的ckpt文件以节省硬盘空间": "是否僅保存最新的ckpt檔案以節省硬碟空間", - "否": "否", - "是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "是否緩存所有訓練集至 VRAM。小於10分鐘的小數據可緩存以加速訓練,大數據緩存會爆 VRAM 也加不了多少速度", - "是否在每次保存时间点将最终小模型保存至weights文件夹": "是否在每次保存時間點將最終小模型保存至weights檔夾", - "加载预训练底模G路径": "加載預訓練底模G路徑", - "加载预训练底模D路径": "加載預訓練底模D路徑", - "训练模型": "訓練模型", - "训练特征索引": "訓練特徵索引", - "一键训练": "一鍵訓練", - "ckpt处理": "ckpt處理", - "模型融合, 可用于测试音色融合": "模型融合,可用於測試音色融合", - "A模型路径": "A模型路徑", - "B模型路径": "B模型路徑", - "A模型权重": "A模型權重", - "模型是否带音高指导": "模型是否帶音高指導", - "要置入的模型信息": "要置入的模型資訊", - "保存的模型名不带后缀": "儲存的模型名不帶副檔名", - "模型版本型号": "模型版本型號", - "融合": "融合", - "修改模型信息(仅支持weights文件夹下提取的小模型文件)": "修改模型資訊(僅支援weights資料夾下提取的小模型檔案)", - "模型路径": "模型路徑", - "要改的模型信息": "要改的模型資訊", - "保存的文件名, 默认空为和源文件同名": "儲存的檔案名,預設空為與來源檔案同名", - "修改": "修改", - "查看模型信息(仅支持weights文件夹下提取的小模型文件)": "查看模型資訊(僅支援weights資料夾下提取的小模型檔案)", - "查看": "查看", - "模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "模型提取(輸入logs資料夾下大檔案模型路徑),適用於訓一半不想訓了模型沒有自動提取儲存小檔案模型,或者想測試中間模型的情況", - "保存名": "儲存名", - "模型是否带音高指导,1是0否": "模型是否帶音高指導,1是0否", - "提取": "提取", - "Onnx导出": "Onnx导出", - "RVC模型路径": "RVC模型路径", - "Onnx输出路径": "Onnx输出路径", - "导出Onnx模型": "导出Onnx模型", - "常见问题解答": "常見問題解答", - "招募音高曲线前端编辑器": "招募音高曲線前端編輯器", - "加开发群联系我xxxxx": "加開發群聯繫我xxxxx", - "点击查看交流、问题反馈群号": "點擊查看交流、問題反饋群號", - "xxxxx": "xxxxx", - "加载模型": "載入模型", - "Hubert模型": "Hubert 模型", - "选择.pth文件": "選擇 .pth 檔案", - "选择.index文件": "選擇 .index 檔案", - "选择.npy文件": "選擇 .npy 檔案", - "输入设备": "輸入設備", - "输出设备": "輸出設備", - "音频设备(请使用同种类驱动)": "音訊設備 (請使用同種類驅動)", - "响应阈值": "響應閾值", - "音调设置": "音調設定", - "Index Rate": "Index Rate", - "常规设置": "一般設定", - "采样长度": "取樣長度", - "淡入淡出长度": "淡入淡出長度", - "额外推理时长": "額外推理時長", - "输入降噪": "輸入降噪", - "输出降噪": "輸出降噪", - "性能设置": "效能設定", - "开始音频转换": "開始音訊轉換", - "停止音频转换": "停止音訊轉換", - "推理时间(ms):": "推理時間(ms):", - "请选择pth文件": "请选择pth文件", - "请选择index文件": "请选择index文件", - "hubert模型路径不可包含中文": "hubert模型路径不可包含中文", - "pth文件路径不可包含中文": "pth文件路径不可包含中文", - "index文件路径不可包含中文": "index文件路径不可包含中文", - "音高算法": "音高算法", - "harvest进程数": "harvest进程数" -} diff --git a/icon.png b/icon.png deleted file mode 100644 index f9aae65bb..000000000 Binary files a/icon.png and /dev/null differ diff --git a/infer-web.py b/infer-web.py deleted file mode 100644 index 3f191b600..000000000 --- a/infer-web.py +++ /dev/null @@ -1,2559 +0,0 @@ -import sys -from shutil import rmtree -import shutil -import json 
# Mangio fork using json for preset saving -import datetime -import unicodedata -from glob import glob1 -from signal import SIGTERM -import librosa -import os -now_dir = os.getcwd() -sys.path.append(now_dir) -import lib.globals.globals as rvc_globals -from LazyImport import lazyload - -math = lazyload('math') - -import traceback -import warnings -tensorlowest = lazyload('tensorlowest') -import faiss -ffmpeg = lazyload('ffmpeg') - -np = lazyload("numpy") -torch = lazyload('torch') -re = lazyload('regex') -os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' -os.environ["OPENBLAS_NUM_THREADS"] = "1" -os.environ["no_proxy"] = "localhost, 127.0.0.1, ::1" -import logging -from random import shuffle -from subprocess import Popen -import easy_infer -gr = lazyload("gradio") -SF = lazyload("soundfile") -SFWrite = SF.write -from config import Config -from fairseq import checkpoint_utils -from i18n import I18nAuto -from lib.infer_pack.models import ( - SynthesizerTrnMs256NSFsid, - SynthesizerTrnMs256NSFsid_nono, - SynthesizerTrnMs768NSFsid, - SynthesizerTrnMs768NSFsid_nono, -) -from lib.infer_pack.models_onnx import SynthesizerTrnMsNSFsidM -from infer_uvr5 import _audio_pre_, _audio_pre_new -from MDXNet import MDXNetDereverb -from my_utils import load_audio -from train.process_ckpt import change_info, extract_small_model, merge, show_info -from vc_infer_pipeline import VC -from sklearn.cluster import MiniBatchKMeans - -import time -import threading - -from shlex import quote as SQuote - -RQuote = lambda val: SQuote(str(val)) - -tmp = os.path.join(now_dir, "TEMP") -runtime_dir = os.path.join(now_dir, "runtime/Lib/site-packages") -directories = ['logs', 'audios', 'datasets', 'weights'] - -rmtree(tmp, ignore_errors=True) -rmtree(os.path.join(runtime_dir, "infer_pack"), ignore_errors=True) -rmtree(os.path.join(runtime_dir, "uvr5_pack"), ignore_errors=True) - -os.makedirs(tmp, exist_ok=True) -for folder in directories: - os.makedirs(os.path.join(now_dir, folder), exist_ok=True) - -os.environ["TEMP"] = tmp -warnings.filterwarnings("ignore") -torch.manual_seed(114514) -logging.getLogger("numba").setLevel(logging.WARNING) -try: - file = open('csvdb/stop.csv', 'x') - file.close() -except FileExistsError: pass - -global DoFormant, Quefrency, Timbre - -DoFormant = rvc_globals.DoFormant -Quefrency = rvc_globals.Quefrency -Timbre = rvc_globals.Timbre - -config = Config() -i18n = I18nAuto() -i18n.print() -# 判断是否有能用来训练和加速推理的N卡 -ngpu = torch.cuda.device_count() -gpu_infos = [] -mem = [] -if_gpu_ok = False - -keywords = ["10", "16", "20", "30", "40", "A2", "A3", "A4", "P4", "A50", "500", "A60", - "70", "80", "90", "M4", "T4", "TITAN"] - -if torch.cuda.is_available() or ngpu != 0: - for i in range(ngpu): - gpu_name = torch.cuda.get_device_name(i).upper() - if any(keyword in gpu_name for keyword in keywords): - if_gpu_ok = True # 至少有一张能用的N卡 - gpu_infos.append("%s\t%s" % (i, gpu_name)) - mem.append(int(torch.cuda.get_device_properties(i).total_memory / 1e9 + 0.4)) - -gpu_info = "\n".join(gpu_infos) if if_gpu_ok and gpu_infos else i18n("很遗憾您这没有能用的显卡来支持您训练") -default_batch_size = min(mem) // 2 if if_gpu_ok and gpu_infos else 1 -gpus = "-".join(i[0] for i in gpu_infos) - -hubert_model = None - -def load_hubert(): - global hubert_model - models, _, _ = checkpoint_utils.load_model_ensemble_and_task(["hubert_base.pt"], suffix="") - hubert_model = models[0].to(config.device) - - if config.is_half: - hubert_model = hubert_model.half() - - hubert_model.eval() - -datasets_root = "datasets" -weight_root = "weights" -weight_uvr5_root = 
"uvr5_weights" -index_root = "logs" -fshift_root = "formantshiftcfg" -audio_root = "audios" -audio_others_root = "audio-others" - -sup_audioext = {'wav', 'mp3', 'flac', 'ogg', 'opus', - 'm4a', 'mp4', 'aac', 'alac', 'wma', - 'aiff', 'webm', 'ac3'} - -names = [os.path.join(root, file) - for root, _, files in os.walk(weight_root) - for file in files - if file.endswith((".pth", ".onnx"))] - -indexes_list = [os.path.join(root, name) - for root, _, files in os.walk(index_root, topdown=False) - for name in files - if name.endswith(".index") and "trained" not in name] - -audio_paths = [os.path.join(root, name) - for root, _, files in os.walk(audio_root, topdown=False) - for name in files - if name.endswith(tuple(sup_audioext))] - -audio_others_paths = [os.path.join(root, name) - for root, _, files in os.walk(audio_others_root, topdown=False) - for name in files - if name.endswith(tuple(sup_audioext))] - -uvr5_names = [name.replace(".pth", "") - for name in os.listdir(weight_uvr5_root) - if name.endswith(".pth") or "onnx" in name] - -check_for_name = lambda: sorted(names)[0] if names else '' - -datasets=[] -for foldername in os.listdir(os.path.join(now_dir, datasets_root)): - if "." not in foldername: - datasets.append(os.path.join(easy_infer.find_folder_parent(".","pretrained"),"datasets",foldername)) - -def get_dataset(): - if len(datasets) > 0: - return sorted(datasets)[0] - else: - return '' - -def update_dataset_list(name): - new_datasets = [] - for foldername in os.listdir(os.path.join(now_dir, datasets_root)): - if "." not in foldername: - new_datasets.append(os.path.join(easy_infer.find_folder_parent(".","pretrained"),"datasets",foldername)) - return gr.Dropdown.update(choices=new_datasets) - -def get_indexes(): - indexes_list = [ - os.path.join(dirpath, filename) - for dirpath, _, filenames in os.walk(index_root) - for filename in filenames - if filename.endswith(".index") and "trained" not in filename - ] - - return indexes_list if indexes_list else '' - -def get_fshift_presets(): - fshift_presets_list = [ - os.path.join(dirpath, filename) - for dirpath, _, filenames in os.walk(fshift_root) - for filename in filenames - if filename.endswith(".txt") - ] - - return fshift_presets_list if fshift_presets_list else '' - -# Define función de conversión llamada por el botón -import soundfile as sf - -def generate_output_path(output_folder, base_name, extension): - # Generar un nombre único para el archivo de salida - index = 1 - while True: - output_path = os.path.join(output_folder, f"{base_name}_{index}.{extension}") - if not os.path.exists(output_path): - return output_path - index += 1 - -def combine_and_save_audios(audio1_path, audio2_path, output_path): - audio1, sr1 = librosa.load(audio1_path, sr=None) - audio2, sr2 = librosa.load(audio2_path, sr=None) - - # Alinear las tasas de muestreo - if sr1 != sr2: - if sr1 > sr2: - audio2 = librosa.resample(audio2, orig_sr=sr2, target_sr=sr1) - else: - audio1 = librosa.resample(audio1, orig_sr=sr1, target_sr=sr2) - - # Ajustar los audios para que tengan la misma longitud - target_length = min(len(audio1), len(audio2)) - audio1 = librosa.util.fix_length(audio1, target_length) - audio2 = librosa.util.fix_length(audio2, target_length) - - # Combinar los audios - combined_audio = audio1 + audio2 - - sf.write(output_path, combined_audio, sr1) - -# Resto de tu código... 
- - -# Define función de conversión llamada por el botón -def audio_combined(audio1_path, audio2_path): - output_folder = os.path.join(now_dir, "audio-others") - os.makedirs(output_folder, exist_ok=True) - - # Generar nombres únicos para los archivos de salida - base_name = "combined_audio" - extension = "wav" - output_path = generate_output_path(output_folder, base_name, extension) - - combine_and_save_audios(audio1_path, audio2_path, output_path) - - return "¡Conversión completa!", output_path - - -def vc_single( - sid: str, - input_audio_path0: str, - input_audio_path1: str, - f0_up_key: int, - f0_file: str, - f0_method: str, - file_index: str, - file_index2: str, - index_rate: float, - filter_radius: int, - resample_sr: int, - rms_mix_rate: float, - protect: float, - crepe_hop_length: int, - f0_min: int, - note_min: str, - f0_max: int, - note_max: str, -): - global total_time - total_time = 0 - start_time = time.time() - global tgt_sr, net_g, vc, hubert_model, version - if not input_audio_path0 and not input_audio_path1: - return "You need to upload an audio", None - - if (not os.path.exists(input_audio_path0)) and (not os.path.exists(os.path.join(now_dir, input_audio_path0))): - return "Audio was not properly selected or doesn't exist", None - - # This might be jank, but I'm trying to make sure this gets the right file... - input_audio_path1 = input_audio_path1 or input_audio_path0 - print(f"\nStarting inference for '{os.path.basename(input_audio_path1)}'") - print("-------------------") - - f0_up_key = int(f0_up_key) - - if rvc_globals.NotesOrHertz and f0_method != 'rmvpe': - f0_min = note_to_hz(note_min) if note_min else 50 - f0_max = note_to_hz(note_max) if note_max else 1100 - print(f"Converted min pitch freq - {f0_min}\n" - f"Converted max pitch freq - {f0_max}") - else: - f0_min = f0_min or 50 - f0_max = f0_max or 1100 - try: - print(f"Attempting to load {input_audio_path1}....") - audio = load_audio(input_audio_path1, - 16000, - DoFormant=rvc_globals.DoFormant, - Quefrency=rvc_globals.Quefrency, - Timbre=rvc_globals.Timbre) - - audio_max = np.abs(audio).max() / 0.95 - if audio_max > 1: - audio /= audio_max - - times = [0, 0, 0] - if not hubert_model: - print("Loading HuBERT for the first time...") - load_hubert() - - try: - if_f0 = cpt.get("f0", 1) - except NameError: - message = "Model was not properly selected" - print(message) - return message, None - - file_index = ( - file_index.strip(" ").strip('"').strip("\n").strip('"').strip(" ").replace("trained", "added") - ) if file_index != "" else file_index2 - - try: - audio_opt = vc.pipeline( - hubert_model, - net_g, - sid, - audio, - input_audio_path1, - times, - f0_up_key, - f0_method, - file_index, - index_rate, - if_f0, - filter_radius, - tgt_sr, - resample_sr, - rms_mix_rate, - version, - protect, - crepe_hop_length, - f0_file=f0_file, - f0_min=f0_min, - f0_max=f0_max - ) - except AssertionError: - message = "Mismatching index version detected (v1 with v2, or v2 with v1)." - print(message) - return message, None - except NameError: - message = "RVC libraries are still loading. Please try again in a few seconds." - print(message) - return message, None - - if tgt_sr != resample_sr >= 16000: - tgt_sr = resample_sr - - index_info = "Using index:%s." % file_index if os.path.exists(file_index) else "Index not used." 
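# --- Editor's note (sketch; not part of the original diff) ----------------
# On success, vc_single() returns (status_text, (tgt_sr, audio_opt)); the
# (sample_rate, numpy array) pair is the format Gradio's gr.Audio output
# component accepts, and it is also what the CLI path writes to disk via
# scipy.io.wavfile.write further below.
# ---------------------------------------------------------------------------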
- - end_time = time.time() - total_time = end_time - start_time - - return f"Success.\n {index_info}\nTime:\n npy:{times[0]}, f0:{times[1]}, infer:{times[2]}\nTotal Time: {total_time} seconds", (tgt_sr, audio_opt) - except: - info = traceback.format_exc() - print(info) - return info, (None, None) - -def vc_multi( - sid, - dir_path, - opt_root, - paths, - f0_up_key, - f0_method, - file_index, - file_index2, - # file_big_npy, - index_rate, - filter_radius, - resample_sr, - rms_mix_rate, - protect, - format1, - crepe_hop_length, - f0_min, - note_min, - f0_max, - note_max, -): - if rvc_globals.NotesOrHertz and f0_method != 'rmvpe': - f0_min = note_to_hz(note_min) if note_min else 50 - f0_max = note_to_hz(note_max) if note_max else 1100 - print(f"Converted min pitch freq - {f0_min}\n" - f"Converted max pitch freq - {f0_max}") - else: - f0_min = f0_min or 50 - f0_max = f0_max or 1100 - - try: - dir_path, opt_root = [x.strip(" ").strip('"').strip("\n").strip('"').strip(" ") for x in [dir_path, opt_root]] - os.makedirs(opt_root, exist_ok=True) - - paths = [os.path.join(dir_path, name) for name in os.listdir(dir_path)] if dir_path else [path.name for path in paths] - infos = [] - - for path in paths: - info, opt = vc_single(sid, path, None, f0_up_key, None, f0_method, file_index, file_index2, index_rate, filter_radius, - resample_sr, rms_mix_rate, protect, crepe_hop_length, f0_min, note_min, f0_max, note_max) - - if "Success" in info: - try: - tgt_sr, audio_opt = opt - #sys.stdout.write(f"\nTarget Sample Rate (tgt_sr): {tgt_sr}") # Debugging print - base_name = os.path.splitext(os.path.basename(path))[0] - output_path = f"{opt_root}/{base_name}.{format1}" - path, extension = output_path, format1 - path, extension = output_path if format1 in ["wav", "flac", "mp3", "ogg", "aac", "m4a"] else f"{output_path}.wav", format1 - #sys.stdout.write(f"\nOutput Path: {path}") # Debugging print - #sys.stdout.write(f"\nFile Extension: {extension}") # Debugging print - SFWrite(path, audio_opt, tgt_sr) - #sys.stdout.write("\nFile Written Successfully with SFWrite") # Debugging print - if os.path.exists(path) and extension not in ["wav", "flac", "mp3", "ogg", "aac", "m4a"]: - sys.stdout.write(f"Running command: ffmpeg -i {RQuote(path)} -vn {RQuote(path[:-4] + '.' + extension)} -q:a 2 -y") - os.system(f"ffmpeg -i {RQuote(path)} -vn {RQuote(path[:-4] + '.' 
+ extension)} -q:a 2 -y") - #print(f"\nFile Converted to {extension} using ffmpeg") # Debugging print - except: - info += traceback.format_exc() - print(f"\nException encountered: {info}") # Debugging print - infos.append(f"{os.path.basename(path)}->{info}") - yield "\n".join(infos) - yield "\n".join(infos) - except: - yield traceback.format_exc() - - -def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format0): - infos = [] - try: - inp_root, save_root_vocal, save_root_ins = [x.strip(" ").strip('"').strip("\n").strip('"').strip(" ") for x in [inp_root, save_root_vocal, save_root_ins]] - - pre_fun = MDXNetDereverb(15) if model_name == "onnx_dereverb_By_FoxJoy" else (_audio_pre_ if "DeEcho" not in model_name else _audio_pre_new)( - agg=int(agg), - model_path=os.path.join(weight_uvr5_root, model_name + ".pth"), - device=config.device, - is_half=config.is_half, - ) - - paths = [os.path.join(inp_root, name) for name in os.listdir(inp_root)] if inp_root else [path.name for path in paths] - - for path in paths: - inp_path = os.path.join(inp_root, path) - need_reformat, done = 1, 0 - - try: - info = ffmpeg.probe(inp_path, cmd="ffprobe") - if info["streams"][0]["channels"] == 2 and info["streams"][0]["sample_rate"] == "44100": - need_reformat = 0 - pre_fun._path_audio_(inp_path, save_root_ins, save_root_vocal, format0) - done = 1 - except: - traceback.print_exc() - - if need_reformat: - tmp_path = f"{tmp}/{os.path.basename(RQuote(inp_path))}.reformatted.wav" - os.system(f"ffmpeg -i {RQuote(inp_path)} -vn -acodec pcm_s16le -ac 2 -ar 44100 {RQuote(tmp_path)} -y") - inp_path = tmp_path - - try: - if not done: - pre_fun._path_audio_(inp_path, save_root_ins, save_root_vocal, format0) - infos.append(f"{os.path.basename(inp_path)}->Success") - yield "\n".join(infos) - except: - infos.append(f"{os.path.basename(inp_path)}->{traceback.format_exc()}") - yield "\n".join(infos) - except: - infos.append(traceback.format_exc()) - yield "\n".join(infos) - finally: - try: - if model_name == "onnx_dereverb_By_FoxJoy": - del pre_fun.pred.model - del pre_fun.pred.model_ - else: - del pre_fun.model - - del pre_fun - except: traceback.print_exc() - - print("clean_empty_cache") - - if torch.cuda.is_available(): torch.cuda.empty_cache() - - yield "\n".join(infos) - -def get_vc(sid, to_return_protect0, to_return_protect1): - global n_spk, tgt_sr, net_g, vc, cpt, version, hubert_model - if not sid: - if hubert_model is not None: - print("clean_empty_cache") - del net_g, n_spk, vc, hubert_model, tgt_sr - hubert_model = net_g = n_spk = vc = hubert_model = tgt_sr = None - if torch.cuda.is_available(): - torch.cuda.empty_cache() - if_f0, version = cpt.get("f0", 1), cpt.get("version", "v1") - net_g = (SynthesizerTrnMs256NSFsid if version == "v1" else SynthesizerTrnMs768NSFsid)( - *cpt["config"], is_half=config.is_half) if if_f0 == 1 else (SynthesizerTrnMs256NSFsid_nono if version == "v1" else SynthesizerTrnMs768NSFsid_nono)(*cpt["config"]) - del net_g, cpt - if torch.cuda.is_available(): - torch.cuda.empty_cache() - cpt = None - return ({"visible": False, "__type__": "update"},) * 3 - - print(f"loading {sid}") - cpt = torch.load(sid, map_location="cpu") - tgt_sr = cpt["config"][-1] - cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] - - if cpt.get("f0", 1) == 0: - to_return_protect0 = to_return_protect1 = {"visible": False, "value": 0.5, "__type__": "update"} - else: - to_return_protect0 = {"visible": True, "value": to_return_protect0, "__type__": "update"} - to_return_protect1 = {"visible": 
True, "value": to_return_protect1, "__type__": "update"} - - version = cpt.get("version", "v1") - net_g = (SynthesizerTrnMs256NSFsid if version == "v1" else SynthesizerTrnMs768NSFsid)( - *cpt["config"], is_half=config.is_half) if cpt.get("f0", 1) == 1 else (SynthesizerTrnMs256NSFsid_nono if version == "v1" else SynthesizerTrnMs768NSFsid_nono)(*cpt["config"]) - del net_g.enc_q - - print(net_g.load_state_dict(cpt["weight"], strict=False)) - net_g.eval().to(config.device) - net_g = net_g.half() if config.is_half else net_g.float() - - vc = VC(tgt_sr, config) - n_spk = cpt["config"][-3] - - return ( - {"visible": False, "maximum": n_spk, "__type__": "update"}, - to_return_protect0, - to_return_protect1 - ) - - -def change_choices(): - names = [os.path.join(root, file) - for root, _, files in os.walk(weight_root) - for file in files - if file.endswith((".pth", ".onnx"))] - indexes_list = [os.path.join(root, name) for root, _, files in os.walk(index_root, topdown=False) for name in files if name.endswith(".index") and "trained" not in name] - audio_paths = [os.path.join(audio_root, file) for file in os.listdir(os.path.join(now_dir, "audios"))] - - - return ( - {"choices": sorted(names), "__type__": "update"}, - {"choices": sorted(indexes_list), "__type__": "update"}, - {"choices": sorted(audio_paths), "__type__": "update"} - ) -def change_choices3(): - - audio_paths = [os.path.join(audio_root, file) for file in os.listdir(os.path.join(now_dir, "audios"))] - audio_others_paths = [os.path.join(audio_others_root, file) for file in os.listdir(os.path.join(now_dir, "audio-others"))] - - - return ( - {"choices": sorted(audio_others_paths), "__type__": "update"}, - {"choices": sorted(audio_paths), "__type__": "update"} - ) - -sr_dict = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - -def if_done(done, p): - while p.poll() is None: - time.sleep(0.5) - - done[0] = True - -def if_done_multi(done, ps): - while not all(p.poll() is not None for p in ps): - time.sleep(0.5) - done[0] = True - -def formant_enabled(cbox, qfrency, tmbre): - global DoFormant, Quefrency, Timbre - - DoFormant = cbox - Quefrency = qfrency - Timbre = tmbre - - rvc_globals.DoFormant = cbox - rvc_globals.Quefrency = qfrency - rvc_globals.Timbre = tmbre - - visibility_update = {"visible": DoFormant, "__type__": "update"} - - return ( - {"value": DoFormant, "__type__": "update"}, - ) + (visibility_update,) * 6 - - -def formant_apply(qfrency, tmbre): - global Quefrency, Timbre, DoFormant - - Quefrency = qfrency - Timbre = tmbre - DoFormant = True - - rvc_globals.DoFormant = True - rvc_globals.Quefrency = qfrency - rvc_globals.Timbre = tmbre - - return ({"value": Quefrency, "__type__": "update"}, {"value": Timbre, "__type__": "update"}) - -def update_fshift_presets(preset, qfrency, tmbre): - - if preset: - with open(preset, 'r') as p: - content = p.readlines() - qfrency, tmbre = content[0].strip(), content[1] - - formant_apply(qfrency, tmbre) - else: - qfrency, tmbre = preset_apply(preset, qfrency, tmbre) - - return ( - {"choices": get_fshift_presets(), "__type__": "update"}, - {"value": qfrency, "__type__": "update"}, - {"value": tmbre, "__type__": "update"}, - ) - -def preprocess_dataset(trainset_dir, exp_dir, sr, n_p): - sr = sr_dict[sr] - - log_dir = os.path.join(now_dir, "logs", exp_dir) - log_file = os.path.join(log_dir, "preprocess.log") - - os.makedirs(log_dir, exist_ok=True) - - with open(log_file, "w") as f: pass - - cmd = ( - f"{config.python_cmd} " - "trainset_preprocess_pipeline_print.py " - f"{trainset_dir} " - 
f"{RQuote(sr)} " - f"{RQuote(n_p)} " - f"{log_dir} " - f"{RQuote(config.noparallel)}" - ) - print(cmd) - - p = Popen(cmd, shell=True) - done = [False] - - threading.Thread(target=if_done, args=(done,p,)).start() - - while not done[0]: - with open(log_file, "r") as f: - yield f.read() - time.sleep(1) - - with open(log_file, "r") as f: - log = f.read() - - print(log) - yield log - -def extract_f0_feature(gpus, n_p, f0method, if_f0, exp_dir, version19, echl): - gpus = gpus.split("-") - log_dir = f"{now_dir}/logs/{exp_dir}" - log_file = f"{log_dir}/extract_f0_feature.log" - os.makedirs(log_dir, exist_ok=True) - with open(log_file, "w") as f: pass - - if if_f0: - cmd = ( - f"{config.python_cmd} extract_f0_print.py {log_dir} " - f"{RQuote(n_p)} {RQuote(f0method)} {RQuote(echl)}" - ) - print(cmd) - p = Popen(cmd, shell=True, cwd=now_dir) - done = [False] - threading.Thread(target=if_done, args=(done, p)).start() - - while not done[0]: - with open(log_file, "r") as f: - yield f.read() - time.sleep(1) - - leng = len(gpus) - ps = [] - - for idx, n_g in enumerate(gpus): - cmd = ( - f"{config.python_cmd} extract_feature_print.py {RQuote(config.device)} " - f"{RQuote(leng)} {RQuote(idx)} {RQuote(n_g)} {log_dir} {RQuote(version19)}" - ) - print(cmd) - p = Popen(cmd, shell=True, cwd=now_dir) - ps.append(p) - - done = [False] - threading.Thread(target=if_done_multi, args=(done, ps)).start() - - while not done[0]: - with open(log_file, "r") as f: - yield f.read() - time.sleep(1) - - with open(log_file, "r") as f: - log = f.read() - - print(log) - yield log - -def change_sr2(sr2, if_f0_3, version19): - path_str = "" if version19 == "v1" else "_v2" - f0_str = "f0" if if_f0_3 else "" - model_paths = {"G": "", "D": ""} - - for model_type in model_paths: - file_path = f"pretrained{path_str}/{f0_str}{model_type}{sr2}.pth" - if os.access(file_path, os.F_OK): - model_paths[model_type] = file_path - else: - print(f"{file_path} doesn't exist, will not use pretrained model.") - - return (model_paths["G"], model_paths["D"]) - - -def change_version19(sr2, if_f0_3, version19): - path_str = "" if version19 == "v1" else "_v2" - sr2 = "40k" if (sr2 == "32k" and version19 == "v1") else sr2 - choices_update = { - "choices": ["40k", "48k"], "__type__": "update", "value": sr2 - } if version19 == "v1" else { - "choices": ["40k", "48k", "32k"], "__type__": "update", "value": sr2} - - f0_str = "f0" if if_f0_3 else "" - model_paths = {"G": "", "D": ""} - - for model_type in model_paths: - file_path = f"pretrained{path_str}/{f0_str}{model_type}{sr2}.pth" - if os.access(file_path, os.F_OK): - model_paths[model_type] = file_path - else: - print(f"{file_path} doesn't exist, will not use pretrained model.") - - return (model_paths["G"], model_paths["D"], choices_update) - - -def change_f0(if_f0_3, sr2, version19): # f0method8,pretrained_G14,pretrained_D15 - path_str = "" if version19 == "v1" else "_v2" - - pth_format = "pretrained%s/f0%s%s.pth" - model_desc = { "G": "", "D": "" } - - for model_type in model_desc: - file_path = pth_format % (path_str, model_type, sr2) - if os.access(file_path, os.F_OK): - model_desc[model_type] = file_path - else: - print(file_path, "doesn't exist, will not use pretrained model") - - return ( - {"visible": if_f0_3, "__type__": "update"}, - model_desc["G"], - model_desc["D"], - {"visible": if_f0_3, "__type__": "update"} - ) - - -global log_interval - -def set_log_interval(exp_dir, batch_size12): - log_interval = 1 - folder_path = os.path.join(exp_dir, "1_16k_wavs") - - if os.path.isdir(folder_path): - 
wav_files_num = len(glob1(folder_path,"*.wav")) - - if wav_files_num > 0: - log_interval = math.ceil(wav_files_num / batch_size12) - if log_interval > 1: - log_interval += 1 - - return log_interval - -global PID, PROCESS - -def click_train( - exp_dir1, - sr2, - if_f0_3, - spk_id5, - save_epoch10, - total_epoch11, - batch_size12, - if_save_latest13, - pretrained_G14, - pretrained_D15, - gpus16, - if_cache_gpu17, - if_save_every_weights18, - version19, -): - with open('csvdb/stop.csv', 'w+') as file: file.write("False") - log_dir = os.path.join(now_dir, "logs", exp_dir1) - - os.makedirs(log_dir, exist_ok=True) - - gt_wavs_dir = os.path.join(log_dir, "0_gt_wavs") - feature_dim = "256" if version19 == "v1" else "768" - - feature_dir = os.path.join(log_dir, f"3_feature{feature_dim}") - - log_interval = set_log_interval(log_dir, batch_size12) - - required_dirs = [gt_wavs_dir, feature_dir] - - if if_f0_3: - f0_dir = f"{log_dir}/2a_f0" - f0nsf_dir = f"{log_dir}/2b-f0nsf" - required_dirs.extend([f0_dir, f0nsf_dir]) - - names = set(name.split(".")[0] for directory in required_dirs for name in os.listdir(directory)) - - def generate_paths(name): - paths = [gt_wavs_dir, feature_dir] - if if_f0_3: - paths.extend([f0_dir, f0nsf_dir]) - return '|'.join([path.replace('\\', '\\\\') + '/' + name + ('.wav.npy' if path in [f0_dir, f0nsf_dir] else '.wav' if path == gt_wavs_dir else '.npy') for path in paths]) - - opt = [f"{generate_paths(name)}|{spk_id5}" for name in names] - mute_dir = f"{now_dir}/logs/mute" - - for _ in range(2): - mute_string = f"{mute_dir}/0_gt_wavs/mute{sr2}.wav|{mute_dir}/3_feature{feature_dim}/mute.npy" - if if_f0_3: - mute_string += f"|{mute_dir}/2a_f0/mute.wav.npy|{mute_dir}/2b-f0nsf/mute.wav.npy" - opt.append(mute_string+f"|{spk_id5}") - - shuffle(opt) - with open(f"{log_dir}/filelist.txt", "w") as f: - f.write("\n".join(opt)) - - print("write filelist done") - print("use gpus:", gpus16) - - if pretrained_G14 == "": - print("no pretrained Generator") - if pretrained_D15 == "": - print("no pretrained Discriminator") - - G_train = f"-pg {pretrained_G14}" if pretrained_G14 else "" - D_train = f"-pd {pretrained_D15}" if pretrained_D15 else "" - - cmd = ( - f"{config.python_cmd} train_nsf_sim_cache_sid_load_pretrain.py -e {exp_dir1} -sr {sr2} -f0 {int(if_f0_3)} -bs {batch_size12}" - f" -g {gpus16 if gpus16 is not None else ''} -te {total_epoch11} -se {save_epoch10} {G_train} {D_train} -l {int(if_save_latest13)}" - f" -c {int(if_cache_gpu17)} -sw {int(if_save_every_weights18)} -v {version19} -li {log_interval}" - ) - - print(cmd) - - global p - p = Popen(cmd, shell=True, cwd=now_dir) - global PID - PID = p.pid - - p.wait() - - return "Training is done, check train.log", {"visible": False, "__type__": "update"}, {"visible": True, "__type__": "update"} - -def train_index(exp_dir1, version19): - exp_dir = os.path.join(now_dir, 'logs', exp_dir1) - os.makedirs(exp_dir, exist_ok=True) - - feature_dim = '256' if version19 == "v1" else '768' - feature_dir = os.path.join(exp_dir, f"3_feature{feature_dim}") - - if not os.path.exists(feature_dir) or len(os.listdir(feature_dir)) == 0: - return "请先进行特征提取!" - - npys = [np.load(os.path.join(feature_dir, name)) for name in sorted(os.listdir(feature_dir))] - - big_npy = np.concatenate(npys, 0) - np.random.shuffle(big_npy) - - infos = [] - if big_npy.shape[0] > 2*10**5: - infos.append("Trying doing kmeans %s shape to 10k centers." 
% big_npy.shape[0]) - yield "\n".join(infos) - try: - big_npy = MiniBatchKMeans(n_clusters=10000, verbose=True, batch_size=256 * config.n_cpu, - compute_labels=False,init="random").fit(big_npy).cluster_centers_ - except Exception as e: - infos.append(str(e)) - yield "\n".join(infos) - - np.save(os.path.join(exp_dir, "total_fea.npy"), big_npy) - - n_ivf = min(int(16 * np.sqrt(big_npy.shape[0])), big_npy.shape[0] // 39) - infos.append("%s,%s" % (big_npy.shape, n_ivf)) - yield "\n".join(infos) - - index = faiss.index_factory(int(feature_dim), f"IVF{n_ivf},Flat") - - index_ivf = faiss.extract_index_ivf(index) - index_ivf.nprobe = 1 - - index.train(big_npy) - - index_file_base = f"{exp_dir}/trained_IVF{n_ivf}_Flat_nprobe_{index_ivf.nprobe}_{exp_dir1}_{version19}.index" - faiss.write_index(index, index_file_base) - - infos.append("adding") - yield "\n".join(infos) - - batch_size_add = 8192 - for i in range(0, big_npy.shape[0], batch_size_add): - index.add(big_npy[i:i + batch_size_add]) - - index_file_base = f"{exp_dir}/added_IVF{n_ivf}_Flat_nprobe_{index_ivf.nprobe}_{exp_dir1}_{version19}.index" - faiss.write_index(index, index_file_base) - - infos.append(f"Successful Index Construction,added_IVF{n_ivf}_Flat_nprobe_{index_ivf.nprobe}_{exp_dir1}_{version19}.index") - yield "\n".join(infos) - -#def setBoolean(status): #true to false and vice versa / not implemented yet, dont touch!!!!!!! -# status = not status -# return status - -def change_info_(ckpt_path): - train_log_path = os.path.join(os.path.dirname(ckpt_path), "train.log") - - if not os.path.exists(train_log_path): - return {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"} - - try: - with open(train_log_path, "r") as f: - info_line = next(f).strip() - info = eval(info_line.split("\t")[-1]) - - sr, f0 = info.get("sample_rate"), info.get("if_f0") - version = "v2" if info.get("version") == "v2" else "v1" - - return sr, str(f0), version - - except Exception as e: - print(f"Exception occurred: {str(e)}, Traceback: {traceback.format_exc()}") - return {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"} - -def export_onnx(model_path, exported_path): - device = torch.device("cpu") - checkpoint = torch.load(model_path, map_location=device) - vec_channels = 256 if checkpoint.get("version", "v1") == "v1" else 768 - - test_inputs = { - "phone": torch.rand(1, 200, vec_channels), - "phone_lengths": torch.LongTensor([200]), - "pitch": torch.randint(5, 255, (1, 200)), - "pitchf": torch.rand(1, 200), - "ds": torch.zeros(1).long(), - "rnd": torch.rand(1, 192, 200) - } - - checkpoint["config"][-3] = checkpoint["weight"]["emb_g.weight"].shape[0] - net_g = SynthesizerTrnMsNSFsidM(*checkpoint["config"], is_half=False, version=checkpoint.get("version", "v1")) - - net_g.load_state_dict(checkpoint["weight"], strict=False) - net_g = net_g.to(device) - - dynamic_axes = {"phone": [1], "pitch": [1], "pitchf": [1], "rnd": [2]} - - torch.onnx.export( - net_g, - tuple(value.to(device) for value in test_inputs.values()), - exported_path, - dynamic_axes=dynamic_axes, - do_constant_folding=False, - opset_version=13, - verbose=False, - input_names=list(test_inputs.keys()), - output_names=["audio"], - ) - return "Finished" - - -#region Mangio-RVC-Fork CLI App - -import scipy.io.wavfile as wavfile - -cli_current_page = "HOME" - -def cli_split_command(com): - exp = r'(?:(?<=\s)|^)"(.*?)"(?=\s|$)|(\S+)' - split_array = re.findall(exp, com) - split_array = [group[0] if group[0] else group[1] for group in split_array] - return split_array - 
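# --- Editor's note (sketch; not part of the original diff) ----------------
# cli_split_command() above tokenizes one CLI line with a regex that keeps
# double-quoted arguments intact, so paths containing spaces survive as a
# single argument. Illustrative call (file names are hypothetical):
#   cli_split_command('mi-test.pth "my folder/source.wav" out.wav')
#   # -> ['mi-test.pth', 'my folder/source.wav', 'out.wav']
# ---------------------------------------------------------------------------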
-execute_generator_function = lambda genObject: all(x is not None for x in genObject) - -def cli_infer(com): - model_name, source_audio_path, output_file_name, feature_index_path, speaker_id, transposition, f0_method, crepe_hop_length, harvest_median_filter, resample, mix, feature_ratio, protection_amnt, _, f0_min, f0_max, do_formant = cli_split_command(com)[:17] - - speaker_id, crepe_hop_length, harvest_median_filter, resample = map(int, [speaker_id, crepe_hop_length, harvest_median_filter, resample]) - transposition, mix, feature_ratio, protection_amnt = map(float, [transposition, mix, feature_ratio, protection_amnt]) - - if do_formant.lower() == 'false': - Quefrency = 1.0 - Timbre = 1.0 - else: - Quefrency, Timbre = map(float, cli_split_command(com)[17:19]) - - rvc_globals.DoFormant = do_formant.lower() == 'true' - rvc_globals.Quefrency = Quefrency - rvc_globals.Timbre = Timbre - - output_message = 'Mangio-RVC-Fork Infer-CLI:' - output_path = f'audio-others/{output_file_name}' - - print(f"{output_message} Starting the inference...") - vc_data = get_vc(model_name, protection_amnt, protection_amnt) - print(vc_data) - - print(f"{output_message} Performing inference...") - conversion_data = vc_single( - speaker_id, - source_audio_path, - source_audio_path, - transposition, - None, # f0 file support not implemented - f0_method, - feature_index_path, - feature_index_path, - feature_ratio, - harvest_median_filter, - resample, - mix, - protection_amnt, - crepe_hop_length, - f0_min=f0_min, - note_min=None, - f0_max=f0_max, - note_max=None - ) - - if "Success." in conversion_data[0]: - print(f"{output_message} Inference succeeded. Writing to {output_path}...") - wavfile.write(output_path, conversion_data[1][0], conversion_data[1][1]) - print(f"{output_message} Finished! Saved output to {output_path}") - else: - print(f"{output_message} Inference failed. Here's the traceback: {conversion_data[0]}") - -def cli_pre_process(com): - print("Mangio-RVC-Fork Pre-process: Starting...") - execute_generator_function( - preprocess_dataset( - *cli_split_command(com)[:3], - int(cli_split_command(com)[3]) - ) - ) - print("Mangio-RVC-Fork Pre-process: Finished") - -def cli_extract_feature(com): - model_name, gpus, num_processes, has_pitch_guidance, f0_method, crepe_hop_length, version = cli_split_command(com) - - num_processes = int(num_processes) - has_pitch_guidance = bool(int(has_pitch_guidance)) - crepe_hop_length = int(crepe_hop_length) - - print( - f"Mangio-RVC-CLI: Extract Feature Has Pitch: {has_pitch_guidance}" - f"Mangio-RVC-CLI: Extract Feature Version: {version}" - "Mangio-RVC-Fork Feature Extraction: Starting..." - ) - generator = extract_f0_feature( - gpus, - num_processes, - f0_method, - has_pitch_guidance, - model_name, - version, - crepe_hop_length - ) - execute_generator_function(generator) - print("Mangio-RVC-Fork Feature Extraction: Finished") - -def cli_train(com): - com = cli_split_command(com) - model_name = com[0] - sample_rate = com[1] - bool_flags = [bool(int(i)) for i in com[2:11]] - version = com[11] - - pretrained_base = "pretrained/" if version == "v1" else "pretrained_v2/" - - g_pretrained_path = f"{pretrained_base}f0G{sample_rate}.pth" - d_pretrained_path = f"{pretrained_base}f0D{sample_rate}.pth" - - print("Mangio-RVC-Fork Train-CLI: Training...") - click_train(model_name, sample_rate, *bool_flags, g_pretrained_path, d_pretrained_path, version) - -def cli_train_feature(com): - output_message = 'Mangio-RVC-Fork Train Feature Index-CLI' - print(f"{output_message}: Training... 
Please wait") - execute_generator_function(train_index(*cli_split_command(com))) - print(f"{output_message}: Done!") - -def cli_extract_model(com): - extract_small_model_process = extract_small_model(*cli_split_command(com)) - print( - "Mangio-RVC-Fork Extract Small Model: Success!" - if extract_small_model_process == "Success." - else f"{extract_small_model_process}\nMangio-RVC-Fork Extract Small Model: Failed!" - ) - -def preset_apply(preset, qfer, tmbr): - if preset: - try: - with open(preset, 'r') as p: - content = p.read().splitlines() - qfer, tmbr = content[0], content[1] - formant_apply(qfer, tmbr) - except IndexError: - print("Error: File does not have enough lines to read 'qfer' and 'tmbr'") - except FileNotFoundError: - print("Error: File does not exist") - except Exception as e: - print("An unexpected error occurred", e) - - return ({"value": qfer, "__type__": "update"}, {"value": tmbr, "__type__": "update"}) - -def print_page_details(): - page_description = { - - 'HOME': - "\n go home : Takes you back to home with a navigation list." - "\n go infer : Takes you to inference command execution." - "\n go pre-process : Takes you to training step.1) pre-process command execution." - "\n go extract-feature : Takes you to training step.2) extract-feature command execution." - "\n go train : Takes you to training step.3) being or continue training command execution." - "\n go train-feature : Takes you to the train feature index command execution." - "\n go extract-model : Takes you to the extract small model command execution." - - , 'INFER': - "\n arg 1) model name with .pth in ./weights: mi-test.pth" - "\n arg 2) source audio path: myFolder\\MySource.wav" - "\n arg 3) output file name to be placed in './audio-others': MyTest.wav" - "\n arg 4) feature index file path: logs/mi-test/added_IVF3042_Flat_nprobe_1.index" - "\n arg 5) speaker id: 0" - "\n arg 6) transposition: 0" - "\n arg 7) f0 method: harvest (pm, harvest, crepe, crepe-tiny, hybrid[x,x,x,x], mangio-crepe, mangio-crepe-tiny, rmvpe)" - "\n arg 8) crepe hop length: 160" - "\n arg 9) harvest median filter radius: 3 (0-7)" - "\n arg 10) post resample rate: 0" - "\n arg 11) mix volume envelope: 1" - "\n arg 12) feature index ratio: 0.78 (0-1)" - "\n arg 13) Voiceless Consonant Protection (Less Artifact): 0.33 (Smaller number = more protection. 
0.50 means Dont Use.)" - "\n arg 14) Whether to formant shift the inference audio before conversion: False (if set to false, you can ignore setting the quefrency and timbre values for formanting)" - "\n arg 15)* Quefrency for formanting: 8.0 (no need to set if arg14 is False/false)" - "\n arg 16)* Timbre for formanting: 1.2 (no need to set if arg14 is False/false) \n" - "\nExample: mi-test.pth saudio/Sidney.wav myTest.wav logs/mi-test/added_index.index 0 -2 harvest 160 3 0 1 0.95 0.33 0.45 True 8.0 1.2" - - , 'PRE-PROCESS': - "\n arg 1) Model folder name in ./logs: mi-test" - "\n arg 2) Trainset directory: mydataset (or) E:\\my-data-set" - "\n arg 3) Sample rate: 40k (32k, 40k, 48k)" - "\n arg 4) Number of CPU threads to use: 8 \n" - "\nExample: mi-test mydataset 40k 24" - - , 'EXTRACT-FEATURE': - "\n arg 1) Model folder name in ./logs: mi-test" - "\n arg 2) Gpu card slot: 0 (0-1-2 if using 3 GPUs)" - "\n arg 3) Number of CPU threads to use: 8" - "\n arg 4) Has Pitch Guidance?: 1 (0 for no, 1 for yes)" - "\n arg 5) f0 Method: harvest (pm, harvest, dio, crepe)" - "\n arg 6) Crepe hop length: 128" - "\n arg 7) Version for pre-trained models: v2 (use either v1 or v2)\n" - "\nExample: mi-test 0 24 1 harvest 128 v2" - - , 'TRAIN': - "\n arg 1) Model folder name in ./logs: mi-test" - "\n arg 2) Sample rate: 40k (32k, 40k, 48k)" - "\n arg 3) Has Pitch Guidance?: 1 (0 for no, 1 for yes)" - "\n arg 4) speaker id: 0" - "\n arg 5) Save epoch iteration: 50" - "\n arg 6) Total epochs: 10000" - "\n arg 7) Batch size: 8" - "\n arg 8) Gpu card slot: 0 (0-1-2 if using 3 GPUs)" - "\n arg 9) Save only the latest checkpoint: 0 (0 for no, 1 for yes)" - "\n arg 10) Whether to cache training set to vram: 0 (0 for no, 1 for yes)" - "\n arg 11) Save extracted small model every generation?: 0 (0 for no, 1 for yes)" - "\n arg 12) Model architecture version: v2 (use either v1 or v2)\n" - "\nExample: mi-test 40k 1 0 50 10000 8 0 0 0 0 v2" - - , 'TRAIN-FEATURE': - "\n arg 1) Model folder name in ./logs: mi-test" - "\n arg 2) Model architecture version: v2 (use either v1 or v2)\n" - "\nExample: mi-test v2" - - , 'EXTRACT-MODEL': - "\n arg 1) Model Path: logs/mi-test/G_168000.pth" - "\n arg 2) Model save name: MyModel" - "\n arg 3) Sample rate: 40k (32k, 40k, 48k)" - "\n arg 4) Has Pitch Guidance?: 1 (0 for no, 1 for yes)" - '\n arg 5) Model information: "My Model"' - "\n arg 6) Model architecture version: v2 (use either v1 or v2)\n" - '\nExample: logs/mi-test/G_168000.pth MyModel 40k 1 "Created by Cole Mangio" v2' - - } - - print(page_description.get(cli_current_page, 'Invalid page')) - - -def change_page(page): - global cli_current_page - cli_current_page = page - return 0 -def execute_command(com): - command_to_page = { - "go home": "HOME", - "go infer": "INFER", - "go pre-process": "PRE-PROCESS", - "go extract-feature": "EXTRACT-FEATURE", - "go train": "TRAIN", - "go train-feature": "TRAIN-FEATURE", - "go extract-model": "EXTRACT-MODEL", - } - - page_to_function = { - "INFER": cli_infer, - "PRE-PROCESS": cli_pre_process, - "EXTRACT-FEATURE": cli_extract_feature, - "TRAIN": cli_train, - "TRAIN-FEATURE": cli_train_feature, - "EXTRACT-MODEL": cli_extract_model, - } - - if com in command_to_page: - return change_page(command_to_page[com]) - - if com[:3] == "go ": - print(f"page '{com[3:]}' does not exist!") - return 0 - - if cli_current_page in page_to_function: - page_to_function[cli_current_page](com) - -def cli_navigation_loop(): - while True: - print(f"\nYou are currently in '{cli_current_page}':") - 
print_page_details() - print(f"{cli_current_page}: ", end="") - try: execute_command(input()) - except Exception as e: print(f"An error occurred: {traceback.format_exc()}") - -if(config.is_cli): - print( - "\n\nMangio-RVC-Fork v2 CLI App!\n" - "Welcome to the CLI version of RVC. Please read the documentation on https://github.com/Mangio621/Mangio-RVC-Fork (README.MD) to understand how to use this app.\n" - ) - cli_navigation_loop() - -#endregion - -#region RVC WebUI App -''' -def get_presets(): - data = None - with open('../inference-presets.json', 'r') as file: - data = json.load(file) - preset_names = [] - for preset in data['presets']: - preset_names.append(preset['name']) - - return preset_names -''' - -def switch_pitch_controls(f0method0): - is_visible = f0method0 != 'rmvpe' - - if rvc_globals.NotesOrHertz: - return ( - {"visible": False, "__type__": "update"}, - {"visible": is_visible, "__type__": "update"}, - {"visible": False, "__type__": "update"}, - {"visible": is_visible, "__type__": "update"} - ) - else: - return ( - {"visible": is_visible, "__type__": "update"}, - {"visible": False, "__type__": "update"}, - {"visible": is_visible, "__type__": "update"}, - {"visible": False, "__type__": "update"} - ) - -def match_index(sid0: str) -> tuple: - sid0strip = re.sub(r'\.pth|\.onnx$', '', sid0) - sid0name = os.path.split(sid0strip)[-1] # Extract only the name, not the directory - - # Check if the sid0strip has the specific ending format _eXXX_sXXX - if re.match(r'.+_e\d+_s\d+$', sid0name): - base_model_name = sid0name.rsplit('_', 2)[0] - else: - base_model_name = sid0name - - sid_directory = os.path.join(index_root, base_model_name) - directories_to_search = [sid_directory] if os.path.exists(sid_directory) else [] - directories_to_search.append(index_root) - - matching_index_files = [] - - for directory in directories_to_search: - for filename in os.listdir(directory): - if filename.endswith('.index') and 'trained' not in filename: - # Condition to match the name - name_match = any(name.lower() in filename.lower() for name in [sid0name, base_model_name]) - - # If in the specific directory, it's automatically a match - folder_match = directory == sid_directory - - if name_match or folder_match: - index_path = os.path.join(directory, filename) - if index_path in indexes_list: - matching_index_files.append((index_path, os.path.getsize(index_path), ' ' not in filename)) - - if matching_index_files: - # Sort by favoring files without spaces and by size (largest size first) - matching_index_files.sort(key=lambda x: (-x[2], -x[1])) - best_match_index_path = matching_index_files[0][0] - return best_match_index_path, best_match_index_path - - return '', '' -def stoptraining(mim): - if mim: - try: - with open('csvdb/stop.csv', 'w+') as file: file.write("True") - os.kill(PID, SIGTERM) - except Exception as e: - print(f"Couldn't click due to {e}") - return ( - {"visible": True , "__type__": "update"}, - {"visible": False, "__type__": "update"}) - return ( - {"visible": False, "__type__": "update"}, - {"visible": True , "__type__": "update"}) - -tab_faq = i18n("常见问题解答") -faq_file = "docs/faq.md" if tab_faq == "常见问题解答" else "docs/faq_en.md" -weights_dir = 'weights/' - -def note_to_hz(note_name): - SEMITONES = {'C': -9, 'C#': -8, 'D': -7, 'D#': -6, 'E': -5, 'F': -4, 'F#': -3, 'G': -2, 'G#': -1, 'A': 0, 'A#': 1, 'B': 2} - pitch_class, octave = note_name[:-1], int(note_name[-1]) - semitone = SEMITONES[pitch_class] - note_number = 12 * (octave - 4) + semitone - frequency = 440.0 * (2.0 ** (1.0/12)) ** 
note_number - return frequency - -def save_to_wav(record_button): - if record_button is None: - pass - else: - path_to_file=record_button - new_name = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")+'.wav' - new_path='./audios/'+new_name - shutil.move(path_to_file,new_path) - return new_name - -def save_to_wav2(dropbox): - file_path = dropbox.name - target_path = os.path.join('audios', os.path.basename(file_path)) - - if os.path.exists(target_path): - os.remove(target_path) - print('Replacing old dropdown file...') - - shutil.move(file_path, target_path) - return target_path - -def change_choices2(): - return "" - -def GradioSetup(UTheme=gr.themes.Soft()): - - default_weight = names[0] if names else '' # Set the first found weight as the preloaded model - - with gr.Blocks(theme='JohnSmith9982/small_and_pretty', title="Applio") as app: - gr.HTML("
🍏 Applio (Mangio-RVC-Fork)
") - # gr.Markdown( - # value=i18n( - # "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责.
如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录使用需遵守的协议-LICENSE.txt." - # ) - #) - with gr.Tabs(): - with gr.TabItem(i18n("模型推理")): - with gr.Row(): - sid0 = gr.Dropdown(label=i18n("推理音色"), choices=sorted(names), value=default_weight) - refresh_button = gr.Button(i18n("刷新音色列表和索引路径"), variant="primary") - clean_button = gr.Button(i18n("卸载音色省显存"), variant="primary") - clean_button.click(fn=lambda: ({"value": "", "__type__": "update"}), inputs=[], outputs=[sid0]) - - - with gr.TabItem(i18n("单个")): - with gr.Row(): - spk_item = gr.Slider( - minimum=0, - maximum=2333, - step=1, - label=i18n("请选择说话人id"), - value=0, - visible=False, - interactive=True, - ) - #clean_button.click(fn=lambda: ({"value": "", "__type__": "update"}), inputs=[], outputs=[sid0]) - - with gr.Group(): # Defines whole single inference option section - with gr.Row(): - with gr.Column(): # First column for audio-related inputs - dropbox = gr.File(label=i18n("将音频拖到此处,然后点击刷新按钮")) - record_button=gr.Audio(source="microphone", label=i18n("或录制音频"), type="filepath") - input_audio0 = gr.Textbox( - label=i18n("Manual path to the audio file to be processed"), - value=os.path.join(now_dir, "audios", "someguy.mp3"), - visible=False - ) - input_audio1 = gr.Dropdown( - label=i18n("自动检测音频路径并从下拉菜单中选择:"), - choices=sorted(audio_paths), - value='', - interactive=True, - ) - - input_audio1.select(fn=lambda:'',inputs=[],outputs=[input_audio0]) - input_audio0.input(fn=lambda:'',inputs=[],outputs=[input_audio1]) - - dropbox.upload(fn=save_to_wav2, inputs=[dropbox], outputs=[input_audio0]) - dropbox.upload(fn=change_choices2, inputs=[], outputs=[input_audio1]) - record_button.change(fn=save_to_wav, inputs=[record_button], outputs=[input_audio0]) - record_button.change(fn=change_choices2, inputs=[], outputs=[input_audio1]) - - best_match_index_path1, _ = match_index(sid0.value) # Get initial index from default sid0 (first voice model in list) - - with gr.Column(): # Second column for pitch shift and other options - file_index2 = gr.Dropdown( - label=i18n("自动检测index路径,下拉式选择(dropdown)"), - choices=get_indexes(), - value=best_match_index_path1, - interactive=True, - allow_custom_value=True, - ) - index_rate1 = gr.Slider( - minimum=0, - maximum=1, - label=i18n("检索特征占比"), - value=0.75, - interactive=True, - ) - refresh_button.click( - fn=change_choices, inputs=[], outputs=[sid0, file_index2, input_audio1] - ) - with gr.Column(): - vc_transform0 = gr.Number( - label=i18n("变调(整数, 半音数量, 升八度12降八度-12)"), value=0 - ) - - # Create a checkbox for advanced settings - advanced_settings_checkbox = gr.Checkbox( - value=False, - label=i18n("高级设置"), - interactive=True, - ) - - # Advanced settings container - with gr.Column(visible=False) as advanced_settings: # Initially hidden - with gr.Row(label = i18n("高级设置"), open = False): - with gr.Column(): - f0method0 = gr.Radio( - label=i18n( - "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU" - ), - choices=["pm", "harvest", "dio", "crepe", "crepe-tiny", "mangio-crepe", "mangio-crepe-tiny", "rmvpe", "rmvpe+"], - value="rmvpe+", - interactive=True, - ) - crepe_hop_length = gr.Slider( - minimum=1, - maximum=512, - step=1, - label=i18n("crepe_hop_length"), - value=120, - interactive=True, - visible=False, - ) - filter_radius0 = gr.Slider( - minimum=0, - maximum=7, - label=i18n(">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音"), - value=3, - step=1, - interactive=True, - ) - - minpitch_slider = gr.Slider( - label = i18n("音高最小值"), - info = i18n("指定推断的最小音高 [HZ]"), - step = 0.1, - minimum = 1, - scale = 0, - value = 50, - maximum = 16000, - 
interactive = True, - visible = (not rvc_globals.NotesOrHertz) and (f0method0.value != 'rmvpe'), - ) - minpitch_txtbox = gr.Textbox( - label = i18n("音高最小值"), - info = i18n("为推断指定最小音高 [音符][八度]"), - placeholder = "C5", - visible = (rvc_globals.NotesOrHertz) and (f0method0.value != 'rmvpe'), - interactive = True, - ) - - maxpitch_slider = gr.Slider( - label = i18n("音高最大值"), - info = i18n("指定推断的最大音高 [HZ]"), - step = 0.1, - minimum = 1, - scale = 0, - value = 1100, - maximum = 16000, - interactive = True, - visible = (not rvc_globals.NotesOrHertz) and (f0method0.value != 'rmvpe'), - ) - maxpitch_txtbox = gr.Textbox( - label = i18n("音高最大值"), - info = i18n("为推断指定最大音高 [音符][八度]"), - placeholder = "C6", - visible = (rvc_globals.NotesOrHertz) and (f0method0.value != 'rmvpe'), - interactive = True, - ) - - with gr.Column(): - file_index1 = gr.Textbox( - label=i18n("特征检索库文件路径,为空则使用下拉的选择结果"), - value="", - interactive=True, - ) - - with gr.Accordion(label = i18n("自定义 f0 [根音] 文件"), open = False): - f0_file = gr.File(label=i18n("F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调")) - - f0method0.change( - fn=lambda radio: ( - { - "visible": radio in ['mangio-crepe', 'mangio-crepe-tiny'], - "__type__": "update" - } - ), - inputs=[f0method0], - outputs=[crepe_hop_length] - ) - - f0method0.change( - fn=switch_pitch_controls, - inputs=[f0method0], - outputs=[minpitch_slider, minpitch_txtbox, - maxpitch_slider, maxpitch_txtbox] - ) - - with gr.Column(): - resample_sr0 = gr.Slider( - minimum=0, - maximum=48000, - label=i18n("后处理重采样至最终采样率,0为不进行重采样"), - value=0, - step=1, - interactive=True, - ) - rms_mix_rate0 = gr.Slider( - minimum=0, - maximum=1, - label=i18n("输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络"), - value=0.25, - interactive=True, - ) - protect0 = gr.Slider( - minimum=0, - maximum=0.5, - label=i18n( - "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果" - ), - value=0.33, - step=0.01, - interactive=True, - ) - formanting = gr.Checkbox( - value=bool(DoFormant), - label=i18n("共振声移动推理音频"), - info=i18n("用于将男性转换为女性,反之亦然"), - interactive=True, - visible=True, - ) - - formant_preset = gr.Dropdown( - value='', - choices=get_fshift_presets(), - label=i18n("浏览共振峰预设"), - info=i18n("预设位于 formantshiftcfg/ 文件夹中"), - visible=bool(DoFormant), - ) - - formant_refresh_button = gr.Button( - value='\U0001f504', - visible=bool(DoFormant), - variant='primary', - ) - - qfrency = gr.Slider( - value=Quefrency, - info=i18n("默认值为 1.0"), - label=i18n("用于共振峰变换的 Quefrency"), - minimum=0.0, - maximum=16.0, - step=0.1, - visible=bool(DoFormant), - interactive=True, - ) - - tmbre = gr.Slider( - value=Timbre, - info=i18n("默认值为 1.0"), - label=i18n("用于共振峰变换的音色"), - minimum=0.0, - maximum=16.0, - step=0.1, - visible=bool(DoFormant), - interactive=True, - ) - frmntbut = gr.Button(i18n("应用"), variant="primary", visible=bool(DoFormant)) - - formant_preset.change(fn=preset_apply, inputs=[formant_preset, qfrency, tmbre], outputs=[qfrency, tmbre]) - - formanting.change(fn=formant_enabled,inputs=[formanting,qfrency,tmbre],outputs=[formanting,qfrency,tmbre,frmntbut,formant_preset,formant_refresh_button]) - frmntbut.click(fn=formant_apply,inputs=[qfrency, tmbre], outputs=[qfrency, tmbre]) - formant_refresh_button.click(fn=update_fshift_presets,inputs=[formant_preset, qfrency, tmbre],outputs=[formant_preset, qfrency, tmbre]) - - # Function to toggle advanced settings - def toggle_advanced_settings(checkbox): - return {"visible": checkbox, "__type__": "update"} - - # Attach the change event - advanced_settings_checkbox.change( - fn=toggle_advanced_settings, - 
inputs=[advanced_settings_checkbox], - outputs=[advanced_settings] - ) - - - but0 = gr.Button(i18n("转换"), variant="primary").style(full_width=True) - - with gr.Row(): # Defines output info + output audio download after conversion - vc_output1 = gr.Textbox(label=i18n("输出信息")) - vc_output2 = gr.Audio(label=i18n("输出音频(右下角三个点,点了可以下载)")) - - with gr.Group(): # I think this defines the big convert button - with gr.Row(): - but0.click( - vc_single, - [ - spk_item, - input_audio0, - input_audio1, - vc_transform0, - f0_file, - f0method0, - file_index1, - file_index2, - index_rate1, - filter_radius0, - resample_sr0, - rms_mix_rate0, - protect0, - crepe_hop_length, - minpitch_slider, minpitch_txtbox, - maxpitch_slider, maxpitch_txtbox, - ], - [vc_output1, vc_output2], - ) - - - with gr.TabItem(i18n("批处理")): # Dont Change - with gr.Group(): # Markdown explanation of batch inference - gr.Markdown( - value=i18n("批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ") - ) - with gr.Row(): - with gr.Column(): - vc_transform1 = gr.Number( - label=i18n("变调(整数, 半音数量, 升八度12降八度-12)"), value=0 - ) - opt_input = gr.Textbox(label=i18n("指定输出文件夹"), value="opt") - with gr.Column(): - file_index4 = gr.Dropdown( - label=i18n("自动检测index路径,下拉式选择(dropdown)"), - choices=get_indexes(), - value=best_match_index_path1, - interactive=True, - ) - sid0.select(fn=match_index, inputs=[sid0], outputs=[file_index2, file_index4]) - - refresh_button.click( - fn=lambda: change_choices()[1], - inputs=[], - outputs=file_index4, - ) - index_rate2 = gr.Slider( - minimum=0, - maximum=1, - label=i18n("检索特征占比"), - value=0.75, - interactive=True, - ) - with gr.Row(): - dir_input = gr.Textbox( - label=i18n("输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)"), - value=os.path.join(now_dir, "audios"), - ) - inputs = gr.File( - file_count="multiple", label=i18n("也可批量输入音频文件, 二选一, 优先读文件夹") - ) - - with gr.Row(): - with gr.Column(): - # Create a checkbox for advanced batch settings - advanced_settings_batch_checkbox = gr.Checkbox( - value=False, - label=i18n("高级设置"), - interactive=True, - ) - - # Advanced batch settings container - with gr.Row(visible=False) as advanced_settings_batch: # Initially hidden - with gr.Row(label = i18n("高级设置[批量]"), open = False): - with gr.Column(): - file_index3 = gr.Textbox( - label=i18n("特征检索库文件路径,为空则使用下拉的选择结果"), - value="", - interactive=True, - ) - - f0method1 = gr.Radio( - label=i18n( - "选择音高提取算法,输入歌声可用pm提速,harvest低音好但巨慢无比,crepe效果好但吃GPU" - ), - choices=["pm", "harvest", "crepe", "rmvpe"], - value="rmvpe", - interactive=True, - ) - filter_radius1 = gr.Slider( - minimum=0, - maximum=7, - label=i18n(">=3则使用对harvest音高识别的结果使用中值滤波,数值为滤波半径,使用可以削弱哑音"), - value=3, - step=1, - interactive=True, - ) - - with gr.Row(): - format1 = gr.Radio( - label=i18n("导出文件格式"), - choices=["wav", "flac", "mp3", "m4a"], - value="flac", - interactive=True, - ) - - - with gr.Column(): - resample_sr1 = gr.Slider( - minimum=0, - maximum=48000, - label=i18n("后处理重采样至最终采样率,0为不进行重采样"), - value=0, - step=1, - interactive=True, - ) - rms_mix_rate1 = gr.Slider( - minimum=0, - maximum=1, - label=i18n("输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络"), - value=1, - interactive=True, - ) - protect1 = gr.Slider( - minimum=0, - maximum=0.5, - label=i18n( - "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果" - ), - value=0.33, - step=0.01, - interactive=True, - ) - vc_output3 = gr.Textbox(label=i18n("输出信息")) - but1 = gr.Button(i18n("转换"), variant="primary") - but1.click( - vc_multi, - [ - spk_item, - dir_input, - opt_input, - inputs, - vc_transform1, - f0method1, - file_index3, - file_index4, - 
index_rate2, - filter_radius1, - resample_sr1, - rms_mix_rate1, - protect1, - format1, - crepe_hop_length, - minpitch_slider if (not rvc_globals.NotesOrHertz) else minpitch_txtbox, - maxpitch_slider if (not rvc_globals.NotesOrHertz) else maxpitch_txtbox, - ], - [vc_output3], - ) - - sid0.change( - fn=get_vc, - inputs=[sid0, protect0, protect1], - outputs=[spk_item, protect0, protect1], - ) - - spk_item, protect0, protect1 = get_vc(sid0.value, protect0, protect1) # Set VC parameters for the preloaded model - - # Function to toggle advanced settings - def toggle_advanced_settings_batch(checkbox): - return {"visible": checkbox, "__type__": "update"} - - # Attach the change event - advanced_settings_batch_checkbox.change( - fn=toggle_advanced_settings_batch, - inputs=[advanced_settings_batch_checkbox], - outputs=[advanced_settings_batch] - ) - - - - - - - - # with gr.TabItem(i18n("伴奏人声分离&去混响&去回声")): # UVR section - # with gr.Group(): - # gr.Markdown( - # value=i18n( - # "人声伴奏分离批量处理, 使用UVR5模型。
" - # "合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。
" - # "模型分为三类:
" - # "1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;
" - # "2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;
" - # "3、去混响、去延迟模型(by FoxJoy):
" - # "  (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;
" - # " (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。
" - # "去混响/去延迟,附:
" - # "1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;
" - # "2、MDX-Net-Dereverb模型挺慢的;
" - # "3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。" - # ) - # ) - # with gr.Row(): - # with gr.Column(): - # dir_wav_input = gr.Textbox( - # label=i18n("输入待处理音频文件夹路径"), - # value=os.path.join(now_dir, "audios") - # ) - # wav_inputs = gr.File( - # file_count="multiple", label=i18n("也可批量输入音频文件, 二选一, 优先读文件夹") - # ) - # with gr.Column(): - # model_choose = gr.Dropdown(label=i18n("模型"), choices=uvr5_names) - # agg = gr.Slider( - # minimum=0, - # maximum=20, - # step=1, - # label="人声提取激进程度", - # value=10, - # interactive=True, - # visible=False, - # ) - # opt_vocal_root = gr.Textbox( - # label=i18n("指定输出主人声文件夹"), value="opt" - # ) - # opt_ins_root = gr.Textbox( - # label=i18n("指定输出非主人声文件夹"), value="opt" - # ) - # format0 = gr.Radio( - # label=i18n("导出文件格式"), - # choices=["wav", "flac", "mp3", "m4a"], - # value="flac", - # interactive=True, - # ) - # but2 = gr.Button(i18n("转换"), variant="primary") - # vc_output4 = gr.Textbox(label=i18n("输出信息")) - # but2.click( - # uvr, - # [ - # model_choose, - # dir_wav_input, - # opt_vocal_root, - # wav_inputs, - # opt_ins_root, - # agg, - # format0, - # ], - # [vc_output4], - # ) - with gr.TabItem(i18n("训练")): - gr.Markdown( - value=i18n( - "step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. " - ) - ) - with gr.Row(): - exp_dir1 = gr.Textbox(label=i18n("输入实验名"), value=i18n("宓模型")) - sr2 = gr.Radio( - label=i18n("目标采样率"), - choices=["40k", "48k", "32k"], - value="40k", - interactive=True, - ) - if_f0_3 = gr.Checkbox( - label=i18n("模型是否具有俯仰引导功能"), - value=True, - interactive=True, - ) - version19 = gr.Radio( - label=i18n("版本"), - choices=["v1", "v2"], - value="v2", - interactive=True, - visible=True, - ) - np7 = gr.Slider( - minimum=0, - maximum=config.n_cpu, - step=1, - label=i18n("提取音高和处理数据使用的CPU进程数"), - value=int(np.ceil(config.n_cpu / 1.5)), - interactive=True, - ) - with gr.Group(): - gr.Markdown( - value=i18n( - "step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. " - ) - ) - with gr.Row(): - # trainset_dir4 = gr.Textbox( - # label=i18n("输入训练文件夹路径"), value=os.path.join(now_dir, datasets_root) - # ) - trainset_dir4 = gr.Dropdown(choices=sorted(datasets), label=i18n("选择你的数据集。"), value=get_dataset()) - btn_update_dataset_list = gr.Button(i18n("更新清单。"), variant="primary") - spk_id5 = gr.Slider( - minimum=0, - maximum=4, - step=1, - label=i18n("请指定说话人id"), - value=0, - interactive=True, - ) - btn_update_dataset_list.click( - easy_infer.update_dataset_list, [spk_id5], trainset_dir4 - ) - but1 = gr.Button(i18n("处理数据"), variant="primary") - info1 = gr.Textbox(label=i18n("输出信息"), value="") - but1.click( - preprocess_dataset, [trainset_dir4, exp_dir1, sr2, np7], [info1] - ) - with gr.Group(): - gr.Markdown(value=i18n("step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)")) - with gr.Row(): - with gr.Column(): - gpus6 = gr.Textbox( - label=i18n("以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2"), - value=gpus, - interactive=True, - ) - gr.Textbox(label=i18n("显卡信息"), value=gpu_info) - with gr.Column(): - f0method8 = gr.Radio( - label=i18n( - "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢" - ), - choices=["pm", "harvest", "dio", "crepe", "mangio-crepe", "rmvpe"], - # [ MANGIO ]: Fork feature: Crepe on f0 extraction for training. 
- value="rmvpe", - interactive=True, - ) - - extraction_crepe_hop_length = gr.Slider( - minimum=1, - maximum=512, - step=1, - label=i18n("crepe_hop_length"), - value=64, - interactive=True, - visible=False, - ) - - f0method8.change( - fn=lambda radio: ( - { - "visible": radio in ['mangio-crepe', 'mangio-crepe-tiny'], - "__type__": "update" - } - ), - inputs=[f0method8], - outputs=[extraction_crepe_hop_length] - ) - but2 = gr.Button(i18n("特征提取"), variant="primary") - info2 = gr.Textbox(label=i18n("输出信息"), value="", max_lines=8, interactive=False) - but2.click( - extract_f0_feature, - [gpus6, np7, f0method8, if_f0_3, exp_dir1, version19, extraction_crepe_hop_length], - [info2], - ) - with gr.Group(): - gr.Markdown(value=i18n("step3: 填写训练设置, 开始训练模型和索引")) - with gr.Row(): - save_epoch10 = gr.Slider( - minimum=1, - maximum=50, - step=1, - label=i18n("保存频率save_every_epoch"), - value=10, - interactive=True, - visible=True, - ) - total_epoch11 = gr.Slider( - minimum=1, - maximum=10000, - step=2, - label=i18n("总训练轮数total_epoch"), - value=750, - interactive=True, - ) - batch_size12 = gr.Slider( - minimum=1, - maximum=40, - step=1, - label=i18n("每张显卡的batch_size"), - #value=default_batch_size, - value=20, - interactive=True, - ) - if_save_latest13 = gr.Checkbox( - label=i18n("是否只保存最新的 .ckpt 文件以节省硬盘空间"), - value=True, - interactive=True, - ) - if_cache_gpu17 = gr.Checkbox( - label=i18n("将所有训练集缓存到 GPU 内存中。缓存小型数据集(少于 10 分钟)可以加快训练速度,但缓存大型数据集会消耗大量 GPU 内存,可能无法显著提高速度"), - value=False, - interactive=True, - ) - if_save_every_weights18 = gr.Checkbox( - label=i18n("在每个保存点将一个小的最终模型保存到 权重 文件夹中"), - value=True, - interactive=True, - ) - with gr.Row(): - pretrained_G14 = gr.Textbox( - lines=2, - label=i18n("加载预训练底模G路径"), - value="pretrained_v2/f0G40k.pth", - interactive=True, - ) - pretrained_D15 = gr.Textbox( - lines=2, - label=i18n("加载预训练底模D路径"), - value="pretrained_v2/f0D40k.pth", - interactive=True, - ) - sr2.change( - change_sr2, - [sr2, if_f0_3, version19], - [pretrained_G14, pretrained_D15], - ) - version19.change( - change_version19, - [sr2, if_f0_3, version19], - [pretrained_G14, pretrained_D15, sr2], - ) - if_f0_3.change( - fn=change_f0, - inputs=[if_f0_3, sr2, version19], - outputs=[f0method8, pretrained_G14, pretrained_D15], - ) - if_f0_3.change(fn=lambda radio: ( - { - "visible": radio in ['mangio-crepe', 'mangio-crepe-tiny'], - "__type__": "update" - } - ), inputs=[f0method8], outputs=[extraction_crepe_hop_length]) - gpus16 = gr.Textbox( - label=i18n("以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2"), - value=gpus, - interactive=True, - ) - butstop = gr.Button(i18n("停止培训"), - variant='primary', - visible=False, - ) - but3 = gr.Button(i18n("训练模型"), variant="primary", visible=True) - but3.click(fn=stoptraining, inputs=[gr.Number(value=0, visible=False)], outputs=[but3, butstop]) - butstop.click(fn=stoptraining, inputs=[gr.Number(value=1, visible=False)], outputs=[but3, butstop]) - - with gr.Column(scale=0): - gr.Markdown(value="
") - gr.Markdown(value="### " + i18n("保存前构建索引。")) - but4 = gr.Button(i18n("训练特征索引"), variant="primary") - gr.Markdown(value="### " + i18n("训练结束后保存您的模型。")) - save_action = gr.Dropdown(label=i18n("存储类型"), choices=[i18n("保存所有"),i18n("保存 D 和 G"),i18n("保存声音")], value=i18n("选择模型保存方法"), interactive=True) - but7 = gr.Button(i18n("保存模型"), variant="primary") - - - # but4 = gr.Button(i18n("训练特征索引"), variant="primary") - info3 = gr.Textbox(label=i18n("输出信息"), value="", max_lines=10) - - if_save_every_weights18.change( - fn=lambda if_save_every_weights: ( - { - "visible": if_save_every_weights, - "__type__": "update" - } - ), - inputs=[if_save_every_weights18], - outputs=[save_epoch10] - ) - - but3.click( - click_train, - [ - exp_dir1, - sr2, - if_f0_3, - spk_id5, - save_epoch10, - total_epoch11, - batch_size12, - if_save_latest13, - pretrained_G14, - pretrained_D15, - gpus16, - if_cache_gpu17, - if_save_every_weights18, - version19, - ], - [info3, butstop, but3], - ) - - but4.click(train_index, [exp_dir1, version19], info3) - but7.click(easy_infer.save_model, [exp_dir1, save_action], info3) - with gr.Group(): - gr.Markdown(value=i18n( - '步骤4:单击模型的导出最低点后,在模型图上的导出最低点,新文件将位于logs/[yourmodelname]/lowestvals/folder中') - ) - - with gr.Row(): - with gr.Accordion(label=i18n("最低点导出")): - - lowestval_weight_dir = gr.Textbox(visible=False) - ds = gr.Textbox(visible=False) - weights_dir1 = gr.Textbox(visible=False, value=weights_dir) - - - with gr.Row(): - amntlastmdls = gr.Slider( - minimum=1, - maximum=25, - label=i18n('保存多少个最低点'), - value=3, - step=1, - interactive=True, - ) - lpexport = gr.Button( - value=i18n('导出模型的最低点'), - variant='primary', - ) - lw_mdls = gr.File( - file_count="multiple", - label=i18n("输出型号"), - interactive=False, - ) ##### - - with gr.Row(): - infolpex = gr.Textbox(label=i18n("输出信息"), value="", max_lines=10) - mdlbl = gr.Dataframe(label=i18n('所选模型的统计数据'), datatype='number', type='pandas') - - lpexport.click( - lambda model_name: os.path.join("logs", model_name, "lowestvals"), - inputs=[exp_dir1], - outputs=[lowestval_weight_dir] - ) - - lpexport.click(fn=tensorlowest.main, inputs=[exp_dir1, save_epoch10, amntlastmdls], outputs=[ds]) - - ds.change( - fn=tensorlowest.selectweights, - inputs=[exp_dir1, ds, weights_dir1, lowestval_weight_dir], - outputs=[infolpex, lw_mdls, mdlbl], - ) - with gr.TabItem(i18n("伴奏人声分离&去混响&去回声")): # UVR section - with gr.Group(): - gr.Markdown( - value=i18n( - "人声伴奏分离批量处理, 使用UVR5模型。
" - "合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。
" - "模型分为三类:
" - "1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点;
" - "2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型;
" - "3、去混响、去延迟模型(by FoxJoy):
" - "  (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;
" - " (234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。
" - "去混响/去延迟,附:
" - "1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;
" - "2、MDX-Net-Dereverb模型挺慢的;
" - "3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。" - ) - ) - with gr.Row(): - with gr.Column(): - dir_wav_input = gr.Textbox( - label=i18n("输入待处理音频文件夹路径"), - value=os.path.join(now_dir, "audios") - ) - wav_inputs = gr.File( - file_count="multiple", label=i18n("也可批量输入音频文件, 二选一, 优先读文件夹") - ) - with gr.Column(): - model_choose = gr.Dropdown(label=i18n("模型"), choices=uvr5_names) - agg = gr.Slider( - minimum=0, - maximum=20, - step=1, - label="人声提取激进程度", - value=10, - interactive=True, - visible=False, - ) - opt_vocal_root = gr.Textbox( - label=i18n("指定输出主人声文件夹"), value="opt" - ) - opt_ins_root = gr.Textbox( - label=i18n("指定输出非主人声文件夹"), value="opt" - ) - format0 = gr.Radio( - label=i18n("导出文件格式"), - choices=["wav", "flac", "mp3", "m4a"], - value="flac", - interactive=True, - ) - but2 = gr.Button(i18n("转换"), variant="primary") - vc_output4 = gr.Textbox(label=i18n("输出信息")) - but2.click( - uvr, - [ - model_choose, - dir_wav_input, - opt_vocal_root, - wav_inputs, - opt_ins_root, - agg, - format0, - ], - [vc_output4], - ) - - # with gr.TabItem(i18n("Onnx导出")): - # with gr.Row(): - # ckpt_dir = gr.Textbox(label=i18n("RVC模型路径"), value="", interactive=True, placeholder="RVC model path.") - # with gr.Row(): - # onnx_dir = gr.Textbox( - # label=i18n("Onnx输出路径"), value="", interactive=True, placeholder="Onnx model output path." - # ) - # with gr.Row(): - # infoOnnx = gr.Label(label="info") - # with gr.Row(): - # butOnnx = gr.Button(i18n("导出Onnx模型"), variant="primary") - # butOnnx.click(export_onnx, [ckpt_dir, onnx_dir], infoOnnx) - - with gr.TabItem(i18n("资源")): - - easy_infer.download_model() - easy_infer.download_backup() - easy_infer.download_dataset(trainset_dir4) - easy_infer.youtube_separator() - with gr.TabItem(i18n("额外")): - gr.Markdown( - value=i18n("此部分包含一些额外的实用工具,通常可能处于实验阶段") - ) - with gr.TabItem(i18n("合并音频")): - with gr.Group(): # Defines whole single inference option section - gr.Markdown( - value="## " + i18n("将生成的音频与其他音频(伴奏)合并,还可以用它来创建视频") - ) - gr.Markdown(value="",scale="-0.5",visible=True) - gr.Markdown(value="",scale="-0.5",visible=True) - with gr.Row(): - with gr.Column(): # First column for audio-related inputs - dropbox = gr.File(label=i18n("将音频拖到此处,然后点击刷新按钮")) - input_audio1 = gr.Dropdown( - label=i18n("选择您的伴奏"), - choices=sorted(audio_others_paths), - value='', - interactive=True, - ) - input_audio3 = gr.Dropdown( - label=i18n("选择生成的音频"), - choices=sorted(audio_paths), - value='', - interactive=True, - ) - - butnone = gr.Button(i18n("合并"), variant="primary").style(full_width=True) - - vc_output1 = gr.Textbox(label=i18n("输出信息")) - vc_output2 = gr.Audio(label=i18n("输出音频(右下角三个点,点了可以下载)"), type='filepath') - - dropbox.upload(fn=save_to_wav2, inputs=[dropbox], outputs=[input_audio1]) - dropbox.upload(fn=change_choices2, inputs=[], outputs=[input_audio1]) - - refresh_button.click( - fn=lambda: change_choices3(), - inputs=[], - outputs=[input_audio1, input_audio3], - ) - - butnone.click( - fn=audio_combined, - inputs=[input_audio1, input_audio3], - outputs=[vc_output1, vc_output2] - ) - - - - - - - # with gr.Column(): # Second column for pitch shift and other options - - with gr.TabItem(i18n("ckpt处理")): - with gr.Group(): - gr.Markdown(value=i18n("模型融合, 可用于测试音色融合")) - with gr.Row(): - ckpt_a = gr.Textbox(label=i18n("A模型路径"), value="", interactive=True, placeholder="Path to your model A.") - ckpt_b = gr.Textbox(label=i18n("B模型路径"), value="", interactive=True, placeholder="Path to your model B.") - alpha_a = gr.Slider( - minimum=0, - maximum=1, - label=i18n("A模型权重"), - value=0.5, - 
interactive=True, - ) - with gr.Row(): - sr_ = gr.Radio( - label=i18n("目标采样率"), - choices=["40k", "48k"], - value="40k", - interactive=True, - ) - if_f0_ = gr.Checkbox( - label="Whether the model has pitch guidance.", - value=True, - interactive=True, - ) - info__ = gr.Textbox( - label=i18n("要置入的模型信息"), value="", max_lines=8, interactive=True, placeholder="Model information to be placed." - ) - name_to_save0 = gr.Textbox( - label=i18n("保存的模型名不带后缀"), - value="", - placeholder="Name for saving.", - max_lines=1, - interactive=True, - ) - version_2 = gr.Radio( - label=i18n("模型版本型号"), - choices=["v1", "v2"], - value="v1", - interactive=True, - ) - with gr.Row(): - but6 = gr.Button(i18n("融合"), variant="primary") - info4 = gr.Textbox(label=i18n("输出信息"), value="", max_lines=8) - but6.click( - merge, - [ - ckpt_a, - ckpt_b, - alpha_a, - sr_, - if_f0_, - info__, - name_to_save0, - version_2, - ], - info4, - ) # def merge(path1,path2,alpha1,sr,f0,info): - with gr.Group(): - gr.Markdown(value=i18n("修改模型信息(仅支持weights文件夹下提取的小模型文件)")) - with gr.Row(): ###### - ckpt_path0 = gr.Textbox( - label=i18n("模型路径"), placeholder="Path to your Model.", value="", interactive=True - ) - info_ = gr.Textbox( - label=i18n("要改的模型信息"), value="", max_lines=8, interactive=True, placeholder="Model information to be changed." - ) - name_to_save1 = gr.Textbox( - label=i18n("保存的文件名, 默认空为和源文件同名"), - placeholder="Either leave empty or put in the Name of the Model to be saved.", - value="", - max_lines=8, - interactive=True, - ) - with gr.Row(): - but7 = gr.Button(i18n("修改"), variant="primary") - info5 = gr.Textbox(label=i18n("输出信息"), value="", max_lines=8) - but7.click(change_info, [ckpt_path0, info_, name_to_save1], info5) - with gr.Group(): - gr.Markdown(value=i18n("查看模型信息(仅支持weights文件夹下提取的小模型文件)")) - with gr.Row(): - ckpt_path1 = gr.Textbox( - label=i18n("模型路径"), value="", interactive=True, placeholder="Model path here." - ) - but8 = gr.Button(i18n("查看"), variant="primary") - info6 = gr.Textbox(label=i18n("输出信息"), value="", max_lines=8) - but8.click(show_info, [ckpt_path1], info6) - with gr.Group(): - gr.Markdown( - value=i18n( - "模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况" - ) - ) - with gr.Row(): - ckpt_path2 = gr.Textbox( - lines=3, - label=i18n("模型路径"), - value=os.path.join(now_dir, "logs", "[YOUR_MODEL]", "G_23333.pth"), - interactive=True, - ) - save_name = gr.Textbox( - label=i18n("保存名"), value="", interactive=True, - placeholder="Your filename here.", - ) - sr__ = gr.Radio( - label=i18n("目标采样率"), - choices=["32k", "40k", "48k"], - value="40k", - interactive=True, - ) - if_f0__ = gr.Checkbox( - label="Whether the model has pitch guidance.", - value=True, - interactive=True, - ) - version_1 = gr.Radio( - label=i18n("模型版本型号"), - choices=["v1", "v2"], - value="v2", - interactive=True, - ) - info___ = gr.Textbox( - label=i18n("要置入的模型信息"), value="", max_lines=8, interactive=True, placeholder="Model info here." 
- ) - but9 = gr.Button(i18n("提取"), variant="primary") - info7 = gr.Textbox(label=i18n("输出信息"), value="", max_lines=8) - ckpt_path2.change( - change_info_, [ckpt_path2], [sr__, if_f0__, version_1] - ) - but9.click( - extract_small_model, - [ckpt_path2, save_name, sr__, if_f0__, info___, version_1], - info7, - ) - - with gr.TabItem(i18n("设置")): - with gr.Row(): - gr.Markdown(value= - i18n("音调设置") - ) - noteshertz = gr.Checkbox( - label = i18n("是否使用音符名称而不是它们的赫兹值。例如,使用[C5,D6]代替[523.25,1174.66]赫兹。"), - value = rvc_globals.NotesOrHertz, - interactive = True, - ) - - noteshertz.change(fn=lambda nhertz: rvc_globals.__setattr__('NotesOrHertz', nhertz), inputs=[noteshertz], outputs=[]) - - noteshertz.change( - fn=switch_pitch_controls, - inputs=[f0method0], - outputs=[ - minpitch_slider, minpitch_txtbox, - maxpitch_slider, maxpitch_txtbox,] - ) - - #with gr.TabItem(tab_faq): - #try: - #with open(faq_file, "r", encoding="utf8") as f: - #info = f.read() - #gr.Markdown(value=info) - #except: - #gr.Markdown(traceback.format_exc()) - return app - -def GradioRun(app): - share_gradio_link = config.iscolab or config.paperspace - concurrency_count = 511 - max_size = 1022 - - if ( - config.iscolab or config.paperspace - ): - app.queue(concurrency_count=concurrency_count, max_size=max_size).launch( - server_name="0.0.0.0", - inbrowser=not config.noautoopen, - server_port=config.listen_port, - quiet=True, - favicon_path="./icon.png", - share=share_gradio_link, - ) - else: - app.queue(concurrency_count=concurrency_count, max_size=max_size).launch( - server_name="0.0.0.0", - inbrowser=not config.noautoopen, - server_port=config.listen_port, - quiet=True, - favicon_path=".\icon.png", - share=share_gradio_link, - ) - -#endregion - -if __name__ == "__main__": - if os.name == 'nt': # Weird Windows async error when replacing a file. 
- print("Any ConnectionResetErrors post-conversion are irrelevant and purely visual; they can be ignored\n") - app = GradioSetup(UTheme=config.grtheme) - GradioRun(app) diff --git a/infer_batch_rvc.py b/infer_batch_rvc.py deleted file mode 100644 index 4ba8e05fc..000000000 --- a/infer_batch_rvc.py +++ /dev/null @@ -1,215 +0,0 @@ -""" -v1 -runtime\python.exe myinfer-v2-0528.py 0 "E:\codes\py39\RVC-beta\todo-songs" "E:\codes\py39\logs\mi-test\added_IVF677_Flat_nprobe_7.index" harvest "E:\codes\py39\RVC-beta\output" "E:\codes\py39\test-20230416b\weights\mi-test.pth" 0.66 cuda:0 True 3 0 1 0.33 -v2 -runtime\python.exe myinfer-v2-0528.py 0 "E:\codes\py39\RVC-beta\todo-songs" "E:\codes\py39\test-20230416b\logs\mi-test-v2\aadded_IVF677_Flat_nprobe_1_v2.index" harvest "E:\codes\py39\RVC-beta\output_v2" "E:\codes\py39\test-20230416b\weights\mi-test-v2.pth" 0.66 cuda:0 True 3 0 1 0.33 -""" -import os, sys, pdb, torch - -now_dir = os.getcwd() -sys.path.append(now_dir) -import sys -import torch -import tqdm as tq -from multiprocessing import cpu_count - - -class Config: - def __init__(self, device, is_half): - self.device = device - self.is_half = is_half - self.n_cpu = 0 - self.gpu_name = None - self.gpu_mem = None - self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config() - - def device_config(self) -> tuple: - if torch.cuda.is_available(): - i_device = int(self.device.split(":")[-1]) - self.gpu_name = torch.cuda.get_device_name(i_device) - if ( - ("16" in self.gpu_name and "V100" not in self.gpu_name.upper()) - or "P40" in self.gpu_name.upper() - or "1060" in self.gpu_name - or "1070" in self.gpu_name - or "1080" in self.gpu_name - ): - print("16系/10系显卡和P40强制单精度") - self.is_half = False - for config_file in ["32k.json", "40k.json", "48k.json"]: - with open(f"configs/{config_file}", "r") as f: - strr = f.read().replace("true", "false") - with open(f"configs/{config_file}", "w") as f: - f.write(strr) - with open("trainset_preprocess_pipeline_print.py", "r") as f: - strr = f.read().replace("3.7", "3.0") - with open("trainset_preprocess_pipeline_print.py", "w") as f: - f.write(strr) - else: - self.gpu_name = None - self.gpu_mem = int( - torch.cuda.get_device_properties(i_device).total_memory - / 1024 - / 1024 - / 1024 - + 0.4 - ) - if self.gpu_mem <= 4: - with open("trainset_preprocess_pipeline_print.py", "r") as f: - strr = f.read().replace("3.7", "3.0") - with open("trainset_preprocess_pipeline_print.py", "w") as f: - f.write(strr) - elif torch.backends.mps.is_available(): - print("没有发现支持的N卡, 使用MPS进行推理") - self.device = "mps" - else: - print("没有发现支持的N卡, 使用CPU进行推理") - self.device = "cpu" - self.is_half = True - - if self.n_cpu == 0: - self.n_cpu = cpu_count() - - if self.is_half: - # 6G显存配置 - x_pad = 3 - x_query = 10 - x_center = 60 - x_max = 65 - else: - # 5G显存配置 - x_pad = 1 - x_query = 6 - x_center = 38 - x_max = 41 - - if self.gpu_mem != None and self.gpu_mem <= 4: - x_pad = 1 - x_query = 5 - x_center = 30 - x_max = 32 - - return x_pad, x_query, x_center, x_max - - -f0up_key = sys.argv[1] -input_path = sys.argv[2] -index_path = sys.argv[3] -f0method = sys.argv[4] # harvest or pm -opt_path = sys.argv[5] -model_path = sys.argv[6] -index_rate = float(sys.argv[7]) -device = sys.argv[8] -is_half = sys.argv[9].lower() != "false" -filter_radius = int(sys.argv[10]) -resample_sr = int(sys.argv[11]) -rms_mix_rate = float(sys.argv[12]) -protect = float(sys.argv[13]) -print(sys.argv) -config = Config(device, is_half) -now_dir = os.getcwd() -sys.path.append(now_dir) -from vc_infer_pipeline 
import VC -from lib.infer_pack.models import ( - SynthesizerTrnMs256NSFsid, - SynthesizerTrnMs256NSFsid_nono, - SynthesizerTrnMs768NSFsid, - SynthesizerTrnMs768NSFsid_nono, -) -from my_utils import load_audio -from fairseq import checkpoint_utils -from scipy.io import wavfile - -hubert_model = None - - -def load_hubert(): - global hubert_model - models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task( - ["hubert_base.pt"], - suffix="", - ) - hubert_model = models[0] - hubert_model = hubert_model.to(device) - if is_half: - hubert_model = hubert_model.half() - else: - hubert_model = hubert_model.float() - hubert_model.eval() - - -def vc_single(sid, input_audio, f0_up_key, f0_file, f0_method, file_index, index_rate): - global tgt_sr, net_g, vc, hubert_model, version - if input_audio is None: - return "You need to upload an audio", None - f0_up_key = int(f0_up_key) - audio = load_audio(input_audio, 16000) - times = [0, 0, 0] - if hubert_model == None: - load_hubert() - if_f0 = cpt.get("f0", 1) - # audio_opt=vc.pipeline(hubert_model,net_g,sid,audio,times,f0_up_key,f0_method,file_index,file_big_npy,index_rate,if_f0,f0_file=f0_file) - audio_opt = vc.pipeline( - hubert_model, - net_g, - sid, - audio, - input_audio, - times, - f0_up_key, - f0_method, - file_index, - index_rate, - if_f0, - filter_radius, - tgt_sr, - resample_sr, - rms_mix_rate, - version, - protect, - f0_file=f0_file, - ) - print(times) - return audio_opt - - -def get_vc(model_path): - global n_spk, tgt_sr, net_g, vc, cpt, device, is_half, version - print("loading pth %s" % model_path) - cpt = torch.load(model_path, map_location="cpu") - tgt_sr = cpt["config"][-1] - cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] # n_spk - if_f0 = cpt.get("f0", 1) - version = cpt.get("version", "v1") - if version == "v1": - if if_f0 == 1: - net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=is_half) - else: - net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"]) - elif version == "v2": - if if_f0 == 1: # - net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=is_half) - else: - net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"]) - del net_g.enc_q - print(net_g.load_state_dict(cpt["weight"], strict=False)) # 不加这一行清不干净,真奇葩 - net_g.eval().to(device) - if is_half: - net_g = net_g.half() - else: - net_g = net_g.float() - vc = VC(tgt_sr, config) - n_spk = cpt["config"][-3] - # return {"visible": True,"maximum": n_spk, "__type__": "update"} - - -get_vc(model_path) -audios = os.listdir(input_path) -for file in tq.tqdm(audios): - if file.endswith(".wav"): - file_path = input_path + "/" + file - wav_opt = vc_single( - 0, file_path, f0up_key, None, f0method, index_path, index_rate - ) - out_path = opt_path + "/" + file - wavfile.write(out_path, tgt_sr, wav_opt) diff --git a/infer_uvr5.py b/infer_uvr5.py deleted file mode 100644 index 0ffdb5da0..000000000 --- a/infer_uvr5.py +++ /dev/null @@ -1,363 +0,0 @@ -import os, sys, torch, warnings, pdb - -now_dir = os.getcwd() -sys.path.append(now_dir) -from json import load as ll - -warnings.filterwarnings("ignore") -import librosa -import importlib -import numpy as np -import hashlib, math -from tqdm import tqdm -from lib.uvr5_pack.lib_v5 import spec_utils -from lib.uvr5_pack.utils import _get_name_params, inference -from lib.uvr5_pack.lib_v5.model_param_init import ModelParameters -import soundfile as sf -from lib.uvr5_pack.lib_v5.nets_new import CascadedNet -from lib.uvr5_pack.lib_v5 import nets_61968KB as nets - - -class _audio_pre_: - def __init__(self, agg, model_path, 
device, is_half): - self.model_path = model_path - self.device = device - self.data = { - # Processing Options - "postprocess": False, - "tta": False, - # Constants - "window_size": 512, - "agg": agg, - "high_end_process": "mirroring", - } - mp = ModelParameters("lib/uvr5_pack/lib_v5/modelparams/4band_v2.json") - model = nets.CascadedASPPNet(mp.param["bins"] * 2) - cpk = torch.load(model_path, map_location="cpu") - model.load_state_dict(cpk) - model.eval() - if is_half: - model = model.half().to(device) - else: - model = model.to(device) - - self.mp = mp - self.model = model - - def _path_audio_(self, music_file, ins_root=None, vocal_root=None, format="flac"): - if ins_root is None and vocal_root is None: - return "No save root." - name = os.path.basename(music_file) - if ins_root is not None: - os.makedirs(ins_root, exist_ok=True) - if vocal_root is not None: - os.makedirs(vocal_root, exist_ok=True) - X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {} - bands_n = len(self.mp.param["band"]) - # print(bands_n) - for d in range(bands_n, 0, -1): - bp = self.mp.param["band"][d] - if d == bands_n: # high-end band - ( - X_wave[d], - _, - ) = librosa.core.load( # 理论上librosa读取可能对某些音频有bug,应该上ffmpeg读取,但是太麻烦了弃坑 - music_file, - bp["sr"], - False, - dtype=np.float32, - res_type=bp["res_type"], - ) - if X_wave[d].ndim == 1: - X_wave[d] = np.asfortranarray([X_wave[d], X_wave[d]]) - else: # lower bands - X_wave[d] = librosa.core.resample( - X_wave[d + 1], - self.mp.param["band"][d + 1]["sr"], - bp["sr"], - res_type=bp["res_type"], - ) - # Stft of wave source - X_spec_s[d] = spec_utils.wave_to_spectrogram_mt( - X_wave[d], - bp["hl"], - bp["n_fft"], - self.mp.param["mid_side"], - self.mp.param["mid_side_b2"], - self.mp.param["reverse"], - ) - # pdb.set_trace() - if d == bands_n and self.data["high_end_process"] != "none": - input_high_end_h = (bp["n_fft"] // 2 - bp["crop_stop"]) + ( - self.mp.param["pre_filter_stop"] - self.mp.param["pre_filter_start"] - ) - input_high_end = X_spec_s[d][ - :, bp["n_fft"] // 2 - input_high_end_h : bp["n_fft"] // 2, : - ] - - X_spec_m = spec_utils.combine_spectrograms(X_spec_s, self.mp) - aggresive_set = float(self.data["agg"] / 100) - aggressiveness = { - "value": aggresive_set, - "split_bin": self.mp.param["band"][1]["crop_stop"], - } - with torch.no_grad(): - pred, X_mag, X_phase = inference( - X_spec_m, self.device, self.model, aggressiveness, self.data - ) - # Postprocess - if self.data["postprocess"]: - pred_inv = np.clip(X_mag - pred, 0, np.inf) - pred = spec_utils.mask_silence(pred, pred_inv) - y_spec_m = pred * X_phase - v_spec_m = X_spec_m - y_spec_m - - if ins_root is not None: - if self.data["high_end_process"].startswith("mirroring"): - input_high_end_ = spec_utils.mirroring( - self.data["high_end_process"], y_spec_m, input_high_end, self.mp - ) - wav_instrument = spec_utils.cmb_spectrogram_to_wave( - y_spec_m, self.mp, input_high_end_h, input_high_end_ - ) - else: - wav_instrument = spec_utils.cmb_spectrogram_to_wave(y_spec_m, self.mp) - print("%s instruments done" % name) - if format in ["wav", "flac"]: - sf.write( - os.path.join( - ins_root, - "instrument_{}_{}.{}".format(name, self.data["agg"], format), - ), - (np.array(wav_instrument) * 32768).astype("int16"), - self.mp.param["sr"], - ) # - else: - path = os.path.join( - ins_root, "instrument_{}_{}.wav".format(name, self.data["agg"]) - ) - sf.write( - path, - (np.array(wav_instrument) * 32768).astype("int16"), - self.mp.param["sr"], - ) - if os.path.exists(path): - os.system( - "ffmpeg -i %s -vn %s -q:a 2 
-y" - % (path, path[:-4] + ".%s" % format) - ) - if vocal_root is not None: - if self.data["high_end_process"].startswith("mirroring"): - input_high_end_ = spec_utils.mirroring( - self.data["high_end_process"], v_spec_m, input_high_end, self.mp - ) - wav_vocals = spec_utils.cmb_spectrogram_to_wave( - v_spec_m, self.mp, input_high_end_h, input_high_end_ - ) - else: - wav_vocals = spec_utils.cmb_spectrogram_to_wave(v_spec_m, self.mp) - print("%s vocals done" % name) - if format in ["wav", "flac"]: - sf.write( - os.path.join( - vocal_root, - "vocal_{}_{}.{}".format(name, self.data["agg"], format), - ), - (np.array(wav_vocals) * 32768).astype("int16"), - self.mp.param["sr"], - ) - else: - path = os.path.join( - vocal_root, "vocal_{}_{}.wav".format(name, self.data["agg"]) - ) - sf.write( - path, - (np.array(wav_vocals) * 32768).astype("int16"), - self.mp.param["sr"], - ) - if os.path.exists(path): - os.system( - "ffmpeg -i %s -vn %s -q:a 2 -y" - % (path, path[:-4] + ".%s" % format) - ) - - -class _audio_pre_new: - def __init__(self, agg, model_path, device, is_half): - self.model_path = model_path - self.device = device - self.data = { - # Processing Options - "postprocess": False, - "tta": False, - # Constants - "window_size": 512, - "agg": agg, - "high_end_process": "mirroring", - } - mp = ModelParameters("lib/uvr5_pack/lib_v5/modelparams/4band_v3.json") - nout = 64 if "DeReverb" in model_path else 48 - model = CascadedNet(mp.param["bins"] * 2, nout) - cpk = torch.load(model_path, map_location="cpu") - model.load_state_dict(cpk) - model.eval() - if is_half: - model = model.half().to(device) - else: - model = model.to(device) - - self.mp = mp - self.model = model - - def _path_audio_( - self, music_file, vocal_root=None, ins_root=None, format="flac" - ): # 3个VR模型vocal和ins是反的 - if ins_root is None and vocal_root is None: - return "No save root." 
- name = os.path.basename(music_file) - if ins_root is not None: - os.makedirs(ins_root, exist_ok=True) - if vocal_root is not None: - os.makedirs(vocal_root, exist_ok=True) - X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {} - bands_n = len(self.mp.param["band"]) - # print(bands_n) - for d in range(bands_n, 0, -1): - bp = self.mp.param["band"][d] - if d == bands_n: # high-end band - ( - X_wave[d], - _, - ) = librosa.core.load( # 理论上librosa读取可能对某些音频有bug,应该上ffmpeg读取,但是太麻烦了弃坑 - music_file, - bp["sr"], - False, - dtype=np.float32, - res_type=bp["res_type"], - ) - if X_wave[d].ndim == 1: - X_wave[d] = np.asfortranarray([X_wave[d], X_wave[d]]) - else: # lower bands - X_wave[d] = librosa.core.resample( - X_wave[d + 1], - self.mp.param["band"][d + 1]["sr"], - bp["sr"], - res_type=bp["res_type"], - ) - # Stft of wave source - X_spec_s[d] = spec_utils.wave_to_spectrogram_mt( - X_wave[d], - bp["hl"], - bp["n_fft"], - self.mp.param["mid_side"], - self.mp.param["mid_side_b2"], - self.mp.param["reverse"], - ) - # pdb.set_trace() - if d == bands_n and self.data["high_end_process"] != "none": - input_high_end_h = (bp["n_fft"] // 2 - bp["crop_stop"]) + ( - self.mp.param["pre_filter_stop"] - self.mp.param["pre_filter_start"] - ) - input_high_end = X_spec_s[d][ - :, bp["n_fft"] // 2 - input_high_end_h : bp["n_fft"] // 2, : - ] - - X_spec_m = spec_utils.combine_spectrograms(X_spec_s, self.mp) - aggresive_set = float(self.data["agg"] / 100) - aggressiveness = { - "value": aggresive_set, - "split_bin": self.mp.param["band"][1]["crop_stop"], - } - with torch.no_grad(): - pred, X_mag, X_phase = inference( - X_spec_m, self.device, self.model, aggressiveness, self.data - ) - # Postprocess - if self.data["postprocess"]: - pred_inv = np.clip(X_mag - pred, 0, np.inf) - pred = spec_utils.mask_silence(pred, pred_inv) - y_spec_m = pred * X_phase - v_spec_m = X_spec_m - y_spec_m - - if ins_root is not None: - if self.data["high_end_process"].startswith("mirroring"): - input_high_end_ = spec_utils.mirroring( - self.data["high_end_process"], y_spec_m, input_high_end, self.mp - ) - wav_instrument = spec_utils.cmb_spectrogram_to_wave( - y_spec_m, self.mp, input_high_end_h, input_high_end_ - ) - else: - wav_instrument = spec_utils.cmb_spectrogram_to_wave(y_spec_m, self.mp) - print("%s instruments done" % name) - if format in ["wav", "flac"]: - sf.write( - os.path.join( - ins_root, - "instrument_{}_{}.{}".format(name, self.data["agg"], format), - ), - (np.array(wav_instrument) * 32768).astype("int16"), - self.mp.param["sr"], - ) # - else: - path = os.path.join( - ins_root, "instrument_{}_{}.wav".format(name, self.data["agg"]) - ) - sf.write( - path, - (np.array(wav_instrument) * 32768).astype("int16"), - self.mp.param["sr"], - ) - if os.path.exists(path): - os.system( - "ffmpeg -i %s -vn %s -q:a 2 -y" - % (path, path[:-4] + ".%s" % format) - ) - if vocal_root is not None: - if self.data["high_end_process"].startswith("mirroring"): - input_high_end_ = spec_utils.mirroring( - self.data["high_end_process"], v_spec_m, input_high_end, self.mp - ) - wav_vocals = spec_utils.cmb_spectrogram_to_wave( - v_spec_m, self.mp, input_high_end_h, input_high_end_ - ) - else: - wav_vocals = spec_utils.cmb_spectrogram_to_wave(v_spec_m, self.mp) - print("%s vocals done" % name) - if format in ["wav", "flac"]: - sf.write( - os.path.join( - vocal_root, - "vocal_{}_{}.{}".format(name, self.data["agg"], format), - ), - (np.array(wav_vocals) * 32768).astype("int16"), - self.mp.param["sr"], - ) - else: - path = os.path.join( - vocal_root, 
"vocal_{}_{}.wav".format(name, self.data["agg"]) - ) - sf.write( - path, - (np.array(wav_vocals) * 32768).astype("int16"), - self.mp.param["sr"], - ) - if os.path.exists(path): - os.system( - "ffmpeg -i %s -vn %s -q:a 2 -y" - % (path, path[:-4] + ".%s" % format) - ) - - -if __name__ == "__main__": - device = "cuda" - is_half = True - # model_path = "uvr5_weights/2_HP-UVR.pth" - # model_path = "uvr5_weights/VR-DeEchoDeReverb.pth" - # model_path = "uvr5_weights/VR-DeEchoNormal.pth" - model_path = "uvr5_weights/DeEchoNormal.pth" - # pre_fun = _audio_pre_(model_path=model_path, device=device, is_half=True,agg=10) - pre_fun = _audio_pre_new(model_path=model_path, device=device, is_half=True, agg=10) - audio_path = "雪雪伴奏对消HP5.wav" - save_path = "opt" - pre_fun._path_audio_(audio_path, save_path, save_path) diff --git a/inference-presets.json b/inference-presets.json deleted file mode 100644 index 55ddab746..000000000 --- a/inference-presets.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "presets": [ - { - "name": "Default Preset", - "model": "", - "transpose": 0, - "audio_file": "", - "f0_method": "pm", - "crepe_hop_length": 160, - "median_filtering": 3, - "feature_path": "", - "auto_feature_path": "", - "search_feature_ratio": 0.88, - "resample": 0, - "volume_envelope": 1, - "protect_voiceless": 0.33, - "f0_file_path": "" - } - ] -} \ No newline at end of file diff --git a/install_Applio.bat b/install_Applio.bat deleted file mode 100644 index 1b7d18ff8..000000000 --- a/install_Applio.bat +++ /dev/null @@ -1,206 +0,0 @@ -@echo off -Title Instalador de Applio -chcp 65001 > nul -setlocal -color 0a - -::: -::: _ _ -::: /\ | (_) -::: / \ _ __ _ __ | |_ ___ -::: / /\ \ | '_ \| '_ \| | |/ _ \ -::: / ____ \| |_) | |_) | | | (_) | -::: /_/ \_\ .__/| .__/|_|_|\___/ -::: | | | | -::: |_| |_| -::: -::: Versión 1.0.0 - Desarrollado por Aitron -::: - -set "repoUrl=https://github.com/IAHispano/Applio-RVC-Fork" -set "repoFolder=Applio-RVC-Fork" -set "fixesFolder=Fixes" -set "localFixesPy=local_fixes.py" -set "URL_BASE=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main" -echo. -cls - -echo AVISO: Recuerda instalar las Microsoft C++ Build Tools y El Redistributable antes de continuar. -echo. -echo Enlace 1: https://aka.ms/vs/17/release/vs_BuildTools.exe -echo Enlace 2: https://aka.ms/vs/17/release/vc_redist.x64.exe -echo. -pause -cls - -for /f "delims=: tokens=*" %%A in ('findstr /b ":::" "%~f0"') do @echo(%%A -echo. - -echo Verificando si Git está instalado... -git --version > nul 2>&1 -if %errorlevel% equ 0 ( - echo Git está instalado. Continuando... -) else ( - echo Git no está instalado. Saliendo. - echo Presiona Enter para cerrar el script e ir a la página de descarga. - pause - start https://github.com/git-for-windows/git/releases/download/v2.42.0.windows.1/Git-2.42.0-64-bit.exe - exit -) - -echo. - -echo Verificando si Python 3.9.8 está instalado... -for /f %%A in ('python -c "import sys; print(sys.version)" 2^>^&1') do ( - set "py_version=%%A" -) - -echo %py_version% | find "3.9.8" > nul -if %errorlevel% equ 0 ( - echo Python 3.9.8 está instalado. Continuando... -) else ( - echo Python 3.9.8 no está instalado. Saliendo. - echo Presiona Enter para cerrar el script e ir a la página de descarga. - pause - start https://www.python.org/ftp/python/3.9.8/python-3.9.8-amd64.exe - exit -) - -echo. -cls - -echo Requisitos satisfechos, continuando... -echo. - -echo Creando carpeta para el repositorio... -mkdir "%repoFolder%" -cd "%repoFolder%" -echo. - -echo Clonando el repositorio... 
-git clone "%repoUrl%" . -echo. - -echo Verificando si el archivo local_fixes.py existe en la carpeta Fixes... -if exist "%fixesFolder%\%localFixesPy%" ( - echo Ejecutando el archivo... - python "%fixesFolder%\%localFixesPy%" -) else ( - echo El archivo "%localFixesBat%" no se encontró en la carpeta "Fixes". -) - -echo Pasando a descargar los modelos... -echo. - -echo AVISO -echo En este punto, se recomienda desactivar el antivirus o el firewall, ya que existe la posibilidad de que ocurran errores al descargar los modelos preentrenados. -echo Si has desactivado el antivirus o el firewall, presiona la tecla "Enter". -pause -cls - -echo Descargando la carpeta "pretrained"... -cd "pretrained" -curl -LJO "%URL_BASE%/pretrained/D32k.pth" -curl -LJO "%URL_BASE%/pretrained/D40k.pth" -curl -LJO "%URL_BASE%/pretrained/D48k.pth" -curl -LJO "%URL_BASE%/pretrained/G32k.pth" -curl -LJO "%URL_BASE%/pretrained/G40k.pth" -curl -LJO "%URL_BASE%/pretrained/G48k.pth" -curl -LJO "%URL_BASE%/pretrained/f0D32k.pth" -curl -LJO "%URL_BASE%/pretrained/f0D40k.pth" -curl -LJO "%URL_BASE%/pretrained/f0D48k.pth" -curl -LJO "%URL_BASE%/pretrained/f0G32k.pth" -curl -LJO "%URL_BASE%/pretrained/f0G40k.pth" -curl -LJO "%URL_BASE%/pretrained/f0G48k.pth" -cd ".." -echo. -cls - -echo Descargando la carpeta "pretrained_v2"... -cd "pretrained_v2" -curl -LJO "%URL_BASE%/pretrained_v2/D32k.pth" -curl -LJO "%URL_BASE%/pretrained_v2/D40k.pth" -curl -LJO "%URL_BASE%/pretrained_v2/D48k.pth" -curl -LJO "%URL_BASE%/pretrained_v2/G32k.pth" -curl -LJO "%URL_BASE%/pretrained_v2/G40k.pth" -curl -LJO "%URL_BASE%/pretrained_v2/G48k.pth" -curl -LJO "%URL_BASE%/pretrained_v2/f0D32k.pth" -curl -LJO "%URL_BASE%/pretrained_v2/f0D40k.pth" -curl -LJO "%URL_BASE%/pretrained_v2/f0D48k.pth" -curl -LJO "%URL_BASE%/pretrained_v2/f0G32k.pth" -curl -LJO "%URL_BASE%/pretrained_v2/f0G40k.pth" -curl -LJO "%URL_BASE%/pretrained_v2/f0G48k.pth" -cd ".." -echo. -cls - -echo Descargando la carpeta "uvr5_weights"... -cd "uvr5_weights" -curl -LJO "%URL_BASE%/uvr5_weights/HP2_all_vocals.pth" -curl -LJO "%URL_BASE%/uvr5_weights/HP3_all_vocals.pth" -curl -LJO "%URL_BASE%/uvr5_weights/HP5_only_main_vocal.pth" -curl -LJO "%URL_BASE%/uvr5_weights/VR-DeEchoAggressive.pth" -curl -LJO "%URL_BASE%/uvr5_weights/VR-DeEchoDeReverb.pth" -curl -LJO "%URL_BASE%/uvr5_weights/VR-DeEchoNormal.pth" -cd ".." -echo. -cls - -echo Descargando el archivo rmvpe.pt... -curl -LJO "%URL_BASE%/rmvpe.pt" -echo. -cls - -echo Descargando el archivo ffmpeg.exe... -curl -LJO "%URL_BASE%/ffmpeg.exe" -echo. -cls - -echo Descargando el archivo ffprobe.exe... -curl -LJO "%URL_BASE%/ffprobe.exe" -echo. -echo Descargas completadas. Procediendo con las dependencias. -cls - -echo ¿Tienes una GPU? -echo Esto determinará si se descargan dependencias ligeras (sin GPU) o pesadas (con GPU). -echo. - - -set /p op=Escribe "Si" o "No": -if "%op%"=="Si" goto gpu -if "%op%"=="No" goto non_gpu - - -:gpu -echo Se ha seleccionado GPU, continuando... -echo. -echo Descargando las dependencias... -echo. -pip install -r requirements-gpu.txt -pip uninstall torch torchvision torchaudio -y -echo. -echo NOTA: El ordenador puede experimentar lentitud durante este proceso; no te preocupes. -echo. -pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118 -endlocal -echo. -cls -echo ¡Applio ha sido descargado! -echo. -echo Presiona Enter para salir. -pause -exit - -:non_gpu -echo No se ha seleccionado GPU, continuando... -echo. -echo Descargando las dependencias... -echo. 
-pip install -r requirements.txt -echo. -echo ¡Applio ha sido descargado! -endlocal -echo. -pause -exit diff --git a/lib/globals/globals.py b/lib/globals/globals.py deleted file mode 100644 index d0da59d56..000000000 --- a/lib/globals/globals.py +++ /dev/null @@ -1,5 +0,0 @@ -DoFormant: bool = False -Quefrency: float = 8.0 -Timbre: float = 1.2 - -NotesOrHertz: bool = False \ No newline at end of file diff --git a/lib/infer_pack/attentions.py b/lib/infer_pack/attentions.py deleted file mode 100644 index 05501be18..000000000 --- a/lib/infer_pack/attentions.py +++ /dev/null @@ -1,417 +0,0 @@ -import copy -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - -from lib.infer_pack import commons -from lib.infer_pack import modules -from lib.infer_pack.modules import LayerNorm - - -class Encoder(nn.Module): - def __init__( - self, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size=1, - p_dropout=0.0, - window_size=10, - **kwargs - ): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append( - MultiHeadAttention( - hidden_channels, - hidden_channels, - n_heads, - p_dropout=p_dropout, - window_size=window_size, - ) - ) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append( - FFN( - hidden_channels, - hidden_channels, - filter_channels, - kernel_size, - p_dropout=p_dropout, - ) - ) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__( - self, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size=1, - p_dropout=0.0, - proximal_bias=False, - proximal_init=True, - **kwargs - ): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append( - MultiHeadAttention( - hidden_channels, - hidden_channels, - n_heads, - p_dropout=p_dropout, - proximal_bias=proximal_bias, - proximal_init=proximal_init, - ) - ) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append( - MultiHeadAttention( - hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout - ) - ) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append( - FFN( - hidden_channels, 
- hidden_channels, - filter_channels, - kernel_size, - p_dropout=p_dropout, - causal=True, - ) - ) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to( - device=x.device, dtype=x.dtype - ) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__( - self, - channels, - out_channels, - n_heads, - p_dropout=0.0, - window_size=None, - heads_share=True, - block_length=None, - proximal_bias=False, - proximal_init=False, - ): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter( - torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) - * rel_stddev - ) - self.emb_rel_v = nn.Parameter( - torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) - * rel_stddev - ) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert ( - t_s == t_t - ), "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys( - query / math.sqrt(self.k_channels), key_relative_embeddings - ) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." 
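        # Note (added commentary, not in the original file): the proximal bias added on the next
        # line is produced by _attention_bias_proximal below, which returns -log(1 + |i - j|) for
        # positions i and j; it nudges decoder self-attention toward nearby frames and is only
        # defined when query and key lengths match, hence the assert above.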
- scores = scores + self._attention_bias_proximal(t_s).to( - device=scores.device, dtype=scores.dtype - ) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert ( - t_s == t_t - ), "Local attention is only available for self-attention." - block_mask = ( - torch.ones_like(scores) - .triu(-self.block_length) - .tril(self.block_length) - ) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings( - self.emb_rel_v, t_s - ) - output = output + self._matmul_with_relative_values( - relative_weights, value_relative_embeddings - ) - output = ( - output.transpose(2, 3).contiguous().view(b, d, t_t) - ) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]), - ) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[ - :, slice_start_position:slice_end_position - ] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad( - x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]) - ) - - # Reshape and slice out the padded elements. - x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[ - :, :, :length, length - 1 : - ] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad( - x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]) - ) - x_flat = x.view([batch, heads, length**2 + length * (length - 1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. 
- Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__( - self, - in_channels, - out_channels, - filter_channels, - kernel_size, - p_dropout=0.0, - activation=None, - causal=False, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/lib/infer_pack/commons.py b/lib/infer_pack/commons.py deleted file mode 100644 index 54470986f..000000000 --- a/lib/infer_pack/commons.py +++ /dev/null @@ -1,166 +0,0 @@ -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size * dilation - dilation) / 2) - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += ( - 0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q) - ) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def slice_segments2(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * 
ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / ( - num_timescales - 1 - ) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment - ) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2, 3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): - parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1.0 / norm_type) - return total_norm diff --git a/lib/infer_pack/models.py b/lib/infer_pack/models.py deleted file mode 100644 index 3665d03bc..000000000 --- a/lib/infer_pack/models.py +++ /dev/null @@ -1,1142 +0,0 @@ -import math, pdb, os -from time import time as ttime -import torch -from torch import nn -from torch.nn import functional as F -from 
lib.infer_pack import modules -from lib.infer_pack import attentions -from lib.infer_pack import commons -from lib.infer_pack.commons import init_weights, get_padding -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from lib.infer_pack.commons import init_weights -import numpy as np -from lib.infer_pack import commons - - -class TextEncoder256(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class TextEncoder768(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(768, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0, - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - 
modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True, - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - def remove_weight_norm(self): - for i in range(self.n_flows): - self.flows[i * 2].remove_weight_norm() - - -class PosteriorEncoder(nn.Module): - def __init__( - self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class Generator(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=0, - ): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - 
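As context for the SineGen class below: it builds its excitation by accumulating per-sample phase from the F0 contour and taking the sine of that phase, with harmonics formed as integer multiples of F0 and a voiced/unvoiced mask gating the result. A minimal sketch of that core idea follows — the name sine_from_f0, the 40 kHz default and the 220 Hz test input are illustrative assumptions only, and the sketch omits the random initial phase, the harmonic stack and the additive noise that the actual module uses:

import math
import torch

def sine_from_f0(f0, sample_rate=40000, sine_amp=0.1, voiced_threshold=0.0):
    # f0: [batch, length] fundamental frequency per sample in Hz, 0 where unvoiced
    rad = f0 / sample_rate                      # per-sample phase increment, in cycles
    phase = torch.cumsum(rad, dim=1)            # accumulated phase
    sine = sine_amp * torch.sin(2 * math.pi * phase)
    uv = (f0 > voiced_threshold).float()        # voiced/unvoiced mask
    return sine * uv, uv

f0 = torch.full((1, 16000), 220.0)              # hypothetical 220 Hz contour
wave, uv = sine_from_f0(f0)                     # wave: [1, 16000] sine excitation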
-class SineGen(torch.nn.Module): - """Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__( - self, - samp_rate, - harmonic_num=0, - sine_amp=0.1, - noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False, - ): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - - def _f02uv(self, f0): - # generate uv signal - uv = torch.ones_like(f0) - uv = uv * (f0 > self.voiced_threshold) - return uv - - def forward(self, f0, upp): - """sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0 = f0[:, None].transpose(1, 2) - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) - # fundamental component - f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): - f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( - idx + 2 - ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic - rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化 - rand_ini = torch.rand( - f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device - ) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化 - tmp_over_one *= upp - tmp_over_one = F.interpolate( - tmp_over_one.transpose(2, 1), - scale_factor=upp, - mode="linear", - align_corners=True, - ).transpose(2, 1) - rad_values = F.interpolate( - rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose( - 2, 1 - ) ####### - tmp_over_one %= 1 - tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - sine_waves = torch.sin( - torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi - ) - sine_waves = sine_waves * self.sine_amp - uv = self._f02uv(f0) - uv = F.interpolate( - uv.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose(2, 1) - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = 
SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__( - self, - sampling_rate, - harmonic_num=0, - sine_amp=0.1, - add_noise_std=0.003, - voiced_threshod=0, - is_half=True, - ): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - self.is_half = is_half - # to produce sine waveforms - self.l_sin_gen = SineGen( - sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod - ) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x, upp=None): - sine_wavs, uv, _ = self.l_sin_gen(x, upp) - if self.is_half: - sine_wavs = sine_wavs.half() - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - return sine_merge, None, None # noise, uv - - -class GeneratorNSF(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - sr, - is_half=False, - ): - super(GeneratorNSF, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) - self.m_source = SourceModuleHnNSF( - sampling_rate=sr, harmonic_num=0, is_half=is_half - ) - self.noise_convs = nn.ModuleList() - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - c_cur = upsample_initial_channel // (2 ** (i + 1)) - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - if i + 1 < len(upsample_rates): - stride_f0 = np.prod(upsample_rates[i + 1 :]) - self.noise_convs.append( - Conv1d( - 1, - c_cur, - kernel_size=stride_f0 * 2, - stride=stride_f0, - padding=stride_f0 // 2, - ) - ) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - self.upp = np.prod(upsample_rates) - - def forward(self, x, f0, g=None): - har_source, noi_source, uv = self.m_source(f0, self.upp) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - 
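# Note: weight_norm is stripped from the transposed-conv upsamplers above and from each
# residual block below; this is typically invoked once before inference or export so the
# normalization is folded into plain weights.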
for l in self.resblocks: - l.remove_weight_norm() - - -sr2sr = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - - -class SynthesizerTrnMs256NSFsid(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds - ): # 这里ds是id,[bs,1] - # print(1,pitch.shape)#[bs,t] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length) - pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) - # print(-2,pitchf.shape,z_slice.shape) - o = self.dec(z_slice, pitchf, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - if rate: - head = int(z_p.shape[2] * rate) - z_p = z_p[:, :, -head:] - x_mask = x_mask[:, :, -head:] - nsff0 = nsff0[:, -head:] - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec(z * x_mask, nsff0, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs768NSFsid(nn.Module): - def __init__( - self, 
- spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder768( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds - ): # 这里ds是id,[bs,1] - # print(1,pitch.shape)#[bs,t] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length) - pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) - # print(-2,pitchf.shape,z_slice.shape) - o = self.dec(z_slice, pitchf, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - if rate: - head = int(z_p.shape[2] * rate) - z_p = z_p[:, :, -head:] - x_mask = x_mask[:, :, -head:] - nsff0 = nsff0[:, -head:] - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec(z * x_mask, nsff0, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs256NSFsid_nono(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - 
resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr=None, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=False, - ) - self.dec = Generator( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - o = self.dec(z_slice, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, sid, rate=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - if rate: - head = int(z_p.shape[2] * rate) - z_p = z_p[:, :, -head:] - x_mask = x_mask[:, :, -head:] - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec(z * x_mask, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs768NSFsid_nono(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr=None, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes 
- self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder768( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=False, - ) - self.dec = Generator( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - o = self.dec(z_slice, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, sid, rate=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - if rate: - head = int(z_p.shape[2] * rate) - z_p = z_p[:, :, -head:] - x_mask = x_mask[:, :, -head:] - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec(z * x_mask, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11, 17] - # periods = [3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class MultiPeriodDiscriminatorV2(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminatorV2, self).__init__() - # periods = [2, 3, 5, 7, 11, 17] - periods = [2, 3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in 
enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - 32, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 32, - 128, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 128, - 512, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 512, - 1024, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 1024, - 1024, - (kernel_size, 1), - 1, - padding=(get_padding(kernel_size, 1), 0), - ) - ), - ] - ) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap diff --git a/lib/infer_pack/models_dml.py b/lib/infer_pack/models_dml.py deleted file mode 100644 index 958d7b292..000000000 --- a/lib/infer_pack/models_dml.py +++ /dev/null @@ -1,1124 +0,0 @@ -import math, pdb, os -from time import time as ttime -import torch -from torch import nn -from torch.nn import functional as F -from lib.infer_pack import modules -from lib.infer_pack import attentions -from lib.infer_pack import commons -from lib.infer_pack.commons import init_weights, get_padding -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from lib.infer_pack.commons import init_weights -import numpy as np -from lib.infer_pack import commons - - -class TextEncoder256(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - 
): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class TextEncoder768(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(768, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0, - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True, - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - def remove_weight_norm(self): - for i in range(self.n_flows): - self.flows[i * 2].remove_weight_norm() - - -class PosteriorEncoder(nn.Module): - 
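# Note: PosteriorEncoder projects the input spectrogram to hidden_channels with a 1x1 conv,
# encodes it with a WaveNet-style stack (modules.WN), and predicts a mean m and log-std logs;
# its forward() then samples the latent with the reparameterization trick, roughly
#     z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask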
def __init__( - self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class Generator(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=0, - ): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class SineGen(torch.nn.Module): - """Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced 
- segment is always sin(np.pi) or cos(0) - """ - - def __init__( - self, - samp_rate, - harmonic_num=0, - sine_amp=0.1, - noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False, - ): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - - def _f02uv(self, f0): - # generate uv signal - uv = torch.ones_like(f0) - uv = uv * (f0 > self.voiced_threshold) - return uv.float() - - def forward(self, f0, upp): - """sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0 = f0[:, None].transpose(1, 2) - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) - # fundamental component - f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): - f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( - idx + 2 - ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic - rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化 - rand_ini = torch.rand( - f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device - ) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化 - tmp_over_one *= upp - tmp_over_one = F.interpolate( - tmp_over_one.transpose(2, 1), - scale_factor=upp, - mode="linear", - align_corners=True, - ).transpose(2, 1) - rad_values = F.interpolate( - rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose( - 2, 1 - ) ####### - tmp_over_one %= 1 - tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - sine_waves = torch.sin( - torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi - ) - sine_waves = sine_waves * self.sine_amp - uv = self._f02uv(f0) - uv = F.interpolate( - uv.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose(2, 1) - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__( - self, - sampling_rate, - harmonic_num=0, - sine_amp=0.1, - add_noise_std=0.003, - voiced_threshod=0, - is_half=True, - ): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - self.is_half = is_half - # to produce sine waveforms - self.l_sin_gen = SineGen( - sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod - ) - - # to 
merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x, upp=None): - sine_wavs, uv, _ = self.l_sin_gen(x, upp) - if self.is_half: - sine_wavs = sine_wavs.half() - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - return sine_merge, None, None # noise, uv - - -class GeneratorNSF(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - sr, - is_half=False, - ): - super(GeneratorNSF, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) - self.m_source = SourceModuleHnNSF( - sampling_rate=sr, harmonic_num=0, is_half=is_half - ) - self.noise_convs = nn.ModuleList() - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - c_cur = upsample_initial_channel // (2 ** (i + 1)) - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - if i + 1 < len(upsample_rates): - stride_f0 = np.prod(upsample_rates[i + 1 :]) - self.noise_convs.append( - Conv1d( - 1, - c_cur, - kernel_size=stride_f0 * 2, - stride=stride_f0, - padding=stride_f0 // 2, - ) - ) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - self.upp = np.prod(upsample_rates) - - def forward(self, x, f0, g=None): - har_source, noi_source, uv = self.m_source(f0, self.upp) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -sr2sr = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - - -class SynthesizerTrnMs256NSFsid(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - 
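# Note: the sr argument may arrive either as an int or as one of the sr2sr keys
# ("32k", "40k", "48k"); the check above converts the string form, e.g. sr2sr["40k"] == 40000.
# An isinstance(sr, str) test would express the same intent more directly.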
self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds - ): # 这里ds是id,[bs,1] - # print(1,pitch.shape)#[bs,t] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length) - pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) - # print(-2,pitchf.shape,z_slice.shape) - o = self.dec(z_slice, pitchf, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs768NSFsid(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock 
- self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder768( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds - ): # 这里ds是id,[bs,1] - # print(1,pitch.shape)#[bs,t] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length) - pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) - # print(-2,pitchf.shape,z_slice.shape) - o = self.dec(z_slice, pitchf, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs256NSFsid_nono(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr=None, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # 
self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=False, - ) - self.dec = Generator( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - o = self.dec(z_slice, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, sid, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs768NSFsid_nono(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr=None, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder768( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=False, - ) - self.dec = Generator( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, 
gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - o = self.dec(z_slice, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, sid, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11, 17] - # periods = [3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class MultiPeriodDiscriminatorV2(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminatorV2, self).__init__() - # periods = [2, 3, 5, 7, 11, 17] - periods = [2, 3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, 
x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - 32, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 32, - 128, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 128, - 512, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 512, - 1024, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 1024, - 1024, - (kernel_size, 1), - 1, - padding=(get_padding(kernel_size, 1), 0), - ) - ), - ] - ) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap diff --git a/lib/infer_pack/models_onnx.py b/lib/infer_pack/models_onnx.py deleted file mode 100644 index 963e67b29..000000000 --- a/lib/infer_pack/models_onnx.py +++ /dev/null @@ -1,819 +0,0 @@ -import math, pdb, os -from time import time as ttime -import torch -from torch import nn -from torch.nn import functional as F -from lib.infer_pack import modules -from lib.infer_pack import attentions -from lib.infer_pack import commons -from lib.infer_pack.commons import init_weights, get_padding -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from lib.infer_pack.commons import init_weights -import numpy as np -from lib.infer_pack import commons - - -class TextEncoder256(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - 
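A brief illustrative sketch (not part of the removed files) of the "1d to 2d" step in DiscriminatorP.forward above: the waveform is reflect-padded to a multiple of the period and folded so that each row of the 2-D view holds samples spaced `period` apart. The helper name fold_by_period is hypothetical.

import torch
import torch.nn.functional as F

def fold_by_period(x, period):
    # x: [batch, channels, time]
    b, c, t = x.shape
    if t % period != 0:  # pad to a multiple of the period first
        n_pad = period - (t % period)
        x = F.pad(x, (0, n_pad), "reflect")
        t = t + n_pad
    return x.view(b, c, t // period, period)

patch = fold_by_period(torch.randn(1, 1, 16000), period=5)  # -> [1, 1, 3200, 5]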
x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class TextEncoder768(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(768, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0, - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True, - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - def remove_weight_norm(self): - for i in range(self.n_flows): - self.flows[i * 2].remove_weight_norm() - - -class PosteriorEncoder(nn.Module): - def __init__( - self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * 
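A minimal sketch of the mean-only affine-coupling idea behind ResidualCouplingBlock above: channels are split in half, one half predicts a shift for the other, and inversion subtracts the same shift, so the flow is exactly invertible. ToyCouplingLayer and shift_net are illustrative stand-ins for the WN-based coupling layer.

import torch
import torch.nn as nn

class ToyCouplingLayer(nn.Module):
    def __init__(self, channels, hidden=64):
        super().__init__()
        assert channels % 2 == 0
        self.half = channels // 2
        # predicts a per-position shift for x1 from x0 (mean-only coupling)
        self.shift_net = nn.Sequential(
            nn.Conv1d(self.half, hidden, 3, padding=1),
            nn.ReLU(),
            nn.Conv1d(hidden, self.half, 3, padding=1),
        )

    def forward(self, x, reverse=False):
        x0, x1 = torch.split(x, [self.half, self.half], dim=1)
        m = self.shift_net(x0)
        x1 = x1 - m if reverse else x1 + m  # exactly invertible
        return torch.cat([x0, x1], dim=1)

flow = ToyCouplingLayer(channels=8)
x = torch.randn(2, 8, 16)                  # [batch, channels, time]
z = flow(x)                                # forward
x_rec = flow(z, reverse=True)              # exact inverse
print(torch.allclose(x, x_rec, atol=1e-6)) # True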
torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class Generator(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=0, - ): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class SineGen(torch.nn.Module): - """Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__( - self, - samp_rate, - harmonic_num=0, - sine_amp=0.1, - noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False, - ): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - - def _f02uv(self, f0): - # generate uv signal - uv = torch.ones_like(f0) - uv = uv * (f0 > self.voiced_threshold) - return uv - - def forward(self, f0, upp): - """sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0 = f0[:, None].transpose(1, 2) - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) - # 
fundamental component - f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): - f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( - idx + 2 - ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic - rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化 - rand_ini = torch.rand( - f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device - ) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化 - tmp_over_one *= upp - tmp_over_one = F.interpolate( - tmp_over_one.transpose(2, 1), - scale_factor=upp, - mode="linear", - align_corners=True, - ).transpose(2, 1) - rad_values = F.interpolate( - rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose( - 2, 1 - ) ####### - tmp_over_one %= 1 - tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - sine_waves = torch.sin( - torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi - ) - sine_waves = sine_waves * self.sine_amp - uv = self._f02uv(f0) - uv = F.interpolate( - uv.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose(2, 1) - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__( - self, - sampling_rate, - harmonic_num=0, - sine_amp=0.1, - add_noise_std=0.003, - voiced_threshod=0, - is_half=True, - ): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - self.is_half = is_half - # to produce sine waveforms - self.l_sin_gen = SineGen( - sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod - ) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x, upp=None): - sine_wavs, uv, _ = self.l_sin_gen(x, upp) - if self.is_half: - sine_wavs = sine_wavs.half() - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - return sine_merge, None, None # noise, uv - - -class GeneratorNSF(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - sr, - is_half=False, - ): - super(GeneratorNSF, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) - self.m_source = SourceModuleHnNSF( - sampling_rate=sr, harmonic_num=0, is_half=is_half - ) - self.noise_convs = 
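A compact numpy sketch of the excitation signal SineGen.forward above produces: per-frame F0 is upsampled to the waveform rate, phase is accumulated per harmonic, and noise replaces the sinusoid in unvoiced (f0 == 0) regions. Parameter names upp, sine_amp and noise_std mirror the class; the nearest-neighbour upsampling is a simplification of its interpolation.

import numpy as np

def sine_excitation(f0_frames, upp, sr, n_harmonics=1, sine_amp=0.1, noise_std=0.003):
    f0 = np.repeat(f0_frames, upp)                  # frame rate -> sample rate
    uv = (f0 > 0).astype(np.float64)                # voiced/unvoiced mask
    out = np.zeros_like(f0)
    for h in range(1, n_harmonics + 1):
        phase = 2 * np.pi * np.cumsum(h * f0 / sr)  # accumulated phase per harmonic
        out += sine_amp * np.sin(phase)
    noise = (uv * noise_std + (1 - uv) * sine_amp / 3) * np.random.randn(len(f0))
    return out * uv + noise                         # sine where voiced, noise elsewhere

# e.g. 100 frames of 220 Hz pitch, hop of 256 samples at 40 kHz:
wave = sine_excitation(np.full(100, 220.0), upp=256, sr=40000)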
nn.ModuleList() - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - c_cur = upsample_initial_channel // (2 ** (i + 1)) - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - if i + 1 < len(upsample_rates): - stride_f0 = np.prod(upsample_rates[i + 1 :]) - self.noise_convs.append( - Conv1d( - 1, - c_cur, - kernel_size=stride_f0 * 2, - stride=stride_f0, - padding=stride_f0 // 2, - ) - ) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - self.upp = np.prod(upsample_rates) - - def forward(self, x, f0, g=None): - har_source, noi_source, uv = self.m_source(f0, self.upp) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -sr2sr = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - - -class SynthesizerTrnMsNSFsidM(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - version, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - if version == "v1": - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - else: - self.enc_p = TextEncoder768( - inter_channels, - 
hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - self.speaker_map = None - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def construct_spkmixmap(self, n_speaker): - self.speaker_map = torch.zeros((n_speaker, 1, 1, self.gin_channels)) - for i in range(n_speaker): - self.speaker_map[i] = self.emb_g(torch.LongTensor([[i]])) - self.speaker_map = self.speaker_map.unsqueeze(0) - - def forward(self, phone, phone_lengths, pitch, nsff0, g, rnd, max_len=None): - if self.speaker_map is not None: # [N, S] * [S, B, 1, H] - g = g.reshape((g.shape[0], g.shape[1], 1, 1, 1)) # [N, S, B, 1, 1] - g = g * self.speaker_map # [N, S, B, 1, H] - g = torch.sum(g, dim=1) # [N, 1, B, 1, H] - g = g.transpose(0, -1).transpose(0, -2).squeeze(0) # [B, H, N] - else: - g = g.unsqueeze(0) - g = self.emb_g(g).transpose(1, 2) - - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * rnd) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) - return o - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11, 17] - # periods = [3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class MultiPeriodDiscriminatorV2(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminatorV2, self).__init__() - # periods = [2, 3, 5, 7, 11, 17] - periods = [2, 3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class 
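A hedged sketch of the speaker-mixing idea behind construct_spkmixmap above: per-frame weights over all speakers multiply the embedding table and are summed into one conditioning vector per frame. Shapes are simplified to [T, S] weights and [S, H] embeddings; the exported model carries extra singleton axes, but the arithmetic is the same weighted sum.

import torch
import torch.nn as nn

n_speakers, gin_channels, T = 4, 256, 10
emb_g = nn.Embedding(n_speakers, gin_channels)

# embedding table for every speaker id, analogous to the precomputed speaker_map
speaker_table = emb_g(torch.arange(n_speakers))               # [S, H]

weights = torch.softmax(torch.randn(T, n_speakers), dim=-1)   # per-frame mix [T, S]
g = weights @ speaker_table                                   # [T, H] mixed conditioning
g = g.transpose(0, 1).unsqueeze(0)                            # [1, H, T] as the decoder expects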
DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - 32, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 32, - 128, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 128, - 512, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 512, - 1024, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 1024, - 1024, - (kernel_size, 1), - 1, - padding=(get_padding(kernel_size, 1), 0), - ) - ), - ] - ) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap diff --git a/lib/infer_pack/modules.py b/lib/infer_pack/modules.py deleted file mode 100644 index c83289df7..000000000 --- a/lib/infer_pack/modules.py +++ /dev/null @@ -1,522 +0,0 @@ -import copy -import math -import numpy as np -import scipy -import torch -from torch import nn -from torch.nn import functional as F - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm - -from lib.infer_pack import commons -from lib.infer_pack.commons import init_weights, get_padding -from lib.infer_pack.transforms import piecewise_rational_quadratic_transform - - -LRELU_SLOPE = 0.1 - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - -class ConvReluNorm(nn.Module): - def __init__( - self, - in_channels, - hidden_channels, - out_channels, - kernel_size, - n_layers, - p_dropout, - ): - super().__init__() - self.in_channels = in_channels - 
self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." - - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append( - nn.Conv1d( - in_channels, hidden_channels, kernel_size, padding=kernel_size // 2 - ) - ) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout)) - for _ in range(n_layers - 1): - self.conv_layers.append( - nn.Conv1d( - hidden_channels, - hidden_channels, - kernel_size, - padding=kernel_size // 2, - ) - ) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dialted and Depth-Separable Convolution - """ - - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size**i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append( - nn.Conv1d( - channels, - channels, - kernel_size, - groups=channels, - dilation=dilation, - padding=padding, - ) - ) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + y - return x * x_mask - - -class WN(torch.nn.Module): - def __init__( - self, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - p_dropout=0, - ): - super(WN, self).__init__() - assert kernel_size % 2 == 1 - self.hidden_channels = hidden_channels - self.kernel_size = (kernel_size,) - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d( - gin_channels, 2 * hidden_channels * n_layers, 1 - ) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight") - - for i in range(n_layers): - dilation = dilation_rate**i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d( - hidden_channels, - 2 * hidden_channels, - kernel_size, - dilation=dilation, - padding=padding, - ) - in_layer = torch.nn.utils.weight_norm(in_layer, name="weight") - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, 
res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight") - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) - - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:, : self.hidden_channels, :] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:, self.hidden_channels :, :] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]), - ) - ), - ] - ) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - ] - ) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]), - ) - ), - ] - ) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not 
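A tiny sketch of the gated activation that commons.fused_add_tanh_sigmoid_multiply computes inside WN.forward above: the conditioned pre-activation is split in half along the channel axis, one half goes through tanh and the other through sigmoid, and the two are multiplied (the WaveNet gating unit). Names here are illustrative.

import torch

def gated_activation(x_in, g_l, hidden):
    z = x_in + g_l
    t_act = torch.tanh(z[:, :hidden, :])      # filter half
    s_act = torch.sigmoid(z[:, hidden:, :])   # gate half
    return t_act * s_act

acts = gated_activation(torch.randn(2, 128, 50), torch.randn(2, 128, 50), hidden=64)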
None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels, 1)) - self.logs = nn.Parameter(torch.zeros(channels, 1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1, 2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False, - ): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=p_dropout, - gin_channels=gin_channels, - ) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels] * 2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1, 2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class ConvFlow(nn.Module): - def __init__( - self, - in_channels, - filter_channels, - kernel_size, - n_layers, - num_bins=10, - tail_bound=5.0, - ): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0) - self.proj = nn.Conv1d( - filter_channels, self.half_channels * (num_bins * 3 - 1), 1 - ) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) - h = self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, 
t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] - - unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt( - self.filter_channels - ) - unnormalized_derivatives = h[..., 2 * self.num_bins :] - - x1, logabsdet = piecewise_rational_quadratic_transform( - x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails="linear", - tail_bound=self.tail_bound, - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1, 2]) - if not reverse: - return x, logdet - else: - return x diff --git a/lib/infer_pack/modules/F0Predictor/DioF0Predictor.py b/lib/infer_pack/modules/F0Predictor/DioF0Predictor.py deleted file mode 100644 index ee3171bcb..000000000 --- a/lib/infer_pack/modules/F0Predictor/DioF0Predictor.py +++ /dev/null @@ -1,90 +0,0 @@ -from lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor -import pyworld -import numpy as np - - -class DioF0Predictor(F0Predictor): - def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100): - self.hop_length = hop_length - self.f0_min = f0_min - self.f0_max = f0_max - self.sampling_rate = sampling_rate - - def interpolate_f0(self, f0): - """ - 对F0进行插值处理 - """ - - data = np.reshape(f0, (f0.size, 1)) - - vuv_vector = np.zeros((data.size, 1), dtype=np.float32) - vuv_vector[data > 0.0] = 1.0 - vuv_vector[data <= 0.0] = 0.0 - - ip_data = data - - frame_number = data.size - last_value = 0.0 - for i in range(frame_number): - if data[i] <= 0.0: - j = i + 1 - for j in range(i + 1, frame_number): - if data[j] > 0.0: - break - if j < frame_number - 1: - if last_value > 0.0: - step = (data[j] - data[i - 1]) / float(j - i) - for k in range(i, j): - ip_data[k] = data[i - 1] + step * (k - i + 1) - else: - for k in range(i, j): - ip_data[k] = data[j] - else: - for k in range(i, frame_number): - ip_data[k] = last_value - else: - ip_data[i] = data[i] # 这里可能存在一个没有必要的拷贝 - last_value = data[i] - - return ip_data[:, 0], vuv_vector[:, 0] - - def resize_f0(self, x, target_len): - source = np.array(x) - source[source < 0.001] = np.nan - target = np.interp( - np.arange(0, len(source) * target_len, len(source)) / target_len, - np.arange(0, len(source)), - source, - ) - res = np.nan_to_num(target) - return res - - def compute_f0(self, wav, p_len=None): - if p_len is None: - p_len = wav.shape[0] // self.hop_length - f0, t = pyworld.dio( - wav.astype(np.double), - fs=self.sampling_rate, - f0_floor=self.f0_min, - f0_ceil=self.f0_max, - frame_period=1000 * self.hop_length / self.sampling_rate, - ) - f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate) - for index, pitch in enumerate(f0): - f0[index] = round(pitch, 1) - return self.interpolate_f0(self.resize_f0(f0, p_len))[0] - - def compute_f0_uv(self, wav, p_len=None): - if p_len is None: - p_len = wav.shape[0] // self.hop_length - f0, t = pyworld.dio( - wav.astype(np.double), - fs=self.sampling_rate, - f0_floor=self.f0_min, - f0_ceil=self.f0_max, - frame_period=1000 * self.hop_length / self.sampling_rate, - ) - f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate) - for index, pitch in enumerate(f0): - f0[index] = round(pitch, 1) - return self.interpolate_f0(self.resize_f0(f0, p_len)) diff --git a/lib/infer_pack/modules/F0Predictor/F0Predictor.py b/lib/infer_pack/modules/F0Predictor/F0Predictor.py deleted file mode 100644 index f56e49e7f..000000000 --- 
a/lib/infer_pack/modules/F0Predictor/F0Predictor.py +++ /dev/null @@ -1,16 +0,0 @@ -class F0Predictor(object): - def compute_f0(self, wav, p_len): - """ - input: wav:[signal_length] - p_len:int - output: f0:[signal_length//hop_length] - """ - pass - - def compute_f0_uv(self, wav, p_len): - """ - input: wav:[signal_length] - p_len:int - output: f0:[signal_length//hop_length],uv:[signal_length//hop_length] - """ - pass diff --git a/lib/infer_pack/modules/F0Predictor/HarvestF0Predictor.py b/lib/infer_pack/modules/F0Predictor/HarvestF0Predictor.py deleted file mode 100644 index b412ba281..000000000 --- a/lib/infer_pack/modules/F0Predictor/HarvestF0Predictor.py +++ /dev/null @@ -1,86 +0,0 @@ -from lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor -import pyworld -import numpy as np - - -class HarvestF0Predictor(F0Predictor): - def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100): - self.hop_length = hop_length - self.f0_min = f0_min - self.f0_max = f0_max - self.sampling_rate = sampling_rate - - def interpolate_f0(self, f0): - """ - 对F0进行插值处理 - """ - - data = np.reshape(f0, (f0.size, 1)) - - vuv_vector = np.zeros((data.size, 1), dtype=np.float32) - vuv_vector[data > 0.0] = 1.0 - vuv_vector[data <= 0.0] = 0.0 - - ip_data = data - - frame_number = data.size - last_value = 0.0 - for i in range(frame_number): - if data[i] <= 0.0: - j = i + 1 - for j in range(i + 1, frame_number): - if data[j] > 0.0: - break - if j < frame_number - 1: - if last_value > 0.0: - step = (data[j] - data[i - 1]) / float(j - i) - for k in range(i, j): - ip_data[k] = data[i - 1] + step * (k - i + 1) - else: - for k in range(i, j): - ip_data[k] = data[j] - else: - for k in range(i, frame_number): - ip_data[k] = last_value - else: - ip_data[i] = data[i] # 这里可能存在一个没有必要的拷贝 - last_value = data[i] - - return ip_data[:, 0], vuv_vector[:, 0] - - def resize_f0(self, x, target_len): - source = np.array(x) - source[source < 0.001] = np.nan - target = np.interp( - np.arange(0, len(source) * target_len, len(source)) / target_len, - np.arange(0, len(source)), - source, - ) - res = np.nan_to_num(target) - return res - - def compute_f0(self, wav, p_len=None): - if p_len is None: - p_len = wav.shape[0] // self.hop_length - f0, t = pyworld.harvest( - wav.astype(np.double), - fs=self.hop_length, - f0_ceil=self.f0_max, - f0_floor=self.f0_min, - frame_period=1000 * self.hop_length / self.sampling_rate, - ) - f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.fs) - return self.interpolate_f0(self.resize_f0(f0, p_len))[0] - - def compute_f0_uv(self, wav, p_len=None): - if p_len is None: - p_len = wav.shape[0] // self.hop_length - f0, t = pyworld.harvest( - wav.astype(np.double), - fs=self.sampling_rate, - f0_floor=self.f0_min, - f0_ceil=self.f0_max, - frame_period=1000 * self.hop_length / self.sampling_rate, - ) - f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate) - return self.interpolate_f0(self.resize_f0(f0, p_len)) diff --git a/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py b/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py deleted file mode 100644 index b2c592527..000000000 --- a/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py +++ /dev/null @@ -1,97 +0,0 @@ -from lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor -import parselmouth -import numpy as np - - -class PMF0Predictor(F0Predictor): - def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100): - self.hop_length = hop_length - self.f0_min = f0_min - self.f0_max = 
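A small numpy sketch of what the interpolate_f0 helper repeated in the F0 predictors above accomplishes: unvoiced frames (f0 <= 0) are filled by linear interpolation between surrounding voiced frames, and a voiced/unvoiced vector is returned alongside. np.interp replaces the explicit loops; the behaviour matches for interior gaps.

import numpy as np

def interpolate_f0(f0):
    f0 = np.asarray(f0, dtype=np.float64)
    vuv = (f0 > 0).astype(np.float32)
    voiced_idx = np.nonzero(f0 > 0)[0]
    if len(voiced_idx) == 0:
        return f0, vuv
    filled = np.interp(np.arange(len(f0)), voiced_idx, f0[voiced_idx])
    return filled, vuv

f0_filled, vuv = interpolate_f0([0, 0, 220, 0, 0, 230, 0])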
f0_max - self.sampling_rate = sampling_rate - - def interpolate_f0(self, f0): - """ - 对F0进行插值处理 - """ - - data = np.reshape(f0, (f0.size, 1)) - - vuv_vector = np.zeros((data.size, 1), dtype=np.float32) - vuv_vector[data > 0.0] = 1.0 - vuv_vector[data <= 0.0] = 0.0 - - ip_data = data - - frame_number = data.size - last_value = 0.0 - for i in range(frame_number): - if data[i] <= 0.0: - j = i + 1 - for j in range(i + 1, frame_number): - if data[j] > 0.0: - break - if j < frame_number - 1: - if last_value > 0.0: - step = (data[j] - data[i - 1]) / float(j - i) - for k in range(i, j): - ip_data[k] = data[i - 1] + step * (k - i + 1) - else: - for k in range(i, j): - ip_data[k] = data[j] - else: - for k in range(i, frame_number): - ip_data[k] = last_value - else: - ip_data[i] = data[i] # 这里可能存在一个没有必要的拷贝 - last_value = data[i] - - return ip_data[:, 0], vuv_vector[:, 0] - - def compute_f0(self, wav, p_len=None): - x = wav - if p_len is None: - p_len = x.shape[0] // self.hop_length - else: - assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error" - time_step = self.hop_length / self.sampling_rate * 1000 - f0 = ( - parselmouth.Sound(x, self.sampling_rate) - .to_pitch_ac( - time_step=time_step / 1000, - voicing_threshold=0.6, - pitch_floor=self.f0_min, - pitch_ceiling=self.f0_max, - ) - .selected_array["frequency"] - ) - - pad_size = (p_len - len(f0) + 1) // 2 - if pad_size > 0 or p_len - len(f0) - pad_size > 0: - f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant") - f0, uv = self.interpolate_f0(f0) - return f0 - - def compute_f0_uv(self, wav, p_len=None): - x = wav - if p_len is None: - p_len = x.shape[0] // self.hop_length - else: - assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error" - time_step = self.hop_length / self.sampling_rate * 1000 - f0 = ( - parselmouth.Sound(x, self.sampling_rate) - .to_pitch_ac( - time_step=time_step / 1000, - voicing_threshold=0.6, - pitch_floor=self.f0_min, - pitch_ceiling=self.f0_max, - ) - .selected_array["frequency"] - ) - - pad_size = (p_len - len(f0) + 1) // 2 - if pad_size > 0 or p_len - len(f0) - pad_size > 0: - f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant") - f0, uv = self.interpolate_f0(f0) - return f0, uv diff --git a/lib/infer_pack/modules/F0Predictor/__init__.py b/lib/infer_pack/modules/F0Predictor/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/lib/infer_pack/onnx_inference.py b/lib/infer_pack/onnx_inference.py deleted file mode 100644 index 6517853be..000000000 --- a/lib/infer_pack/onnx_inference.py +++ /dev/null @@ -1,145 +0,0 @@ -import onnxruntime -import librosa -import numpy as np -import soundfile - - -class ContentVec: - def __init__(self, vec_path="pretrained/vec-768-layer-12.onnx", device=None): - print("load model(s) from {}".format(vec_path)) - if device == "cpu" or device is None: - providers = ["CPUExecutionProvider"] - elif device == "cuda": - providers = ["CUDAExecutionProvider", "CPUExecutionProvider"] - elif device == "dml": - providers = ["DmlExecutionProvider"] - else: - raise RuntimeError("Unsportted Device") - self.model = onnxruntime.InferenceSession(vec_path, providers=providers) - - def __call__(self, wav): - return self.forward(wav) - - def forward(self, wav): - feats = wav - if feats.ndim == 2: # double channels - feats = feats.mean(-1) - assert feats.ndim == 1, feats.ndim - feats = np.expand_dims(np.expand_dims(feats, 0), 0) - onnx_input = {self.model.get_inputs()[0].name: feats} - logits = self.model.run(None, 
onnx_input)[0] - return logits.transpose(0, 2, 1) - - -def get_f0_predictor(f0_predictor, hop_length, sampling_rate, **kargs): - if f0_predictor == "pm": - from lib.infer_pack.modules.F0Predictor.PMF0Predictor import PMF0Predictor - - f0_predictor_object = PMF0Predictor( - hop_length=hop_length, sampling_rate=sampling_rate - ) - elif f0_predictor == "harvest": - from lib.infer_pack.modules.F0Predictor.HarvestF0Predictor import ( - HarvestF0Predictor, - ) - - f0_predictor_object = HarvestF0Predictor( - hop_length=hop_length, sampling_rate=sampling_rate - ) - elif f0_predictor == "dio": - from lib.infer_pack.modules.F0Predictor.DioF0Predictor import DioF0Predictor - - f0_predictor_object = DioF0Predictor( - hop_length=hop_length, sampling_rate=sampling_rate - ) - else: - raise Exception("Unknown f0 predictor") - return f0_predictor_object - - -class OnnxRVC: - def __init__( - self, - model_path, - sr=40000, - hop_size=512, - vec_path="vec-768-layer-12", - device="cpu", - ): - vec_path = f"pretrained/{vec_path}.onnx" - self.vec_model = ContentVec(vec_path, device) - if device == "cpu" or device is None: - providers = ["CPUExecutionProvider"] - elif device == "cuda": - providers = ["CUDAExecutionProvider", "CPUExecutionProvider"] - elif device == "dml": - providers = ["DmlExecutionProvider"] - else: - raise RuntimeError("Unsportted Device") - self.model = onnxruntime.InferenceSession(model_path, providers=providers) - self.sampling_rate = sr - self.hop_size = hop_size - - def forward(self, hubert, hubert_length, pitch, pitchf, ds, rnd): - onnx_input = { - self.model.get_inputs()[0].name: hubert, - self.model.get_inputs()[1].name: hubert_length, - self.model.get_inputs()[2].name: pitch, - self.model.get_inputs()[3].name: pitchf, - self.model.get_inputs()[4].name: ds, - self.model.get_inputs()[5].name: rnd, - } - return (self.model.run(None, onnx_input)[0] * 32767).astype(np.int16) - - def inference( - self, - raw_path, - sid, - f0_method="dio", - f0_up_key=0, - pad_time=0.5, - cr_threshold=0.02, - ): - f0_min = 50 - f0_max = 1100 - f0_mel_min = 1127 * np.log(1 + f0_min / 700) - f0_mel_max = 1127 * np.log(1 + f0_max / 700) - f0_predictor = get_f0_predictor( - f0_method, - hop_length=self.hop_size, - sampling_rate=self.sampling_rate, - threshold=cr_threshold, - ) - wav, sr = librosa.load(raw_path, sr=self.sampling_rate) - org_length = len(wav) - if org_length / sr > 50.0: - raise RuntimeError("Reached Max Length") - - wav16k = librosa.resample(wav, orig_sr=self.sampling_rate, target_sr=16000) - wav16k = wav16k - - hubert = self.vec_model(wav16k) - hubert = np.repeat(hubert, 2, axis=2).transpose(0, 2, 1).astype(np.float32) - hubert_length = hubert.shape[1] - - pitchf = f0_predictor.compute_f0(wav, hubert_length) - pitchf = pitchf * 2 ** (f0_up_key / 12) - pitch = pitchf.copy() - f0_mel = 1127 * np.log(1 + pitch / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / ( - f0_mel_max - f0_mel_min - ) + 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > 255] = 255 - pitch = np.rint(f0_mel).astype(np.int64) - - pitchf = pitchf.reshape(1, len(pitchf)).astype(np.float32) - pitch = pitch.reshape(1, len(pitch)) - ds = np.array([sid]).astype(np.int64) - - rnd = np.random.randn(1, 192, hubert_length).astype(np.float32) - hubert_length = np.array([hubert_length]).astype(np.int64) - - out_wav = self.forward(hubert, hubert_length, pitch, pitchf, ds, rnd).squeeze() - out_wav = np.pad(out_wav, (0, 2 * self.hop_size), "constant") - return out_wav[0:org_length] diff --git a/lib/infer_pack/transforms.py 
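A minimal numpy sketch of the coarse-pitch encoding used in OnnxRVC.inference above: F0 in Hz is mapped to the mel scale, rescaled into bins 1..255, and rounded to integers, while the float F0 is kept separately for the NSF source. Constants follow the method; the helper name coarse_pitch is hypothetical.

import numpy as np

def coarse_pitch(f0_hz, f0_min=50.0, f0_max=1100.0):
    f0_mel = 1127 * np.log(1 + f0_hz / 700)
    mel_min = 1127 * np.log(1 + f0_min / 700)
    mel_max = 1127 * np.log(1 + f0_max / 700)
    f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - mel_min) * 254 / (mel_max - mel_min) + 1
    f0_mel = np.clip(f0_mel, 1, 255)
    return np.rint(f0_mel).astype(np.int64)

coarse = coarse_pitch(np.array([0.0, 110.0, 440.0]))  # unvoiced frames map to bin 1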
b/lib/infer_pack/transforms.py deleted file mode 100644 index a11f799e0..000000000 --- a/lib/infer_pack/transforms.py +++ /dev/null @@ -1,209 +0,0 @@ -import torch -from torch.nn import functional as F - -import numpy as np - - -DEFAULT_MIN_BIN_WIDTH = 1e-3 -DEFAULT_MIN_BIN_HEIGHT = 1e-3 -DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def piecewise_rational_quadratic_transform( - inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1.0, - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE, -): - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = {"tails": tails, "tail_bound": tail_bound} - - outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs - ) - return outputs, logabsdet - - -def searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1 - - -def unconstrained_rational_quadratic_spline( - inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails="linear", - tail_bound=1.0, - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE, -): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - outside_interval_mask = ~inside_interval_mask - - outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == "linear": - unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - outputs[outside_interval_mask] = inputs[outside_interval_mask] - logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError("{} tails are not implemented.".format(tails)) - - ( - outputs[inside_interval_mask], - logabsdet[inside_interval_mask], - ) = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], - inverse=inverse, - left=-tail_bound, - right=tail_bound, - bottom=-tail_bound, - top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - ) - - return outputs, logabsdet - - -def rational_quadratic_spline( - inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - left=0.0, - right=1.0, - bottom=0.0, - top=1.0, - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE, -): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError("Input to a transform is not within its domain") - - num_bins = unnormalized_widths.shape[-1] - - if min_bin_width * num_bins > 1.0: - raise ValueError("Minimal bin width too large for the number of bins") - if min_bin_height * num_bins > 1.0: - raise ValueError("Minimal bin height 
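A short sketch (assuming a current PyTorch build) showing that the custom searchsorted above — counting how many bin edges each input meets or exceeds — selects the same bin index as torch.searchsorted with right-closed intervals; the 1-D shapes here are a simplification of the batched bin locations used by the spline.

import torch

bin_edges = torch.tensor([0.0, 0.25, 0.5, 0.75, 1.0])
x = torch.tensor([0.1, 0.3, 0.9])

idx_manual = torch.sum(x[..., None] >= bin_edges, dim=-1) - 1
idx_builtin = torch.searchsorted(bin_edges, x, right=True) - 1
print(idx_manual, idx_builtin)  # both: tensor([0, 1, 3])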
too large for the number of bins") - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode="constant", value=0.0) - cumwidths = (right - left) * cumwidths + left - cumwidths[..., 0] = left - cumwidths[..., -1] = right - widths = cumwidths[..., 1:] - cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode="constant", value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - - if inverse: - bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (inputs - input_cumheights) * ( - input_derivatives + input_derivatives_plus_one - 2 * input_delta - ) + input_heights * (input_delta - input_derivatives) - b = input_heights * input_derivatives - (inputs - input_cumheights) * ( - input_derivatives + input_derivatives_plus_one - 2 * input_delta - ) - c = -input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ( - (input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta - ) - derivative_numerator = input_delta.pow(2) * ( - input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2) - ) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, -logabsdet - else: - theta = (inputs - input_cumwidths) / input_bin_widths - theta_one_minus_theta = theta * (1 - theta) - - numerator = input_heights * ( - input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta - ) - denominator = input_delta + ( - (input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta - ) - outputs = input_cumheights + numerator / denominator - - derivative_numerator = input_delta.pow(2) * ( - input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2) - ) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, logabsdet diff --git a/lib/uvr5_pack/lib_v5/dataset.py b/lib/uvr5_pack/lib_v5/dataset.py deleted file mode 100644 index cfd01a174..000000000 --- a/lib/uvr5_pack/lib_v5/dataset.py +++ /dev/null @@ -1,183 +0,0 @@ -import os -import random - -import numpy as np -import torch -import torch.utils.data -from tqdm import tqdm - -from . 
import spec_utils - - -class VocalRemoverValidationSet(torch.utils.data.Dataset): - def __init__(self, patch_list): - self.patch_list = patch_list - - def __len__(self): - return len(self.patch_list) - - def __getitem__(self, idx): - path = self.patch_list[idx] - data = np.load(path) - - X, y = data["X"], data["y"] - - X_mag = np.abs(X) - y_mag = np.abs(y) - - return X_mag, y_mag - - -def make_pair(mix_dir, inst_dir): - input_exts = [".wav", ".m4a", ".mp3", ".mp4", ".flac"] - - X_list = sorted( - [ - os.path.join(mix_dir, fname) - for fname in os.listdir(mix_dir) - if os.path.splitext(fname)[1] in input_exts - ] - ) - y_list = sorted( - [ - os.path.join(inst_dir, fname) - for fname in os.listdir(inst_dir) - if os.path.splitext(fname)[1] in input_exts - ] - ) - - filelist = list(zip(X_list, y_list)) - - return filelist - - -def train_val_split(dataset_dir, split_mode, val_rate, val_filelist): - if split_mode == "random": - filelist = make_pair( - os.path.join(dataset_dir, "mixtures"), - os.path.join(dataset_dir, "instruments"), - ) - - random.shuffle(filelist) - - if len(val_filelist) == 0: - val_size = int(len(filelist) * val_rate) - train_filelist = filelist[:-val_size] - val_filelist = filelist[-val_size:] - else: - train_filelist = [ - pair for pair in filelist if list(pair) not in val_filelist - ] - elif split_mode == "subdirs": - if len(val_filelist) != 0: - raise ValueError( - "The `val_filelist` option is not available in `subdirs` mode" - ) - - train_filelist = make_pair( - os.path.join(dataset_dir, "training/mixtures"), - os.path.join(dataset_dir, "training/instruments"), - ) - - val_filelist = make_pair( - os.path.join(dataset_dir, "validation/mixtures"), - os.path.join(dataset_dir, "validation/instruments"), - ) - - return train_filelist, val_filelist - - -def augment(X, y, reduction_rate, reduction_mask, mixup_rate, mixup_alpha): - perm = np.random.permutation(len(X)) - for i, idx in enumerate(tqdm(perm)): - if np.random.uniform() < reduction_rate: - y[idx] = spec_utils.reduce_vocal_aggressively( - X[idx], y[idx], reduction_mask - ) - - if np.random.uniform() < 0.5: - # swap channel - X[idx] = X[idx, ::-1] - y[idx] = y[idx, ::-1] - if np.random.uniform() < 0.02: - # mono - X[idx] = X[idx].mean(axis=0, keepdims=True) - y[idx] = y[idx].mean(axis=0, keepdims=True) - if np.random.uniform() < 0.02: - # inst - X[idx] = y[idx] - - if np.random.uniform() < mixup_rate and i < len(perm) - 1: - lam = np.random.beta(mixup_alpha, mixup_alpha) - X[idx] = lam * X[idx] + (1 - lam) * X[perm[i + 1]] - y[idx] = lam * y[idx] + (1 - lam) * y[perm[i + 1]] - - return X, y - - -def make_padding(width, cropsize, offset): - left = offset - roi_size = cropsize - left * 2 - if roi_size == 0: - roi_size = cropsize - right = roi_size - (width % roi_size) + left - - return left, right, roi_size - - -def make_training_set(filelist, cropsize, patches, sr, hop_length, n_fft, offset): - len_dataset = patches * len(filelist) - - X_dataset = np.zeros((len_dataset, 2, n_fft // 2 + 1, cropsize), dtype=np.complex64) - y_dataset = np.zeros((len_dataset, 2, n_fft // 2 + 1, cropsize), dtype=np.complex64) - - for i, (X_path, y_path) in enumerate(tqdm(filelist)): - X, y = spec_utils.cache_or_load(X_path, y_path, sr, hop_length, n_fft) - coef = np.max([np.abs(X).max(), np.abs(y).max()]) - X, y = X / coef, y / coef - - l, r, roi_size = make_padding(X.shape[2], cropsize, offset) - X_pad = np.pad(X, ((0, 0), (0, 0), (l, r)), mode="constant") - y_pad = np.pad(y, ((0, 0), (0, 0), (l, r)), mode="constant") - - starts = 
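A brief numpy sketch of the mixup step inside augment above: two training pairs are blended with a Beta-distributed coefficient, which is the standard mixup recipe; the function name mixup_pair is illustrative.

import numpy as np

def mixup_pair(x_a, y_a, x_b, y_b, alpha=0.4):
    lam = np.random.beta(alpha, alpha)         # mixing coefficient in (0, 1)
    x = lam * x_a + (1 - lam) * x_b
    y = lam * y_a + (1 - lam) * y_b
    return x, y

x, y = mixup_pair(np.ones((2, 4)), np.ones((2, 4)), np.zeros((2, 4)), np.zeros((2, 4)))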
np.random.randint(0, X_pad.shape[2] - cropsize, patches) - ends = starts + cropsize - for j in range(patches): - idx = i * patches + j - X_dataset[idx] = X_pad[:, :, starts[j] : ends[j]] - y_dataset[idx] = y_pad[:, :, starts[j] : ends[j]] - - return X_dataset, y_dataset - - -def make_validation_set(filelist, cropsize, sr, hop_length, n_fft, offset): - patch_list = [] - patch_dir = "cs{}_sr{}_hl{}_nf{}_of{}".format( - cropsize, sr, hop_length, n_fft, offset - ) - os.makedirs(patch_dir, exist_ok=True) - - for i, (X_path, y_path) in enumerate(tqdm(filelist)): - basename = os.path.splitext(os.path.basename(X_path))[0] - - X, y = spec_utils.cache_or_load(X_path, y_path, sr, hop_length, n_fft) - coef = np.max([np.abs(X).max(), np.abs(y).max()]) - X, y = X / coef, y / coef - - l, r, roi_size = make_padding(X.shape[2], cropsize, offset) - X_pad = np.pad(X, ((0, 0), (0, 0), (l, r)), mode="constant") - y_pad = np.pad(y, ((0, 0), (0, 0), (l, r)), mode="constant") - - len_dataset = int(np.ceil(X.shape[2] / roi_size)) - for j in range(len_dataset): - outpath = os.path.join(patch_dir, "{}_p{}.npz".format(basename, j)) - start = j * roi_size - if not os.path.exists(outpath): - np.savez( - outpath, - X=X_pad[:, :, start : start + cropsize], - y=y_pad[:, :, start : start + cropsize], - ) - patch_list.append(outpath) - - return VocalRemoverValidationSet(patch_list) diff --git a/lib/uvr5_pack/lib_v5/layers.py b/lib/uvr5_pack/lib_v5/layers.py deleted file mode 100644 index b82f06bb4..000000000 --- a/lib/uvr5_pack/lib_v5/layers.py +++ /dev/null @@ -1,118 +0,0 @@ -import torch -from torch import nn -import torch.nn.functional as F - -from . import spec_utils - - -class Conv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(Conv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nout, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - bias=False, - ), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class SeperableConv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(SeperableConv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nin, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - groups=nin, - bias=False, - ), - nn.Conv2d(nin, nout, kernel_size=1, bias=False), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class Encoder(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): - super(Encoder, self).__init__() - self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ) - - def __call__(self, x): - skip = self.conv1(x) - h = self.conv2(skip) - - return h, skip - - -class Decoder(nn.Module): - def __init__( - self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False - ): - super(Decoder, self).__init__() - self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.dropout = nn.Dropout2d(0.1) if dropout else None - - def __call__(self, x, skip=None): - x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) - if skip is not None: - skip = spec_utils.crop_center(skip, x) - x = torch.cat([x, skip], dim=1) - h = self.conv(x) - - if self.dropout is not None: - h = self.dropout(h) - - return h - - -class ASPPModule(nn.Module): - def 
__init__(self, nin, nout, dilations=(4, 8, 16), activ=nn.ReLU): - super(ASPPModule, self).__init__() - self.conv1 = nn.Sequential( - nn.AdaptiveAvgPool2d((1, None)), - Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ), - ) - self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ) - self.conv3 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[0], dilations[0], activ=activ - ) - self.conv4 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[1], dilations[1], activ=activ - ) - self.conv5 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.bottleneck = nn.Sequential( - Conv2DBNActiv(nin * 5, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1) - ) - - def forward(self, x): - _, _, h, w = x.size() - feat1 = F.interpolate( - self.conv1(x), size=(h, w), mode="bilinear", align_corners=True - ) - feat2 = self.conv2(x) - feat3 = self.conv3(x) - feat4 = self.conv4(x) - feat5 = self.conv5(x) - out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1) - bottle = self.bottleneck(out) - return bottle diff --git a/lib/uvr5_pack/lib_v5/layers_123812KB .py b/lib/uvr5_pack/lib_v5/layers_123812KB .py deleted file mode 100644 index b82f06bb4..000000000 --- a/lib/uvr5_pack/lib_v5/layers_123812KB .py +++ /dev/null @@ -1,118 +0,0 @@ -import torch -from torch import nn -import torch.nn.functional as F - -from . import spec_utils - - -class Conv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(Conv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nout, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - bias=False, - ), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class SeperableConv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(SeperableConv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nin, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - groups=nin, - bias=False, - ), - nn.Conv2d(nin, nout, kernel_size=1, bias=False), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class Encoder(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): - super(Encoder, self).__init__() - self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ) - - def __call__(self, x): - skip = self.conv1(x) - h = self.conv2(skip) - - return h, skip - - -class Decoder(nn.Module): - def __init__( - self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False - ): - super(Decoder, self).__init__() - self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.dropout = nn.Dropout2d(0.1) if dropout else None - - def __call__(self, x, skip=None): - x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) - if skip is not None: - skip = spec_utils.crop_center(skip, x) - x = torch.cat([x, skip], dim=1) - h = self.conv(x) - - if self.dropout is not None: - h = self.dropout(h) - - return h - - -class ASPPModule(nn.Module): - def __init__(self, nin, nout, dilations=(4, 8, 16), activ=nn.ReLU): - super(ASPPModule, self).__init__() - self.conv1 = nn.Sequential( - nn.AdaptiveAvgPool2d((1, None)), - Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ), - ) - self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ) - self.conv3 = 
SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[0], dilations[0], activ=activ - ) - self.conv4 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[1], dilations[1], activ=activ - ) - self.conv5 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.bottleneck = nn.Sequential( - Conv2DBNActiv(nin * 5, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1) - ) - - def forward(self, x): - _, _, h, w = x.size() - feat1 = F.interpolate( - self.conv1(x), size=(h, w), mode="bilinear", align_corners=True - ) - feat2 = self.conv2(x) - feat3 = self.conv3(x) - feat4 = self.conv4(x) - feat5 = self.conv5(x) - out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1) - bottle = self.bottleneck(out) - return bottle diff --git a/lib/uvr5_pack/lib_v5/layers_123821KB.py b/lib/uvr5_pack/lib_v5/layers_123821KB.py deleted file mode 100644 index b82f06bb4..000000000 --- a/lib/uvr5_pack/lib_v5/layers_123821KB.py +++ /dev/null @@ -1,118 +0,0 @@ -import torch -from torch import nn -import torch.nn.functional as F - -from . import spec_utils - - -class Conv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(Conv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nout, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - bias=False, - ), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class SeperableConv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(SeperableConv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nin, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - groups=nin, - bias=False, - ), - nn.Conv2d(nin, nout, kernel_size=1, bias=False), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class Encoder(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): - super(Encoder, self).__init__() - self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ) - - def __call__(self, x): - skip = self.conv1(x) - h = self.conv2(skip) - - return h, skip - - -class Decoder(nn.Module): - def __init__( - self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False - ): - super(Decoder, self).__init__() - self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.dropout = nn.Dropout2d(0.1) if dropout else None - - def __call__(self, x, skip=None): - x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) - if skip is not None: - skip = spec_utils.crop_center(skip, x) - x = torch.cat([x, skip], dim=1) - h = self.conv(x) - - if self.dropout is not None: - h = self.dropout(h) - - return h - - -class ASPPModule(nn.Module): - def __init__(self, nin, nout, dilations=(4, 8, 16), activ=nn.ReLU): - super(ASPPModule, self).__init__() - self.conv1 = nn.Sequential( - nn.AdaptiveAvgPool2d((1, None)), - Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ), - ) - self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ) - self.conv3 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[0], dilations[0], activ=activ - ) - self.conv4 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[1], dilations[1], activ=activ - ) - self.conv5 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[2], dilations[2], activ=activ - ) - 
self.bottleneck = nn.Sequential( - Conv2DBNActiv(nin * 5, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1) - ) - - def forward(self, x): - _, _, h, w = x.size() - feat1 = F.interpolate( - self.conv1(x), size=(h, w), mode="bilinear", align_corners=True - ) - feat2 = self.conv2(x) - feat3 = self.conv3(x) - feat4 = self.conv4(x) - feat5 = self.conv5(x) - out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1) - bottle = self.bottleneck(out) - return bottle diff --git a/lib/uvr5_pack/lib_v5/layers_33966KB.py b/lib/uvr5_pack/lib_v5/layers_33966KB.py deleted file mode 100644 index a38b7bb3a..000000000 --- a/lib/uvr5_pack/lib_v5/layers_33966KB.py +++ /dev/null @@ -1,126 +0,0 @@ -import torch -from torch import nn -import torch.nn.functional as F - -from . import spec_utils - - -class Conv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(Conv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nout, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - bias=False, - ), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class SeperableConv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(SeperableConv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nin, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - groups=nin, - bias=False, - ), - nn.Conv2d(nin, nout, kernel_size=1, bias=False), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class Encoder(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): - super(Encoder, self).__init__() - self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ) - - def __call__(self, x): - skip = self.conv1(x) - h = self.conv2(skip) - - return h, skip - - -class Decoder(nn.Module): - def __init__( - self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False - ): - super(Decoder, self).__init__() - self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.dropout = nn.Dropout2d(0.1) if dropout else None - - def __call__(self, x, skip=None): - x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) - if skip is not None: - skip = spec_utils.crop_center(skip, x) - x = torch.cat([x, skip], dim=1) - h = self.conv(x) - - if self.dropout is not None: - h = self.dropout(h) - - return h - - -class ASPPModule(nn.Module): - def __init__(self, nin, nout, dilations=(4, 8, 16, 32, 64), activ=nn.ReLU): - super(ASPPModule, self).__init__() - self.conv1 = nn.Sequential( - nn.AdaptiveAvgPool2d((1, None)), - Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ), - ) - self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ) - self.conv3 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[0], dilations[0], activ=activ - ) - self.conv4 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[1], dilations[1], activ=activ - ) - self.conv5 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.conv6 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.conv7 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.bottleneck = nn.Sequential( - Conv2DBNActiv(nin * 7, nout, 1, 1, 0, activ=activ), 
nn.Dropout2d(0.1) - ) - - def forward(self, x): - _, _, h, w = x.size() - feat1 = F.interpolate( - self.conv1(x), size=(h, w), mode="bilinear", align_corners=True - ) - feat2 = self.conv2(x) - feat3 = self.conv3(x) - feat4 = self.conv4(x) - feat5 = self.conv5(x) - feat6 = self.conv6(x) - feat7 = self.conv7(x) - out = torch.cat((feat1, feat2, feat3, feat4, feat5, feat6, feat7), dim=1) - bottle = self.bottleneck(out) - return bottle diff --git a/lib/uvr5_pack/lib_v5/layers_537227KB.py b/lib/uvr5_pack/lib_v5/layers_537227KB.py deleted file mode 100644 index a38b7bb3a..000000000 --- a/lib/uvr5_pack/lib_v5/layers_537227KB.py +++ /dev/null @@ -1,126 +0,0 @@ -import torch -from torch import nn -import torch.nn.functional as F - -from . import spec_utils - - -class Conv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(Conv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nout, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - bias=False, - ), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class SeperableConv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(SeperableConv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nin, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - groups=nin, - bias=False, - ), - nn.Conv2d(nin, nout, kernel_size=1, bias=False), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class Encoder(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): - super(Encoder, self).__init__() - self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ) - - def __call__(self, x): - skip = self.conv1(x) - h = self.conv2(skip) - - return h, skip - - -class Decoder(nn.Module): - def __init__( - self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False - ): - super(Decoder, self).__init__() - self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.dropout = nn.Dropout2d(0.1) if dropout else None - - def __call__(self, x, skip=None): - x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) - if skip is not None: - skip = spec_utils.crop_center(skip, x) - x = torch.cat([x, skip], dim=1) - h = self.conv(x) - - if self.dropout is not None: - h = self.dropout(h) - - return h - - -class ASPPModule(nn.Module): - def __init__(self, nin, nout, dilations=(4, 8, 16, 32, 64), activ=nn.ReLU): - super(ASPPModule, self).__init__() - self.conv1 = nn.Sequential( - nn.AdaptiveAvgPool2d((1, None)), - Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ), - ) - self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ) - self.conv3 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[0], dilations[0], activ=activ - ) - self.conv4 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[1], dilations[1], activ=activ - ) - self.conv5 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.conv6 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.conv7 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.bottleneck = nn.Sequential( - Conv2DBNActiv(nin * 7, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1) - ) - - def 
forward(self, x): - _, _, h, w = x.size() - feat1 = F.interpolate( - self.conv1(x), size=(h, w), mode="bilinear", align_corners=True - ) - feat2 = self.conv2(x) - feat3 = self.conv3(x) - feat4 = self.conv4(x) - feat5 = self.conv5(x) - feat6 = self.conv6(x) - feat7 = self.conv7(x) - out = torch.cat((feat1, feat2, feat3, feat4, feat5, feat6, feat7), dim=1) - bottle = self.bottleneck(out) - return bottle diff --git a/lib/uvr5_pack/lib_v5/layers_537238KB.py b/lib/uvr5_pack/lib_v5/layers_537238KB.py deleted file mode 100644 index a38b7bb3a..000000000 --- a/lib/uvr5_pack/lib_v5/layers_537238KB.py +++ /dev/null @@ -1,126 +0,0 @@ -import torch -from torch import nn -import torch.nn.functional as F - -from . import spec_utils - - -class Conv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(Conv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nout, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - bias=False, - ), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class SeperableConv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(SeperableConv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nin, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - groups=nin, - bias=False, - ), - nn.Conv2d(nin, nout, kernel_size=1, bias=False), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class Encoder(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): - super(Encoder, self).__init__() - self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ) - - def __call__(self, x): - skip = self.conv1(x) - h = self.conv2(skip) - - return h, skip - - -class Decoder(nn.Module): - def __init__( - self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False - ): - super(Decoder, self).__init__() - self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.dropout = nn.Dropout2d(0.1) if dropout else None - - def __call__(self, x, skip=None): - x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) - if skip is not None: - skip = spec_utils.crop_center(skip, x) - x = torch.cat([x, skip], dim=1) - h = self.conv(x) - - if self.dropout is not None: - h = self.dropout(h) - - return h - - -class ASPPModule(nn.Module): - def __init__(self, nin, nout, dilations=(4, 8, 16, 32, 64), activ=nn.ReLU): - super(ASPPModule, self).__init__() - self.conv1 = nn.Sequential( - nn.AdaptiveAvgPool2d((1, None)), - Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ), - ) - self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ) - self.conv3 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[0], dilations[0], activ=activ - ) - self.conv4 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[1], dilations[1], activ=activ - ) - self.conv5 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.conv6 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.conv7 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.bottleneck = nn.Sequential( - Conv2DBNActiv(nin * 7, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1) - ) - - def forward(self, x): - _, _, h, w 
= x.size() - feat1 = F.interpolate( - self.conv1(x), size=(h, w), mode="bilinear", align_corners=True - ) - feat2 = self.conv2(x) - feat3 = self.conv3(x) - feat4 = self.conv4(x) - feat5 = self.conv5(x) - feat6 = self.conv6(x) - feat7 = self.conv7(x) - out = torch.cat((feat1, feat2, feat3, feat4, feat5, feat6, feat7), dim=1) - bottle = self.bottleneck(out) - return bottle diff --git a/lib/uvr5_pack/lib_v5/layers_new.py b/lib/uvr5_pack/lib_v5/layers_new.py deleted file mode 100644 index 0c13e60b0..000000000 --- a/lib/uvr5_pack/lib_v5/layers_new.py +++ /dev/null @@ -1,125 +0,0 @@ -import torch -from torch import nn -import torch.nn.functional as F - -from . import spec_utils - - -class Conv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(Conv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nout, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - bias=False, - ), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class Encoder(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): - super(Encoder, self).__init__() - self.conv1 = Conv2DBNActiv(nin, nout, ksize, stride, pad, activ=activ) - self.conv2 = Conv2DBNActiv(nout, nout, ksize, 1, pad, activ=activ) - - def __call__(self, x): - h = self.conv1(x) - h = self.conv2(h) - - return h - - -class Decoder(nn.Module): - def __init__( - self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False - ): - super(Decoder, self).__init__() - self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - # self.conv2 = Conv2DBNActiv(nout, nout, ksize, 1, pad, activ=activ) - self.dropout = nn.Dropout2d(0.1) if dropout else None - - def __call__(self, x, skip=None): - x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) - - if skip is not None: - skip = spec_utils.crop_center(skip, x) - x = torch.cat([x, skip], dim=1) - - h = self.conv1(x) - # h = self.conv2(h) - - if self.dropout is not None: - h = self.dropout(h) - - return h - - -class ASPPModule(nn.Module): - def __init__(self, nin, nout, dilations=(4, 8, 12), activ=nn.ReLU, dropout=False): - super(ASPPModule, self).__init__() - self.conv1 = nn.Sequential( - nn.AdaptiveAvgPool2d((1, None)), - Conv2DBNActiv(nin, nout, 1, 1, 0, activ=activ), - ) - self.conv2 = Conv2DBNActiv(nin, nout, 1, 1, 0, activ=activ) - self.conv3 = Conv2DBNActiv( - nin, nout, 3, 1, dilations[0], dilations[0], activ=activ - ) - self.conv4 = Conv2DBNActiv( - nin, nout, 3, 1, dilations[1], dilations[1], activ=activ - ) - self.conv5 = Conv2DBNActiv( - nin, nout, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.bottleneck = Conv2DBNActiv(nout * 5, nout, 1, 1, 0, activ=activ) - self.dropout = nn.Dropout2d(0.1) if dropout else None - - def forward(self, x): - _, _, h, w = x.size() - feat1 = F.interpolate( - self.conv1(x), size=(h, w), mode="bilinear", align_corners=True - ) - feat2 = self.conv2(x) - feat3 = self.conv3(x) - feat4 = self.conv4(x) - feat5 = self.conv5(x) - out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1) - out = self.bottleneck(out) - - if self.dropout is not None: - out = self.dropout(out) - - return out - - -class LSTMModule(nn.Module): - def __init__(self, nin_conv, nin_lstm, nout_lstm): - super(LSTMModule, self).__init__() - self.conv = Conv2DBNActiv(nin_conv, 1, 1, 1, 0) - self.lstm = nn.LSTM( - input_size=nin_lstm, hidden_size=nout_lstm // 2, bidirectional=True - ) - 
self.dense = nn.Sequential( - nn.Linear(nout_lstm, nin_lstm), nn.BatchNorm1d(nin_lstm), nn.ReLU() - ) - - def forward(self, x): - N, _, nbins, nframes = x.size() - h = self.conv(x)[:, 0] # N, nbins, nframes - h = h.permute(2, 0, 1) # nframes, N, nbins - h, _ = self.lstm(h) - h = self.dense(h.reshape(-1, h.size()[-1])) # nframes * N, nbins - h = h.reshape(nframes, N, 1, nbins) - h = h.permute(1, 2, 3, 0) - - return h diff --git a/lib/uvr5_pack/lib_v5/model_param_init.py b/lib/uvr5_pack/lib_v5/model_param_init.py deleted file mode 100644 index b995c0bfb..000000000 --- a/lib/uvr5_pack/lib_v5/model_param_init.py +++ /dev/null @@ -1,69 +0,0 @@ -import json -import os -import pathlib - -default_param = {} -default_param["bins"] = 768 -default_param["unstable_bins"] = 9 # training only -default_param["reduction_bins"] = 762 # training only -default_param["sr"] = 44100 -default_param["pre_filter_start"] = 757 -default_param["pre_filter_stop"] = 768 -default_param["band"] = {} - - -default_param["band"][1] = { - "sr": 11025, - "hl": 128, - "n_fft": 960, - "crop_start": 0, - "crop_stop": 245, - "lpf_start": 61, # inference only - "res_type": "polyphase", -} - -default_param["band"][2] = { - "sr": 44100, - "hl": 512, - "n_fft": 1536, - "crop_start": 24, - "crop_stop": 547, - "hpf_start": 81, # inference only - "res_type": "sinc_best", -} - - -def int_keys(d): - r = {} - for k, v in d: - if k.isdigit(): - k = int(k) - r[k] = v - return r - - -class ModelParameters(object): - def __init__(self, config_path=""): - if ".pth" == pathlib.Path(config_path).suffix: - import zipfile - - with zipfile.ZipFile(config_path, "r") as zip: - self.param = json.loads( - zip.read("param.json"), object_pairs_hook=int_keys - ) - elif ".json" == pathlib.Path(config_path).suffix: - with open(config_path, "r") as f: - self.param = json.loads(f.read(), object_pairs_hook=int_keys) - else: - self.param = default_param - - for k in [ - "mid_side", - "mid_side_b", - "mid_side_b2", - "stereo_w", - "stereo_n", - "reverse", - ]: - if not k in self.param: - self.param[k] = False diff --git a/lib/uvr5_pack/lib_v5/modelparams/1band_sr16000_hl512.json b/lib/uvr5_pack/lib_v5/modelparams/1band_sr16000_hl512.json deleted file mode 100644 index 72cb44998..000000000 --- a/lib/uvr5_pack/lib_v5/modelparams/1band_sr16000_hl512.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "bins": 1024, - "unstable_bins": 0, - "reduction_bins": 0, - "band": { - "1": { - "sr": 16000, - "hl": 512, - "n_fft": 2048, - "crop_start": 0, - "crop_stop": 1024, - "hpf_start": -1, - "res_type": "sinc_best" - } - }, - "sr": 16000, - "pre_filter_start": 1023, - "pre_filter_stop": 1024 -} \ No newline at end of file diff --git a/lib/uvr5_pack/lib_v5/modelparams/1band_sr32000_hl512.json b/lib/uvr5_pack/lib_v5/modelparams/1band_sr32000_hl512.json deleted file mode 100644 index 3c00ecf0a..000000000 --- a/lib/uvr5_pack/lib_v5/modelparams/1band_sr32000_hl512.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "bins": 1024, - "unstable_bins": 0, - "reduction_bins": 0, - "band": { - "1": { - "sr": 32000, - "hl": 512, - "n_fft": 2048, - "crop_start": 0, - "crop_stop": 1024, - "hpf_start": -1, - "res_type": "kaiser_fast" - } - }, - "sr": 32000, - "pre_filter_start": 1000, - "pre_filter_stop": 1021 -} \ No newline at end of file diff --git a/lib/uvr5_pack/lib_v5/modelparams/1band_sr33075_hl384.json b/lib/uvr5_pack/lib_v5/modelparams/1band_sr33075_hl384.json deleted file mode 100644 index 55666ac9a..000000000 --- a/lib/uvr5_pack/lib_v5/modelparams/1band_sr33075_hl384.json +++ /dev/null @@ -1,19 +0,0 
@@ -{ - "bins": 1024, - "unstable_bins": 0, - "reduction_bins": 0, - "band": { - "1": { - "sr": 33075, - "hl": 384, - "n_fft": 2048, - "crop_start": 0, - "crop_stop": 1024, - "hpf_start": -1, - "res_type": "sinc_best" - } - }, - "sr": 33075, - "pre_filter_start": 1000, - "pre_filter_stop": 1021 -} \ No newline at end of file diff --git a/lib/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl1024.json b/lib/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl1024.json deleted file mode 100644 index 665abe20e..000000000 --- a/lib/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl1024.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "bins": 1024, - "unstable_bins": 0, - "reduction_bins": 0, - "band": { - "1": { - "sr": 44100, - "hl": 1024, - "n_fft": 2048, - "crop_start": 0, - "crop_stop": 1024, - "hpf_start": -1, - "res_type": "sinc_best" - } - }, - "sr": 44100, - "pre_filter_start": 1023, - "pre_filter_stop": 1024 -} \ No newline at end of file diff --git a/lib/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl256.json b/lib/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl256.json deleted file mode 100644 index 0e8b16f89..000000000 --- a/lib/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl256.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "bins": 256, - "unstable_bins": 0, - "reduction_bins": 0, - "band": { - "1": { - "sr": 44100, - "hl": 256, - "n_fft": 512, - "crop_start": 0, - "crop_stop": 256, - "hpf_start": -1, - "res_type": "sinc_best" - } - }, - "sr": 44100, - "pre_filter_start": 256, - "pre_filter_stop": 256 -} \ No newline at end of file diff --git a/lib/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl512.json b/lib/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl512.json deleted file mode 100644 index 3b38fcaf6..000000000 --- a/lib/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl512.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "bins": 1024, - "unstable_bins": 0, - "reduction_bins": 0, - "band": { - "1": { - "sr": 44100, - "hl": 512, - "n_fft": 2048, - "crop_start": 0, - "crop_stop": 1024, - "hpf_start": -1, - "res_type": "sinc_best" - } - }, - "sr": 44100, - "pre_filter_start": 1023, - "pre_filter_stop": 1024 -} \ No newline at end of file diff --git a/lib/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl512_cut.json b/lib/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl512_cut.json deleted file mode 100644 index 630df3524..000000000 --- a/lib/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl512_cut.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "bins": 1024, - "unstable_bins": 0, - "reduction_bins": 0, - "band": { - "1": { - "sr": 44100, - "hl": 512, - "n_fft": 2048, - "crop_start": 0, - "crop_stop": 700, - "hpf_start": -1, - "res_type": "sinc_best" - } - }, - "sr": 44100, - "pre_filter_start": 1023, - "pre_filter_stop": 700 -} \ No newline at end of file diff --git a/lib/uvr5_pack/lib_v5/modelparams/2band_32000.json b/lib/uvr5_pack/lib_v5/modelparams/2band_32000.json deleted file mode 100644 index ab9cf1150..000000000 --- a/lib/uvr5_pack/lib_v5/modelparams/2band_32000.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "bins": 768, - "unstable_bins": 7, - "reduction_bins": 705, - "band": { - "1": { - "sr": 6000, - "hl": 66, - "n_fft": 512, - "crop_start": 0, - "crop_stop": 240, - "lpf_start": 60, - "lpf_stop": 118, - "res_type": "sinc_fastest" - }, - "2": { - "sr": 32000, - "hl": 352, - "n_fft": 1024, - "crop_start": 22, - "crop_stop": 505, - "hpf_start": 44, - "hpf_stop": 23, - "res_type": "sinc_medium" - } - }, - "sr": 32000, - "pre_filter_start": 710, - "pre_filter_stop": 731 -} diff --git a/lib/uvr5_pack/lib_v5/modelparams/2band_44100_lofi.json 
b/lib/uvr5_pack/lib_v5/modelparams/2band_44100_lofi.json deleted file mode 100644 index 7faa216d7..000000000 --- a/lib/uvr5_pack/lib_v5/modelparams/2band_44100_lofi.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "bins": 512, - "unstable_bins": 7, - "reduction_bins": 510, - "band": { - "1": { - "sr": 11025, - "hl": 160, - "n_fft": 768, - "crop_start": 0, - "crop_stop": 192, - "lpf_start": 41, - "lpf_stop": 139, - "res_type": "sinc_fastest" - }, - "2": { - "sr": 44100, - "hl": 640, - "n_fft": 1024, - "crop_start": 10, - "crop_stop": 320, - "hpf_start": 47, - "hpf_stop": 15, - "res_type": "sinc_medium" - } - }, - "sr": 44100, - "pre_filter_start": 510, - "pre_filter_stop": 512 -} diff --git a/lib/uvr5_pack/lib_v5/modelparams/2band_48000.json b/lib/uvr5_pack/lib_v5/modelparams/2band_48000.json deleted file mode 100644 index 7e7817505..000000000 --- a/lib/uvr5_pack/lib_v5/modelparams/2band_48000.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "bins": 768, - "unstable_bins": 7, - "reduction_bins": 705, - "band": { - "1": { - "sr": 6000, - "hl": 66, - "n_fft": 512, - "crop_start": 0, - "crop_stop": 240, - "lpf_start": 60, - "lpf_stop": 240, - "res_type": "sinc_fastest" - }, - "2": { - "sr": 48000, - "hl": 528, - "n_fft": 1536, - "crop_start": 22, - "crop_stop": 505, - "hpf_start": 82, - "hpf_stop": 22, - "res_type": "sinc_medium" - } - }, - "sr": 48000, - "pre_filter_start": 710, - "pre_filter_stop": 731 -} \ No newline at end of file diff --git a/lib/uvr5_pack/lib_v5/modelparams/3band_44100.json b/lib/uvr5_pack/lib_v5/modelparams/3band_44100.json deleted file mode 100644 index d881d767f..000000000 --- a/lib/uvr5_pack/lib_v5/modelparams/3band_44100.json +++ /dev/null @@ -1,42 +0,0 @@ -{ - "bins": 768, - "unstable_bins": 5, - "reduction_bins": 733, - "band": { - "1": { - "sr": 11025, - "hl": 128, - "n_fft": 768, - "crop_start": 0, - "crop_stop": 278, - "lpf_start": 28, - "lpf_stop": 140, - "res_type": "polyphase" - }, - "2": { - "sr": 22050, - "hl": 256, - "n_fft": 768, - "crop_start": 14, - "crop_stop": 322, - "hpf_start": 70, - "hpf_stop": 14, - "lpf_start": 283, - "lpf_stop": 314, - "res_type": "polyphase" - }, - "3": { - "sr": 44100, - "hl": 512, - "n_fft": 768, - "crop_start": 131, - "crop_stop": 313, - "hpf_start": 154, - "hpf_stop": 141, - "res_type": "sinc_medium" - } - }, - "sr": 44100, - "pre_filter_start": 757, - "pre_filter_stop": 768 -} diff --git a/lib/uvr5_pack/lib_v5/modelparams/3band_44100_mid.json b/lib/uvr5_pack/lib_v5/modelparams/3band_44100_mid.json deleted file mode 100644 index 77ec19857..000000000 --- a/lib/uvr5_pack/lib_v5/modelparams/3band_44100_mid.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "mid_side": true, - "bins": 768, - "unstable_bins": 5, - "reduction_bins": 733, - "band": { - "1": { - "sr": 11025, - "hl": 128, - "n_fft": 768, - "crop_start": 0, - "crop_stop": 278, - "lpf_start": 28, - "lpf_stop": 140, - "res_type": "polyphase" - }, - "2": { - "sr": 22050, - "hl": 256, - "n_fft": 768, - "crop_start": 14, - "crop_stop": 322, - "hpf_start": 70, - "hpf_stop": 14, - "lpf_start": 283, - "lpf_stop": 314, - "res_type": "polyphase" - }, - "3": { - "sr": 44100, - "hl": 512, - "n_fft": 768, - "crop_start": 131, - "crop_stop": 313, - "hpf_start": 154, - "hpf_stop": 141, - "res_type": "sinc_medium" - } - }, - "sr": 44100, - "pre_filter_start": 757, - "pre_filter_stop": 768 -} diff --git a/lib/uvr5_pack/lib_v5/modelparams/3band_44100_msb2.json b/lib/uvr5_pack/lib_v5/modelparams/3band_44100_msb2.json deleted file mode 100644 index 85ee8a7d4..000000000 --- 
a/lib/uvr5_pack/lib_v5/modelparams/3band_44100_msb2.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "mid_side_b2": true, - "bins": 640, - "unstable_bins": 7, - "reduction_bins": 565, - "band": { - "1": { - "sr": 11025, - "hl": 108, - "n_fft": 1024, - "crop_start": 0, - "crop_stop": 187, - "lpf_start": 92, - "lpf_stop": 186, - "res_type": "polyphase" - }, - "2": { - "sr": 22050, - "hl": 216, - "n_fft": 768, - "crop_start": 0, - "crop_stop": 212, - "hpf_start": 68, - "hpf_stop": 34, - "lpf_start": 174, - "lpf_stop": 209, - "res_type": "polyphase" - }, - "3": { - "sr": 44100, - "hl": 432, - "n_fft": 640, - "crop_start": 66, - "crop_stop": 307, - "hpf_start": 86, - "hpf_stop": 72, - "res_type": "kaiser_fast" - } - }, - "sr": 44100, - "pre_filter_start": 639, - "pre_filter_stop": 640 -} diff --git a/lib/uvr5_pack/lib_v5/modelparams/4band_44100.json b/lib/uvr5_pack/lib_v5/modelparams/4band_44100.json deleted file mode 100644 index df1237542..000000000 --- a/lib/uvr5_pack/lib_v5/modelparams/4band_44100.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "bins": 768, - "unstable_bins": 7, - "reduction_bins": 668, - "band": { - "1": { - "sr": 11025, - "hl": 128, - "n_fft": 1024, - "crop_start": 0, - "crop_stop": 186, - "lpf_start": 37, - "lpf_stop": 73, - "res_type": "polyphase" - }, - "2": { - "sr": 11025, - "hl": 128, - "n_fft": 512, - "crop_start": 4, - "crop_stop": 185, - "hpf_start": 36, - "hpf_stop": 18, - "lpf_start": 93, - "lpf_stop": 185, - "res_type": "polyphase" - }, - "3": { - "sr": 22050, - "hl": 256, - "n_fft": 512, - "crop_start": 46, - "crop_stop": 186, - "hpf_start": 93, - "hpf_stop": 46, - "lpf_start": 164, - "lpf_stop": 186, - "res_type": "polyphase" - }, - "4": { - "sr": 44100, - "hl": 512, - "n_fft": 768, - "crop_start": 121, - "crop_stop": 382, - "hpf_start": 138, - "hpf_stop": 123, - "res_type": "sinc_medium" - } - }, - "sr": 44100, - "pre_filter_start": 740, - "pre_filter_stop": 768 -} diff --git a/lib/uvr5_pack/lib_v5/modelparams/4band_44100_mid.json b/lib/uvr5_pack/lib_v5/modelparams/4band_44100_mid.json deleted file mode 100644 index e91b699eb..000000000 --- a/lib/uvr5_pack/lib_v5/modelparams/4band_44100_mid.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "bins": 768, - "unstable_bins": 7, - "mid_side": true, - "reduction_bins": 668, - "band": { - "1": { - "sr": 11025, - "hl": 128, - "n_fft": 1024, - "crop_start": 0, - "crop_stop": 186, - "lpf_start": 37, - "lpf_stop": 73, - "res_type": "polyphase" - }, - "2": { - "sr": 11025, - "hl": 128, - "n_fft": 512, - "crop_start": 4, - "crop_stop": 185, - "hpf_start": 36, - "hpf_stop": 18, - "lpf_start": 93, - "lpf_stop": 185, - "res_type": "polyphase" - }, - "3": { - "sr": 22050, - "hl": 256, - "n_fft": 512, - "crop_start": 46, - "crop_stop": 186, - "hpf_start": 93, - "hpf_stop": 46, - "lpf_start": 164, - "lpf_stop": 186, - "res_type": "polyphase" - }, - "4": { - "sr": 44100, - "hl": 512, - "n_fft": 768, - "crop_start": 121, - "crop_stop": 382, - "hpf_start": 138, - "hpf_stop": 123, - "res_type": "sinc_medium" - } - }, - "sr": 44100, - "pre_filter_start": 740, - "pre_filter_stop": 768 -} diff --git a/lib/uvr5_pack/lib_v5/modelparams/4band_44100_msb.json b/lib/uvr5_pack/lib_v5/modelparams/4band_44100_msb.json deleted file mode 100644 index f852f280e..000000000 --- a/lib/uvr5_pack/lib_v5/modelparams/4band_44100_msb.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "mid_side_b": true, - "bins": 768, - "unstable_bins": 7, - "reduction_bins": 668, - "band": { - "1": { - "sr": 11025, - "hl": 128, - "n_fft": 1024, - "crop_start": 0, - "crop_stop": 186, - "lpf_start": 37, 
- "lpf_stop": 73, - "res_type": "polyphase" - }, - "2": { - "sr": 11025, - "hl": 128, - "n_fft": 512, - "crop_start": 4, - "crop_stop": 185, - "hpf_start": 36, - "hpf_stop": 18, - "lpf_start": 93, - "lpf_stop": 185, - "res_type": "polyphase" - }, - "3": { - "sr": 22050, - "hl": 256, - "n_fft": 512, - "crop_start": 46, - "crop_stop": 186, - "hpf_start": 93, - "hpf_stop": 46, - "lpf_start": 164, - "lpf_stop": 186, - "res_type": "polyphase" - }, - "4": { - "sr": 44100, - "hl": 512, - "n_fft": 768, - "crop_start": 121, - "crop_stop": 382, - "hpf_start": 138, - "hpf_stop": 123, - "res_type": "sinc_medium" - } - }, - "sr": 44100, - "pre_filter_start": 740, - "pre_filter_stop": 768 -} \ No newline at end of file diff --git a/lib/uvr5_pack/lib_v5/modelparams/4band_44100_msb2.json b/lib/uvr5_pack/lib_v5/modelparams/4band_44100_msb2.json deleted file mode 100644 index f852f280e..000000000 --- a/lib/uvr5_pack/lib_v5/modelparams/4band_44100_msb2.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "mid_side_b": true, - "bins": 768, - "unstable_bins": 7, - "reduction_bins": 668, - "band": { - "1": { - "sr": 11025, - "hl": 128, - "n_fft": 1024, - "crop_start": 0, - "crop_stop": 186, - "lpf_start": 37, - "lpf_stop": 73, - "res_type": "polyphase" - }, - "2": { - "sr": 11025, - "hl": 128, - "n_fft": 512, - "crop_start": 4, - "crop_stop": 185, - "hpf_start": 36, - "hpf_stop": 18, - "lpf_start": 93, - "lpf_stop": 185, - "res_type": "polyphase" - }, - "3": { - "sr": 22050, - "hl": 256, - "n_fft": 512, - "crop_start": 46, - "crop_stop": 186, - "hpf_start": 93, - "hpf_stop": 46, - "lpf_start": 164, - "lpf_stop": 186, - "res_type": "polyphase" - }, - "4": { - "sr": 44100, - "hl": 512, - "n_fft": 768, - "crop_start": 121, - "crop_stop": 382, - "hpf_start": 138, - "hpf_stop": 123, - "res_type": "sinc_medium" - } - }, - "sr": 44100, - "pre_filter_start": 740, - "pre_filter_stop": 768 -} \ No newline at end of file diff --git a/lib/uvr5_pack/lib_v5/modelparams/4band_44100_reverse.json b/lib/uvr5_pack/lib_v5/modelparams/4band_44100_reverse.json deleted file mode 100644 index 7a07d5541..000000000 --- a/lib/uvr5_pack/lib_v5/modelparams/4band_44100_reverse.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "reverse": true, - "bins": 768, - "unstable_bins": 7, - "reduction_bins": 668, - "band": { - "1": { - "sr": 11025, - "hl": 128, - "n_fft": 1024, - "crop_start": 0, - "crop_stop": 186, - "lpf_start": 37, - "lpf_stop": 73, - "res_type": "polyphase" - }, - "2": { - "sr": 11025, - "hl": 128, - "n_fft": 512, - "crop_start": 4, - "crop_stop": 185, - "hpf_start": 36, - "hpf_stop": 18, - "lpf_start": 93, - "lpf_stop": 185, - "res_type": "polyphase" - }, - "3": { - "sr": 22050, - "hl": 256, - "n_fft": 512, - "crop_start": 46, - "crop_stop": 186, - "hpf_start": 93, - "hpf_stop": 46, - "lpf_start": 164, - "lpf_stop": 186, - "res_type": "polyphase" - }, - "4": { - "sr": 44100, - "hl": 512, - "n_fft": 768, - "crop_start": 121, - "crop_stop": 382, - "hpf_start": 138, - "hpf_stop": 123, - "res_type": "sinc_medium" - } - }, - "sr": 44100, - "pre_filter_start": 740, - "pre_filter_stop": 768 -} \ No newline at end of file diff --git a/lib/uvr5_pack/lib_v5/modelparams/4band_44100_sw.json b/lib/uvr5_pack/lib_v5/modelparams/4band_44100_sw.json deleted file mode 100644 index ba0cf3421..000000000 --- a/lib/uvr5_pack/lib_v5/modelparams/4band_44100_sw.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "stereo_w": true, - "bins": 768, - "unstable_bins": 7, - "reduction_bins": 668, - "band": { - "1": { - "sr": 11025, - "hl": 128, - "n_fft": 1024, - "crop_start": 0, - 
"crop_stop": 186, - "lpf_start": 37, - "lpf_stop": 73, - "res_type": "polyphase" - }, - "2": { - "sr": 11025, - "hl": 128, - "n_fft": 512, - "crop_start": 4, - "crop_stop": 185, - "hpf_start": 36, - "hpf_stop": 18, - "lpf_start": 93, - "lpf_stop": 185, - "res_type": "polyphase" - }, - "3": { - "sr": 22050, - "hl": 256, - "n_fft": 512, - "crop_start": 46, - "crop_stop": 186, - "hpf_start": 93, - "hpf_stop": 46, - "lpf_start": 164, - "lpf_stop": 186, - "res_type": "polyphase" - }, - "4": { - "sr": 44100, - "hl": 512, - "n_fft": 768, - "crop_start": 121, - "crop_stop": 382, - "hpf_start": 138, - "hpf_stop": 123, - "res_type": "sinc_medium" - } - }, - "sr": 44100, - "pre_filter_start": 740, - "pre_filter_stop": 768 -} \ No newline at end of file diff --git a/lib/uvr5_pack/lib_v5/modelparams/4band_v2.json b/lib/uvr5_pack/lib_v5/modelparams/4band_v2.json deleted file mode 100644 index 33281a0cf..000000000 --- a/lib/uvr5_pack/lib_v5/modelparams/4band_v2.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "bins": 672, - "unstable_bins": 8, - "reduction_bins": 637, - "band": { - "1": { - "sr": 7350, - "hl": 80, - "n_fft": 640, - "crop_start": 0, - "crop_stop": 85, - "lpf_start": 25, - "lpf_stop": 53, - "res_type": "polyphase" - }, - "2": { - "sr": 7350, - "hl": 80, - "n_fft": 320, - "crop_start": 4, - "crop_stop": 87, - "hpf_start": 25, - "hpf_stop": 12, - "lpf_start": 31, - "lpf_stop": 62, - "res_type": "polyphase" - }, - "3": { - "sr": 14700, - "hl": 160, - "n_fft": 512, - "crop_start": 17, - "crop_stop": 216, - "hpf_start": 48, - "hpf_stop": 24, - "lpf_start": 139, - "lpf_stop": 210, - "res_type": "polyphase" - }, - "4": { - "sr": 44100, - "hl": 480, - "n_fft": 960, - "crop_start": 78, - "crop_stop": 383, - "hpf_start": 130, - "hpf_stop": 86, - "res_type": "kaiser_fast" - } - }, - "sr": 44100, - "pre_filter_start": 668, - "pre_filter_stop": 672 -} \ No newline at end of file diff --git a/lib/uvr5_pack/lib_v5/modelparams/4band_v2_sn.json b/lib/uvr5_pack/lib_v5/modelparams/4band_v2_sn.json deleted file mode 100644 index 2e5c770fe..000000000 --- a/lib/uvr5_pack/lib_v5/modelparams/4band_v2_sn.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "bins": 672, - "unstable_bins": 8, - "reduction_bins": 637, - "band": { - "1": { - "sr": 7350, - "hl": 80, - "n_fft": 640, - "crop_start": 0, - "crop_stop": 85, - "lpf_start": 25, - "lpf_stop": 53, - "res_type": "polyphase" - }, - "2": { - "sr": 7350, - "hl": 80, - "n_fft": 320, - "crop_start": 4, - "crop_stop": 87, - "hpf_start": 25, - "hpf_stop": 12, - "lpf_start": 31, - "lpf_stop": 62, - "res_type": "polyphase" - }, - "3": { - "sr": 14700, - "hl": 160, - "n_fft": 512, - "crop_start": 17, - "crop_stop": 216, - "hpf_start": 48, - "hpf_stop": 24, - "lpf_start": 139, - "lpf_stop": 210, - "res_type": "polyphase" - }, - "4": { - "sr": 44100, - "hl": 480, - "n_fft": 960, - "crop_start": 78, - "crop_stop": 383, - "hpf_start": 130, - "hpf_stop": 86, - "convert_channels": "stereo_n", - "res_type": "kaiser_fast" - } - }, - "sr": 44100, - "pre_filter_start": 668, - "pre_filter_stop": 672 -} \ No newline at end of file diff --git a/lib/uvr5_pack/lib_v5/modelparams/4band_v3.json b/lib/uvr5_pack/lib_v5/modelparams/4band_v3.json deleted file mode 100644 index edb908b88..000000000 --- a/lib/uvr5_pack/lib_v5/modelparams/4band_v3.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "bins": 672, - "unstable_bins": 8, - "reduction_bins": 530, - "band": { - "1": { - "sr": 7350, - "hl": 80, - "n_fft": 640, - "crop_start": 0, - "crop_stop": 85, - "lpf_start": 25, - "lpf_stop": 53, - "res_type": "polyphase" - }, - 
"2": { - "sr": 7350, - "hl": 80, - "n_fft": 320, - "crop_start": 4, - "crop_stop": 87, - "hpf_start": 25, - "hpf_stop": 12, - "lpf_start": 31, - "lpf_stop": 62, - "res_type": "polyphase" - }, - "3": { - "sr": 14700, - "hl": 160, - "n_fft": 512, - "crop_start": 17, - "crop_stop": 216, - "hpf_start": 48, - "hpf_stop": 24, - "lpf_start": 139, - "lpf_stop": 210, - "res_type": "polyphase" - }, - "4": { - "sr": 44100, - "hl": 480, - "n_fft": 960, - "crop_start": 78, - "crop_stop": 383, - "hpf_start": 130, - "hpf_stop": 86, - "res_type": "kaiser_fast" - } - }, - "sr": 44100, - "pre_filter_start": 668, - "pre_filter_stop": 672 -} \ No newline at end of file diff --git a/lib/uvr5_pack/lib_v5/modelparams/ensemble.json b/lib/uvr5_pack/lib_v5/modelparams/ensemble.json deleted file mode 100644 index ee69beb46..000000000 --- a/lib/uvr5_pack/lib_v5/modelparams/ensemble.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "mid_side_b2": true, - "bins": 1280, - "unstable_bins": 7, - "reduction_bins": 565, - "band": { - "1": { - "sr": 11025, - "hl": 108, - "n_fft": 2048, - "crop_start": 0, - "crop_stop": 374, - "lpf_start": 92, - "lpf_stop": 186, - "res_type": "polyphase" - }, - "2": { - "sr": 22050, - "hl": 216, - "n_fft": 1536, - "crop_start": 0, - "crop_stop": 424, - "hpf_start": 68, - "hpf_stop": 34, - "lpf_start": 348, - "lpf_stop": 418, - "res_type": "polyphase" - }, - "3": { - "sr": 44100, - "hl": 432, - "n_fft": 1280, - "crop_start": 132, - "crop_stop": 614, - "hpf_start": 172, - "hpf_stop": 144, - "res_type": "polyphase" - } - }, - "sr": 44100, - "pre_filter_start": 1280, - "pre_filter_stop": 1280 -} \ No newline at end of file diff --git a/lib/uvr5_pack/lib_v5/nets.py b/lib/uvr5_pack/lib_v5/nets.py deleted file mode 100644 index db4c5e339..000000000 --- a/lib/uvr5_pack/lib_v5/nets.py +++ /dev/null @@ -1,123 +0,0 @@ -import torch -from torch import nn -import torch.nn.functional as F - -import layers -from . 
import spec_utils - - -class BaseASPPNet(nn.Module): - def __init__(self, nin, ch, dilations=(4, 8, 16)): - super(BaseASPPNet, self).__init__() - self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) - self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) - self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) - self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) - - self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) - - self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) - self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) - self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) - self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) - - def __call__(self, x): - h, e1 = self.enc1(x) - h, e2 = self.enc2(h) - h, e3 = self.enc3(h) - h, e4 = self.enc4(h) - - h = self.aspp(h) - - h = self.dec4(h, e4) - h = self.dec3(h, e3) - h = self.dec2(h, e2) - h = self.dec1(h, e1) - - return h - - -class CascadedASPPNet(nn.Module): - def __init__(self, n_fft): - super(CascadedASPPNet, self).__init__() - self.stg1_low_band_net = BaseASPPNet(2, 16) - self.stg1_high_band_net = BaseASPPNet(2, 16) - - self.stg2_bridge = layers.Conv2DBNActiv(18, 8, 1, 1, 0) - self.stg2_full_band_net = BaseASPPNet(8, 16) - - self.stg3_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0) - self.stg3_full_band_net = BaseASPPNet(16, 32) - - self.out = nn.Conv2d(32, 2, 1, bias=False) - self.aux1_out = nn.Conv2d(16, 2, 1, bias=False) - self.aux2_out = nn.Conv2d(16, 2, 1, bias=False) - - self.max_bin = n_fft // 2 - self.output_bin = n_fft // 2 + 1 - - self.offset = 128 - - def forward(self, x, aggressiveness=None): - mix = x.detach() - x = x.clone() - - x = x[:, :, : self.max_bin] - - bandw = x.size()[2] // 2 - aux1 = torch.cat( - [ - self.stg1_low_band_net(x[:, :, :bandw]), - self.stg1_high_band_net(x[:, :, bandw:]), - ], - dim=2, - ) - - h = torch.cat([x, aux1], dim=1) - aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) - - h = torch.cat([x, aux1, aux2], dim=1) - h = self.stg3_full_band_net(self.stg3_bridge(h)) - - mask = torch.sigmoid(self.out(h)) - mask = F.pad( - input=mask, - pad=(0, 0, 0, self.output_bin - mask.size()[2]), - mode="replicate", - ) - - if self.training: - aux1 = torch.sigmoid(self.aux1_out(aux1)) - aux1 = F.pad( - input=aux1, - pad=(0, 0, 0, self.output_bin - aux1.size()[2]), - mode="replicate", - ) - aux2 = torch.sigmoid(self.aux2_out(aux2)) - aux2 = F.pad( - input=aux2, - pad=(0, 0, 0, self.output_bin - aux2.size()[2]), - mode="replicate", - ) - return mask * mix, aux1 * mix, aux2 * mix - else: - if aggressiveness: - mask[:, :, : aggressiveness["split_bin"]] = torch.pow( - mask[:, :, : aggressiveness["split_bin"]], - 1 + aggressiveness["value"] / 3, - ) - mask[:, :, aggressiveness["split_bin"] :] = torch.pow( - mask[:, :, aggressiveness["split_bin"] :], - 1 + aggressiveness["value"], - ) - - return mask * mix - - def predict(self, x_mag, aggressiveness=None): - h = self.forward(x_mag, aggressiveness) - - if self.offset > 0: - h = h[:, :, :, self.offset : -self.offset] - assert h.size()[3] > 0 - - return h diff --git a/lib/uvr5_pack/lib_v5/nets_123812KB.py b/lib/uvr5_pack/lib_v5/nets_123812KB.py deleted file mode 100644 index becbfae85..000000000 --- a/lib/uvr5_pack/lib_v5/nets_123812KB.py +++ /dev/null @@ -1,122 +0,0 @@ -import torch -from torch import nn -import torch.nn.functional as F - -from . 
import layers_123821KB as layers - - -class BaseASPPNet(nn.Module): - def __init__(self, nin, ch, dilations=(4, 8, 16)): - super(BaseASPPNet, self).__init__() - self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) - self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) - self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) - self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) - - self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) - - self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) - self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) - self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) - self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) - - def __call__(self, x): - h, e1 = self.enc1(x) - h, e2 = self.enc2(h) - h, e3 = self.enc3(h) - h, e4 = self.enc4(h) - - h = self.aspp(h) - - h = self.dec4(h, e4) - h = self.dec3(h, e3) - h = self.dec2(h, e2) - h = self.dec1(h, e1) - - return h - - -class CascadedASPPNet(nn.Module): - def __init__(self, n_fft): - super(CascadedASPPNet, self).__init__() - self.stg1_low_band_net = BaseASPPNet(2, 32) - self.stg1_high_band_net = BaseASPPNet(2, 32) - - self.stg2_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0) - self.stg2_full_band_net = BaseASPPNet(16, 32) - - self.stg3_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0) - self.stg3_full_band_net = BaseASPPNet(32, 64) - - self.out = nn.Conv2d(64, 2, 1, bias=False) - self.aux1_out = nn.Conv2d(32, 2, 1, bias=False) - self.aux2_out = nn.Conv2d(32, 2, 1, bias=False) - - self.max_bin = n_fft // 2 - self.output_bin = n_fft // 2 + 1 - - self.offset = 128 - - def forward(self, x, aggressiveness=None): - mix = x.detach() - x = x.clone() - - x = x[:, :, : self.max_bin] - - bandw = x.size()[2] // 2 - aux1 = torch.cat( - [ - self.stg1_low_band_net(x[:, :, :bandw]), - self.stg1_high_band_net(x[:, :, bandw:]), - ], - dim=2, - ) - - h = torch.cat([x, aux1], dim=1) - aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) - - h = torch.cat([x, aux1, aux2], dim=1) - h = self.stg3_full_band_net(self.stg3_bridge(h)) - - mask = torch.sigmoid(self.out(h)) - mask = F.pad( - input=mask, - pad=(0, 0, 0, self.output_bin - mask.size()[2]), - mode="replicate", - ) - - if self.training: - aux1 = torch.sigmoid(self.aux1_out(aux1)) - aux1 = F.pad( - input=aux1, - pad=(0, 0, 0, self.output_bin - aux1.size()[2]), - mode="replicate", - ) - aux2 = torch.sigmoid(self.aux2_out(aux2)) - aux2 = F.pad( - input=aux2, - pad=(0, 0, 0, self.output_bin - aux2.size()[2]), - mode="replicate", - ) - return mask * mix, aux1 * mix, aux2 * mix - else: - if aggressiveness: - mask[:, :, : aggressiveness["split_bin"]] = torch.pow( - mask[:, :, : aggressiveness["split_bin"]], - 1 + aggressiveness["value"] / 3, - ) - mask[:, :, aggressiveness["split_bin"] :] = torch.pow( - mask[:, :, aggressiveness["split_bin"] :], - 1 + aggressiveness["value"], - ) - - return mask * mix - - def predict(self, x_mag, aggressiveness=None): - h = self.forward(x_mag, aggressiveness) - - if self.offset > 0: - h = h[:, :, :, self.offset : -self.offset] - assert h.size()[3] > 0 - - return h diff --git a/lib/uvr5_pack/lib_v5/nets_123821KB.py b/lib/uvr5_pack/lib_v5/nets_123821KB.py deleted file mode 100644 index becbfae85..000000000 --- a/lib/uvr5_pack/lib_v5/nets_123821KB.py +++ /dev/null @@ -1,122 +0,0 @@ -import torch -from torch import nn -import torch.nn.functional as F - -from . 
import layers_123821KB as layers - - -class BaseASPPNet(nn.Module): - def __init__(self, nin, ch, dilations=(4, 8, 16)): - super(BaseASPPNet, self).__init__() - self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) - self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) - self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) - self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) - - self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) - - self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) - self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) - self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) - self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) - - def __call__(self, x): - h, e1 = self.enc1(x) - h, e2 = self.enc2(h) - h, e3 = self.enc3(h) - h, e4 = self.enc4(h) - - h = self.aspp(h) - - h = self.dec4(h, e4) - h = self.dec3(h, e3) - h = self.dec2(h, e2) - h = self.dec1(h, e1) - - return h - - -class CascadedASPPNet(nn.Module): - def __init__(self, n_fft): - super(CascadedASPPNet, self).__init__() - self.stg1_low_band_net = BaseASPPNet(2, 32) - self.stg1_high_band_net = BaseASPPNet(2, 32) - - self.stg2_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0) - self.stg2_full_band_net = BaseASPPNet(16, 32) - - self.stg3_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0) - self.stg3_full_band_net = BaseASPPNet(32, 64) - - self.out = nn.Conv2d(64, 2, 1, bias=False) - self.aux1_out = nn.Conv2d(32, 2, 1, bias=False) - self.aux2_out = nn.Conv2d(32, 2, 1, bias=False) - - self.max_bin = n_fft // 2 - self.output_bin = n_fft // 2 + 1 - - self.offset = 128 - - def forward(self, x, aggressiveness=None): - mix = x.detach() - x = x.clone() - - x = x[:, :, : self.max_bin] - - bandw = x.size()[2] // 2 - aux1 = torch.cat( - [ - self.stg1_low_band_net(x[:, :, :bandw]), - self.stg1_high_band_net(x[:, :, bandw:]), - ], - dim=2, - ) - - h = torch.cat([x, aux1], dim=1) - aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) - - h = torch.cat([x, aux1, aux2], dim=1) - h = self.stg3_full_band_net(self.stg3_bridge(h)) - - mask = torch.sigmoid(self.out(h)) - mask = F.pad( - input=mask, - pad=(0, 0, 0, self.output_bin - mask.size()[2]), - mode="replicate", - ) - - if self.training: - aux1 = torch.sigmoid(self.aux1_out(aux1)) - aux1 = F.pad( - input=aux1, - pad=(0, 0, 0, self.output_bin - aux1.size()[2]), - mode="replicate", - ) - aux2 = torch.sigmoid(self.aux2_out(aux2)) - aux2 = F.pad( - input=aux2, - pad=(0, 0, 0, self.output_bin - aux2.size()[2]), - mode="replicate", - ) - return mask * mix, aux1 * mix, aux2 * mix - else: - if aggressiveness: - mask[:, :, : aggressiveness["split_bin"]] = torch.pow( - mask[:, :, : aggressiveness["split_bin"]], - 1 + aggressiveness["value"] / 3, - ) - mask[:, :, aggressiveness["split_bin"] :] = torch.pow( - mask[:, :, aggressiveness["split_bin"] :], - 1 + aggressiveness["value"], - ) - - return mask * mix - - def predict(self, x_mag, aggressiveness=None): - h = self.forward(x_mag, aggressiveness) - - if self.offset > 0: - h = h[:, :, :, self.offset : -self.offset] - assert h.size()[3] > 0 - - return h diff --git a/lib/uvr5_pack/lib_v5/nets_33966KB.py b/lib/uvr5_pack/lib_v5/nets_33966KB.py deleted file mode 100644 index b8986f968..000000000 --- a/lib/uvr5_pack/lib_v5/nets_33966KB.py +++ /dev/null @@ -1,122 +0,0 @@ -import torch -from torch import nn -import torch.nn.functional as F - -from . 
import layers_33966KB as layers - - -class BaseASPPNet(nn.Module): - def __init__(self, nin, ch, dilations=(4, 8, 16, 32)): - super(BaseASPPNet, self).__init__() - self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) - self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) - self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) - self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) - - self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) - - self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) - self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) - self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) - self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) - - def __call__(self, x): - h, e1 = self.enc1(x) - h, e2 = self.enc2(h) - h, e3 = self.enc3(h) - h, e4 = self.enc4(h) - - h = self.aspp(h) - - h = self.dec4(h, e4) - h = self.dec3(h, e3) - h = self.dec2(h, e2) - h = self.dec1(h, e1) - - return h - - -class CascadedASPPNet(nn.Module): - def __init__(self, n_fft): - super(CascadedASPPNet, self).__init__() - self.stg1_low_band_net = BaseASPPNet(2, 16) - self.stg1_high_band_net = BaseASPPNet(2, 16) - - self.stg2_bridge = layers.Conv2DBNActiv(18, 8, 1, 1, 0) - self.stg2_full_band_net = BaseASPPNet(8, 16) - - self.stg3_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0) - self.stg3_full_band_net = BaseASPPNet(16, 32) - - self.out = nn.Conv2d(32, 2, 1, bias=False) - self.aux1_out = nn.Conv2d(16, 2, 1, bias=False) - self.aux2_out = nn.Conv2d(16, 2, 1, bias=False) - - self.max_bin = n_fft // 2 - self.output_bin = n_fft // 2 + 1 - - self.offset = 128 - - def forward(self, x, aggressiveness=None): - mix = x.detach() - x = x.clone() - - x = x[:, :, : self.max_bin] - - bandw = x.size()[2] // 2 - aux1 = torch.cat( - [ - self.stg1_low_band_net(x[:, :, :bandw]), - self.stg1_high_band_net(x[:, :, bandw:]), - ], - dim=2, - ) - - h = torch.cat([x, aux1], dim=1) - aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) - - h = torch.cat([x, aux1, aux2], dim=1) - h = self.stg3_full_band_net(self.stg3_bridge(h)) - - mask = torch.sigmoid(self.out(h)) - mask = F.pad( - input=mask, - pad=(0, 0, 0, self.output_bin - mask.size()[2]), - mode="replicate", - ) - - if self.training: - aux1 = torch.sigmoid(self.aux1_out(aux1)) - aux1 = F.pad( - input=aux1, - pad=(0, 0, 0, self.output_bin - aux1.size()[2]), - mode="replicate", - ) - aux2 = torch.sigmoid(self.aux2_out(aux2)) - aux2 = F.pad( - input=aux2, - pad=(0, 0, 0, self.output_bin - aux2.size()[2]), - mode="replicate", - ) - return mask * mix, aux1 * mix, aux2 * mix - else: - if aggressiveness: - mask[:, :, : aggressiveness["split_bin"]] = torch.pow( - mask[:, :, : aggressiveness["split_bin"]], - 1 + aggressiveness["value"] / 3, - ) - mask[:, :, aggressiveness["split_bin"] :] = torch.pow( - mask[:, :, aggressiveness["split_bin"] :], - 1 + aggressiveness["value"], - ) - - return mask * mix - - def predict(self, x_mag, aggressiveness=None): - h = self.forward(x_mag, aggressiveness) - - if self.offset > 0: - h = h[:, :, :, self.offset : -self.offset] - assert h.size()[3] > 0 - - return h diff --git a/lib/uvr5_pack/lib_v5/nets_537227KB.py b/lib/uvr5_pack/lib_v5/nets_537227KB.py deleted file mode 100644 index a1bb530e0..000000000 --- a/lib/uvr5_pack/lib_v5/nets_537227KB.py +++ /dev/null @@ -1,123 +0,0 @@ -import torch -import numpy as np -from torch import nn -import torch.nn.functional as F - -from . 
import layers_537238KB as layers - - -class BaseASPPNet(nn.Module): - def __init__(self, nin, ch, dilations=(4, 8, 16)): - super(BaseASPPNet, self).__init__() - self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) - self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) - self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) - self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) - - self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) - - self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) - self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) - self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) - self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) - - def __call__(self, x): - h, e1 = self.enc1(x) - h, e2 = self.enc2(h) - h, e3 = self.enc3(h) - h, e4 = self.enc4(h) - - h = self.aspp(h) - - h = self.dec4(h, e4) - h = self.dec3(h, e3) - h = self.dec2(h, e2) - h = self.dec1(h, e1) - - return h - - -class CascadedASPPNet(nn.Module): - def __init__(self, n_fft): - super(CascadedASPPNet, self).__init__() - self.stg1_low_band_net = BaseASPPNet(2, 64) - self.stg1_high_band_net = BaseASPPNet(2, 64) - - self.stg2_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0) - self.stg2_full_band_net = BaseASPPNet(32, 64) - - self.stg3_bridge = layers.Conv2DBNActiv(130, 64, 1, 1, 0) - self.stg3_full_band_net = BaseASPPNet(64, 128) - - self.out = nn.Conv2d(128, 2, 1, bias=False) - self.aux1_out = nn.Conv2d(64, 2, 1, bias=False) - self.aux2_out = nn.Conv2d(64, 2, 1, bias=False) - - self.max_bin = n_fft // 2 - self.output_bin = n_fft // 2 + 1 - - self.offset = 128 - - def forward(self, x, aggressiveness=None): - mix = x.detach() - x = x.clone() - - x = x[:, :, : self.max_bin] - - bandw = x.size()[2] // 2 - aux1 = torch.cat( - [ - self.stg1_low_band_net(x[:, :, :bandw]), - self.stg1_high_band_net(x[:, :, bandw:]), - ], - dim=2, - ) - - h = torch.cat([x, aux1], dim=1) - aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) - - h = torch.cat([x, aux1, aux2], dim=1) - h = self.stg3_full_band_net(self.stg3_bridge(h)) - - mask = torch.sigmoid(self.out(h)) - mask = F.pad( - input=mask, - pad=(0, 0, 0, self.output_bin - mask.size()[2]), - mode="replicate", - ) - - if self.training: - aux1 = torch.sigmoid(self.aux1_out(aux1)) - aux1 = F.pad( - input=aux1, - pad=(0, 0, 0, self.output_bin - aux1.size()[2]), - mode="replicate", - ) - aux2 = torch.sigmoid(self.aux2_out(aux2)) - aux2 = F.pad( - input=aux2, - pad=(0, 0, 0, self.output_bin - aux2.size()[2]), - mode="replicate", - ) - return mask * mix, aux1 * mix, aux2 * mix - else: - if aggressiveness: - mask[:, :, : aggressiveness["split_bin"]] = torch.pow( - mask[:, :, : aggressiveness["split_bin"]], - 1 + aggressiveness["value"] / 3, - ) - mask[:, :, aggressiveness["split_bin"] :] = torch.pow( - mask[:, :, aggressiveness["split_bin"] :], - 1 + aggressiveness["value"], - ) - - return mask * mix - - def predict(self, x_mag, aggressiveness=None): - h = self.forward(x_mag, aggressiveness) - - if self.offset > 0: - h = h[:, :, :, self.offset : -self.offset] - assert h.size()[3] > 0 - - return h diff --git a/lib/uvr5_pack/lib_v5/nets_537238KB.py b/lib/uvr5_pack/lib_v5/nets_537238KB.py deleted file mode 100644 index a1bb530e0..000000000 --- a/lib/uvr5_pack/lib_v5/nets_537238KB.py +++ /dev/null @@ -1,123 +0,0 @@ -import torch -import numpy as np -from torch import nn -import torch.nn.functional as F - -from . 
import layers_537238KB as layers - - -class BaseASPPNet(nn.Module): - def __init__(self, nin, ch, dilations=(4, 8, 16)): - super(BaseASPPNet, self).__init__() - self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) - self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) - self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) - self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) - - self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) - - self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) - self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) - self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) - self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) - - def __call__(self, x): - h, e1 = self.enc1(x) - h, e2 = self.enc2(h) - h, e3 = self.enc3(h) - h, e4 = self.enc4(h) - - h = self.aspp(h) - - h = self.dec4(h, e4) - h = self.dec3(h, e3) - h = self.dec2(h, e2) - h = self.dec1(h, e1) - - return h - - -class CascadedASPPNet(nn.Module): - def __init__(self, n_fft): - super(CascadedASPPNet, self).__init__() - self.stg1_low_band_net = BaseASPPNet(2, 64) - self.stg1_high_band_net = BaseASPPNet(2, 64) - - self.stg2_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0) - self.stg2_full_band_net = BaseASPPNet(32, 64) - - self.stg3_bridge = layers.Conv2DBNActiv(130, 64, 1, 1, 0) - self.stg3_full_band_net = BaseASPPNet(64, 128) - - self.out = nn.Conv2d(128, 2, 1, bias=False) - self.aux1_out = nn.Conv2d(64, 2, 1, bias=False) - self.aux2_out = nn.Conv2d(64, 2, 1, bias=False) - - self.max_bin = n_fft // 2 - self.output_bin = n_fft // 2 + 1 - - self.offset = 128 - - def forward(self, x, aggressiveness=None): - mix = x.detach() - x = x.clone() - - x = x[:, :, : self.max_bin] - - bandw = x.size()[2] // 2 - aux1 = torch.cat( - [ - self.stg1_low_band_net(x[:, :, :bandw]), - self.stg1_high_band_net(x[:, :, bandw:]), - ], - dim=2, - ) - - h = torch.cat([x, aux1], dim=1) - aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) - - h = torch.cat([x, aux1, aux2], dim=1) - h = self.stg3_full_band_net(self.stg3_bridge(h)) - - mask = torch.sigmoid(self.out(h)) - mask = F.pad( - input=mask, - pad=(0, 0, 0, self.output_bin - mask.size()[2]), - mode="replicate", - ) - - if self.training: - aux1 = torch.sigmoid(self.aux1_out(aux1)) - aux1 = F.pad( - input=aux1, - pad=(0, 0, 0, self.output_bin - aux1.size()[2]), - mode="replicate", - ) - aux2 = torch.sigmoid(self.aux2_out(aux2)) - aux2 = F.pad( - input=aux2, - pad=(0, 0, 0, self.output_bin - aux2.size()[2]), - mode="replicate", - ) - return mask * mix, aux1 * mix, aux2 * mix - else: - if aggressiveness: - mask[:, :, : aggressiveness["split_bin"]] = torch.pow( - mask[:, :, : aggressiveness["split_bin"]], - 1 + aggressiveness["value"] / 3, - ) - mask[:, :, aggressiveness["split_bin"] :] = torch.pow( - mask[:, :, aggressiveness["split_bin"] :], - 1 + aggressiveness["value"], - ) - - return mask * mix - - def predict(self, x_mag, aggressiveness=None): - h = self.forward(x_mag, aggressiveness) - - if self.offset > 0: - h = h[:, :, :, self.offset : -self.offset] - assert h.size()[3] > 0 - - return h diff --git a/lib/uvr5_pack/lib_v5/nets_61968KB.py b/lib/uvr5_pack/lib_v5/nets_61968KB.py deleted file mode 100644 index becbfae85..000000000 --- a/lib/uvr5_pack/lib_v5/nets_61968KB.py +++ /dev/null @@ -1,122 +0,0 @@ -import torch -from torch import nn -import torch.nn.functional as F - -from . 
import layers_123821KB as layers - - -class BaseASPPNet(nn.Module): - def __init__(self, nin, ch, dilations=(4, 8, 16)): - super(BaseASPPNet, self).__init__() - self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) - self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) - self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) - self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) - - self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) - - self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) - self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) - self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) - self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) - - def __call__(self, x): - h, e1 = self.enc1(x) - h, e2 = self.enc2(h) - h, e3 = self.enc3(h) - h, e4 = self.enc4(h) - - h = self.aspp(h) - - h = self.dec4(h, e4) - h = self.dec3(h, e3) - h = self.dec2(h, e2) - h = self.dec1(h, e1) - - return h - - -class CascadedASPPNet(nn.Module): - def __init__(self, n_fft): - super(CascadedASPPNet, self).__init__() - self.stg1_low_band_net = BaseASPPNet(2, 32) - self.stg1_high_band_net = BaseASPPNet(2, 32) - - self.stg2_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0) - self.stg2_full_band_net = BaseASPPNet(16, 32) - - self.stg3_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0) - self.stg3_full_band_net = BaseASPPNet(32, 64) - - self.out = nn.Conv2d(64, 2, 1, bias=False) - self.aux1_out = nn.Conv2d(32, 2, 1, bias=False) - self.aux2_out = nn.Conv2d(32, 2, 1, bias=False) - - self.max_bin = n_fft // 2 - self.output_bin = n_fft // 2 + 1 - - self.offset = 128 - - def forward(self, x, aggressiveness=None): - mix = x.detach() - x = x.clone() - - x = x[:, :, : self.max_bin] - - bandw = x.size()[2] // 2 - aux1 = torch.cat( - [ - self.stg1_low_band_net(x[:, :, :bandw]), - self.stg1_high_band_net(x[:, :, bandw:]), - ], - dim=2, - ) - - h = torch.cat([x, aux1], dim=1) - aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) - - h = torch.cat([x, aux1, aux2], dim=1) - h = self.stg3_full_band_net(self.stg3_bridge(h)) - - mask = torch.sigmoid(self.out(h)) - mask = F.pad( - input=mask, - pad=(0, 0, 0, self.output_bin - mask.size()[2]), - mode="replicate", - ) - - if self.training: - aux1 = torch.sigmoid(self.aux1_out(aux1)) - aux1 = F.pad( - input=aux1, - pad=(0, 0, 0, self.output_bin - aux1.size()[2]), - mode="replicate", - ) - aux2 = torch.sigmoid(self.aux2_out(aux2)) - aux2 = F.pad( - input=aux2, - pad=(0, 0, 0, self.output_bin - aux2.size()[2]), - mode="replicate", - ) - return mask * mix, aux1 * mix, aux2 * mix - else: - if aggressiveness: - mask[:, :, : aggressiveness["split_bin"]] = torch.pow( - mask[:, :, : aggressiveness["split_bin"]], - 1 + aggressiveness["value"] / 3, - ) - mask[:, :, aggressiveness["split_bin"] :] = torch.pow( - mask[:, :, aggressiveness["split_bin"] :], - 1 + aggressiveness["value"], - ) - - return mask * mix - - def predict(self, x_mag, aggressiveness=None): - h = self.forward(x_mag, aggressiveness) - - if self.offset > 0: - h = h[:, :, :, self.offset : -self.offset] - assert h.size()[3] > 0 - - return h diff --git a/lib/uvr5_pack/lib_v5/nets_new.py b/lib/uvr5_pack/lib_v5/nets_new.py deleted file mode 100644 index bfaf72e48..000000000 --- a/lib/uvr5_pack/lib_v5/nets_new.py +++ /dev/null @@ -1,132 +0,0 @@ -import torch -from torch import nn -import torch.nn.functional as F -from . 
import layers_new - - -class BaseNet(nn.Module): - def __init__( - self, nin, nout, nin_lstm, nout_lstm, dilations=((4, 2), (8, 4), (12, 6)) - ): - super(BaseNet, self).__init__() - self.enc1 = layers_new.Conv2DBNActiv(nin, nout, 3, 1, 1) - self.enc2 = layers_new.Encoder(nout, nout * 2, 3, 2, 1) - self.enc3 = layers_new.Encoder(nout * 2, nout * 4, 3, 2, 1) - self.enc4 = layers_new.Encoder(nout * 4, nout * 6, 3, 2, 1) - self.enc5 = layers_new.Encoder(nout * 6, nout * 8, 3, 2, 1) - - self.aspp = layers_new.ASPPModule(nout * 8, nout * 8, dilations, dropout=True) - - self.dec4 = layers_new.Decoder(nout * (6 + 8), nout * 6, 3, 1, 1) - self.dec3 = layers_new.Decoder(nout * (4 + 6), nout * 4, 3, 1, 1) - self.dec2 = layers_new.Decoder(nout * (2 + 4), nout * 2, 3, 1, 1) - self.lstm_dec2 = layers_new.LSTMModule(nout * 2, nin_lstm, nout_lstm) - self.dec1 = layers_new.Decoder(nout * (1 + 2) + 1, nout * 1, 3, 1, 1) - - def __call__(self, x): - e1 = self.enc1(x) - e2 = self.enc2(e1) - e3 = self.enc3(e2) - e4 = self.enc4(e3) - e5 = self.enc5(e4) - - h = self.aspp(e5) - - h = self.dec4(h, e4) - h = self.dec3(h, e3) - h = self.dec2(h, e2) - h = torch.cat([h, self.lstm_dec2(h)], dim=1) - h = self.dec1(h, e1) - - return h - - -class CascadedNet(nn.Module): - def __init__(self, n_fft, nout=32, nout_lstm=128): - super(CascadedNet, self).__init__() - - self.max_bin = n_fft // 2 - self.output_bin = n_fft // 2 + 1 - self.nin_lstm = self.max_bin // 2 - self.offset = 64 - - self.stg1_low_band_net = nn.Sequential( - BaseNet(2, nout // 2, self.nin_lstm // 2, nout_lstm), - layers_new.Conv2DBNActiv(nout // 2, nout // 4, 1, 1, 0), - ) - - self.stg1_high_band_net = BaseNet( - 2, nout // 4, self.nin_lstm // 2, nout_lstm // 2 - ) - - self.stg2_low_band_net = nn.Sequential( - BaseNet(nout // 4 + 2, nout, self.nin_lstm // 2, nout_lstm), - layers_new.Conv2DBNActiv(nout, nout // 2, 1, 1, 0), - ) - self.stg2_high_band_net = BaseNet( - nout // 4 + 2, nout // 2, self.nin_lstm // 2, nout_lstm // 2 - ) - - self.stg3_full_band_net = BaseNet( - 3 * nout // 4 + 2, nout, self.nin_lstm, nout_lstm - ) - - self.out = nn.Conv2d(nout, 2, 1, bias=False) - self.aux_out = nn.Conv2d(3 * nout // 4, 2, 1, bias=False) - - def forward(self, x): - x = x[:, :, : self.max_bin] - - bandw = x.size()[2] // 2 - l1_in = x[:, :, :bandw] - h1_in = x[:, :, bandw:] - l1 = self.stg1_low_band_net(l1_in) - h1 = self.stg1_high_band_net(h1_in) - aux1 = torch.cat([l1, h1], dim=2) - - l2_in = torch.cat([l1_in, l1], dim=1) - h2_in = torch.cat([h1_in, h1], dim=1) - l2 = self.stg2_low_band_net(l2_in) - h2 = self.stg2_high_band_net(h2_in) - aux2 = torch.cat([l2, h2], dim=2) - - f3_in = torch.cat([x, aux1, aux2], dim=1) - f3 = self.stg3_full_band_net(f3_in) - - mask = torch.sigmoid(self.out(f3)) - mask = F.pad( - input=mask, - pad=(0, 0, 0, self.output_bin - mask.size()[2]), - mode="replicate", - ) - - if self.training: - aux = torch.cat([aux1, aux2], dim=1) - aux = torch.sigmoid(self.aux_out(aux)) - aux = F.pad( - input=aux, - pad=(0, 0, 0, self.output_bin - aux.size()[2]), - mode="replicate", - ) - return mask, aux - else: - return mask - - def predict_mask(self, x): - mask = self.forward(x) - - if self.offset > 0: - mask = mask[:, :, :, self.offset : -self.offset] - assert mask.size()[3] > 0 - - return mask - - def predict(self, x, aggressiveness=None): - mask = self.forward(x) - pred_mag = x * mask - - if self.offset > 0: - pred_mag = pred_mag[:, :, :, self.offset : -self.offset] - assert pred_mag.size()[3] > 0 - - return pred_mag diff --git 
a/lib/uvr5_pack/lib_v5/spec_utils.py b/lib/uvr5_pack/lib_v5/spec_utils.py deleted file mode 100644 index a3fd46d33..000000000 --- a/lib/uvr5_pack/lib_v5/spec_utils.py +++ /dev/null @@ -1,667 +0,0 @@ -import os, librosa -import numpy as np -import soundfile as sf -from tqdm import tqdm -import json, math, hashlib - - -def crop_center(h1, h2): - h1_shape = h1.size() - h2_shape = h2.size() - - if h1_shape[3] == h2_shape[3]: - return h1 - elif h1_shape[3] < h2_shape[3]: - raise ValueError("h1_shape[3] must be greater than h2_shape[3]") - - # s_freq = (h2_shape[2] - h1_shape[2]) // 2 - # e_freq = s_freq + h1_shape[2] - s_time = (h1_shape[3] - h2_shape[3]) // 2 - e_time = s_time + h2_shape[3] - h1 = h1[:, :, :, s_time:e_time] - - return h1 - - -def wave_to_spectrogram( - wave, hop_length, n_fft, mid_side=False, mid_side_b2=False, reverse=False -): - if reverse: - wave_left = np.flip(np.asfortranarray(wave[0])) - wave_right = np.flip(np.asfortranarray(wave[1])) - elif mid_side: - wave_left = np.asfortranarray(np.add(wave[0], wave[1]) / 2) - wave_right = np.asfortranarray(np.subtract(wave[0], wave[1])) - elif mid_side_b2: - wave_left = np.asfortranarray(np.add(wave[1], wave[0] * 0.5)) - wave_right = np.asfortranarray(np.subtract(wave[0], wave[1] * 0.5)) - else: - wave_left = np.asfortranarray(wave[0]) - wave_right = np.asfortranarray(wave[1]) - - spec_left = librosa.stft(wave_left, n_fft, hop_length=hop_length) - spec_right = librosa.stft(wave_right, n_fft, hop_length=hop_length) - - spec = np.asfortranarray([spec_left, spec_right]) - - return spec - - -def wave_to_spectrogram_mt( - wave, hop_length, n_fft, mid_side=False, mid_side_b2=False, reverse=False -): - import threading - - if reverse: - wave_left = np.flip(np.asfortranarray(wave[0])) - wave_right = np.flip(np.asfortranarray(wave[1])) - elif mid_side: - wave_left = np.asfortranarray(np.add(wave[0], wave[1]) / 2) - wave_right = np.asfortranarray(np.subtract(wave[0], wave[1])) - elif mid_side_b2: - wave_left = np.asfortranarray(np.add(wave[1], wave[0] * 0.5)) - wave_right = np.asfortranarray(np.subtract(wave[0], wave[1] * 0.5)) - else: - wave_left = np.asfortranarray(wave[0]) - wave_right = np.asfortranarray(wave[1]) - - def run_thread(**kwargs): - global spec_left - spec_left = librosa.stft(**kwargs) - - thread = threading.Thread( - target=run_thread, - kwargs={"y": wave_left, "n_fft": n_fft, "hop_length": hop_length}, - ) - thread.start() - spec_right = librosa.stft(wave_right, n_fft, hop_length=hop_length) - thread.join() - - spec = np.asfortranarray([spec_left, spec_right]) - - return spec - - -def combine_spectrograms(specs, mp): - l = min([specs[i].shape[2] for i in specs]) - spec_c = np.zeros(shape=(2, mp.param["bins"] + 1, l), dtype=np.complex64) - offset = 0 - bands_n = len(mp.param["band"]) - - for d in range(1, bands_n + 1): - h = mp.param["band"][d]["crop_stop"] - mp.param["band"][d]["crop_start"] - spec_c[:, offset : offset + h, :l] = specs[d][ - :, mp.param["band"][d]["crop_start"] : mp.param["band"][d]["crop_stop"], :l - ] - offset += h - - if offset > mp.param["bins"]: - raise ValueError("Too much bins") - - # lowpass fiter - if ( - mp.param["pre_filter_start"] > 0 - ): # and mp.param['band'][bands_n]['res_type'] in ['scipy', 'polyphase']: - if bands_n == 1: - spec_c = fft_lp_filter( - spec_c, mp.param["pre_filter_start"], mp.param["pre_filter_stop"] - ) - else: - gp = 1 - for b in range( - mp.param["pre_filter_start"] + 1, mp.param["pre_filter_stop"] - ): - g = math.pow( - 10, -(b - mp.param["pre_filter_start"]) * (3.5 - gp) 
/ 20.0 - ) - gp = g - spec_c[:, b, :] *= g - - return np.asfortranarray(spec_c) - - -def spectrogram_to_image(spec, mode="magnitude"): - if mode == "magnitude": - if np.iscomplexobj(spec): - y = np.abs(spec) - else: - y = spec - y = np.log10(y**2 + 1e-8) - elif mode == "phase": - if np.iscomplexobj(spec): - y = np.angle(spec) - else: - y = spec - - y -= y.min() - y *= 255 / y.max() - img = np.uint8(y) - - if y.ndim == 3: - img = img.transpose(1, 2, 0) - img = np.concatenate([np.max(img, axis=2, keepdims=True), img], axis=2) - - return img - - -def reduce_vocal_aggressively(X, y, softmask): - v = X - y - y_mag_tmp = np.abs(y) - v_mag_tmp = np.abs(v) - - v_mask = v_mag_tmp > y_mag_tmp - y_mag = np.clip(y_mag_tmp - v_mag_tmp * v_mask * softmask, 0, np.inf) - - return y_mag * np.exp(1.0j * np.angle(y)) - - -def mask_silence(mag, ref, thres=0.2, min_range=64, fade_size=32): - if min_range < fade_size * 2: - raise ValueError("min_range must be >= fade_area * 2") - - mag = mag.copy() - - idx = np.where(ref.mean(axis=(0, 1)) < thres)[0] - starts = np.insert(idx[np.where(np.diff(idx) != 1)[0] + 1], 0, idx[0]) - ends = np.append(idx[np.where(np.diff(idx) != 1)[0]], idx[-1]) - uninformative = np.where(ends - starts > min_range)[0] - if len(uninformative) > 0: - starts = starts[uninformative] - ends = ends[uninformative] - old_e = None - for s, e in zip(starts, ends): - if old_e is not None and s - old_e < fade_size: - s = old_e - fade_size * 2 - - if s != 0: - weight = np.linspace(0, 1, fade_size) - mag[:, :, s : s + fade_size] += weight * ref[:, :, s : s + fade_size] - else: - s -= fade_size - - if e != mag.shape[2]: - weight = np.linspace(1, 0, fade_size) - mag[:, :, e - fade_size : e] += weight * ref[:, :, e - fade_size : e] - else: - e += fade_size - - mag[:, :, s + fade_size : e - fade_size] += ref[ - :, :, s + fade_size : e - fade_size - ] - old_e = e - - return mag - - -def align_wave_head_and_tail(a, b): - l = min([a[0].size, b[0].size]) - - return a[:l, :l], b[:l, :l] - - -def cache_or_load(mix_path, inst_path, mp): - mix_basename = os.path.splitext(os.path.basename(mix_path))[0] - inst_basename = os.path.splitext(os.path.basename(inst_path))[0] - - cache_dir = "mph{}".format( - hashlib.sha1(json.dumps(mp.param, sort_keys=True).encode("utf-8")).hexdigest() - ) - mix_cache_dir = os.path.join("cache", cache_dir) - inst_cache_dir = os.path.join("cache", cache_dir) - - os.makedirs(mix_cache_dir, exist_ok=True) - os.makedirs(inst_cache_dir, exist_ok=True) - - mix_cache_path = os.path.join(mix_cache_dir, mix_basename + ".npy") - inst_cache_path = os.path.join(inst_cache_dir, inst_basename + ".npy") - - if os.path.exists(mix_cache_path) and os.path.exists(inst_cache_path): - X_spec_m = np.load(mix_cache_path) - y_spec_m = np.load(inst_cache_path) - else: - X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {} - - for d in range(len(mp.param["band"]), 0, -1): - bp = mp.param["band"][d] - - if d == len(mp.param["band"]): # high-end band - X_wave[d], _ = librosa.load( - mix_path, bp["sr"], False, dtype=np.float32, res_type=bp["res_type"] - ) - y_wave[d], _ = librosa.load( - inst_path, - bp["sr"], - False, - dtype=np.float32, - res_type=bp["res_type"], - ) - else: # lower bands - X_wave[d] = librosa.resample( - X_wave[d + 1], - mp.param["band"][d + 1]["sr"], - bp["sr"], - res_type=bp["res_type"], - ) - y_wave[d] = librosa.resample( - y_wave[d + 1], - mp.param["band"][d + 1]["sr"], - bp["sr"], - res_type=bp["res_type"], - ) - - X_wave[d], y_wave[d] = align_wave_head_and_tail(X_wave[d], y_wave[d]) - - 
X_spec_s[d] = wave_to_spectrogram( - X_wave[d], - bp["hl"], - bp["n_fft"], - mp.param["mid_side"], - mp.param["mid_side_b2"], - mp.param["reverse"], - ) - y_spec_s[d] = wave_to_spectrogram( - y_wave[d], - bp["hl"], - bp["n_fft"], - mp.param["mid_side"], - mp.param["mid_side_b2"], - mp.param["reverse"], - ) - - del X_wave, y_wave - - X_spec_m = combine_spectrograms(X_spec_s, mp) - y_spec_m = combine_spectrograms(y_spec_s, mp) - - if X_spec_m.shape != y_spec_m.shape: - raise ValueError("The combined spectrograms are different: " + mix_path) - - _, ext = os.path.splitext(mix_path) - - np.save(mix_cache_path, X_spec_m) - np.save(inst_cache_path, y_spec_m) - - return X_spec_m, y_spec_m - - -def spectrogram_to_wave(spec, hop_length, mid_side, mid_side_b2, reverse): - spec_left = np.asfortranarray(spec[0]) - spec_right = np.asfortranarray(spec[1]) - - wave_left = librosa.istft(spec_left, hop_length=hop_length) - wave_right = librosa.istft(spec_right, hop_length=hop_length) - - if reverse: - return np.asfortranarray([np.flip(wave_left), np.flip(wave_right)]) - elif mid_side: - return np.asfortranarray( - [np.add(wave_left, wave_right / 2), np.subtract(wave_left, wave_right / 2)] - ) - elif mid_side_b2: - return np.asfortranarray( - [ - np.add(wave_right / 1.25, 0.4 * wave_left), - np.subtract(wave_left / 1.25, 0.4 * wave_right), - ] - ) - else: - return np.asfortranarray([wave_left, wave_right]) - - -def spectrogram_to_wave_mt(spec, hop_length, mid_side, reverse, mid_side_b2): - import threading - - spec_left = np.asfortranarray(spec[0]) - spec_right = np.asfortranarray(spec[1]) - - def run_thread(**kwargs): - global wave_left - wave_left = librosa.istft(**kwargs) - - thread = threading.Thread( - target=run_thread, kwargs={"stft_matrix": spec_left, "hop_length": hop_length} - ) - thread.start() - wave_right = librosa.istft(spec_right, hop_length=hop_length) - thread.join() - - if reverse: - return np.asfortranarray([np.flip(wave_left), np.flip(wave_right)]) - elif mid_side: - return np.asfortranarray( - [np.add(wave_left, wave_right / 2), np.subtract(wave_left, wave_right / 2)] - ) - elif mid_side_b2: - return np.asfortranarray( - [ - np.add(wave_right / 1.25, 0.4 * wave_left), - np.subtract(wave_left / 1.25, 0.4 * wave_right), - ] - ) - else: - return np.asfortranarray([wave_left, wave_right]) - - -def cmb_spectrogram_to_wave(spec_m, mp, extra_bins_h=None, extra_bins=None): - wave_band = {} - bands_n = len(mp.param["band"]) - offset = 0 - - for d in range(1, bands_n + 1): - bp = mp.param["band"][d] - spec_s = np.ndarray( - shape=(2, bp["n_fft"] // 2 + 1, spec_m.shape[2]), dtype=complex - ) - h = bp["crop_stop"] - bp["crop_start"] - spec_s[:, bp["crop_start"] : bp["crop_stop"], :] = spec_m[ - :, offset : offset + h, : - ] - - offset += h - if d == bands_n: # higher - if extra_bins_h: # if --high_end_process bypass - max_bin = bp["n_fft"] // 2 - spec_s[:, max_bin - extra_bins_h : max_bin, :] = extra_bins[ - :, :extra_bins_h, : - ] - if bp["hpf_start"] > 0: - spec_s = fft_hp_filter(spec_s, bp["hpf_start"], bp["hpf_stop"] - 1) - if bands_n == 1: - wave = spectrogram_to_wave( - spec_s, - bp["hl"], - mp.param["mid_side"], - mp.param["mid_side_b2"], - mp.param["reverse"], - ) - else: - wave = np.add( - wave, - spectrogram_to_wave( - spec_s, - bp["hl"], - mp.param["mid_side"], - mp.param["mid_side_b2"], - mp.param["reverse"], - ), - ) - else: - sr = mp.param["band"][d + 1]["sr"] - if d == 1: # lower - spec_s = fft_lp_filter(spec_s, bp["lpf_start"], bp["lpf_stop"]) - wave = librosa.resample( - 
spectrogram_to_wave( - spec_s, - bp["hl"], - mp.param["mid_side"], - mp.param["mid_side_b2"], - mp.param["reverse"], - ), - bp["sr"], - sr, - res_type="sinc_fastest", - ) - else: # mid - spec_s = fft_hp_filter(spec_s, bp["hpf_start"], bp["hpf_stop"] - 1) - spec_s = fft_lp_filter(spec_s, bp["lpf_start"], bp["lpf_stop"]) - wave2 = np.add( - wave, - spectrogram_to_wave( - spec_s, - bp["hl"], - mp.param["mid_side"], - mp.param["mid_side_b2"], - mp.param["reverse"], - ), - ) - # wave = librosa.core.resample(wave2, bp['sr'], sr, res_type="sinc_fastest") - wave = librosa.core.resample(wave2, bp["sr"], sr, res_type="scipy") - - return wave.T - - -def fft_lp_filter(spec, bin_start, bin_stop): - g = 1.0 - for b in range(bin_start, bin_stop): - g -= 1 / (bin_stop - bin_start) - spec[:, b, :] = g * spec[:, b, :] - - spec[:, bin_stop:, :] *= 0 - - return spec - - -def fft_hp_filter(spec, bin_start, bin_stop): - g = 1.0 - for b in range(bin_start, bin_stop, -1): - g -= 1 / (bin_start - bin_stop) - spec[:, b, :] = g * spec[:, b, :] - - spec[:, 0 : bin_stop + 1, :] *= 0 - - return spec - - -def mirroring(a, spec_m, input_high_end, mp): - if "mirroring" == a: - mirror = np.flip( - np.abs( - spec_m[ - :, - mp.param["pre_filter_start"] - - 10 - - input_high_end.shape[1] : mp.param["pre_filter_start"] - - 10, - :, - ] - ), - 1, - ) - mirror = mirror * np.exp(1.0j * np.angle(input_high_end)) - - return np.where( - np.abs(input_high_end) <= np.abs(mirror), input_high_end, mirror - ) - - if "mirroring2" == a: - mirror = np.flip( - np.abs( - spec_m[ - :, - mp.param["pre_filter_start"] - - 10 - - input_high_end.shape[1] : mp.param["pre_filter_start"] - - 10, - :, - ] - ), - 1, - ) - mi = np.multiply(mirror, input_high_end * 1.7) - - return np.where(np.abs(input_high_end) <= np.abs(mi), input_high_end, mi) - - -def ensembling(a, specs): - for i in range(1, len(specs)): - if i == 1: - spec = specs[0] - - ln = min([spec.shape[2], specs[i].shape[2]]) - spec = spec[:, :, :ln] - specs[i] = specs[i][:, :, :ln] - - if "min_mag" == a: - spec = np.where(np.abs(specs[i]) <= np.abs(spec), specs[i], spec) - if "max_mag" == a: - spec = np.where(np.abs(specs[i]) >= np.abs(spec), specs[i], spec) - - return spec - - -def stft(wave, nfft, hl): - wave_left = np.asfortranarray(wave[0]) - wave_right = np.asfortranarray(wave[1]) - spec_left = librosa.stft(wave_left, nfft, hop_length=hl) - spec_right = librosa.stft(wave_right, nfft, hop_length=hl) - spec = np.asfortranarray([spec_left, spec_right]) - - return spec - - -def istft(spec, hl): - spec_left = np.asfortranarray(spec[0]) - spec_right = np.asfortranarray(spec[1]) - - wave_left = librosa.istft(spec_left, hop_length=hl) - wave_right = librosa.istft(spec_right, hop_length=hl) - wave = np.asfortranarray([wave_left, wave_right]) - - -if __name__ == "__main__": - import cv2 - import sys - import time - import argparse - from model_param_init import ModelParameters - - p = argparse.ArgumentParser() - p.add_argument( - "--algorithm", - "-a", - type=str, - choices=["invert", "invert_p", "min_mag", "max_mag", "deep", "align"], - default="min_mag", - ) - p.add_argument( - "--model_params", - "-m", - type=str, - default=os.path.join("modelparams", "1band_sr44100_hl512.json"), - ) - p.add_argument("--output_name", "-o", type=str, default="output") - p.add_argument("--vocals_only", "-v", action="store_true") - p.add_argument("input", nargs="+") - args = p.parse_args() - - start_time = time.time() - - if args.algorithm.startswith("invert") and len(args.input) != 2: - raise ValueError("There 
should be two input files.") - - if not args.algorithm.startswith("invert") and len(args.input) < 2: - raise ValueError("There must be at least two input files.") - - wave, specs = {}, {} - mp = ModelParameters(args.model_params) - - for i in range(len(args.input)): - spec = {} - - for d in range(len(mp.param["band"]), 0, -1): - bp = mp.param["band"][d] - - if d == len(mp.param["band"]): # high-end band - wave[d], _ = librosa.load( - args.input[i], - bp["sr"], - False, - dtype=np.float32, - res_type=bp["res_type"], - ) - - if len(wave[d].shape) == 1: # mono to stereo - wave[d] = np.array([wave[d], wave[d]]) - else: # lower bands - wave[d] = librosa.resample( - wave[d + 1], - mp.param["band"][d + 1]["sr"], - bp["sr"], - res_type=bp["res_type"], - ) - - spec[d] = wave_to_spectrogram( - wave[d], - bp["hl"], - bp["n_fft"], - mp.param["mid_side"], - mp.param["mid_side_b2"], - mp.param["reverse"], - ) - - specs[i] = combine_spectrograms(spec, mp) - - del wave - - if args.algorithm == "deep": - d_spec = np.where(np.abs(specs[0]) <= np.abs(spec[1]), specs[0], spec[1]) - v_spec = d_spec - specs[1] - sf.write( - os.path.join("{}.wav".format(args.output_name)), - cmb_spectrogram_to_wave(v_spec, mp), - mp.param["sr"], - ) - - if args.algorithm.startswith("invert"): - ln = min([specs[0].shape[2], specs[1].shape[2]]) - specs[0] = specs[0][:, :, :ln] - specs[1] = specs[1][:, :, :ln] - - if "invert_p" == args.algorithm: - X_mag = np.abs(specs[0]) - y_mag = np.abs(specs[1]) - max_mag = np.where(X_mag >= y_mag, X_mag, y_mag) - v_spec = specs[1] - max_mag * np.exp(1.0j * np.angle(specs[0])) - else: - specs[1] = reduce_vocal_aggressively(specs[0], specs[1], 0.2) - v_spec = specs[0] - specs[1] - - if not args.vocals_only: - X_mag = np.abs(specs[0]) - y_mag = np.abs(specs[1]) - v_mag = np.abs(v_spec) - - X_image = spectrogram_to_image(X_mag) - y_image = spectrogram_to_image(y_mag) - v_image = spectrogram_to_image(v_mag) - - cv2.imwrite("{}_X.png".format(args.output_name), X_image) - cv2.imwrite("{}_y.png".format(args.output_name), y_image) - cv2.imwrite("{}_v.png".format(args.output_name), v_image) - - sf.write( - "{}_X.wav".format(args.output_name), - cmb_spectrogram_to_wave(specs[0], mp), - mp.param["sr"], - ) - sf.write( - "{}_y.wav".format(args.output_name), - cmb_spectrogram_to_wave(specs[1], mp), - mp.param["sr"], - ) - - sf.write( - "{}_v.wav".format(args.output_name), - cmb_spectrogram_to_wave(v_spec, mp), - mp.param["sr"], - ) - else: - if not args.algorithm == "deep": - sf.write( - os.path.join("ensembled", "{}.wav".format(args.output_name)), - cmb_spectrogram_to_wave(ensembling(args.algorithm, specs), mp), - mp.param["sr"], - ) - - if args.algorithm == "align": - trackalignment = [ - { - "file1": '"{}"'.format(args.input[0]), - "file2": '"{}"'.format(args.input[1]), - } - ] - - for i, e in tqdm(enumerate(trackalignment), desc="Performing Alignment..."): - os.system(f"python lib/align_tracks.py {e['file1']} {e['file2']}") - - # print('Total time: {0:.{1}f}s'.format(time.time() - start_time, 1)) diff --git a/lib/uvr5_pack/name_params.json b/lib/uvr5_pack/name_params.json deleted file mode 100644 index 950adcf51..000000000 --- a/lib/uvr5_pack/name_params.json +++ /dev/null @@ -1,263 +0,0 @@ -{ - "equivalent" : [ - { - "model_hash_name" : [ - { - "hash_name": "47939caf0cfe52a0e81442b85b971dfd", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/4band_44100.json", - "param_name": "4band_44100" - }, - { - "hash_name": "4e4ecb9764c50a8c414fee6e10395bbe", - "model_params": 
"lib/uvr5_pack/lib_v5/modelparams/4band_v2.json", - "param_name": "4band_v2" - }, - { - "hash_name": "ca106edd563e034bde0bdec4bb7a4b36", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/4band_v2.json", - "param_name": "4band_v2" - }, - { - "hash_name": "e60a1e84803ce4efc0a6551206cc4b71", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/4band_44100.json", - "param_name": "4band_44100" - }, - { - "hash_name": "a82f14e75892e55e994376edbf0c8435", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/4band_44100.json", - "param_name": "4band_44100" - }, - { - "hash_name": "6dd9eaa6f0420af9f1d403aaafa4cc06", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/4band_v2_sn.json", - "param_name": "4band_v2_sn" - }, - { - "hash_name": "08611fb99bd59eaa79ad27c58d137727", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/4band_v2_sn.json", - "param_name": "4band_v2_sn" - }, - { - "hash_name": "5c7bbca45a187e81abbbd351606164e5", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/3band_44100_msb2.json", - "param_name": "3band_44100_msb2" - }, - { - "hash_name": "d6b2cb685a058a091e5e7098192d3233", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/3band_44100_msb2.json", - "param_name": "3band_44100_msb2" - }, - { - "hash_name": "c1b9f38170a7c90e96f027992eb7c62b", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/4band_44100.json", - "param_name": "4band_44100" - }, - { - "hash_name": "c3448ec923fa0edf3d03a19e633faa53", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/4band_44100.json", - "param_name": "4band_44100" - }, - { - "hash_name": "68aa2c8093d0080704b200d140f59e54", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/3band_44100.json", - "param_name": "3band_44100" - }, - { - "hash_name": "fdc83be5b798e4bd29fe00fe6600e147", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/3band_44100_mid.json", - "param_name": "3band_44100_mid.json" - }, - { - "hash_name": "2ce34bc92fd57f55db16b7a4def3d745", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/3band_44100_mid.json", - "param_name": "3band_44100_mid.json" - }, - { - "hash_name": "52fdca89576f06cf4340b74a4730ee5f", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/4band_44100.json", - "param_name": "4band_44100.json" - }, - { - "hash_name": "41191165b05d38fc77f072fa9e8e8a30", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/4band_44100.json", - "param_name": "4band_44100.json" - }, - { - "hash_name": "89e83b511ad474592689e562d5b1f80e", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/2band_32000.json", - "param_name": "2band_32000.json" - }, - { - "hash_name": "0b954da81d453b716b114d6d7c95177f", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/2band_32000.json", - "param_name": "2band_32000.json" - } - - ], - "v4 Models": [ - { - "hash_name": "6a00461c51c2920fd68937d4609ed6c8", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/1band_sr16000_hl512.json", - "param_name": "1band_sr16000_hl512" - }, - { - "hash_name": "0ab504864d20f1bd378fe9c81ef37140", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/1band_sr32000_hl512.json", - "param_name": "1band_sr32000_hl512" - }, - { - "hash_name": "7dd21065bf91c10f7fccb57d7d83b07f", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/1band_sr32000_hl512.json", - "param_name": "1band_sr32000_hl512" - }, - { - "hash_name": "80ab74d65e515caa3622728d2de07d23", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/1band_sr32000_hl512.json", - "param_name": "1band_sr32000_hl512" - }, - { - "hash_name": "edc115e7fc523245062200c00caa847f", - "model_params": 
"lib/uvr5_pack/lib_v5/modelparams/1band_sr33075_hl384.json", - "param_name": "1band_sr33075_hl384" - }, - { - "hash_name": "28063e9f6ab5b341c5f6d3c67f2045b7", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/1band_sr33075_hl384.json", - "param_name": "1band_sr33075_hl384" - }, - { - "hash_name": "b58090534c52cbc3e9b5104bad666ef2", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl512.json", - "param_name": "1band_sr44100_hl512" - }, - { - "hash_name": "0cdab9947f1b0928705f518f3c78ea8f", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl512.json", - "param_name": "1band_sr44100_hl512" - }, - { - "hash_name": "ae702fed0238afb5346db8356fe25f13", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl1024.json", - "param_name": "1band_sr44100_hl1024" - } - ] - } - ], - "User Models" : [ - { - "1 Band": [ - { - "hash_name": "1band_sr16000_hl512", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/1band_sr16000_hl512.json", - "param_name": "1band_sr16000_hl512" - }, - { - "hash_name": "1band_sr32000_hl512", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/1band_sr32000_hl512.json", - "param_name": "1band_sr16000_hl512" - }, - { - "hash_name": "1band_sr33075_hl384", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/1band_sr33075_hl384.json", - "param_name": "1band_sr33075_hl384" - }, - { - "hash_name": "1band_sr44100_hl256", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl256.json", - "param_name": "1band_sr44100_hl256" - }, - { - "hash_name": "1band_sr44100_hl512", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl512.json", - "param_name": "1band_sr44100_hl512" - }, - { - "hash_name": "1band_sr44100_hl1024", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl1024.json", - "param_name": "1band_sr44100_hl1024" - } - ], - "2 Band": [ - { - "hash_name": "2band_44100_lofi", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/2band_44100_lofi.json", - "param_name": "2band_44100_lofi" - }, - { - "hash_name": "2band_32000", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/2band_32000.json", - "param_name": "2band_32000" - }, - { - "hash_name": "2band_48000", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/2band_48000.json", - "param_name": "2band_48000" - } - ], - "3 Band": [ - { - "hash_name": "3band_44100", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/3band_44100.json", - "param_name": "3band_44100" - }, - { - "hash_name": "3band_44100_mid", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/3band_44100_mid.json", - "param_name": "3band_44100_mid" - }, - { - "hash_name": "3band_44100_msb2", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/3band_44100_msb2.json", - "param_name": "3band_44100_msb2" - } - ], - "4 Band": [ - { - "hash_name": "4band_44100", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/4band_44100.json", - "param_name": "4band_44100" - }, - { - "hash_name": "4band_44100_mid", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/4band_44100_mid.json", - "param_name": "4band_44100_mid" - }, - { - "hash_name": "4band_44100_msb", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/4band_44100_msb.json", - "param_name": "4band_44100_msb" - }, - { - "hash_name": "4band_44100_msb2", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/4band_44100_msb2.json", - "param_name": "4band_44100_msb2" - }, - { - "hash_name": "4band_44100_reverse", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/4band_44100_reverse.json", - "param_name": "4band_44100_reverse" - }, - { 
- "hash_name": "4band_44100_sw", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/4band_44100_sw.json", - "param_name": "4band_44100_sw" - }, - { - "hash_name": "4band_v2", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/4band_v2.json", - "param_name": "4band_v2" - }, - { - "hash_name": "4band_v2_sn", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/4band_v2_sn.json", - "param_name": "4band_v2_sn" - }, - { - "hash_name": "tmodelparam", - "model_params": "lib/uvr5_pack/lib_v5/modelparams/tmodelparam.json", - "param_name": "User Model Param Set" - } - ] - } - ] -} \ No newline at end of file diff --git a/lib/uvr5_pack/utils.py b/lib/uvr5_pack/utils.py deleted file mode 100644 index 0fafe8793..000000000 --- a/lib/uvr5_pack/utils.py +++ /dev/null @@ -1,120 +0,0 @@ -import torch -import numpy as np -from tqdm import tqdm -import json - - -def load_data(file_name: str = "./lib/uvr5_pack/name_params.json") -> dict: - with open(file_name, "r") as f: - data = json.load(f) - - return data - - -def make_padding(width, cropsize, offset): - left = offset - roi_size = cropsize - left * 2 - if roi_size == 0: - roi_size = cropsize - right = roi_size - (width % roi_size) + left - - return left, right, roi_size - - -def inference(X_spec, device, model, aggressiveness, data): - """ - data : dic configs - """ - - def _execute( - X_mag_pad, roi_size, n_window, device, model, aggressiveness, is_half=True - ): - model.eval() - with torch.no_grad(): - preds = [] - - iterations = [n_window] - - total_iterations = sum(iterations) - for i in tqdm(range(n_window)): - start = i * roi_size - X_mag_window = X_mag_pad[ - None, :, :, start : start + data["window_size"] - ] - X_mag_window = torch.from_numpy(X_mag_window) - if is_half: - X_mag_window = X_mag_window.half() - X_mag_window = X_mag_window.to(device) - - pred = model.predict(X_mag_window, aggressiveness) - - pred = pred.detach().cpu().numpy() - preds.append(pred[0]) - - pred = np.concatenate(preds, axis=2) - return pred - - def preprocess(X_spec): - X_mag = np.abs(X_spec) - X_phase = np.angle(X_spec) - - return X_mag, X_phase - - X_mag, X_phase = preprocess(X_spec) - - coef = X_mag.max() - X_mag_pre = X_mag / coef - - n_frame = X_mag_pre.shape[2] - pad_l, pad_r, roi_size = make_padding(n_frame, data["window_size"], model.offset) - n_window = int(np.ceil(n_frame / roi_size)) - - X_mag_pad = np.pad(X_mag_pre, ((0, 0), (0, 0), (pad_l, pad_r)), mode="constant") - - if list(model.state_dict().values())[0].dtype == torch.float16: - is_half = True - else: - is_half = False - pred = _execute( - X_mag_pad, roi_size, n_window, device, model, aggressiveness, is_half - ) - pred = pred[:, :, :n_frame] - - if data["tta"]: - pad_l += roi_size // 2 - pad_r += roi_size // 2 - n_window += 1 - - X_mag_pad = np.pad(X_mag_pre, ((0, 0), (0, 0), (pad_l, pad_r)), mode="constant") - - pred_tta = _execute( - X_mag_pad, roi_size, n_window, device, model, aggressiveness, is_half - ) - pred_tta = pred_tta[:, :, roi_size // 2 :] - pred_tta = pred_tta[:, :, :n_frame] - - return (pred + pred_tta) * 0.5 * coef, X_mag, np.exp(1.0j * X_phase) - else: - return pred * coef, X_mag, np.exp(1.0j * X_phase) - - -def _get_name_params(model_path, model_hash): - data = load_data() - flag = False - ModelName = model_path - for type in list(data): - for model in list(data[type][0]): - for i in range(len(data[type][0][model])): - if str(data[type][0][model][i]["hash_name"]) == model_hash: - flag = True - elif str(data[type][0][model][i]["hash_name"]) in ModelName: - flag = True - - if flag: - 
model_params_auto = data[type][0][model][i]["model_params"] - param_name_auto = data[type][0][model][i]["param_name"] - if type == "equivalent": - return param_name_auto, model_params_auto - else: - flag = False - return param_name_auto, model_params_auto diff --git a/logs/mute/0_gt_wavs/mute32k.wav b/logs/mute/0_gt_wavs/mute32k.wav deleted file mode 100644 index b4b502920..000000000 Binary files a/logs/mute/0_gt_wavs/mute32k.wav and /dev/null differ diff --git a/logs/mute/0_gt_wavs/mute40k.wav b/logs/mute/0_gt_wavs/mute40k.wav deleted file mode 100644 index fcf1281d4..000000000 Binary files a/logs/mute/0_gt_wavs/mute40k.wav and /dev/null differ diff --git a/logs/mute/0_gt_wavs/mute48k.wav b/logs/mute/0_gt_wavs/mute48k.wav deleted file mode 100644 index 72822a012..000000000 Binary files a/logs/mute/0_gt_wavs/mute48k.wav and /dev/null differ diff --git a/logs/mute/1_16k_wavs/mute.wav b/logs/mute/1_16k_wavs/mute.wav deleted file mode 100644 index 27a7d6385..000000000 Binary files a/logs/mute/1_16k_wavs/mute.wav and /dev/null differ diff --git a/logs/mute/2a_f0/mute.wav.npy b/logs/mute/2a_f0/mute.wav.npy deleted file mode 100644 index a7ecfbf92..000000000 Binary files a/logs/mute/2a_f0/mute.wav.npy and /dev/null differ diff --git a/logs/mute/2b-f0nsf/mute.wav.npy b/logs/mute/2b-f0nsf/mute.wav.npy deleted file mode 100644 index cf5c21bd4..000000000 Binary files a/logs/mute/2b-f0nsf/mute.wav.npy and /dev/null differ diff --git a/logs/mute/3_feature256/mute.npy b/logs/mute/3_feature256/mute.npy deleted file mode 100644 index ffe35e784..000000000 Binary files a/logs/mute/3_feature256/mute.npy and /dev/null differ diff --git a/logs/mute/3_feature768/mute.npy b/logs/mute/3_feature768/mute.npy deleted file mode 100644 index b14cfb83e..000000000 Binary files a/logs/mute/3_feature768/mute.npy and /dev/null differ diff --git a/mangio_utils/Readme.txt b/mangio_utils/Readme.txt deleted file mode 100644 index 7b014b8e6..000000000 --- a/mangio_utils/Readme.txt +++ /dev/null @@ -1 +0,0 @@ -Here, all utility scripts exist for unique features on My fork. \ No newline at end of file diff --git a/mangio_utils/donate.png b/mangio_utils/donate.png deleted file mode 100644 index 8f5d06320..000000000 Binary files a/mangio_utils/donate.png and /dev/null differ diff --git a/mangio_utils/inference_batcher.py b/mangio_utils/inference_batcher.py deleted file mode 100644 index 867175fcb..000000000 --- a/mangio_utils/inference_batcher.py +++ /dev/null @@ -1,5 +0,0 @@ -# Mangio-RVC-Fork Feature. Splits source audio into multiple segments (nparrays). - -from scipy.io import wavfile -import numpy as np -import os diff --git a/mangio_utils/lol.png b/mangio_utils/lol.png deleted file mode 100644 index 60078568c..000000000 Binary files a/mangio_utils/lol.png and /dev/null differ diff --git a/mangio_utils/segment-outputs/Readme.txt b/mangio_utils/segment-outputs/Readme.txt deleted file mode 100644 index ccc4aa789..000000000 --- a/mangio_utils/segment-outputs/Readme.txt +++ /dev/null @@ -1,2 +0,0 @@ -This folder (segment-outputs) is where all segment outputs created from the inference-batcher.py -are stored temporarily. 
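# Editor's note (illustrative sketch, not part of the deleted sources): the
# removed lib/uvr5_pack/utils.py pairs a hash lookup in name_params.json with a
# sliding-window inference loop over the spectrograms that the deleted
# spec_utils.py builds. The snippet below shows one plausible way those pieces
# were wired together; the checkpoint path, the md5-based hashing and the
# ModelParameters import path are assumptions for illustration only.
import hashlib

import torch

from lib.uvr5_pack.utils import _get_name_params, inference
from lib.uvr5_pack.lib_v5.model_param_init import ModelParameters  # assumed module path


def separate_spectrogram(model, X_spec, model_path, agg=10):
    """Resolve band parameters for a checkpoint, then run windowed inference."""
    with open(model_path, "rb") as f:
        model_hash = hashlib.md5(f.read()).hexdigest()  # assumed hashing scheme

    _param_name, model_params_path = _get_name_params(model_path, model_hash)
    mp = ModelParameters(model_params_path)

    # Same aggressiveness dict the CascadedASPPNet variants above expect.
    aggressiveness = {"value": agg / 100, "split_bin": mp.param["band"][1]["crop_stop"]}
    data = {"window_size": 512, "tta": False}

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    pred, _X_mag, X_phase = inference(X_spec, device, model.to(device), aggressiveness, data)
    return pred * X_phase  # complex spectrogram of the separated stem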
\ No newline at end of file diff --git a/my_utils.py b/my_utils.py deleted file mode 100644 index 219aba7c6..000000000 --- a/my_utils.py +++ /dev/null @@ -1,99 +0,0 @@ -import ffmpeg -import numpy as np - -import os -import sys - -import random - -#import csv - -platform_stft_mapping = { - 'linux': 'stftpitchshift', - 'darwin': 'stftpitchshift', - 'win32': 'stftpitchshift.exe', -} - -stft = platform_stft_mapping.get(sys.platform) - -def load_audio(file, sr, DoFormant=False, Quefrency=1.0, Timbre=1.0): - converted = False - try: - file = ( - file.strip(" ").strip('"').strip("\n").strip('"').strip(" ") - ) - file_formanted = file.strip(" ").strip('"').strip("\n").strip('"').strip(" ") - - if DoFormant: - numerator = round(random.uniform(1,4), 4) - if not file.endswith(".wav"): - if not os.path.isfile(f"{file_formanted}.wav"): - converted = True - converting = ( - ffmpeg.input(file_formanted, threads = 0) - .output(f"{file_formanted}.wav") - .run( - cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True - ) - ) - file_formanted = f"{file_formanted}.wav" if not file_formanted.endswith(".wav") else file_formanted - print(f" · Formanting {file_formanted}...\n") - - command = ( - f'{stft} -i "{file_formanted}" -q "{Quefrency}" ' - f'-t "{Timbre}" -o "{file_formanted}FORMANTED_{str(numerator)}.wav"' - ) - - os.system(command) - - print(f" · Formanted {file_formanted}!\n") - - out, _ = ( - ffmpeg.input(f"{file_formanted}FORMANTED_{str(numerator)}.wav", threads=0) - .output("-", format="f32le", acodec="pcm_f32le", ac=1, ar=sr) - .run( - cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True - ) - ) - - try: os.remove(f"{file_formanted}FORMANTED_{str(numerator)}.wav") - except Exception as e: pass; print(f"couldn't remove formanted type of file due to {e}") - - else: - out, _ = ( - ffmpeg.input(file, threads=0) - .output("-", format="f32le", acodec="pcm_f32le", ac=1, ar=sr) - .run( - cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True - ) - ) - except Exception as e: - raise RuntimeError(f"Failed to load audio: {e}") - - if converted: - try: os.remove(file_formanted) - except Exception as e: pass; print(f"Couldn't remove converted type of file due to {e}") - converted = False - - return np.frombuffer(out, np.float32).flatten() - - -def check_audio_duration(file): - try: - file = file.strip(" ").strip('"').strip("\n").strip('"').strip(" ") - - probe = ffmpeg.probe(file) - - duration = float(probe['streams'][0]['duration']) - - if duration < 0.76: - print( - f"\n------------\n" - f"Audio file, {file.split('/')[-1]}, under ~0.76s detected - file is too short. Target at least 1-2s for best results." - f"\n------------\n\n" - ) - return False - - return True - except Exception as e: - raise RuntimeError(f"Failed to check audio duration: {e}") \ No newline at end of file diff --git a/poetry.lock b/poetry.lock deleted file mode 100644 index f7aad0a17..000000000 --- a/poetry.lock +++ /dev/null @@ -1,3881 +0,0 @@ -# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand. - -[[package]] -name = "absl-py" -version = "1.4.0" -description = "Abseil Python Common Libraries, see https://github.com/abseil/abseil-py." 
-optional = false -python-versions = ">=3.6" -files = [ - {file = "absl-py-1.4.0.tar.gz", hash = "sha256:d2c244d01048ba476e7c080bd2c6df5e141d211de80223460d5b3b8a2a58433d"}, - {file = "absl_py-1.4.0-py3-none-any.whl", hash = "sha256:0d3fe606adfa4f7db64792dd4c7aee4ee0c38ab75dfd353b7a83ed3e957fcb47"}, -] - -[[package]] -name = "aiofiles" -version = "23.1.0" -description = "File support for asyncio." -optional = false -python-versions = ">=3.7,<4.0" -files = [ - {file = "aiofiles-23.1.0-py3-none-any.whl", hash = "sha256:9312414ae06472eb6f1d163f555e466a23aed1c8f60c30cccf7121dba2e53eb2"}, - {file = "aiofiles-23.1.0.tar.gz", hash = "sha256:edd247df9a19e0db16534d4baaf536d6609a43e1de5401d7a4c1c148753a1635"}, -] - -[[package]] -name = "aiohttp" -version = "3.8.4" -description = "Async http client/server framework (asyncio)" -optional = false -python-versions = ">=3.6" -files = [ - {file = "aiohttp-3.8.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5ce45967538fb747370308d3145aa68a074bdecb4f3a300869590f725ced69c1"}, - {file = "aiohttp-3.8.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b744c33b6f14ca26b7544e8d8aadff6b765a80ad6164fb1a430bbadd593dfb1a"}, - {file = "aiohttp-3.8.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1a45865451439eb320784918617ba54b7a377e3501fb70402ab84d38c2cd891b"}, - {file = "aiohttp-3.8.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a86d42d7cba1cec432d47ab13b6637bee393a10f664c425ea7b305d1301ca1a3"}, - {file = "aiohttp-3.8.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee3c36df21b5714d49fc4580247947aa64bcbe2939d1b77b4c8dcb8f6c9faecc"}, - {file = "aiohttp-3.8.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:176a64b24c0935869d5bbc4c96e82f89f643bcdf08ec947701b9dbb3c956b7dd"}, - {file = "aiohttp-3.8.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c844fd628851c0bc309f3c801b3a3d58ce430b2ce5b359cd918a5a76d0b20cb5"}, - {file = "aiohttp-3.8.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5393fb786a9e23e4799fec788e7e735de18052f83682ce2dfcabaf1c00c2c08e"}, - {file = "aiohttp-3.8.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e4b09863aae0dc965c3ef36500d891a3ff495a2ea9ae9171e4519963c12ceefd"}, - {file = "aiohttp-3.8.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:adfbc22e87365a6e564c804c58fc44ff7727deea782d175c33602737b7feadb6"}, - {file = "aiohttp-3.8.4-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:147ae376f14b55f4f3c2b118b95be50a369b89b38a971e80a17c3fd623f280c9"}, - {file = "aiohttp-3.8.4-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:eafb3e874816ebe2a92f5e155f17260034c8c341dad1df25672fb710627c6949"}, - {file = "aiohttp-3.8.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c6cc15d58053c76eacac5fa9152d7d84b8d67b3fde92709195cb984cfb3475ea"}, - {file = "aiohttp-3.8.4-cp310-cp310-win32.whl", hash = "sha256:59f029a5f6e2d679296db7bee982bb3d20c088e52a2977e3175faf31d6fb75d1"}, - {file = "aiohttp-3.8.4-cp310-cp310-win_amd64.whl", hash = "sha256:fe7ba4a51f33ab275515f66b0a236bcde4fb5561498fe8f898d4e549b2e4509f"}, - {file = "aiohttp-3.8.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3d8ef1a630519a26d6760bc695842579cb09e373c5f227a21b67dc3eb16cfea4"}, - {file = "aiohttp-3.8.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5b3f2e06a512e94722886c0827bee9807c86a9f698fac6b3aee841fab49bbfb4"}, - {file = "aiohttp-3.8.4-cp311-cp311-macosx_11_0_arm64.whl", 
hash = "sha256:3a80464982d41b1fbfe3154e440ba4904b71c1a53e9cd584098cd41efdb188ef"}, - {file = "aiohttp-3.8.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b631e26df63e52f7cce0cce6507b7a7f1bc9b0c501fcde69742130b32e8782f"}, - {file = "aiohttp-3.8.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f43255086fe25e36fd5ed8f2ee47477408a73ef00e804cb2b5cba4bf2ac7f5e"}, - {file = "aiohttp-3.8.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4d347a172f866cd1d93126d9b239fcbe682acb39b48ee0873c73c933dd23bd0f"}, - {file = "aiohttp-3.8.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a3fec6a4cb5551721cdd70473eb009d90935b4063acc5f40905d40ecfea23e05"}, - {file = "aiohttp-3.8.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:80a37fe8f7c1e6ce8f2d9c411676e4bc633a8462844e38f46156d07a7d401654"}, - {file = "aiohttp-3.8.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d1e6a862b76f34395a985b3cd39a0d949ca80a70b6ebdea37d3ab39ceea6698a"}, - {file = "aiohttp-3.8.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:cd468460eefef601ece4428d3cf4562459157c0f6523db89365202c31b6daebb"}, - {file = "aiohttp-3.8.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:618c901dd3aad4ace71dfa0f5e82e88b46ef57e3239fc7027773cb6d4ed53531"}, - {file = "aiohttp-3.8.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:652b1bff4f15f6287550b4670546a2947f2a4575b6c6dff7760eafb22eacbf0b"}, - {file = "aiohttp-3.8.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80575ba9377c5171407a06d0196b2310b679dc752d02a1fcaa2bc20b235dbf24"}, - {file = "aiohttp-3.8.4-cp311-cp311-win32.whl", hash = "sha256:bbcf1a76cf6f6dacf2c7f4d2ebd411438c275faa1dc0c68e46eb84eebd05dd7d"}, - {file = "aiohttp-3.8.4-cp311-cp311-win_amd64.whl", hash = "sha256:6e74dd54f7239fcffe07913ff8b964e28b712f09846e20de78676ce2a3dc0bfc"}, - {file = "aiohttp-3.8.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:880e15bb6dad90549b43f796b391cfffd7af373f4646784795e20d92606b7a51"}, - {file = "aiohttp-3.8.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb96fa6b56bb536c42d6a4a87dfca570ff8e52de2d63cabebfd6fb67049c34b6"}, - {file = "aiohttp-3.8.4-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4a6cadebe132e90cefa77e45f2d2f1a4b2ce5c6b1bfc1656c1ddafcfe4ba8131"}, - {file = "aiohttp-3.8.4-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f352b62b45dff37b55ddd7b9c0c8672c4dd2eb9c0f9c11d395075a84e2c40f75"}, - {file = "aiohttp-3.8.4-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ab43061a0c81198d88f39aaf90dae9a7744620978f7ef3e3708339b8ed2ef01"}, - {file = "aiohttp-3.8.4-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c9cb1565a7ad52e096a6988e2ee0397f72fe056dadf75d17fa6b5aebaea05622"}, - {file = "aiohttp-3.8.4-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:1b3ea7edd2d24538959c1c1abf97c744d879d4e541d38305f9bd7d9b10c9ec41"}, - {file = "aiohttp-3.8.4-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:7c7837fe8037e96b6dd5cfcf47263c1620a9d332a87ec06a6ca4564e56bd0f36"}, - {file = "aiohttp-3.8.4-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:3b90467ebc3d9fa5b0f9b6489dfb2c304a1db7b9946fa92aa76a831b9d587e99"}, - {file = "aiohttp-3.8.4-cp36-cp36m-musllinux_1_1_s390x.whl", hash = 
"sha256:cab9401de3ea52b4b4c6971db5fb5c999bd4260898af972bf23de1c6b5dd9d71"}, - {file = "aiohttp-3.8.4-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:d1f9282c5f2b5e241034a009779e7b2a1aa045f667ff521e7948ea9b56e0c5ff"}, - {file = "aiohttp-3.8.4-cp36-cp36m-win32.whl", hash = "sha256:5e14f25765a578a0a634d5f0cd1e2c3f53964553a00347998dfdf96b8137f777"}, - {file = "aiohttp-3.8.4-cp36-cp36m-win_amd64.whl", hash = "sha256:4c745b109057e7e5f1848c689ee4fb3a016c8d4d92da52b312f8a509f83aa05e"}, - {file = "aiohttp-3.8.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:aede4df4eeb926c8fa70de46c340a1bc2c6079e1c40ccf7b0eae1313ffd33519"}, - {file = "aiohttp-3.8.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ddaae3f3d32fc2cb4c53fab020b69a05c8ab1f02e0e59665c6f7a0d3a5be54f"}, - {file = "aiohttp-3.8.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c4eb3b82ca349cf6fadcdc7abcc8b3a50ab74a62e9113ab7a8ebc268aad35bb9"}, - {file = "aiohttp-3.8.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9bcb89336efa095ea21b30f9e686763f2be4478f1b0a616969551982c4ee4c3b"}, - {file = "aiohttp-3.8.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c08e8ed6fa3d477e501ec9db169bfac8140e830aa372d77e4a43084d8dd91ab"}, - {file = "aiohttp-3.8.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c6cd05ea06daca6ad6a4ca3ba7fe7dc5b5de063ff4daec6170ec0f9979f6c332"}, - {file = "aiohttp-3.8.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b7a00a9ed8d6e725b55ef98b1b35c88013245f35f68b1b12c5cd4100dddac333"}, - {file = "aiohttp-3.8.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:de04b491d0e5007ee1b63a309956eaed959a49f5bb4e84b26c8f5d49de140fa9"}, - {file = "aiohttp-3.8.4-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:40653609b3bf50611356e6b6554e3a331f6879fa7116f3959b20e3528783e699"}, - {file = "aiohttp-3.8.4-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:dbf3a08a06b3f433013c143ebd72c15cac33d2914b8ea4bea7ac2c23578815d6"}, - {file = "aiohttp-3.8.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:854f422ac44af92bfe172d8e73229c270dc09b96535e8a548f99c84f82dde241"}, - {file = "aiohttp-3.8.4-cp37-cp37m-win32.whl", hash = "sha256:aeb29c84bb53a84b1a81c6c09d24cf33bb8432cc5c39979021cc0f98c1292a1a"}, - {file = "aiohttp-3.8.4-cp37-cp37m-win_amd64.whl", hash = "sha256:db3fc6120bce9f446d13b1b834ea5b15341ca9ff3f335e4a951a6ead31105480"}, - {file = "aiohttp-3.8.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:fabb87dd8850ef0f7fe2b366d44b77d7e6fa2ea87861ab3844da99291e81e60f"}, - {file = "aiohttp-3.8.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:91f6d540163f90bbaef9387e65f18f73ffd7c79f5225ac3d3f61df7b0d01ad15"}, - {file = "aiohttp-3.8.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d265f09a75a79a788237d7f9054f929ced2e69eb0bb79de3798c468d8a90f945"}, - {file = "aiohttp-3.8.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3d89efa095ca7d442a6d0cbc755f9e08190ba40069b235c9886a8763b03785da"}, - {file = "aiohttp-3.8.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4dac314662f4e2aa5009977b652d9b8db7121b46c38f2073bfeed9f4049732cd"}, - {file = "aiohttp-3.8.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fe11310ae1e4cd560035598c3f29d86cef39a83d244c7466f95c27ae04850f10"}, - {file = "aiohttp-3.8.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:6ddb2a2026c3f6a68c3998a6c47ab6795e4127315d2e35a09997da21865757f8"}, - {file = "aiohttp-3.8.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e75b89ac3bd27d2d043b234aa7b734c38ba1b0e43f07787130a0ecac1e12228a"}, - {file = "aiohttp-3.8.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6e601588f2b502c93c30cd5a45bfc665faaf37bbe835b7cfd461753068232074"}, - {file = "aiohttp-3.8.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a5d794d1ae64e7753e405ba58e08fcfa73e3fad93ef9b7e31112ef3c9a0efb52"}, - {file = "aiohttp-3.8.4-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:a1f4689c9a1462f3df0a1f7e797791cd6b124ddbee2b570d34e7f38ade0e2c71"}, - {file = "aiohttp-3.8.4-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:3032dcb1c35bc330134a5b8a5d4f68c1a87252dfc6e1262c65a7e30e62298275"}, - {file = "aiohttp-3.8.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8189c56eb0ddbb95bfadb8f60ea1b22fcfa659396ea36f6adcc521213cd7b44d"}, - {file = "aiohttp-3.8.4-cp38-cp38-win32.whl", hash = "sha256:33587f26dcee66efb2fff3c177547bd0449ab7edf1b73a7f5dea1e38609a0c54"}, - {file = "aiohttp-3.8.4-cp38-cp38-win_amd64.whl", hash = "sha256:e595432ac259af2d4630008bf638873d69346372d38255774c0e286951e8b79f"}, - {file = "aiohttp-3.8.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5a7bdf9e57126dc345b683c3632e8ba317c31d2a41acd5800c10640387d193ed"}, - {file = "aiohttp-3.8.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:22f6eab15b6db242499a16de87939a342f5a950ad0abaf1532038e2ce7d31567"}, - {file = "aiohttp-3.8.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7235604476a76ef249bd64cb8274ed24ccf6995c4a8b51a237005ee7a57e8643"}, - {file = "aiohttp-3.8.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea9eb976ffdd79d0e893869cfe179a8f60f152d42cb64622fca418cd9b18dc2a"}, - {file = "aiohttp-3.8.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:92c0cea74a2a81c4c76b62ea1cac163ecb20fb3ba3a75c909b9fa71b4ad493cf"}, - {file = "aiohttp-3.8.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:493f5bc2f8307286b7799c6d899d388bbaa7dfa6c4caf4f97ef7521b9cb13719"}, - {file = "aiohttp-3.8.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a63f03189a6fa7c900226e3ef5ba4d3bd047e18f445e69adbd65af433add5a2"}, - {file = "aiohttp-3.8.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10c8cefcff98fd9168cdd86c4da8b84baaa90bf2da2269c6161984e6737bf23e"}, - {file = "aiohttp-3.8.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bca5f24726e2919de94f047739d0a4fc01372801a3672708260546aa2601bf57"}, - {file = "aiohttp-3.8.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:03baa76b730e4e15a45f81dfe29a8d910314143414e528737f8589ec60cf7391"}, - {file = "aiohttp-3.8.4-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:8c29c77cc57e40f84acef9bfb904373a4e89a4e8b74e71aa8075c021ec9078c2"}, - {file = "aiohttp-3.8.4-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:03543dcf98a6619254b409be2d22b51f21ec66272be4ebda7b04e6412e4b2e14"}, - {file = "aiohttp-3.8.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:17b79c2963db82086229012cff93ea55196ed31f6493bb1ccd2c62f1724324e4"}, - {file = "aiohttp-3.8.4-cp39-cp39-win32.whl", hash = "sha256:34ce9f93a4a68d1272d26030655dd1b58ff727b3ed2a33d80ec433561b03d67a"}, - {file = "aiohttp-3.8.4-cp39-cp39-win_amd64.whl", hash = "sha256:41a86a69bb63bb2fc3dc9ad5ea9f10f1c9c8e282b471931be0268ddd09430b04"}, - 
{file = "aiohttp-3.8.4.tar.gz", hash = "sha256:bf2e1a9162c1e441bf805a1fd166e249d574ca04e03b34f97e2928769e91ab5c"}, -] - -[package.dependencies] -aiosignal = ">=1.1.2" -async-timeout = ">=4.0.0a3,<5.0" -attrs = ">=17.3.0" -charset-normalizer = ">=2.0,<4.0" -frozenlist = ">=1.1.1" -multidict = ">=4.5,<7.0" -yarl = ">=1.0,<2.0" - -[package.extras] -speedups = ["Brotli", "aiodns", "cchardet"] - -[[package]] -name = "aiosignal" -version = "1.3.1" -description = "aiosignal: a list of registered asynchronous callbacks" -optional = false -python-versions = ">=3.7" -files = [ - {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"}, - {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"}, -] - -[package.dependencies] -frozenlist = ">=1.1.0" - -[[package]] -name = "altair" -version = "4.2.2" -description = "Altair: A declarative statistical visualization library for Python." -optional = false -python-versions = ">=3.7" -files = [ - {file = "altair-4.2.2-py3-none-any.whl", hash = "sha256:8b45ebeaf8557f2d760c5c77b79f02ae12aee7c46c27c06014febab6f849bc87"}, - {file = "altair-4.2.2.tar.gz", hash = "sha256:39399a267c49b30d102c10411e67ab26374156a84b1aeb9fcd15140429ba49c5"}, -] - -[package.dependencies] -entrypoints = "*" -jinja2 = "*" -jsonschema = ">=3.0" -numpy = "*" -pandas = ">=0.18" -toolz = "*" - -[package.extras] -dev = ["black", "docutils", "flake8", "ipython", "m2r", "mistune (<2.0.0)", "pytest", "recommonmark", "sphinx", "vega-datasets"] - -[[package]] -name = "antlr4-python3-runtime" -version = "4.8" -description = "ANTLR 4.8 runtime for Python 3.7" -optional = false -python-versions = "*" -files = [ - {file = "antlr4-python3-runtime-4.8.tar.gz", hash = "sha256:15793f5d0512a372b4e7d2284058ad32ce7dd27126b105fb0b2245130445db33"}, -] - -[[package]] -name = "anyio" -version = "3.6.2" -description = "High level compatibility layer for multiple asynchronous event loop implementations" -optional = false -python-versions = ">=3.6.2" -files = [ - {file = "anyio-3.6.2-py3-none-any.whl", hash = "sha256:fbbe32bd270d2a2ef3ed1c5d45041250284e31fc0a4df4a5a6071842051a51e3"}, - {file = "anyio-3.6.2.tar.gz", hash = "sha256:25ea0d673ae30af41a0c442f81cf3b38c7e79fdc7b60335a4c14e05eb0947421"}, -] - -[package.dependencies] -idna = ">=2.8" -sniffio = ">=1.1" - -[package.extras] -doc = ["packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] -test = ["contextlib2", "coverage[toml] (>=4.5)", "hypothesis (>=4.0)", "mock (>=4)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (<0.15)", "uvloop (>=0.15)"] -trio = ["trio (>=0.16,<0.22)"] - -[[package]] -name = "async-timeout" -version = "4.0.2" -description = "Timeout context manager for asyncio programs" -optional = false -python-versions = ">=3.6" -files = [ - {file = "async-timeout-4.0.2.tar.gz", hash = "sha256:2163e1640ddb52b7a8c80d0a67a08587e5d245cc9c553a74a847056bc2976b15"}, - {file = "async_timeout-4.0.2-py3-none-any.whl", hash = "sha256:8ca1e4fcf50d07413d66d1a5e416e42cfdf5851c981d679a09851a6853383b3c"}, -] - -[[package]] -name = "attrs" -version = "22.2.0" -description = "Classes Without Boilerplate" -optional = false -python-versions = ">=3.6" -files = [ - {file = "attrs-22.2.0-py3-none-any.whl", hash = "sha256:29e95c7f6778868dbd49170f98f8818f78f3dc5e0e37c0b1f474e3561b240836"}, - {file = "attrs-22.2.0.tar.gz", hash = "sha256:c9227bfc2f01993c03f68db37d1d15c9690188323c067c641f1a35ca58185f99"}, -] - 
-[package.extras] -cov = ["attrs[tests]", "coverage-enable-subprocess", "coverage[toml] (>=5.3)"] -dev = ["attrs[docs,tests]"] -docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope.interface"] -tests = ["attrs[tests-no-zope]", "zope.interface"] -tests-no-zope = ["cloudpickle", "cloudpickle", "hypothesis", "hypothesis", "mypy (>=0.971,<0.990)", "mypy (>=0.971,<0.990)", "pympler", "pympler", "pytest (>=4.3.0)", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-mypy-plugins", "pytest-xdist[psutil]", "pytest-xdist[psutil]"] - -[[package]] -name = "audioread" -version = "3.0.0" -description = "multi-library, cross-platform audio decoding" -optional = false -python-versions = ">=3.6" -files = [ - {file = "audioread-3.0.0.tar.gz", hash = "sha256:121995bd207eb1fda3d566beb851d3534275925bc35a4fb6da0cb11de0f7251a"}, -] - -[[package]] -name = "bitarray" -version = "2.7.3" -description = "efficient arrays of booleans -- C extension" -optional = false -python-versions = "*" -files = [ - {file = "bitarray-2.7.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:979d42e0b2c3113526f9716a461e08671788a23ce7e3b5cd090ce3e6a6762641"}, - {file = "bitarray-2.7.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:860edf8533223d82bd6201894bcaf540f828f49075f363390eecf04b12fb94cb"}, - {file = "bitarray-2.7.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:78378d8dacbe1f4f263347f42ec0a41cc2097cd671c6ac30a65a838284a5e141"}, - {file = "bitarray-2.7.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:888df211aafe5fad41c0792a686d95c8ba37345d5037f437aa3c09608f9c3b56"}, - {file = "bitarray-2.7.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fb3f003dee96dbf24a6df71443557f249b17b20083c189995302b14eb01530bf"}, - {file = "bitarray-2.7.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c531532c21bc1063e65957a1a85a2d13601ec21801f70821c89d9339b16ebc78"}, - {file = "bitarray-2.7.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b8fd92c8026e4ba6874e94f538890e35bef2a3a18ea54e3663c578b7916ade1"}, - {file = "bitarray-2.7.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6d19c34a2121eccfeb642d4ad71163bd3342a8f3a99e6724fe824bdfbc0a5b65"}, - {file = "bitarray-2.7.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:102db74ee82ec5774aba01481e73eedaebd27ba167344a81d3b42e6fbf9ffb77"}, - {file = "bitarray-2.7.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7f6540b45b2230442f7a0614745131e0a6f28251f5d33ac19d0ed61d80db7153"}, - {file = "bitarray-2.7.3-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:99c9345c417a9cff98f9f6e59b0350dcc10c2e0e1ea66acf7946de1cd60541fa"}, - {file = "bitarray-2.7.3-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:a1d439c98e65ab8e5fbcc2b242a16e7a3f076974bff78185ff42ba2d4c220032"}, - {file = "bitarray-2.7.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:87897ec0e4876c9f2c1ae313519de0ed2ad8041a4d2210a083f9b4a239add2e3"}, - {file = "bitarray-2.7.3-cp310-cp310-win32.whl", hash = "sha256:cb46c3a4002c8322dd0e1b4b53f8a647dcb0f199f5c7a1fc03d3880c3eabbd2c"}, - {file = "bitarray-2.7.3-cp310-cp310-win_amd64.whl", hash = "sha256:5df10eb9b794932b0cf806f412d1c6d04fb7655ca7ae5caf6354b9edc380a5f7"}, - {file = "bitarray-2.7.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:27524bc92fdeb464a5057a4677a35f482cf30be2e920bd1d11c46de533cafda6"}, - {file = 
"bitarray-2.7.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3cf37431de779b29e5c0d8e36868f77f6df53c3c19c20e8404137e257dc80040"}, - {file = "bitarray-2.7.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8abd23f94cdcce971d932a5f0a066d40fbc61901fd087aa70d32cccd1793bd20"}, - {file = "bitarray-2.7.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7659bdfe7716b14a39007e31e957fa64d7f0d9e40a1dbd024bd81b972d76bffb"}, - {file = "bitarray-2.7.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:da1570f301abdfda68f4fdb40c4d3f09af4bb6e4550b4fa5395db0d142b680bc"}, - {file = "bitarray-2.7.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8becbb9649fd29ee577f9f0405ce2fba5cf9fa2c290c9b044bc235c04473f213"}, - {file = "bitarray-2.7.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72fd7f6f940bc42914c86700591ccfd1daeff0e414cefcbd7843117df2fac4e9"}, - {file = "bitarray-2.7.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:23b7bada6d6b62cba08f4a1b8a95da2d8592aae1db3c167dcb52abcba0a7bef5"}, - {file = "bitarray-2.7.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4b2d150a81a981537801ac7d4f4f5d082c48343612a21f4e2c4cd2e887973bd5"}, - {file = "bitarray-2.7.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:1502660ab489b1f18c3493c766252cd5d24bc1cbf4bdf3594e0a30de142ed453"}, - {file = "bitarray-2.7.3-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:91f43f6b6c9129a56d3e2dccb8b88ffce0e4f4893dd9d69d285676bdf5b9ca14"}, - {file = "bitarray-2.7.3-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:a69c99274aee2ffdc7f1cfd34044ccb7155790d6f5217d677ea46a6ddead6dd2"}, - {file = "bitarray-2.7.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d63f20299441e32171f08fc62f7ea7e401cc12a96f67a36ab2d76439ecfcb118"}, - {file = "bitarray-2.7.3-cp311-cp311-win32.whl", hash = "sha256:0b84fd9dbf999cbca1090a7703aa1404cd01af4035c6ba3adf69d41280611fb6"}, - {file = "bitarray-2.7.3-cp311-cp311-win_amd64.whl", hash = "sha256:76bbbb9ceebb9cbb2b14369b3681fecab226792b339f612e79f6575ca31fed45"}, - {file = "bitarray-2.7.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50d5e2c026b3e3d145f64c457338ea99edcbdd302fdcbd96418251ac51a98a59"}, - {file = "bitarray-2.7.3-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7d571056115bbdc18f199a9ee4c2a1b5884f5e63a3c05fe43d2fc7fc67320515"}, - {file = "bitarray-2.7.3-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e2a0313657e6656efca2148cfc91c50fdafca6f811b6c7d0906e6ba57134e560"}, - {file = "bitarray-2.7.3-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d3b5abb73c45d40d27f9795dac9d6eb1515729c13f93dd67df2be07be6549990"}, - {file = "bitarray-2.7.3-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7776c070943f45cd8303543a6625cf82f2e000ef9c885d52d7828be099e52f42"}, - {file = "bitarray-2.7.3-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:057f9c53a34e42deed6e8813a82b9c85924f4728be28e3b9b65144569ac5a387"}, - {file = "bitarray-2.7.3-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:8591ad5768860ad186dc94fd58b2932604a7639b57eefbbff2b4865af3407691"}, - {file = "bitarray-2.7.3-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:bd7f4b2df89bf4e298756c0be0be67fb84d6aa49bda60d46805d43f0e643abd5"}, - {file = "bitarray-2.7.3-cp36-cp36m-musllinux_1_1_ppc64le.whl", 
hash = "sha256:433f91c8ab8338662aaa86b0677e6c15c35f8f7b65d4c43d7d1647a8198bc0b0"}, - {file = "bitarray-2.7.3-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:31e60d8341c3189aa156ca8cb2f6370b29d79cf132e3d091714b0a5a9097eb69"}, - {file = "bitarray-2.7.3-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:ea33ed09157e032f0a7a2627ef87f156e9927697f59b55961439d34bf45af23a"}, - {file = "bitarray-2.7.3-cp36-cp36m-win32.whl", hash = "sha256:302149aaff75939beb8af7f32ac9bf922480033a24fb54f4ebc0c9dc175247c4"}, - {file = "bitarray-2.7.3-cp36-cp36m-win_amd64.whl", hash = "sha256:7a8995737fae8de03b31ed83acf4f4326a55b217022009d18be19ff87fc9010e"}, - {file = "bitarray-2.7.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8b2f31a4cc28aef27355ab896e4b4cc2da2204b2b7adb674d8be7fefa0c93868"}, - {file = "bitarray-2.7.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5df624ee8a4098c3b1149f4817f2a4a0121c4920e1c114af324bc52d6659e2b"}, - {file = "bitarray-2.7.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cb1d60ed709989e34e7158d97fdb077a2f2dfc505998a84161a70f81a6101172"}, - {file = "bitarray-2.7.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:748847e58c45a37f23db1f53a6dc16ae32aa80ee504653d79336830de1a79ed7"}, - {file = "bitarray-2.7.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4b7fdb9772e087174f446655bbc497a1600b5758f279c6d44fcf344c13d5c8a"}, - {file = "bitarray-2.7.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:86e9c48ffeddb0f943e87ab65e1e95dccc9b44ef3761af3bf9642973ab7646d2"}, - {file = "bitarray-2.7.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:0d1f49cc51919d6fa0f7eebd073d2c620b80079aa537d084a7fafb46a35c7a4d"}, - {file = "bitarray-2.7.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:b43d56c7c96f5a055f4051be426496db2a616840645d0ab3733d5ceacb2f701b"}, - {file = "bitarray-2.7.3-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:01f8d02c3eae82c98d4259777cb2f042a0b3989d7dceeb37c643cb94b91d5a42"}, - {file = "bitarray-2.7.3-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:d089b1d0b157c9a484f8f7475eecea813d0dc3818adc5bf352903da14fe88fc3"}, - {file = "bitarray-2.7.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:1362e9fb78ca72aa52ec1f1fbd62872801302001b0156ed2a1e707850cd30ffd"}, - {file = "bitarray-2.7.3-cp37-cp37m-win32.whl", hash = "sha256:2cdf5700537e5aa4ec9f4a0b498b8d5b03b9859d503e01ea17a6a134a838aa30"}, - {file = "bitarray-2.7.3-cp37-cp37m-win_amd64.whl", hash = "sha256:1e1553933f4533040491f4e4499bcbbfcee42c4056f56d7e18010e779daab33d"}, - {file = "bitarray-2.7.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:1048a29b3d72b1821a3ae9e8d64e71ed96c53a1a36b1da6db02091a424a8f795"}, - {file = "bitarray-2.7.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:10dc358fe29d7a4c5be78ab2fb5aa50cb8066babd23e0b5589eb68e26afe58d8"}, - {file = "bitarray-2.7.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8ab6770833976448a9a973bc0df63adedc4c30de4774cec5a9928fc496423ebb"}, - {file = "bitarray-2.7.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4abe2f829f6f2d330bccf1bcde2192264ab9a15d6d00e507265f46dc66557014"}, - {file = "bitarray-2.7.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:87851a82bdf849e3c40ff6d8af5f734634e17f52a8f7f7e74486c2f8ce717578"}, - {file = "bitarray-2.7.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:a5fc2512bdf5289a1412c936c65d17881d2b46edb0036c63a8d5605dc8d398a3"}, - {file = "bitarray-2.7.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1289f408a8b5c87cdb4fd7975d4021c6e61209ccb956d0411e72bf43c7f78463"}, - {file = "bitarray-2.7.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9ee181cc00aaba38d9812f4df4e7d828105b6dde3b068cd2c43f1d8f395e0046"}, - {file = "bitarray-2.7.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:00e93f70cbcbeabd1e79accf1b6f5b2424cd40556e7877f618549523d0031c98"}, - {file = "bitarray-2.7.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:3fb6a952796d16c3a309d866eef56a8f4e5591d112c22446e67d33ecb096b44b"}, - {file = "bitarray-2.7.3-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:0fe747a134f7f5bc0877eee58090ae7e7f23628eeb459f681ade65719c3f246a"}, - {file = "bitarray-2.7.3-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:2c1b2c91bf991b5c641faee78dd5a751dff6155ec51c7a6c7f922dc85431898e"}, - {file = "bitarray-2.7.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:c3956ae54285ab30d802756144887e30e013f81c9f03e5ffff9daa46d8ca0154"}, - {file = "bitarray-2.7.3-cp38-cp38-win32.whl", hash = "sha256:00a6fc4355bd4e6ead54d05187dc4ea39f0af439b336ae113f0194673ed730ae"}, - {file = "bitarray-2.7.3-cp38-cp38-win_amd64.whl", hash = "sha256:305e6f7441c007f296644ba3899c0306ce9fd7a482dbbc06b6e7b7bd6e0ddabc"}, - {file = "bitarray-2.7.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:fe80c23409efb41b86efb5e45f334420a9b5b7828f5b3d08b5ff28f03a024d9e"}, - {file = "bitarray-2.7.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:16345146b61e93ca20679c83537ccf7245f78b17035f5b1a436fd2b75da04c5e"}, - {file = "bitarray-2.7.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1af9b720a048c69e999094e2310138b7cfca5471a9d2c1dbe4b53dd10e516720"}, - {file = "bitarray-2.7.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:088e6e9ea7f0eaf8b672679a68096dbc0a7a7b7a4ed567860f7362e1588370a6"}, - {file = "bitarray-2.7.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:122cd70ee0de2cc9d94da8b8ebcb7dca12b9f4d3beefb94c11e110e1d87503bb"}, - {file = "bitarray-2.7.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cb9a8ee23416bd0cfd457118978bc2f6f02c20b95336db486887f670bf92c2b7"}, - {file = "bitarray-2.7.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a544f99c24b6f658907eb9edf290a9c54f4106738b2ab84cd19dc6013cc3abf"}, - {file = "bitarray-2.7.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:980f6564218f853a9341fb045446539d4153338926ed2fb222e86dc9b2ae9b8f"}, - {file = "bitarray-2.7.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f64abe9301b918d2c352e42198cea0196f3639bc1ad23a4a9d8ae97f66068901"}, - {file = "bitarray-2.7.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:029c724bf38c6616b90b1c423b846b63f8d607ed5a23d270e3862696d88a5392"}, - {file = "bitarray-2.7.3-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:16cb00911584a6e9ca0f42c305714898120dc6bfbbec90dacedeed4690331a47"}, - {file = "bitarray-2.7.3-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:699b0134e87c0c4e3b224d879d218c4385a06e6b72df73b4c9c9d549155fb837"}, - {file = "bitarray-2.7.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b508e1bba4ec68fd0ef28505e2dad2f56de7df710c8334c97036705a562cb908"}, - {file = "bitarray-2.7.3-cp39-cp39-win32.whl", hash = 
"sha256:4b84230624d15868e407ba8b66df54fc69ee6a9e9cb6d51eb264b8f2614596f1"}, - {file = "bitarray-2.7.3-cp39-cp39-win_amd64.whl", hash = "sha256:757a08bf0aed5a650a399f8c66bcba00c210bce34408b6d7b09b4837bee8f4da"}, - {file = "bitarray-2.7.3.tar.gz", hash = "sha256:f71256a32609b036adad932e1228b66a6b4e2cae6be397e588ddc0babd9a78b9"}, -] - -[[package]] -name = "cachetools" -version = "5.3.0" -description = "Extensible memoizing collections and decorators" -optional = false -python-versions = "~=3.7" -files = [ - {file = "cachetools-5.3.0-py3-none-any.whl", hash = "sha256:429e1a1e845c008ea6c85aa35d4b98b65d6a9763eeef3e37e92728a12d1de9d4"}, - {file = "cachetools-5.3.0.tar.gz", hash = "sha256:13dfddc7b8df938c21a940dfa6557ce6e94a2f1cdfa58eb90c805721d58f2c14"}, -] - -[[package]] -name = "certifi" -version = "2022.12.7" -description = "Python package for providing Mozilla's CA Bundle." -optional = false -python-versions = ">=3.6" -files = [ - {file = "certifi-2022.12.7-py3-none-any.whl", hash = "sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18"}, - {file = "certifi-2022.12.7.tar.gz", hash = "sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3"}, -] - -[[package]] -name = "cffi" -version = "1.15.1" -description = "Foreign Function Interface for Python calling C code." -optional = false -python-versions = "*" -files = [ - {file = "cffi-1.15.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2"}, - {file = "cffi-1.15.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2"}, - {file = "cffi-1.15.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914"}, - {file = "cffi-1.15.1-cp27-cp27m-win32.whl", hash = "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3"}, - {file = "cffi-1.15.1-cp27-cp27m-win_amd64.whl", hash = "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e"}, - {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162"}, - {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b"}, - {file = "cffi-1.15.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21"}, - {file = "cffi-1.15.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4"}, - {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_i686.whl", 
hash = "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01"}, - {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e"}, - {file = "cffi-1.15.1-cp310-cp310-win32.whl", hash = "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2"}, - {file = "cffi-1.15.1-cp310-cp310-win_amd64.whl", hash = "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d"}, - {file = "cffi-1.15.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac"}, - {file = "cffi-1.15.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83"}, - {file = "cffi-1.15.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9"}, - {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c"}, - {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325"}, - {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c"}, - {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef"}, - {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8"}, - {file = "cffi-1.15.1-cp311-cp311-win32.whl", hash = "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d"}, - {file = "cffi-1.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104"}, - {file = "cffi-1.15.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7"}, - {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6"}, - {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d"}, - {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a"}, - {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405"}, - {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e"}, - {file = "cffi-1.15.1-cp36-cp36m-win32.whl", hash = "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf"}, - {file = "cffi-1.15.1-cp36-cp36m-win_amd64.whl", hash = "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497"}, - {file = "cffi-1.15.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375"}, - {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e"}, - {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82"}, - {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b"}, - {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c"}, - {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426"}, - {file = "cffi-1.15.1-cp37-cp37m-win32.whl", hash = "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9"}, - {file = "cffi-1.15.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045"}, - {file = "cffi-1.15.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3"}, - {file = "cffi-1.15.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a"}, - {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5"}, - {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca"}, - {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02"}, - {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192"}, - {file = "cffi-1.15.1-cp38-cp38-win32.whl", hash = "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314"}, - {file = "cffi-1.15.1-cp38-cp38-win_amd64.whl", hash = "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5"}, - {file = "cffi-1.15.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585"}, - {file = "cffi-1.15.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0"}, - {file = "cffi-1.15.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415"}, - {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d"}, - {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984"}, - {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35"}, - {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27"}, - {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76"}, - 
{file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3"}, - {file = "cffi-1.15.1-cp39-cp39-win32.whl", hash = "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee"}, - {file = "cffi-1.15.1-cp39-cp39-win_amd64.whl", hash = "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c"}, - {file = "cffi-1.15.1.tar.gz", hash = "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9"}, -] - -[package.dependencies] -pycparser = "*" - -[[package]] -name = "charset-normalizer" -version = "3.1.0" -description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." -optional = false -python-versions = ">=3.7.0" -files = [ - {file = "charset-normalizer-3.1.0.tar.gz", hash = "sha256:34e0a2f9c370eb95597aae63bf85eb5e96826d81e3dcf88b8886012906f509b5"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e0ac8959c929593fee38da1c2b64ee9778733cdf03c482c9ff1d508b6b593b2b"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d7fc3fca01da18fbabe4625d64bb612b533533ed10045a2ac3dd194bfa656b60"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:04eefcee095f58eaabe6dc3cc2262f3bcd776d2c67005880894f447b3f2cb9c1"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20064ead0717cf9a73a6d1e779b23d149b53daf971169289ed2ed43a71e8d3b0"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1435ae15108b1cb6fffbcea2af3d468683b7afed0169ad718451f8db5d1aff6f"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c84132a54c750fda57729d1e2599bb598f5fa0344085dbde5003ba429a4798c0"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75f2568b4189dda1c567339b48cba4ac7384accb9c2a7ed655cd86b04055c795"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11d3bcb7be35e7b1bba2c23beedac81ee893ac9871d0ba79effc7fc01167db6c"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:891cf9b48776b5c61c700b55a598621fdb7b1e301a550365571e9624f270c203"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:5f008525e02908b20e04707a4f704cd286d94718f48bb33edddc7d7b584dddc1"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:b06f0d3bf045158d2fb8837c5785fe9ff9b8c93358be64461a1089f5da983137"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:49919f8400b5e49e961f320c735388ee686a62327e773fa5b3ce6721f7e785ce"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:22908891a380d50738e1f978667536f6c6b526a2064156203d418f4856d6e86a"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-win32.whl", hash = "sha256:12d1a39aa6b8c6f6248bb54550efcc1c38ce0d8096a146638fd4738e42284448"}, - {file = "charset_normalizer-3.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:65ed923f84a6844de5fd29726b888e58c62820e0769b76565480e1fdc3d062f8"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = 
"sha256:9a3267620866c9d17b959a84dd0bd2d45719b817245e49371ead79ed4f710d19"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6734e606355834f13445b6adc38b53c0fd45f1a56a9ba06c2058f86893ae8017"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f8303414c7b03f794347ad062c0516cee0e15f7a612abd0ce1e25caf6ceb47df"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaf53a6cebad0eae578f062c7d462155eada9c172bd8c4d250b8c1d8eb7f916a"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3dc5b6a8ecfdc5748a7e429782598e4f17ef378e3e272eeb1340ea57c9109f41"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e1b25e3ad6c909f398df8921780d6a3d120d8c09466720226fc621605b6f92b1"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ca564606d2caafb0abe6d1b5311c2649e8071eb241b2d64e75a0d0065107e62"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b82fab78e0b1329e183a65260581de4375f619167478dddab510c6c6fb04d9b6"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:bd7163182133c0c7701b25e604cf1611c0d87712e56e88e7ee5d72deab3e76b5"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:11d117e6c63e8f495412d37e7dc2e2fff09c34b2d09dbe2bee3c6229577818be"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:cf6511efa4801b9b38dc5546d7547d5b5c6ef4b081c60b23e4d941d0eba9cbeb"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:abc1185d79f47c0a7aaf7e2412a0eb2c03b724581139193d2d82b3ad8cbb00ac"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cb7b2ab0188829593b9de646545175547a70d9a6e2b63bf2cd87a0a391599324"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-win32.whl", hash = "sha256:c36bcbc0d5174a80d6cccf43a0ecaca44e81d25be4b7f90f0ed7bcfbb5a00909"}, - {file = "charset_normalizer-3.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:cca4def576f47a09a943666b8f829606bcb17e2bc2d5911a46c8f8da45f56755"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0c95f12b74681e9ae127728f7e5409cbbef9cd914d5896ef238cc779b8152373"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fca62a8301b605b954ad2e9c3666f9d97f63872aa4efcae5492baca2056b74ab"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac0aa6cd53ab9a31d397f8303f92c42f534693528fafbdb997c82bae6e477ad9"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c3af8e0f07399d3176b179f2e2634c3ce9c1301379a6b8c9c9aeecd481da494f"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a5fc78f9e3f501a1614a98f7c54d3969f3ad9bba8ba3d9b438c3bc5d047dd28"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:628c985afb2c7d27a4800bfb609e03985aaecb42f955049957814e0491d4006d"}, - {file = 
"charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:74db0052d985cf37fa111828d0dd230776ac99c740e1a758ad99094be4f1803d"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:1e8fcdd8f672a1c4fc8d0bd3a2b576b152d2a349782d1eb0f6b8e52e9954731d"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:04afa6387e2b282cf78ff3dbce20f0cc071c12dc8f685bd40960cc68644cfea6"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:dd5653e67b149503c68c4018bf07e42eeed6b4e956b24c00ccdf93ac79cdff84"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d2686f91611f9e17f4548dbf050e75b079bbc2a82be565832bc8ea9047b61c8c"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-win32.whl", hash = "sha256:4155b51ae05ed47199dc5b2a4e62abccb274cee6b01da5b895099b61b1982974"}, - {file = "charset_normalizer-3.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:322102cdf1ab682ecc7d9b1c5eed4ec59657a65e1c146a0da342b78f4112db23"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e633940f28c1e913615fd624fcdd72fdba807bf53ea6925d6a588e84e1151531"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3a06f32c9634a8705f4ca9946d667609f52cf130d5548881401f1eb2c39b1e2c"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7381c66e0561c5757ffe616af869b916c8b4e42b367ab29fedc98481d1e74e14"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3573d376454d956553c356df45bb824262c397c6e26ce43e8203c4c540ee0acb"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e89df2958e5159b811af9ff0f92614dabf4ff617c03a4c1c6ff53bf1c399e0e1"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:78cacd03e79d009d95635e7d6ff12c21eb89b894c354bd2b2ed0b4763373693b"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de5695a6f1d8340b12a5d6d4484290ee74d61e467c39ff03b39e30df62cf83a0"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c60b9c202d00052183c9be85e5eaf18a4ada0a47d188a83c8f5c5b23252f649"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f645caaf0008bacf349875a974220f1f1da349c5dbe7c4ec93048cdc785a3326"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ea9f9c6034ea2d93d9147818f17c2a0860d41b71c38b9ce4d55f21b6f9165a11"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:80d1543d58bd3d6c271b66abf454d437a438dff01c3e62fdbcd68f2a11310d4b"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:73dc03a6a7e30b7edc5b01b601e53e7fc924b04e1835e8e407c12c037e81adbd"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6f5c2e7bc8a4bf7c426599765b1bd33217ec84023033672c1e9a8b35eaeaaaf8"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-win32.whl", hash = "sha256:12a2b561af122e3d94cdb97fe6fb2bb2b82cef0cdca131646fdb940a1eda04f0"}, - {file = "charset_normalizer-3.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:3160a0fd9754aab7d47f95a6b63ab355388d890163eb03b2d2b87ab0a30cfa59"}, - {file = 
"charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:38e812a197bf8e71a59fe55b757a84c1f946d0ac114acafaafaf21667a7e169e"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6baf0baf0d5d265fa7944feb9f7451cc316bfe30e8df1a61b1bb08577c554f31"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8f25e17ab3039b05f762b0a55ae0b3632b2e073d9c8fc88e89aca31a6198e88f"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3747443b6a904001473370d7810aa19c3a180ccd52a7157aacc264a5ac79265e"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b116502087ce8a6b7a5f1814568ccbd0e9f6cfd99948aa59b0e241dc57cf739f"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d16fd5252f883eb074ca55cb622bc0bee49b979ae4e8639fff6ca3ff44f9f854"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21fa558996782fc226b529fdd2ed7866c2c6ec91cee82735c98a197fae39f706"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f6c7a8a57e9405cad7485f4c9d3172ae486cfef1344b5ddd8e5239582d7355e"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ac3775e3311661d4adace3697a52ac0bab17edd166087d493b52d4f4f553f9f0"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:10c93628d7497c81686e8e5e557aafa78f230cd9e77dd0c40032ef90c18f2230"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:6f4f4668e1831850ebcc2fd0b1cd11721947b6dc7c00bf1c6bd3c929ae14f2c7"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:0be65ccf618c1e7ac9b849c315cc2e8a8751d9cfdaa43027d4f6624bd587ab7e"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:53d0a3fa5f8af98a1e261de6a3943ca631c526635eb5817a87a59d9a57ebf48f"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-win32.whl", hash = "sha256:a04f86f41a8916fe45ac5024ec477f41f886b3c435da2d4e3d2709b22ab02af1"}, - {file = "charset_normalizer-3.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:830d2948a5ec37c386d3170c483063798d7879037492540f10a475e3fd6f244b"}, - {file = "charset_normalizer-3.1.0-py3-none-any.whl", hash = "sha256:3d9098b479e78c85080c98e1e35ff40b4a31d8953102bb0fd7d1b6f8a2111a3d"}, -] - -[[package]] -name = "click" -version = "8.1.3" -description = "Composable command line interface toolkit" -optional = false -python-versions = ">=3.7" -files = [ - {file = "click-8.1.3-py3-none-any.whl", hash = "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"}, - {file = "click-8.1.3.tar.gz", hash = "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e"}, -] - -[package.dependencies] -colorama = {version = "*", markers = "platform_system == \"Windows\""} - -[[package]] -name = "cmake" -version = "3.26.1" -description = "CMake is an open-source, cross-platform family of tools designed to build, test and package software" -optional = false -python-versions = "*" -files = [ - {file = "cmake-3.26.1-py2.py3-none-macosx_10_10_universal2.macosx_10_10_x86_64.macosx_11_0_arm64.macosx_11_0_universal2.whl", hash = "sha256:d8a7e0cc8677677a732aff3e3fd0ad64eeff43cac772614b03c436912247d0d8"}, - {file = 
"cmake-3.26.1-py2.py3-none-manylinux2010_i686.manylinux_2_12_i686.whl", hash = "sha256:f2f721f5aebe304c281ee4b1d2dfbf7f4a52fca003834b2b4a3ba838aeded63c"}, - {file = "cmake-3.26.1-py2.py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl", hash = "sha256:63a012b72836702eadfe4fba9642aeb17337f26861f4768e837053f40e98cb46"}, - {file = "cmake-3.26.1-py2.py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2b72be88b7bfaa6ae59566cbb9d6a5553f19b2a8d14efa6ac0cf019a29860a1b"}, - {file = "cmake-3.26.1-py2.py3-none-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:1278354f7210e22458aa9137d46a56da1f115a7b76ad2733f0bf6041fb40f1dc"}, - {file = "cmake-3.26.1-py2.py3-none-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:de96a5522917fba0ab0da2d01d9dd9462fa80f365218bf27162d539c2335758f"}, - {file = "cmake-3.26.1-py2.py3-none-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:449928ad7dfcd41e4dcff64c7d44f86557883c70577666a19e79e22d783bbbd0"}, - {file = "cmake-3.26.1-py2.py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:19fa3e457afecf2803265f71652ef17c3f1d317173c330ba46767a0853d38fa0"}, - {file = "cmake-3.26.1-py2.py3-none-musllinux_1_1_aarch64.whl", hash = "sha256:43360650d60d177d979e4ad0a5f31afa286e6d88f5350f7a38c29d94514900eb"}, - {file = "cmake-3.26.1-py2.py3-none-musllinux_1_1_i686.whl", hash = "sha256:16aac10363bc926da5109a59ef8fe46ddcd7e3d421de61f871b35524eef2f1ae"}, - {file = "cmake-3.26.1-py2.py3-none-musllinux_1_1_ppc64le.whl", hash = "sha256:e460ba5070be4dcac9613cb526a46db4e5fa19d8b909a8d8d5244c6cc3c777e1"}, - {file = "cmake-3.26.1-py2.py3-none-musllinux_1_1_s390x.whl", hash = "sha256:fd2ecc0899f7939a014bd906df85e8681bd63ce457de3ab0b5d9e369fa3bdf79"}, - {file = "cmake-3.26.1-py2.py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:22781a23e274ba9bf380b970649654851c1b4b9d83b65fec12ee2e2e03b6ffc4"}, - {file = "cmake-3.26.1-py2.py3-none-win32.whl", hash = "sha256:7b4e81de30ac1fb2f1eb5287063e140b53f376fd9ed7e2060c1c7b5917bd5f83"}, - {file = "cmake-3.26.1-py2.py3-none-win_amd64.whl", hash = "sha256:90845b6c87a25be07e9220f67dd7f6c891c6ec14d764d37335218d97f9ea4520"}, - {file = "cmake-3.26.1-py2.py3-none-win_arm64.whl", hash = "sha256:43bd96327e2631183bb4829ba20cb810e20b4b0c68f852fcd7082fbb5359d57c"}, - {file = "cmake-3.26.1.tar.gz", hash = "sha256:4e0eb3c03dcf2d459f78d96cc85f7482476aeb1ae5ada65150b1db35c0f70cc7"}, -] - -[package.extras] -test = ["codecov (>=2.0.5)", "coverage (>=4.2)", "flake8 (>=3.0.4)", "path.py (>=11.5.0)", "pytest (>=3.0.3)", "pytest-cov (>=2.4.0)", "pytest-runner (>=2.9)", "pytest-virtualenv (>=1.7.0)", "scikit-build (>=0.10.0)", "setuptools (>=28.0.0)", "virtualenv (>=15.0.3)", "wheel"] - -[[package]] -name = "colorama" -version = "0.4.6" -description = "Cross-platform colored terminal text." 
-optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" -files = [ - {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, - {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, -] - -[[package]] -name = "contourpy" -version = "1.0.7" -description = "Python library for calculating contours of 2D quadrilateral grids" -optional = false -python-versions = ">=3.8" -files = [ - {file = "contourpy-1.0.7-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:95c3acddf921944f241b6773b767f1cbce71d03307270e2d769fd584d5d1092d"}, - {file = "contourpy-1.0.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:fc1464c97579da9f3ab16763c32e5c5d5bb5fa1ec7ce509a4ca6108b61b84fab"}, - {file = "contourpy-1.0.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8acf74b5d383414401926c1598ed77825cd530ac7b463ebc2e4f46638f56cce6"}, - {file = "contourpy-1.0.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c71fdd8f1c0f84ffd58fca37d00ca4ebaa9e502fb49825484da075ac0b0b803"}, - {file = "contourpy-1.0.7-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f99e9486bf1bb979d95d5cffed40689cb595abb2b841f2991fc894b3452290e8"}, - {file = "contourpy-1.0.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87f4d8941a9564cda3f7fa6a6cd9b32ec575830780677932abdec7bcb61717b0"}, - {file = "contourpy-1.0.7-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9e20e5a1908e18aaa60d9077a6d8753090e3f85ca25da6e25d30dc0a9e84c2c6"}, - {file = "contourpy-1.0.7-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:a877ada905f7d69b2a31796c4b66e31a8068b37aa9b78832d41c82fc3e056ddd"}, - {file = "contourpy-1.0.7-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6381fa66866b0ea35e15d197fc06ac3840a9b2643a6475c8fff267db8b9f1e69"}, - {file = "contourpy-1.0.7-cp310-cp310-win32.whl", hash = "sha256:3c184ad2433635f216645fdf0493011a4667e8d46b34082f5a3de702b6ec42e3"}, - {file = "contourpy-1.0.7-cp310-cp310-win_amd64.whl", hash = "sha256:3caea6365b13119626ee996711ab63e0c9d7496f65641f4459c60a009a1f3e80"}, - {file = "contourpy-1.0.7-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ed33433fc3820263a6368e532f19ddb4c5990855e4886088ad84fd7c4e561c71"}, - {file = "contourpy-1.0.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:38e2e577f0f092b8e6774459317c05a69935a1755ecfb621c0a98f0e3c09c9a5"}, - {file = "contourpy-1.0.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ae90d5a8590e5310c32a7630b4b8618cef7563cebf649011da80874d0aa8f414"}, - {file = "contourpy-1.0.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:130230b7e49825c98edf0b428b7aa1125503d91732735ef897786fe5452b1ec2"}, - {file = "contourpy-1.0.7-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:58569c491e7f7e874f11519ef46737cea1d6eda1b514e4eb5ac7dab6aa864d02"}, - {file = "contourpy-1.0.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:54d43960d809c4c12508a60b66cb936e7ed57d51fb5e30b513934a4a23874fae"}, - {file = "contourpy-1.0.7-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:152fd8f730c31fd67fe0ffebe1df38ab6a669403da93df218801a893645c6ccc"}, - {file = "contourpy-1.0.7-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:9056c5310eb1daa33fc234ef39ebfb8c8e2533f088bbf0bc7350f70a29bde1ac"}, - {file = "contourpy-1.0.7-cp311-cp311-musllinux_1_1_x86_64.whl", hash 
= "sha256:a9d7587d2fdc820cc9177139b56795c39fb8560f540bba9ceea215f1f66e1566"}, - {file = "contourpy-1.0.7-cp311-cp311-win32.whl", hash = "sha256:4ee3ee247f795a69e53cd91d927146fb16c4e803c7ac86c84104940c7d2cabf0"}, - {file = "contourpy-1.0.7-cp311-cp311-win_amd64.whl", hash = "sha256:5caeacc68642e5f19d707471890f037a13007feba8427eb7f2a60811a1fc1350"}, - {file = "contourpy-1.0.7-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:fd7dc0e6812b799a34f6d12fcb1000539098c249c8da54f3566c6a6461d0dbad"}, - {file = "contourpy-1.0.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0f9d350b639db6c2c233d92c7f213d94d2e444d8e8fc5ca44c9706cf72193772"}, - {file = "contourpy-1.0.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e96a08b62bb8de960d3a6afbc5ed8421bf1a2d9c85cc4ea73f4bc81b4910500f"}, - {file = "contourpy-1.0.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:031154ed61f7328ad7f97662e48660a150ef84ee1bc8876b6472af88bf5a9b98"}, - {file = "contourpy-1.0.7-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e9ebb4425fc1b658e13bace354c48a933b842d53c458f02c86f371cecbedecc"}, - {file = "contourpy-1.0.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efb8f6d08ca7998cf59eaf50c9d60717f29a1a0a09caa46460d33b2924839dbd"}, - {file = "contourpy-1.0.7-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6c180d89a28787e4b73b07e9b0e2dac7741261dbdca95f2b489c4f8f887dd810"}, - {file = "contourpy-1.0.7-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:b8d587cc39057d0afd4166083d289bdeff221ac6d3ee5046aef2d480dc4b503c"}, - {file = "contourpy-1.0.7-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:769eef00437edf115e24d87f8926955f00f7704bede656ce605097584f9966dc"}, - {file = "contourpy-1.0.7-cp38-cp38-win32.whl", hash = "sha256:62398c80ef57589bdbe1eb8537127321c1abcfdf8c5f14f479dbbe27d0322e66"}, - {file = "contourpy-1.0.7-cp38-cp38-win_amd64.whl", hash = "sha256:57119b0116e3f408acbdccf9eb6ef19d7fe7baf0d1e9aaa5381489bc1aa56556"}, - {file = "contourpy-1.0.7-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:30676ca45084ee61e9c3da589042c24a57592e375d4b138bd84d8709893a1ba4"}, - {file = "contourpy-1.0.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3e927b3868bd1e12acee7cc8f3747d815b4ab3e445a28d2e5373a7f4a6e76ba1"}, - {file = "contourpy-1.0.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:366a0cf0fc079af5204801786ad7a1c007714ee3909e364dbac1729f5b0849e5"}, - {file = "contourpy-1.0.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89ba9bb365446a22411f0673abf6ee1fea3b2cf47b37533b970904880ceb72f3"}, - {file = "contourpy-1.0.7-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:71b0bf0c30d432278793d2141362ac853859e87de0a7dee24a1cea35231f0d50"}, - {file = "contourpy-1.0.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e7281244c99fd7c6f27c1c6bfafba878517b0b62925a09b586d88ce750a016d2"}, - {file = "contourpy-1.0.7-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b6d0f9e1d39dbfb3977f9dd79f156c86eb03e57a7face96f199e02b18e58d32a"}, - {file = "contourpy-1.0.7-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7f6979d20ee5693a1057ab53e043adffa1e7418d734c1532e2d9e915b08d8ec2"}, - {file = "contourpy-1.0.7-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5dd34c1ae752515318224cba7fc62b53130c45ac6a1040c8b7c1a223c46e8967"}, - {file = "contourpy-1.0.7-cp39-cp39-win32.whl", hash = "sha256:c5210e5d5117e9aec8c47d9156d1d3835570dd909a899171b9535cb4a3f32693"}, - {file = 
"contourpy-1.0.7-cp39-cp39-win_amd64.whl", hash = "sha256:60835badb5ed5f4e194a6f21c09283dd6e007664a86101431bf870d9e86266c4"}, - {file = "contourpy-1.0.7-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ce41676b3d0dd16dbcfabcc1dc46090aaf4688fd6e819ef343dbda5a57ef0161"}, - {file = "contourpy-1.0.7-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5a011cf354107b47c58ea932d13b04d93c6d1d69b8b6dce885e642531f847566"}, - {file = "contourpy-1.0.7-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:31a55dccc8426e71817e3fe09b37d6d48ae40aae4ecbc8c7ad59d6893569c436"}, - {file = "contourpy-1.0.7-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:69f8ff4db108815addd900a74df665e135dbbd6547a8a69333a68e1f6e368ac2"}, - {file = "contourpy-1.0.7-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:efe99298ba37e37787f6a2ea868265465410822f7bea163edcc1bd3903354ea9"}, - {file = "contourpy-1.0.7-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a1e97b86f73715e8670ef45292d7cc033548266f07d54e2183ecb3c87598888f"}, - {file = "contourpy-1.0.7-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc331c13902d0f50845099434cd936d49d7a2ca76cb654b39691974cb1e4812d"}, - {file = "contourpy-1.0.7-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:24847601071f740837aefb730e01bd169fbcaa610209779a78db7ebb6e6a7051"}, - {file = "contourpy-1.0.7-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:abf298af1e7ad44eeb93501e40eb5a67abbf93b5d90e468d01fc0c4451971afa"}, - {file = "contourpy-1.0.7-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:64757f6460fc55d7e16ed4f1de193f362104285c667c112b50a804d482777edd"}, - {file = "contourpy-1.0.7.tar.gz", hash = "sha256:d8165a088d31798b59e91117d1f5fc3df8168d8b48c4acc10fc0df0d0bdbcc5e"}, -] - -[package.dependencies] -numpy = ">=1.16" - -[package.extras] -bokeh = ["bokeh", "chromedriver", "selenium"] -docs = ["furo", "sphinx-copybutton"] -mypy = ["contourpy[bokeh]", "docutils-stubs", "mypy (==0.991)", "types-Pillow"] -test = ["Pillow", "matplotlib", "pytest"] -test-no-images = ["pytest"] - -[[package]] -name = "cycler" -version = "0.11.0" -description = "Composable style cycles" -optional = false -python-versions = ">=3.6" -files = [ - {file = "cycler-0.11.0-py3-none-any.whl", hash = "sha256:3a27e95f763a428a739d2add979fa7494c912a32c17c4c38c4d5f082cad165a3"}, - {file = "cycler-0.11.0.tar.gz", hash = "sha256:9c87405839a19696e837b3b818fed3f5f69f16f1eec1a1ad77e043dcea9c772f"}, -] - -[[package]] -name = "cython" -version = "0.29.34" -description = "The Cython compiler for writing C extensions for the Python language." 
-optional = false -python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" -files = [ - {file = "Cython-0.29.34-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:742544024ddb74314e2d597accdb747ed76bd126e61fcf49940a5b5be0a8f381"}, - {file = "Cython-0.29.34-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:03daae07f8cbf797506446adae512c3dd86e7f27a62a541fa1ee254baf43e32c"}, - {file = "Cython-0.29.34-cp27-cp27mu-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5a8de3e793a576e40ca9b4f5518610cd416273c7dc5e254115656b6e4ec70663"}, - {file = "Cython-0.29.34-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:60969d38e6a456a67e7ef8ae20668eff54e32ba439d4068ccf2854a44275a30f"}, - {file = "Cython-0.29.34-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:21b88200620d80cfe193d199b259cdad2b9af56f916f0f7f474b5a3631ca0caa"}, - {file = "Cython-0.29.34-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:308c8f1e58bf5e6e8a1c4dcf8abbd2d13d0f9b1e582f4d9ae8b89857342d8bb5"}, - {file = "Cython-0.29.34-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_24_i686.whl", hash = "sha256:d8f822fb6ecd5d88c42136561f82960612421154fc5bf23c57103a367bb91356"}, - {file = "Cython-0.29.34-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:56866323f1660cecb4d5ff3a1fba92a56b91b7cfae0a8253777aa4bdb3bdf9a8"}, - {file = "Cython-0.29.34-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:e971db8aeb12e7c0697cefafe65eefcc33ff1224ae3d8c7f83346cbc42c6c270"}, - {file = "Cython-0.29.34-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:e4401270b0dc464c23671e2e9d52a60985f988318febaf51b047190e855bbe7d"}, - {file = "Cython-0.29.34-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_24_i686.whl", hash = "sha256:dce0a36d163c05ae8b21200059511217d79b47baf2b7b0f926e8367bd7a3cc24"}, - {file = "Cython-0.29.34-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:dbd79221869ee9a6ccc4953b2c8838bb6ae08ab4d50ea4b60d7894f03739417b"}, - {file = "Cython-0.29.34-cp35-cp35m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a0f4229df10bc4545ebbeaaf96ebb706011d8b333e54ed202beb03f2bee0a50e"}, - {file = "Cython-0.29.34-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:fd1ea21f1cebf33ae288caa0f3e9b5563a709f4df8925d53bad99be693fc0d9b"}, - {file = "Cython-0.29.34-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:d7ef5f68f4c5baa93349ea54a352f8716d18bee9a37f3e93eff38a5d4e9b7262"}, - {file = "Cython-0.29.34-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:459994d1de0f99bb18fad9f2325f760c4b392b1324aef37bcc1cd94922dfce41"}, - {file = "Cython-0.29.34-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_24_i686.whl", hash = "sha256:1d6c809e2f9ce5950bbc52a1d2352ef3d4fc56186b64cb0d50c8c5a3c1d17661"}, - {file = "Cython-0.29.34-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f674ceb5f722d364395f180fbac273072fc1a266aab924acc9cfd5afc645aae1"}, - {file = "Cython-0.29.34-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:9489de5b2044dcdfd9d6ca8242a02d560137b3c41b1f5ae1c4f6707d66d6e44d"}, - {file = "Cython-0.29.34-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:5c121dc185040f4333bfded68963b4529698e1b6d994da56be32c97a90c896b6"}, - {file = 
"Cython-0.29.34-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:b6149f7cc5b31bccb158c5b968e5a8d374fdc629792e7b928a9b66e08b03fca5"}, - {file = "Cython-0.29.34-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:0ab3cbf3d62b0354631a45dc93cfcdf79098663b1c65a6033af4a452b52217a7"}, - {file = "Cython-0.29.34-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_24_i686.whl", hash = "sha256:4a2723447d1334484681d5aede34184f2da66317891f94b80e693a2f96a8f1a7"}, - {file = "Cython-0.29.34-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e40cf86aadc29ecd1cb6de67b0d9488705865deea4fc185c7ad56d7a6fc78703"}, - {file = "Cython-0.29.34-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:8c3cd8bb8e880a3346f5685601004d96e0a2221e73edcaeea57ea848618b4ac6"}, - {file = "Cython-0.29.34-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0e9032cd650b0cb1d2c2ef2623f5714c14d14c28d7647d589c3eeed0baf7428e"}, - {file = "Cython-0.29.34-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:bdb3285660e3068438791ace7dd7b1efd6b442a10b5c8d7a4f0c9d184d08c8ed"}, - {file = "Cython-0.29.34-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:a8ad755f9364e720f10a36734a1c7a5ced5c679446718b589259261438a517c9"}, - {file = "Cython-0.29.34-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_24_i686.whl", hash = "sha256:7595d29eaee95633dd8060f50f0e54b27472d01587659557ebcfe39da3ea946b"}, - {file = "Cython-0.29.34-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e6ef7879668214d80ea3914c17e7d4e1ebf4242e0dd4dabe95ca5ccbe75589a5"}, - {file = "Cython-0.29.34-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:ccb223b5f0fd95d8d27561efc0c14502c0945f1a32274835831efa5d5baddfc1"}, - {file = "Cython-0.29.34-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:11b1b278b8edef215caaa5250ad65a10023bfa0b5a93c776552248fc6f60098d"}, - {file = "Cython-0.29.34-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:5718319a01489688fdd22ddebb8e2fcbbd60be5f30de4336ea7063c3ae29fbe5"}, - {file = "Cython-0.29.34-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:cfb2302ef617d647ee590a4c0a00ba3c2da05f301dcefe7721125565d2e51351"}, - {file = "Cython-0.29.34-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_24_i686.whl", hash = "sha256:67b850cf46b861bc27226d31e1d87c0e69869a02f8d3cc5d5bef549764029879"}, - {file = "Cython-0.29.34-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0963266dad685812c1dbb758fcd4de78290e3adc7db271c8664dcde27380b13e"}, - {file = "Cython-0.29.34-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:7879992487d9060a61393eeefe00d299210256928dce44d887b6be313d342bac"}, - {file = "Cython-0.29.34-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:44733366f1604b0c327613b6918469284878d2f5084297d10d26072fc6948d51"}, - {file = "Cython-0.29.34-py2.py3-none-any.whl", hash = "sha256:be4f6b7be75a201c290c8611c0978549c60353890204573078e865423dbe3c83"}, - {file = "Cython-0.29.34.tar.gz", hash = "sha256:1909688f5d7b521a60c396d20bba9e47a1b2d2784bfb085401e1e1e7d29a29a8"}, -] - -[[package]] -name = "decorator" -version = "5.1.1" -description = "Decorators for Humans" -optional = false -python-versions = ">=3.5" -files = [ - {file = "decorator-5.1.1-py3-none-any.whl", hash = 
"sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186"}, - {file = "decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330"}, -] - -[[package]] -name = "entrypoints" -version = "0.4" -description = "Discover and load entry points from installed packages." -optional = false -python-versions = ">=3.6" -files = [ - {file = "entrypoints-0.4-py3-none-any.whl", hash = "sha256:f174b5ff827504fd3cd97cc3f8649f3693f51538c7e4bdf3ef002c8429d42f9f"}, - {file = "entrypoints-0.4.tar.gz", hash = "sha256:b706eddaa9218a19ebcd67b56818f05bb27589b1ca9e8d797b74affad4ccacd4"}, -] - -[[package]] -name = "fairseq" -version = "0.12.2" -description = "Facebook AI Research Sequence-to-Sequence Toolkit" -optional = false -python-versions = "*" -files = [ - {file = "fairseq-0.12.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:fe65b07c5121b7cda0c7a17166994a6b0059259ce37881b6daa117b8c209b662"}, - {file = "fairseq-0.12.2-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:0543905012e39f00bd8c3f3781d9f49e76ab309801eb2eb7de250f5984df0de3"}, - {file = "fairseq-0.12.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c4877d65346797fc580a3a7e6e2364d2331a0026ef099c22eb8311441e49c2c6"}, - {file = "fairseq-0.12.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:26454f334ca705c67f898846dff34e14c148fcdaf53b4f52d64209773b509347"}, - {file = "fairseq-0.12.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3b8c8b6dc368d2fd23a06ff613a2af05959eee275fe90846d7cffef4a43c522a"}, - {file = "fairseq-0.12.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:08fa308c760f995cdc13d9c385e2b9d923a78b48275d8b4d78f3a854c71a8f29"}, - {file = "fairseq-0.12.2.tar.gz", hash = "sha256:34f1b18426bf3844714534162f065ab733e049597476daa35fffb4d06a92b524"}, -] - -[package.dependencies] -bitarray = "*" -cffi = "*" -cython = "*" -hydra-core = ">=1.0.7,<1.1" -numpy = {version = "*", markers = "python_version >= \"3.7\""} -omegaconf = "<2.1" -regex = "*" -sacrebleu = ">=1.4.12" -torch = "*" -torchaudio = ">=0.8.0" -tqdm = "*" - -[[package]] -name = "faiss-cpu" -version = "1.7.3" -description = "A library for efficient similarity search and clustering of dense vectors." 
-optional = false -python-versions = "*" -files = [ - {file = "faiss-cpu-1.7.3.tar.gz", hash = "sha256:cb71fe3f2934732d157d9d8cfb6ed2dd4020a0065571c84842ff6a3f0beab310"}, - {file = "faiss_cpu-1.7.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:343f025e0846239d987d0c719772387ad685b74e5ef62b2e5616cabef9062729"}, - {file = "faiss_cpu-1.7.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8b7b1cf693d7c24b5a633ff024717bd715fec501af4854357da0805b4899bcec"}, - {file = "faiss_cpu-1.7.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c37e5fc0a266839844798a53dd42dd6afbee0c5905611f3f278297053fccbd7"}, - {file = "faiss_cpu-1.7.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0628f7b0c6263ef4431995bb4f5f39833f999e96e6663935cbf0a1f2243dc4ac"}, - {file = "faiss_cpu-1.7.3-cp310-cp310-win_amd64.whl", hash = "sha256:e22d1887c617156a673665c913ee82a30bfc1a3bc939ba8500b61328bce5a625"}, - {file = "faiss_cpu-1.7.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6d411449a5f3c3abfcafadaac3190ab1ab206023fc9110da86649506dcbe8a27"}, - {file = "faiss_cpu-1.7.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a10ea8622908f9f9ca4003e66da809dfad4af5c7d9fb7f582722d703bbc6c8bd"}, - {file = "faiss_cpu-1.7.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7c5ced43ae058a62f63b12194ec9aa4c34066b0ea813ecbd936c65b7d52848c8"}, - {file = "faiss_cpu-1.7.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3df6371012248dea8e9509949e2d2c6d73dea7c1bdaa4ba4563eb1c3cd8021a6"}, - {file = "faiss_cpu-1.7.3-cp311-cp311-win_amd64.whl", hash = "sha256:8b6ff7854c3f46104718c6b34e81cd48c156d970dd87703c5122ca90217bb8dc"}, - {file = "faiss_cpu-1.7.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:ab6314a8fbcce11dc3ecb6f48dda8c4ec274ed11c1f336f599f480bf0561442c"}, - {file = "faiss_cpu-1.7.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:877c0bbf4c4a1806d88e091aba4c91ff3fa35c3ede5663b7fafc5b39247a369e"}, - {file = "faiss_cpu-1.7.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6f199be10d30ecc6ed65350931006eca01b7bb8faa27d63069318eea0f6a0c1"}, - {file = "faiss_cpu-1.7.3-cp37-cp37m-win_amd64.whl", hash = "sha256:1ca2b7cdbfdcc6a2e8fa75a09594916b50ec8260913ca48334dc3ce797179b5f"}, - {file = "faiss_cpu-1.7.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7b3f91856c19cfb8464178bab7e8ea94a391f6947b556be6754f9fc10b3c25fb"}, - {file = "faiss_cpu-1.7.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7a238a0ef4d36c614d6f60e1ea308288b3920091638a3687f708de6071d007c1"}, - {file = "faiss_cpu-1.7.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:af53bee502c629eaaaf8b5ec648484a726be0fd2768ad4ef2bd4b829384b2682"}, - {file = "faiss_cpu-1.7.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:441d1c305595d925138f2cde63dabe8c10ee05fc8ad66bf750e278a7e8c409bd"}, - {file = "faiss_cpu-1.7.3-cp38-cp38-win_amd64.whl", hash = "sha256:2766cc14b9004c1aae3b3943e693c3a9566eb1a25168b681981f9048276fe1e7"}, - {file = "faiss_cpu-1.7.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:20ef191bb6164c8e794b11d20427568a75d15980b6d66732071e9aa57ea06e2d"}, - {file = "faiss_cpu-1.7.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c57c293c4682066955626c2a2956be9a3b92594f69ed1a33abd72260a6911b69"}, - {file = "faiss_cpu-1.7.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:bd128170446ff3c3e28d89e813d32cd04f17fa3025794778a01a0d81524275dc"}, - {file = "faiss_cpu-1.7.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a14d832b5361ce9af21977eb1dcdebe23b9edcc12aad40316df7ca1bd86bc6b5"}, - {file = "faiss_cpu-1.7.3-cp39-cp39-win_amd64.whl", hash = "sha256:52df8895c5e59d1c9eda368a63790381a6f7fceddb22bed08f9c90a706d8a148"}, -] - -[[package]] -name = "fastapi" -version = "0.95.2" -description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production" -optional = false -python-versions = ">=3.7" -files = [ - {file = "fastapi-0.95.2-py3-none-any.whl", hash = "sha256:d374dbc4ef2ad9b803899bd3360d34c534adc574546e25314ab72c0c4411749f"}, - {file = "fastapi-0.95.2.tar.gz", hash = "sha256:4d9d3e8c71c73f11874bcf5e33626258d143252e329a01002f767306c64fb982"}, -] - -[package.dependencies] -pydantic = ">=1.6.2,<1.7 || >1.7,<1.7.1 || >1.7.1,<1.7.2 || >1.7.2,<1.7.3 || >1.7.3,<1.8 || >1.8,<1.8.1 || >1.8.1,<2.0.0" -starlette = ">=0.27.0,<0.28.0" - -[package.extras] -all = ["email-validator (>=1.1.1)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=2.11.2)", "orjson (>=3.2.1)", "python-multipart (>=0.0.5)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"] -dev = ["pre-commit (>=2.17.0,<3.0.0)", "ruff (==0.0.138)", "uvicorn[standard] (>=0.12.0,<0.21.0)"] -doc = ["mdx-include (>=1.4.1,<2.0.0)", "mkdocs (>=1.1.2,<2.0.0)", "mkdocs-markdownextradata-plugin (>=0.1.7,<0.3.0)", "mkdocs-material (>=8.1.4,<9.0.0)", "pyyaml (>=5.3.1,<7.0.0)", "typer-cli (>=0.0.13,<0.0.14)", "typer[all] (>=0.6.1,<0.8.0)"] -test = ["anyio[trio] (>=3.2.1,<4.0.0)", "black (==23.1.0)", "coverage[toml] (>=6.5.0,<8.0)", "databases[sqlite] (>=0.3.2,<0.7.0)", "email-validator (>=1.1.1,<2.0.0)", "flask (>=1.1.2,<3.0.0)", "httpx (>=0.23.0,<0.24.0)", "isort (>=5.0.6,<6.0.0)", "mypy (==0.982)", "orjson (>=3.2.1,<4.0.0)", "passlib[bcrypt] (>=1.7.2,<2.0.0)", "peewee (>=3.13.3,<4.0.0)", "pytest (>=7.1.3,<8.0.0)", "python-jose[cryptography] (>=3.3.0,<4.0.0)", "python-multipart (>=0.0.5,<0.0.7)", "pyyaml (>=5.3.1,<7.0.0)", "ruff (==0.0.138)", "sqlalchemy (>=1.3.18,<1.4.43)", "types-orjson (==3.6.2)", "types-ujson (==5.7.0.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0,<6.0.0)"] - -[[package]] -name = "ffmpeg-python" -version = "0.2.0" -description = "Python bindings for FFmpeg - with complex filtering support" -optional = false -python-versions = "*" -files = [ - {file = "ffmpeg-python-0.2.0.tar.gz", hash = "sha256:65225db34627c578ef0e11c8b1eb528bb35e024752f6f10b78c011f6f64c4127"}, - {file = "ffmpeg_python-0.2.0-py3-none-any.whl", hash = "sha256:ac441a0404e053f8b6a1113a77c0f452f1cfc62f6344a769475ffdc0f56c23c5"}, -] - -[package.dependencies] -future = "*" - -[package.extras] -dev = ["Sphinx (==2.1.0)", "future (==0.17.1)", "numpy (==1.16.4)", "pytest (==4.6.1)", "pytest-mock (==1.10.4)", "tox (==3.12.1)"] - -[[package]] -name = "ffmpy" -version = "0.3.0" -description = "A simple Python wrapper for ffmpeg" -optional = false -python-versions = "*" -files = [ - {file = "ffmpy-0.3.0.tar.gz", hash = "sha256:757591581eee25b4a50ac9ffb9b58035a2794533db47e0512f53fb2d7b6f9adc"}, -] - -[[package]] -name = "filelock" -version = "3.10.7" -description = "A platform independent file lock." 
-optional = false -python-versions = ">=3.7" -files = [ - {file = "filelock-3.10.7-py3-none-any.whl", hash = "sha256:bde48477b15fde2c7e5a0713cbe72721cb5a5ad32ee0b8f419907960b9d75536"}, - {file = "filelock-3.10.7.tar.gz", hash = "sha256:892be14aa8efc01673b5ed6589dbccb95f9a8596f0507e232626155495c18105"}, -] - -[package.extras] -docs = ["furo (>=2022.12.7)", "sphinx (>=6.1.3)", "sphinx-autodoc-typehints (>=1.22,!=1.23.4)"] -testing = ["covdefaults (>=2.3)", "coverage (>=7.2.2)", "diff-cover (>=7.5)", "pytest (>=7.2.2)", "pytest-cov (>=4)", "pytest-mock (>=3.10)", "pytest-timeout (>=2.1)"] - -[[package]] -name = "fonttools" -version = "4.39.3" -description = "Tools to manipulate font files" -optional = false -python-versions = ">=3.8" -files = [ - {file = "fonttools-4.39.3-py3-none-any.whl", hash = "sha256:64c0c05c337f826183637570ac5ab49ee220eec66cf50248e8df527edfa95aeb"}, - {file = "fonttools-4.39.3.zip", hash = "sha256:9234b9f57b74e31b192c3fc32ef1a40750a8fbc1cd9837a7b7bfc4ca4a5c51d7"}, -] - -[package.extras] -all = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "fs (>=2.2.0,<3)", "lxml (>=4.0,<5)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres", "scipy", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=15.0.0)", "xattr", "zopfli (>=0.1.4)"] -graphite = ["lz4 (>=1.7.4.2)"] -interpolatable = ["munkres", "scipy"] -lxml = ["lxml (>=4.0,<5)"] -pathops = ["skia-pathops (>=0.5.0)"] -plot = ["matplotlib"] -repacker = ["uharfbuzz (>=0.23.0)"] -symfont = ["sympy"] -type1 = ["xattr"] -ufo = ["fs (>=2.2.0,<3)"] -unicode = ["unicodedata2 (>=15.0.0)"] -woff = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "zopfli (>=0.1.4)"] - -[[package]] -name = "frozenlist" -version = "1.3.3" -description = "A list-like structure which implements collections.abc.MutableSequence" -optional = false -python-versions = ">=3.7" -files = [ - {file = "frozenlist-1.3.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ff8bf625fe85e119553b5383ba0fb6aa3d0ec2ae980295aaefa552374926b3f4"}, - {file = "frozenlist-1.3.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:dfbac4c2dfcc082fcf8d942d1e49b6aa0766c19d3358bd86e2000bf0fa4a9cf0"}, - {file = "frozenlist-1.3.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b1c63e8d377d039ac769cd0926558bb7068a1f7abb0f003e3717ee003ad85530"}, - {file = "frozenlist-1.3.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7fdfc24dcfce5b48109867c13b4cb15e4660e7bd7661741a391f821f23dfdca7"}, - {file = "frozenlist-1.3.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2c926450857408e42f0bbc295e84395722ce74bae69a3b2aa2a65fe22cb14b99"}, - {file = "frozenlist-1.3.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1841e200fdafc3d51f974d9d377c079a0694a8f06de2e67b48150328d66d5483"}, - {file = "frozenlist-1.3.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f470c92737afa7d4c3aacc001e335062d582053d4dbe73cda126f2d7031068dd"}, - {file = "frozenlist-1.3.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:783263a4eaad7c49983fe4b2e7b53fa9770c136c270d2d4bbb6d2192bf4d9caf"}, - {file = "frozenlist-1.3.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:924620eef691990dfb56dc4709f280f40baee568c794b5c1885800c3ecc69816"}, - {file = "frozenlist-1.3.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:ae4dc05c465a08a866b7a1baf360747078b362e6a6dbeb0c57f234db0ef88ae0"}, - {file = 
"frozenlist-1.3.3-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:bed331fe18f58d844d39ceb398b77d6ac0b010d571cba8267c2e7165806b00ce"}, - {file = "frozenlist-1.3.3-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:02c9ac843e3390826a265e331105efeab489ffaf4dd86384595ee8ce6d35ae7f"}, - {file = "frozenlist-1.3.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9545a33965d0d377b0bc823dcabf26980e77f1b6a7caa368a365a9497fb09420"}, - {file = "frozenlist-1.3.3-cp310-cp310-win32.whl", hash = "sha256:d5cd3ab21acbdb414bb6c31958d7b06b85eeb40f66463c264a9b343a4e238642"}, - {file = "frozenlist-1.3.3-cp310-cp310-win_amd64.whl", hash = "sha256:b756072364347cb6aa5b60f9bc18e94b2f79632de3b0190253ad770c5df17db1"}, - {file = "frozenlist-1.3.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:b4395e2f8d83fbe0c627b2b696acce67868793d7d9750e90e39592b3626691b7"}, - {file = "frozenlist-1.3.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:14143ae966a6229350021384870458e4777d1eae4c28d1a7aa47f24d030e6678"}, - {file = "frozenlist-1.3.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5d8860749e813a6f65bad8285a0520607c9500caa23fea6ee407e63debcdbef6"}, - {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23d16d9f477bb55b6154654e0e74557040575d9d19fe78a161bd33d7d76808e8"}, - {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eb82dbba47a8318e75f679690190c10a5e1f447fbf9df41cbc4c3afd726d88cb"}, - {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9309869032abb23d196cb4e4db574232abe8b8be1339026f489eeb34a4acfd91"}, - {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a97b4fe50b5890d36300820abd305694cb865ddb7885049587a5678215782a6b"}, - {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c188512b43542b1e91cadc3c6c915a82a5eb95929134faf7fd109f14f9892ce4"}, - {file = "frozenlist-1.3.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:303e04d422e9b911a09ad499b0368dc551e8c3cd15293c99160c7f1f07b59a48"}, - {file = "frozenlist-1.3.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:0771aed7f596c7d73444c847a1c16288937ef988dc04fb9f7be4b2aa91db609d"}, - {file = "frozenlist-1.3.3-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:66080ec69883597e4d026f2f71a231a1ee9887835902dbe6b6467d5a89216cf6"}, - {file = "frozenlist-1.3.3-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:41fe21dc74ad3a779c3d73a2786bdf622ea81234bdd4faf90b8b03cad0c2c0b4"}, - {file = "frozenlist-1.3.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f20380df709d91525e4bee04746ba612a4df0972c1b8f8e1e8af997e678c7b81"}, - {file = "frozenlist-1.3.3-cp311-cp311-win32.whl", hash = "sha256:f30f1928162e189091cf4d9da2eac617bfe78ef907a761614ff577ef4edfb3c8"}, - {file = "frozenlist-1.3.3-cp311-cp311-win_amd64.whl", hash = "sha256:a6394d7dadd3cfe3f4b3b186e54d5d8504d44f2d58dcc89d693698e8b7132b32"}, - {file = "frozenlist-1.3.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8df3de3a9ab8325f94f646609a66cbeeede263910c5c0de0101079ad541af332"}, - {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0693c609e9742c66ba4870bcee1ad5ff35462d5ffec18710b4ac89337ff16e27"}, - {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:cd4210baef299717db0a600d7a3cac81d46ef0e007f88c9335db79f8979c0d3d"}, - {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:394c9c242113bfb4b9aa36e2b80a05ffa163a30691c7b5a29eba82e937895d5e"}, - {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6327eb8e419f7d9c38f333cde41b9ae348bec26d840927332f17e887a8dcb70d"}, - {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e24900aa13212e75e5b366cb9065e78bbf3893d4baab6052d1aca10d46d944c"}, - {file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:3843f84a6c465a36559161e6c59dce2f2ac10943040c2fd021cfb70d58c4ad56"}, - {file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:84610c1502b2461255b4c9b7d5e9c48052601a8957cd0aea6ec7a7a1e1fb9420"}, - {file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:c21b9aa40e08e4f63a2f92ff3748e6b6c84d717d033c7b3438dd3123ee18f70e"}, - {file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:efce6ae830831ab6a22b9b4091d411698145cb9b8fc869e1397ccf4b4b6455cb"}, - {file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:40de71985e9042ca00b7953c4f41eabc3dc514a2d1ff534027f091bc74416401"}, - {file = "frozenlist-1.3.3-cp37-cp37m-win32.whl", hash = "sha256:180c00c66bde6146a860cbb81b54ee0df350d2daf13ca85b275123bbf85de18a"}, - {file = "frozenlist-1.3.3-cp37-cp37m-win_amd64.whl", hash = "sha256:9bbbcedd75acdfecf2159663b87f1bb5cfc80e7cd99f7ddd9d66eb98b14a8411"}, - {file = "frozenlist-1.3.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:034a5c08d36649591be1cbb10e09da9f531034acfe29275fc5454a3b101ce41a"}, - {file = "frozenlist-1.3.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ba64dc2b3b7b158c6660d49cdb1d872d1d0bf4e42043ad8d5006099479a194e5"}, - {file = "frozenlist-1.3.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:47df36a9fe24054b950bbc2db630d508cca3aa27ed0566c0baf661225e52c18e"}, - {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:008a054b75d77c995ea26629ab3a0c0d7281341f2fa7e1e85fa6153ae29ae99c"}, - {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:841ea19b43d438a80b4de62ac6ab21cfe6827bb8a9dc62b896acc88eaf9cecba"}, - {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e235688f42b36be2b6b06fc37ac2126a73b75fb8d6bc66dd632aa35286238703"}, - {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca713d4af15bae6e5d79b15c10c8522859a9a89d3b361a50b817c98c2fb402a2"}, - {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ac5995f2b408017b0be26d4a1d7c61bce106ff3d9e3324374d66b5964325448"}, - {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a4ae8135b11652b08a8baf07631d3ebfe65a4c87909dbef5fa0cdde440444ee4"}, - {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:4ea42116ceb6bb16dbb7d526e242cb6747b08b7710d9782aa3d6732bd8d27649"}, - {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:810860bb4bdce7557bc0febb84bbd88198b9dbc2022d8eebe5b3590b2ad6c842"}, - {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_s390x.whl", hash = 
"sha256:ee78feb9d293c323b59a6f2dd441b63339a30edf35abcb51187d2fc26e696d13"}, - {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0af2e7c87d35b38732e810befb9d797a99279cbb85374d42ea61c1e9d23094b3"}, - {file = "frozenlist-1.3.3-cp38-cp38-win32.whl", hash = "sha256:899c5e1928eec13fd6f6d8dc51be23f0d09c5281e40d9cf4273d188d9feeaf9b"}, - {file = "frozenlist-1.3.3-cp38-cp38-win_amd64.whl", hash = "sha256:7f44e24fa70f6fbc74aeec3e971f60a14dde85da364aa87f15d1be94ae75aeef"}, - {file = "frozenlist-1.3.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:2b07ae0c1edaa0a36339ec6cce700f51b14a3fc6545fdd32930d2c83917332cf"}, - {file = "frozenlist-1.3.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ebb86518203e12e96af765ee89034a1dbb0c3c65052d1b0c19bbbd6af8a145e1"}, - {file = "frozenlist-1.3.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5cf820485f1b4c91e0417ea0afd41ce5cf5965011b3c22c400f6d144296ccbc0"}, - {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c11e43016b9024240212d2a65043b70ed8dfd3b52678a1271972702d990ac6d"}, - {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8fa3c6e3305aa1146b59a09b32b2e04074945ffcfb2f0931836d103a2c38f936"}, - {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:352bd4c8c72d508778cf05ab491f6ef36149f4d0cb3c56b1b4302852255d05d5"}, - {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:65a5e4d3aa679610ac6e3569e865425b23b372277f89b5ef06cf2cdaf1ebf22b"}, - {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1e2c1185858d7e10ff045c496bbf90ae752c28b365fef2c09cf0fa309291669"}, - {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f163d2fd041c630fed01bc48d28c3ed4a3b003c00acd396900e11ee5316b56bb"}, - {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:05cdb16d09a0832eedf770cb7bd1fe57d8cf4eaf5aced29c4e41e3f20b30a784"}, - {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:8bae29d60768bfa8fb92244b74502b18fae55a80eac13c88eb0b496d4268fd2d"}, - {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:eedab4c310c0299961ac285591acd53dc6723a1ebd90a57207c71f6e0c2153ab"}, - {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3bbdf44855ed8f0fbcd102ef05ec3012d6a4fd7c7562403f76ce6a52aeffb2b1"}, - {file = "frozenlist-1.3.3-cp39-cp39-win32.whl", hash = "sha256:efa568b885bca461f7c7b9e032655c0c143d305bf01c30caf6db2854a4532b38"}, - {file = "frozenlist-1.3.3-cp39-cp39-win_amd64.whl", hash = "sha256:cfe33efc9cb900a4c46f91a5ceba26d6df370ffddd9ca386eb1d4f0ad97b9ea9"}, - {file = "frozenlist-1.3.3.tar.gz", hash = "sha256:58bcc55721e8a90b88332d6cd441261ebb22342e238296bb330968952fbb3a6a"}, -] - -[[package]] -name = "fsspec" -version = "2023.3.0" -description = "File-system specification" -optional = false -python-versions = ">=3.8" -files = [ - {file = "fsspec-2023.3.0-py3-none-any.whl", hash = "sha256:bf57215e19dbfa4fe7edae53040cc1deef825e3b1605cca9a8d2c2fadd2328a0"}, - {file = "fsspec-2023.3.0.tar.gz", hash = "sha256:24e635549a590d74c6c18274ddd3ffab4753341753e923408b1904eaabafe04d"}, -] - -[package.extras] -abfs = ["adlfs"] -adl = ["adlfs"] -arrow = ["pyarrow (>=1)"] -dask = ["dask", "distributed"] -dropbox = ["dropbox", "dropboxdrivefs", 
"requests"] -fuse = ["fusepy"] -gcs = ["gcsfs"] -git = ["pygit2"] -github = ["requests"] -gs = ["gcsfs"] -gui = ["panel"] -hdfs = ["pyarrow (>=1)"] -http = ["aiohttp (!=4.0.0a0,!=4.0.0a1)", "requests"] -libarchive = ["libarchive-c"] -oci = ["ocifs"] -s3 = ["s3fs"] -sftp = ["paramiko"] -smb = ["smbprotocol"] -ssh = ["paramiko"] -tqdm = ["tqdm"] - -[[package]] -name = "functorch" -version = "2.0.0" -description = "JAX-like composable function transforms for PyTorch" -optional = false -python-versions = "*" -files = [ - {file = "functorch-2.0.0-py2.py3-none-any.whl", hash = "sha256:ca21ace6b9048e2ec6d132fa0fd18c776eb165ca1c91ef7e3584fdc668eaa4ea"}, -] - -[package.dependencies] -torch = ">=2.0,<2.1" - -[package.extras] -aot = ["networkx"] - -[[package]] -name = "future" -version = "0.18.3" -description = "Clean single-source support for Python 3 and 2" -optional = false -python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" -files = [ - {file = "future-0.18.3.tar.gz", hash = "sha256:34a17436ed1e96697a86f9de3d15a3b0be01d8bc8de9c1dffd59fb8234ed5307"}, -] - -[[package]] -name = "google-auth" -version = "2.17.1" -description = "Google Authentication Library" -optional = false -python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*" -files = [ - {file = "google-auth-2.17.1.tar.gz", hash = "sha256:8f379b46bad381ad2a0b989dfb0c13ad28d3c2a79f27348213f8946a1d15d55a"}, - {file = "google_auth-2.17.1-py2.py3-none-any.whl", hash = "sha256:357ff22a75b4c0f6093470f21816a825d2adee398177569824e37b6c10069e19"}, -] - -[package.dependencies] -cachetools = ">=2.0.0,<6.0" -pyasn1-modules = ">=0.2.1" -rsa = {version = ">=3.1.4,<5", markers = "python_version >= \"3.6\""} -six = ">=1.9.0" - -[package.extras] -aiohttp = ["aiohttp (>=3.6.2,<4.0.0dev)", "requests (>=2.20.0,<3.0.0dev)"] -enterprise-cert = ["cryptography (==36.0.2)", "pyopenssl (==22.0.0)"] -pyopenssl = ["cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"] -reauth = ["pyu2f (>=0.1.5)"] -requests = ["requests (>=2.20.0,<3.0.0dev)"] - -[[package]] -name = "google-auth-oauthlib" -version = "1.0.0" -description = "Google Authentication Library" -optional = false -python-versions = ">=3.6" -files = [ - {file = "google-auth-oauthlib-1.0.0.tar.gz", hash = "sha256:e375064964820b47221a7e1b7ee1fd77051b6323c3f9e3e19785f78ab67ecfc5"}, - {file = "google_auth_oauthlib-1.0.0-py2.py3-none-any.whl", hash = "sha256:95880ca704928c300f48194d1770cf5b1462835b6e49db61445a520f793fd5fb"}, -] - -[package.dependencies] -google-auth = ">=2.15.0" -requests-oauthlib = ">=0.7.0" - -[package.extras] -tool = ["click (>=6.0.0)"] - -[[package]] -name = "gradio" -version = "3.34.0" -description = "Python library for easily interacting with trained machine learning models" -optional = false -python-versions = ">=3.7" -files = [ - {file = "gradio-3.34.0-py3-none-any.whl", hash = "sha256:1cd8b25b598d983561d64f0a039af819382f1376c676aa9f84972c46b6875741"}, - {file = "gradio-3.34.0.tar.gz", hash = "sha256:fd7fa7257ffc749f9dc7c297eba554eaa1e5acd1a5f9c973250b2080932d6a41"}, -] - -[package.dependencies] -aiofiles = "*" -aiohttp = "*" -altair = ">=4.2.0" -fastapi = "*" -ffmpy = "*" -gradio-client = ">=0.2.6" -httpx = "*" -huggingface-hub = ">=0.14.0" -jinja2 = "*" -markdown-it-py = {version = ">=2.0.0", extras = ["linkify"]} -markupsafe = "*" -matplotlib = "*" -mdit-py-plugins = "<=0.3.3" -numpy = "*" -orjson = "*" -pandas = "*" -pillow = "*" -pydantic = "*" -pydub = "*" -pygments = ">=2.12.0" -python-multipart = "*" -pyyaml = "*" -requests = "*" -semantic-version = "*" -typing-extensions 
= "*" -uvicorn = ">=0.14.0" -websockets = ">=10.0" - -[[package]] -name = "gradio-client" -version = "0.2.7" -description = "Python library for easily interacting with trained machine learning models" -optional = false -python-versions = ">=3.8" -files = [ - {file = "gradio_client-0.2.7-py3-none-any.whl", hash = "sha256:4a7ec6bb1341c626051f1ed24d50cb960ff1a4cd1a5db031dd4caaf1ee7d2d0a"}, - {file = "gradio_client-0.2.7.tar.gz", hash = "sha256:c83008df8a1dd3f81a290c0a24c03d0ab70317741991b60f713620ed39ad8f12"}, -] - -[package.dependencies] -fsspec = "*" -httpx = "*" -huggingface-hub = ">=0.13.0" -packaging = "*" -requests = "*" -typing-extensions = "*" -websockets = "*" - -[[package]] -name = "grpcio" -version = "1.53.0" -description = "HTTP/2-based RPC framework" -optional = false -python-versions = ">=3.7" -files = [ - {file = "grpcio-1.53.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:752d2949b40e12e6ad3ed8cc552a65b54d226504f6b1fb67cab2ccee502cc06f"}, - {file = "grpcio-1.53.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:8a48fd3a7222be226bb86b7b413ad248f17f3101a524018cdc4562eeae1eb2a3"}, - {file = "grpcio-1.53.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:f3e837d29f0e1b9d6e7b29d569e2e9b0da61889e41879832ea15569c251c303a"}, - {file = "grpcio-1.53.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aef7d30242409c3aa5839b501e877e453a2c8d3759ca8230dd5a21cda029f046"}, - {file = "grpcio-1.53.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e6f90698b5d1c5dd7b3236cd1fa959d7b80e17923f918d5be020b65f1c78b173"}, - {file = "grpcio-1.53.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:a96c3c7f564b263c5d7c0e49a337166c8611e89c4c919f66dba7b9a84abad137"}, - {file = "grpcio-1.53.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ee81349411648d1abc94095c68cd25e3c2812e4e0367f9a9355be1e804a5135c"}, - {file = "grpcio-1.53.0-cp310-cp310-win32.whl", hash = "sha256:fdc6191587de410a184550d4143e2b24a14df495c86ca15e59508710681690ac"}, - {file = "grpcio-1.53.0-cp310-cp310-win_amd64.whl", hash = "sha256:658ffe1e39171be00490db5bd3b966f79634ac4215a1eb9a85c6cd6783bf7f6e"}, - {file = "grpcio-1.53.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:1b172e6d497191940c4b8d75b53de82dc252e15b61de2951d577ec5b43316b29"}, - {file = "grpcio-1.53.0-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:82434ba3a5935e47908bc861ce1ebc43c2edfc1001d235d6e31e5d3ed55815f7"}, - {file = "grpcio-1.53.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:1c734a2d4843e4e14ececf5600c3c4750990ec319e1299db7e4f0d02c25c1467"}, - {file = "grpcio-1.53.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b6a2ead3de3b2d53119d473aa2f224030257ef33af1e4ddabd4afee1dea5f04c"}, - {file = "grpcio-1.53.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a34d6e905f071f9b945cabbcc776e2055de1fdb59cd13683d9aa0a8f265b5bf9"}, - {file = "grpcio-1.53.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eaf8e3b97caaf9415227a3c6ca5aa8d800fecadd526538d2bf8f11af783f1550"}, - {file = "grpcio-1.53.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:da95778d37be8e4e9afca771a83424f892296f5dfb2a100eda2571a1d8bbc0dc"}, - {file = "grpcio-1.53.0-cp311-cp311-win32.whl", hash = "sha256:e4f513d63df6336fd84b74b701f17d1bb3b64e9d78a6ed5b5e8a198bbbe8bbfa"}, - {file = "grpcio-1.53.0-cp311-cp311-win_amd64.whl", hash = "sha256:ddb2511fbbb440ed9e5c9a4b9b870f2ed649b7715859fd6f2ebc585ee85c0364"}, - {file = "grpcio-1.53.0-cp37-cp37m-linux_armv7l.whl", hash = 
"sha256:2a912397eb8d23c177d6d64e3c8bc46b8a1c7680b090d9f13a640b104aaec77c"}, - {file = "grpcio-1.53.0-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:55930c56b8f5b347d6c8c609cc341949a97e176c90f5cbb01d148d778f3bbd23"}, - {file = "grpcio-1.53.0-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:6601d812105583948ab9c6e403a7e2dba6e387cc678c010e74f2d6d589d1d1b3"}, - {file = "grpcio-1.53.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c705e0c21acb0e8478a00e7e773ad0ecdb34bd0e4adc282d3d2f51ba3961aac7"}, - {file = "grpcio-1.53.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba074af9ca268ad7b05d3fc2b920b5fb3c083da94ab63637aaf67f4f71ecb755"}, - {file = "grpcio-1.53.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:14817de09317dd7d3fbc8272864288320739973ef0f4b56bf2c0032349da8cdf"}, - {file = "grpcio-1.53.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:c7ad9fbedb93f331c2e9054e202e95cf825b885811f1bcbbdfdc301e451442db"}, - {file = "grpcio-1.53.0-cp37-cp37m-win_amd64.whl", hash = "sha256:dad5b302a4c21c604d88a5d441973f320134e6ff6a84ecef9c1139e5ffd466f6"}, - {file = "grpcio-1.53.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:fa8eaac75d3107e3f5465f2c9e3bbd13db21790c6e45b7de1756eba16b050aca"}, - {file = "grpcio-1.53.0-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:104a2210edd3776c38448b4f76c2f16e527adafbde171fc72a8a32976c20abc7"}, - {file = "grpcio-1.53.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:dbc1ba968639c1d23476f75c356e549e7bbf2d8d6688717dcab5290e88e8482b"}, - {file = "grpcio-1.53.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:95952d3fe795b06af29bb8ec7bbf3342cdd867fc17b77cc25e6733d23fa6c519"}, - {file = "grpcio-1.53.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f144a790f14c51b8a8e591eb5af40507ffee45ea6b818c2482f0457fec2e1a2e"}, - {file = "grpcio-1.53.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:0698c094688a2dd4c7c2f2c0e3e142cac439a64d1cef6904c97f6cde38ba422f"}, - {file = "grpcio-1.53.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6b6d60b0958be711bab047e9f4df5dbbc40367955f8651232bfdcdd21450b9ab"}, - {file = "grpcio-1.53.0-cp38-cp38-win32.whl", hash = "sha256:1948539ce78805d4e6256ab0e048ec793956d54787dc9d6777df71c1d19c7f81"}, - {file = "grpcio-1.53.0-cp38-cp38-win_amd64.whl", hash = "sha256:df9ba1183b3f649210788cf80c239041dddcb375d6142d8bccafcfdf549522cd"}, - {file = "grpcio-1.53.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:19caa5b7282a89b799e63776ff602bb39604f7ca98db6df27e2de06756ae86c3"}, - {file = "grpcio-1.53.0-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:b5bd026ac928c96cc23149e6ef79183125542062eb6d1ccec34c0a37e02255e7"}, - {file = "grpcio-1.53.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:7dc8584ca6c015ad82e186e82f4c0fe977394588f66b8ecfc4ec873285314619"}, - {file = "grpcio-1.53.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2eddaae8af625e45b5c8500dcca1043264d751a6872cde2eda5022df8a336959"}, - {file = "grpcio-1.53.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c5fb6f3d7824696c1c9f2ad36ddb080ba5a86f2d929ef712d511b4d9972d3d27"}, - {file = "grpcio-1.53.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:8270d1dc2c98ab57e6dbf36fa187db8df4c036f04a398e5d5e25b4e01a766d70"}, - {file = "grpcio-1.53.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:976a7f24eb213e8429cab78d5e120500dfcdeb01041f1f5a77b17b9101902615"}, - {file = "grpcio-1.53.0-cp39-cp39-win32.whl", hash = 
"sha256:9c84a481451e7174f3a764a44150f93b041ab51045aa33d7b5b68b6979114e48"}, - {file = "grpcio-1.53.0-cp39-cp39-win_amd64.whl", hash = "sha256:6beb84f83360ff29a3654f43f251ec11b809dcb5524b698d711550243debd289"}, - {file = "grpcio-1.53.0.tar.gz", hash = "sha256:a4952899b4931a6ba12951f9a141ef3e74ff8a6ec9aa2dc602afa40f63595e33"}, -] - -[package.extras] -protobuf = ["grpcio-tools (>=1.53.0)"] - -[[package]] -name = "h11" -version = "0.14.0" -description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" -optional = false -python-versions = ">=3.7" -files = [ - {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, - {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, -] - -[[package]] -name = "httpcore" -version = "0.16.3" -description = "A minimal low-level HTTP client." -optional = false -python-versions = ">=3.7" -files = [ - {file = "httpcore-0.16.3-py3-none-any.whl", hash = "sha256:da1fb708784a938aa084bde4feb8317056c55037247c787bd7e19eb2c2949dc0"}, - {file = "httpcore-0.16.3.tar.gz", hash = "sha256:c5d6f04e2fc530f39e0c077e6a30caa53f1451096120f1f38b954afd0b17c0cb"}, -] - -[package.dependencies] -anyio = ">=3.0,<5.0" -certifi = "*" -h11 = ">=0.13,<0.15" -sniffio = "==1.*" - -[package.extras] -http2 = ["h2 (>=3,<5)"] -socks = ["socksio (==1.*)"] - -[[package]] -name = "httpx" -version = "0.23.3" -description = "The next generation HTTP client." -optional = false -python-versions = ">=3.7" -files = [ - {file = "httpx-0.23.3-py3-none-any.whl", hash = "sha256:a211fcce9b1254ea24f0cd6af9869b3d29aba40154e947d2a07bb499b3e310d6"}, - {file = "httpx-0.23.3.tar.gz", hash = "sha256:9818458eb565bb54898ccb9b8b251a28785dd4a55afbc23d0eb410754fe7d0f9"}, -] - -[package.dependencies] -certifi = "*" -httpcore = ">=0.15.0,<0.17.0" -rfc3986 = {version = ">=1.3,<2", extras = ["idna2008"]} -sniffio = "*" - -[package.extras] -brotli = ["brotli", "brotlicffi"] -cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<13)"] -http2 = ["h2 (>=3,<5)"] -socks = ["socksio (==1.*)"] - -[[package]] -name = "huggingface-hub" -version = "0.15.1" -description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" -optional = false -python-versions = ">=3.7.0" -files = [ - {file = "huggingface_hub-0.15.1-py3-none-any.whl", hash = "sha256:05b0fb0abbf1f625dfee864648ac3049fe225ac4371c7bafaca0c2d3a2f83445"}, - {file = "huggingface_hub-0.15.1.tar.gz", hash = "sha256:a61b7d1a7769fe10119e730277c72ab99d95c48d86a3d6da3e9f3d0f632a4081"}, -] - -[package.dependencies] -filelock = "*" -fsspec = "*" -packaging = ">=20.9" -pyyaml = ">=5.1" -requests = "*" -tqdm = ">=4.42.1" -typing-extensions = ">=3.7.4.3" - -[package.extras] -all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "numpy", "pytest", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)"] -cli = ["InquirerPy (==0.3.4)"] -dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "numpy", "pytest", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)"] -fastai = ["fastai (>=2.4)", "fastcore 
(>=1.3.27)", "toml"] -quality = ["black (>=23.1,<24.0)", "mypy (==0.982)", "ruff (>=0.0.241)"] -tensorflow = ["graphviz", "pydot", "tensorflow"] -testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "gradio", "jedi", "numpy", "pytest", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] -torch = ["torch"] -typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"] - -[[package]] -name = "hydra-core" -version = "1.0.7" -description = "A framework for elegantly configuring complex applications" -optional = false -python-versions = "*" -files = [ - {file = "hydra-core-1.0.7.tar.gz", hash = "sha256:58cc3f7531995b6d8de162ca21f936e17bdaebd4d1e8614d63c32e17c2e41e45"}, - {file = "hydra_core-1.0.7-py3-none-any.whl", hash = "sha256:e800c6deb8309395508094851fa93bc13408f2285261eb97e626d37193b58a9f"}, -] - -[package.dependencies] -antlr4-python3-runtime = "4.8" -importlib-resources = {version = "*", markers = "python_version < \"3.9\""} -omegaconf = ">=2.0.5,<2.1" - -[[package]] -name = "idna" -version = "3.4" -description = "Internationalized Domain Names in Applications (IDNA)" -optional = false -python-versions = ">=3.5" -files = [ - {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, - {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, -] - -[[package]] -name = "importlib-metadata" -version = "6.1.0" -description = "Read metadata from Python packages" -optional = false -python-versions = ">=3.7" -files = [ - {file = "importlib_metadata-6.1.0-py3-none-any.whl", hash = "sha256:ff80f3b5394912eb1b108fcfd444dc78b7f1f3e16b16188054bd01cb9cb86f09"}, - {file = "importlib_metadata-6.1.0.tar.gz", hash = "sha256:43ce9281e097583d758c2c708c4376371261a02c34682491a8e98352365aad20"}, -] - -[package.dependencies] -zipp = ">=0.5" - -[package.extras] -docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -perf = ["ipython"] -testing = ["flake8 (<5)", "flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)"] - -[[package]] -name = "importlib-resources" -version = "5.12.0" -description = "Read resources from Python packages" -optional = false -python-versions = ">=3.7" -files = [ - {file = "importlib_resources-5.12.0-py3-none-any.whl", hash = "sha256:7b1deeebbf351c7578e09bf2f63fa2ce8b5ffec296e0d349139d43cca061a81a"}, - {file = "importlib_resources-5.12.0.tar.gz", hash = "sha256:4be82589bf5c1d7999aedf2a45159d10cb3ca4f19b2271f8792bc8e6da7b22f6"}, -] - -[package.dependencies] -zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""} - -[package.extras] -docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] - -[[package]] -name = "jinja2" -version = "3.1.2" -description = "A very fast and expressive template engine." 
-optional = false -python-versions = ">=3.7" -files = [ - {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"}, - {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"}, -] - -[package.dependencies] -MarkupSafe = ">=2.0" - -[package.extras] -i18n = ["Babel (>=2.7)"] - -[[package]] -name = "joblib" -version = "1.2.0" -description = "Lightweight pipelining with Python functions" -optional = false -python-versions = ">=3.7" -files = [ - {file = "joblib-1.2.0-py3-none-any.whl", hash = "sha256:091138ed78f800342968c523bdde947e7a305b8594b910a0fea2ab83c3c6d385"}, - {file = "joblib-1.2.0.tar.gz", hash = "sha256:e1cee4a79e4af22881164f218d4311f60074197fb707e082e803b61f6d137018"}, -] - -[[package]] -name = "json5" -version = "0.9.11" -description = "A Python implementation of the JSON5 data format." -optional = false -python-versions = "*" -files = [ - {file = "json5-0.9.11-py2.py3-none-any.whl", hash = "sha256:1aa54b80b5e507dfe31d12b7743a642e2ffa6f70bf73b8e3d7d1d5fba83d99bd"}, - {file = "json5-0.9.11.tar.gz", hash = "sha256:4f1e196acc55b83985a51318489f345963c7ba84aa37607e49073066c562e99b"}, -] - -[package.extras] -dev = ["hypothesis"] - -[[package]] -name = "jsonschema" -version = "4.17.3" -description = "An implementation of JSON Schema validation for Python" -optional = false -python-versions = ">=3.7" -files = [ - {file = "jsonschema-4.17.3-py3-none-any.whl", hash = "sha256:a870ad254da1a8ca84b6a2905cac29d265f805acc57af304784962a2aa6508f6"}, - {file = "jsonschema-4.17.3.tar.gz", hash = "sha256:0f864437ab8b6076ba6707453ef8f98a6a0d512a80e93f8abdb676f737ecb60d"}, -] - -[package.dependencies] -attrs = ">=17.4.0" -importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""} -pkgutil-resolve-name = {version = ">=1.3.10", markers = "python_version < \"3.9\""} -pyrsistent = ">=0.14.0,<0.17.0 || >0.17.0,<0.17.1 || >0.17.1,<0.17.2 || >0.17.2" - -[package.extras] -format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"] -format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"] - -[[package]] -name = "kiwisolver" -version = "1.4.4" -description = "A fast implementation of the Cassowary constraint solver" -optional = false -python-versions = ">=3.7" -files = [ - {file = "kiwisolver-1.4.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2f5e60fabb7343a836360c4f0919b8cd0d6dbf08ad2ca6b9cf90bf0c76a3c4f6"}, - {file = "kiwisolver-1.4.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:10ee06759482c78bdb864f4109886dff7b8a56529bc1609d4f1112b93fe6423c"}, - {file = "kiwisolver-1.4.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c79ebe8f3676a4c6630fd3f777f3cfecf9289666c84e775a67d1d358578dc2e3"}, - {file = "kiwisolver-1.4.4-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:abbe9fa13da955feb8202e215c4018f4bb57469b1b78c7a4c5c7b93001699938"}, - {file = "kiwisolver-1.4.4-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:7577c1987baa3adc4b3c62c33bd1118c3ef5c8ddef36f0f2c950ae0b199e100d"}, - {file = "kiwisolver-1.4.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8ad8285b01b0d4695102546b342b493b3ccc6781fc28c8c6a1bb63e95d22f09"}, - {file = 
"kiwisolver-1.4.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8ed58b8acf29798b036d347791141767ccf65eee7f26bde03a71c944449e53de"}, - {file = "kiwisolver-1.4.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a68b62a02953b9841730db7797422f983935aeefceb1679f0fc85cbfbd311c32"}, - {file = "kiwisolver-1.4.4-cp310-cp310-win32.whl", hash = "sha256:e92a513161077b53447160b9bd8f522edfbed4bd9759e4c18ab05d7ef7e49408"}, - {file = "kiwisolver-1.4.4-cp310-cp310-win_amd64.whl", hash = "sha256:3fe20f63c9ecee44560d0e7f116b3a747a5d7203376abeea292ab3152334d004"}, - {file = "kiwisolver-1.4.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e0ea21f66820452a3f5d1655f8704a60d66ba1191359b96541eaf457710a5fc6"}, - {file = "kiwisolver-1.4.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bc9db8a3efb3e403e4ecc6cd9489ea2bac94244f80c78e27c31dcc00d2790ac2"}, - {file = "kiwisolver-1.4.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d5b61785a9ce44e5a4b880272baa7cf6c8f48a5180c3e81c59553ba0cb0821ca"}, - {file = "kiwisolver-1.4.4-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c2dbb44c3f7e6c4d3487b31037b1bdbf424d97687c1747ce4ff2895795c9bf69"}, - {file = "kiwisolver-1.4.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6295ecd49304dcf3bfbfa45d9a081c96509e95f4b9d0eb7ee4ec0530c4a96514"}, - {file = "kiwisolver-1.4.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4bd472dbe5e136f96a4b18f295d159d7f26fd399136f5b17b08c4e5f498cd494"}, - {file = "kiwisolver-1.4.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bf7d9fce9bcc4752ca4a1b80aabd38f6d19009ea5cbda0e0856983cf6d0023f5"}, - {file = "kiwisolver-1.4.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78d6601aed50c74e0ef02f4204da1816147a6d3fbdc8b3872d263338a9052c51"}, - {file = "kiwisolver-1.4.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:877272cf6b4b7e94c9614f9b10140e198d2186363728ed0f701c6eee1baec1da"}, - {file = "kiwisolver-1.4.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:db608a6757adabb32f1cfe6066e39b3706d8c3aa69bbc353a5b61edad36a5cb4"}, - {file = "kiwisolver-1.4.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:5853eb494c71e267912275e5586fe281444eb5e722de4e131cddf9d442615626"}, - {file = "kiwisolver-1.4.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:f0a1dbdb5ecbef0d34eb77e56fcb3e95bbd7e50835d9782a45df81cc46949750"}, - {file = "kiwisolver-1.4.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:283dffbf061a4ec60391d51e6155e372a1f7a4f5b15d59c8505339454f8989e4"}, - {file = "kiwisolver-1.4.4-cp311-cp311-win32.whl", hash = "sha256:d06adcfa62a4431d404c31216f0f8ac97397d799cd53800e9d3efc2fbb3cf14e"}, - {file = "kiwisolver-1.4.4-cp311-cp311-win_amd64.whl", hash = "sha256:e7da3fec7408813a7cebc9e4ec55afed2d0fd65c4754bc376bf03498d4e92686"}, - {file = "kiwisolver-1.4.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:62ac9cc684da4cf1778d07a89bf5f81b35834cb96ca523d3a7fb32509380cbf6"}, - {file = "kiwisolver-1.4.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41dae968a94b1ef1897cb322b39360a0812661dba7c682aa45098eb8e193dbdf"}, - {file = "kiwisolver-1.4.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:02f79693ec433cb4b5f51694e8477ae83b3205768a6fb48ffba60549080e295b"}, - {file = "kiwisolver-1.4.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", 
hash = "sha256:d0611a0a2a518464c05ddd5a3a1a0e856ccc10e67079bb17f265ad19ab3c7597"}, - {file = "kiwisolver-1.4.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:db5283d90da4174865d520e7366801a93777201e91e79bacbac6e6927cbceede"}, - {file = "kiwisolver-1.4.4-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:1041feb4cda8708ce73bb4dcb9ce1ccf49d553bf87c3954bdfa46f0c3f77252c"}, - {file = "kiwisolver-1.4.4-cp37-cp37m-win32.whl", hash = "sha256:a553dadda40fef6bfa1456dc4be49b113aa92c2a9a9e8711e955618cd69622e3"}, - {file = "kiwisolver-1.4.4-cp37-cp37m-win_amd64.whl", hash = "sha256:03baab2d6b4a54ddbb43bba1a3a2d1627e82d205c5cf8f4c924dc49284b87166"}, - {file = "kiwisolver-1.4.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:841293b17ad704d70c578f1f0013c890e219952169ce8a24ebc063eecf775454"}, - {file = "kiwisolver-1.4.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f4f270de01dd3e129a72efad823da90cc4d6aafb64c410c9033aba70db9f1ff0"}, - {file = "kiwisolver-1.4.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f9f39e2f049db33a908319cf46624a569b36983c7c78318e9726a4cb8923b26c"}, - {file = "kiwisolver-1.4.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c97528e64cb9ebeff9701e7938653a9951922f2a38bd847787d4a8e498cc83ae"}, - {file = "kiwisolver-1.4.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d1573129aa0fd901076e2bfb4275a35f5b7aa60fbfb984499d661ec950320b0"}, - {file = "kiwisolver-1.4.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ad881edc7ccb9d65b0224f4e4d05a1e85cf62d73aab798943df6d48ab0cd79a1"}, - {file = "kiwisolver-1.4.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b428ef021242344340460fa4c9185d0b1f66fbdbfecc6c63eff4b7c29fad429d"}, - {file = "kiwisolver-1.4.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:2e407cb4bd5a13984a6c2c0fe1845e4e41e96f183e5e5cd4d77a857d9693494c"}, - {file = "kiwisolver-1.4.4-cp38-cp38-win32.whl", hash = "sha256:75facbe9606748f43428fc91a43edb46c7ff68889b91fa31f53b58894503a191"}, - {file = "kiwisolver-1.4.4-cp38-cp38-win_amd64.whl", hash = "sha256:5bce61af018b0cb2055e0e72e7d65290d822d3feee430b7b8203d8a855e78766"}, - {file = "kiwisolver-1.4.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8c808594c88a025d4e322d5bb549282c93c8e1ba71b790f539567932722d7bd8"}, - {file = "kiwisolver-1.4.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f0a71d85ecdd570ded8ac3d1c0f480842f49a40beb423bb8014539a9f32a5897"}, - {file = "kiwisolver-1.4.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b533558eae785e33e8c148a8d9921692a9fe5aa516efbdff8606e7d87b9d5824"}, - {file = "kiwisolver-1.4.4-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:efda5fc8cc1c61e4f639b8067d118e742b812c930f708e6667a5ce0d13499e29"}, - {file = "kiwisolver-1.4.4-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:7c43e1e1206cd421cd92e6b3280d4385d41d7166b3ed577ac20444b6995a445f"}, - {file = "kiwisolver-1.4.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bc8d3bd6c72b2dd9decf16ce70e20abcb3274ba01b4e1c96031e0c4067d1e7cd"}, - {file = "kiwisolver-1.4.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4ea39b0ccc4f5d803e3337dd46bcce60b702be4d86fd0b3d7531ef10fd99a1ac"}, - {file = "kiwisolver-1.4.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:968f44fdbf6dd757d12920d63b566eeb4d5b395fd2d00d29d7ef00a00582aac9"}, - {file = 
"kiwisolver-1.4.4-cp39-cp39-win32.whl", hash = "sha256:da7e547706e69e45d95e116e6939488d62174e033b763ab1496b4c29b76fabea"}, - {file = "kiwisolver-1.4.4-cp39-cp39-win_amd64.whl", hash = "sha256:ba59c92039ec0a66103b1d5fe588fa546373587a7d68f5c96f743c3396afc04b"}, - {file = "kiwisolver-1.4.4-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:91672bacaa030f92fc2f43b620d7b337fd9a5af28b0d6ed3f77afc43c4a64b5a"}, - {file = "kiwisolver-1.4.4-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:787518a6789009c159453da4d6b683f468ef7a65bbde796bcea803ccf191058d"}, - {file = "kiwisolver-1.4.4-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da152d8cdcab0e56e4f45eb08b9aea6455845ec83172092f09b0e077ece2cf7a"}, - {file = "kiwisolver-1.4.4-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:ecb1fa0db7bf4cff9dac752abb19505a233c7f16684c5826d1f11ebd9472b871"}, - {file = "kiwisolver-1.4.4-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:28bc5b299f48150b5f822ce68624e445040595a4ac3d59251703779836eceff9"}, - {file = "kiwisolver-1.4.4-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:81e38381b782cc7e1e46c4e14cd997ee6040768101aefc8fa3c24a4cc58e98f8"}, - {file = "kiwisolver-1.4.4-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:2a66fdfb34e05b705620dd567f5a03f239a088d5a3f321e7b6ac3239d22aa286"}, - {file = "kiwisolver-1.4.4-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:872b8ca05c40d309ed13eb2e582cab0c5a05e81e987ab9c521bf05ad1d5cf5cb"}, - {file = "kiwisolver-1.4.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:70e7c2e7b750585569564e2e5ca9845acfaa5da56ac46df68414f29fea97be9f"}, - {file = "kiwisolver-1.4.4-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9f85003f5dfa867e86d53fac6f7e6f30c045673fa27b603c397753bebadc3008"}, - {file = "kiwisolver-1.4.4-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e307eb9bd99801f82789b44bb45e9f541961831c7311521b13a6c85afc09767"}, - {file = "kiwisolver-1.4.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1792d939ec70abe76f5054d3f36ed5656021dcad1322d1cc996d4e54165cef9"}, - {file = "kiwisolver-1.4.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6cb459eea32a4e2cf18ba5fcece2dbdf496384413bc1bae15583f19e567f3b2"}, - {file = "kiwisolver-1.4.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:36dafec3d6d6088d34e2de6b85f9d8e2324eb734162fba59d2ba9ed7a2043d5b"}, - {file = "kiwisolver-1.4.4.tar.gz", hash = "sha256:d41997519fcba4a1e46eb4a2fe31bc12f0ff957b2b81bac28db24744f333e955"}, -] - -[[package]] -name = "librosa" -version = "0.9.2" -description = "Python module for audio and music processing" -optional = false -python-versions = ">=3.6" -files = [ - {file = "librosa-0.9.2-py3-none-any.whl", hash = "sha256:322a813e6d37af9fbc369e6a637dcf5fdc5c6925ce806a0d27c68de61a81350f"}, - {file = "librosa-0.9.2.tar.gz", hash = "sha256:5b576b5efdce428e90bc988bdd5a953d12a727e5f931f30d74c53b63abbe3c89"}, -] - -[package.dependencies] -audioread = ">=2.1.9" -decorator = ">=4.0.10" -joblib = ">=0.14" -numba = ">=0.45.1" -numpy = ">=1.17.0" -packaging = ">=20.0" -pooch = ">=1.0" -resampy = ">=0.2.2" -scikit-learn = ">=0.19.1" -scipy = ">=1.2.0" -soundfile = ">=0.10.2" - -[package.extras] -display = ["matplotlib (>=3.3.0)"] -docs = ["ipython (>=7.0)", "matplotlib (>=3.3.0)", "mir-eval (>=0.5)", 
"numba (<0.50)", "numpydoc", "presets", "sphinx (!=1.3.1)", "sphinx-gallery (>=0.7)", "sphinx-multiversion (>=0.2.3)", "sphinx-rtd-theme (==1.*)", "sphinxcontrib-svg2pdfconverter"] -tests = ["contextlib2", "matplotlib (>=3.3.0)", "pytest", "pytest-cov", "pytest-mpl", "samplerate", "soxr"] - -[[package]] -name = "linkify-it-py" -version = "2.0.0" -description = "Links recognition library with FULL unicode support." -optional = false -python-versions = ">=3.6" -files = [ - {file = "linkify-it-py-2.0.0.tar.gz", hash = "sha256:476464480906bed8b2fa3813bf55566282e55214ad7e41b7d1c2b564666caf2f"}, - {file = "linkify_it_py-2.0.0-py3-none-any.whl", hash = "sha256:1bff43823e24e507a099e328fc54696124423dd6320c75a9da45b4b754b748ad"}, -] - -[package.dependencies] -uc-micro-py = "*" - -[package.extras] -benchmark = ["pytest", "pytest-benchmark"] -dev = ["black", "flake8", "isort", "pre-commit"] -doc = ["myst-parser", "sphinx", "sphinx-book-theme"] -test = ["coverage", "pytest", "pytest-cov"] - -[[package]] -name = "lit" -version = "16.0.0" -description = "A Software Testing Tool" -optional = false -python-versions = "*" -files = [ - {file = "lit-16.0.0.tar.gz", hash = "sha256:3c4ac372122a1de4a88deb277b956f91b7209420a0bef683b1ab2d2b16dabe11"}, -] - -[[package]] -name = "llvmlite" -version = "0.39.0" -description = "lightweight wrapper around basic LLVM functionality" -optional = false -python-versions = ">=3.7" -files = [ - {file = "llvmlite-0.39.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:589f08a1b1920e6004735819ce9aafdd85d030d4a231c1e7adaca9360724b1ed"}, - {file = "llvmlite-0.39.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:44a9a5cbe76db8ba01a5f6fa21649d91aa8a2634cc6f3a60291797e42e67d79e"}, - {file = "llvmlite-0.39.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:74d89f2ec4734d3e200fb90ea0b3ca5e9be40f3b3e50eb368ca9002ed5b3e4f8"}, - {file = "llvmlite-0.39.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8b4cb4f433b48792f02ec4ab619b86b145689302a3088a3f3853f50df6c2559d"}, - {file = "llvmlite-0.39.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:35db4122182cc5112912a3ec94a3c18eab9a990bd588bfda8445087c1b748563"}, - {file = "llvmlite-0.39.0-cp310-cp310-win32.whl", hash = "sha256:c00bf7a8dc56b4b3618c65b67e75046410f751512871d9e23919cf1feb1007b2"}, - {file = "llvmlite-0.39.0-cp310-cp310-win_amd64.whl", hash = "sha256:72bd2e5db9790344ec39cef77098486635853829ecb0e66e6fa516488ff6dd9e"}, - {file = "llvmlite-0.39.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:53c7c608baffdcdc2213926f4e3600036d4048aed08d6209b9f76a5439e529d6"}, - {file = "llvmlite-0.39.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c3bbd23e42593f85a842614d8ddb2b2943630e4c4c8418ea0d8cf1dce9f2fa7a"}, - {file = "llvmlite-0.39.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d733eb9c02bb8b01373228a1339901b1e50be4581105239c6052b9573ddb9298"}, - {file = "llvmlite-0.39.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f575fcb9bebe5bcbe20373c56ad3ebf63bae0e27d3c22c1a4dc27fa4666d0324"}, - {file = "llvmlite-0.39.0-cp37-cp37m-win32.whl", hash = "sha256:5ca4ea962da6ec3b007bedab17065781803d71159b03435f24ce6845cf3d1c66"}, - {file = "llvmlite-0.39.0-cp37-cp37m-win_amd64.whl", hash = "sha256:8e461608135859ac40e39211d9c63a1ce35176513f6b8be87efb554d4af3a388"}, - {file = "llvmlite-0.39.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:62a11b8e9e5fc4783d94da45d94c5a047ce6ccc4c112ae5f764109e9405fcc2c"}, - {file = "llvmlite-0.39.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9272b7e344d12b36dafeb6911054eff32d2a9be7256a2866f0c09d08f945e17f"}, - {file = "llvmlite-0.39.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3df59a7c2b60764fb9eeaf9c442d757eca1f3e87298d4f88849203667528581e"}, - {file = "llvmlite-0.39.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4cfd6688efd0f551168dd8626f386464aef25663268a2400c0f6a089b97a73dc"}, - {file = "llvmlite-0.39.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c7292b394956749e51ae3d51a2085932a0e3261108b35eda61d702c1b977102c"}, - {file = "llvmlite-0.39.0-cp38-cp38-win32.whl", hash = "sha256:f8e9463a7d0152994b6f7d630012297bb160db237ad9ca8e75c8dceef7a747cf"}, - {file = "llvmlite-0.39.0-cp38-cp38-win_amd64.whl", hash = "sha256:8d8149fdaab40ae48ea4ec816ae2ae5d36d664795e1b1dfb911fc2c62bc73184"}, - {file = "llvmlite-0.39.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0929e3c26bcafb53545c77bcf7020b943dcefcf8d7d3010f414384458f805cc1"}, - {file = "llvmlite-0.39.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:56ea23c6bbcd25a7c050a26b6effe836a575a33183744cbc28fb21358b3801f8"}, - {file = "llvmlite-0.39.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f82d605c5d6c8df96fe19bc3a61c934580e24cafa694cbf79cb227cdc0e426a"}, - {file = "llvmlite-0.39.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7f7a7278ba6d75533be46abc3d9e242030ab017f0016dd081b55f821cc03be9"}, - {file = "llvmlite-0.39.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56ccfe736a12aef2e39450a22e4c027eee4f488c5786c81d0b18ff8a6cf52531"}, - {file = "llvmlite-0.39.0-cp39-cp39-win32.whl", hash = "sha256:0706abf522dc510ddc818f5c9e1cdae521a1416d3c399bbfc4827813379f0164"}, - {file = "llvmlite-0.39.0-cp39-cp39-win_amd64.whl", hash = "sha256:d4a8199263859b97f174035e39297e770617d3497fac44fe738f74ce9c51d22b"}, - {file = "llvmlite-0.39.0.tar.gz", hash = "sha256:01098be54f1aa25e391cebba8ea71cd1533f8cd1f50e34c7dd7540c2560a93af"}, -] - -[[package]] -name = "lxml" -version = "4.9.2" -description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API." 
-optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, != 3.4.*" -files = [ - {file = "lxml-4.9.2-cp27-cp27m-macosx_10_15_x86_64.whl", hash = "sha256:76cf573e5a365e790396a5cc2b909812633409306c6531a6877c59061e42c4f2"}, - {file = "lxml-4.9.2-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b1f42b6921d0e81b1bcb5e395bc091a70f41c4d4e55ba99c6da2b31626c44892"}, - {file = "lxml-4.9.2-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:9f102706d0ca011de571de32c3247c6476b55bb6bc65a20f682f000b07a4852a"}, - {file = "lxml-4.9.2-cp27-cp27m-win32.whl", hash = "sha256:8d0b4612b66ff5d62d03bcaa043bb018f74dfea51184e53f067e6fdcba4bd8de"}, - {file = "lxml-4.9.2-cp27-cp27m-win_amd64.whl", hash = "sha256:4c8f293f14abc8fd3e8e01c5bd86e6ed0b6ef71936ded5bf10fe7a5efefbaca3"}, - {file = "lxml-4.9.2-cp27-cp27mu-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2899456259589aa38bfb018c364d6ae7b53c5c22d8e27d0ec7609c2a1ff78b50"}, - {file = "lxml-4.9.2-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6749649eecd6a9871cae297bffa4ee76f90b4504a2a2ab528d9ebe912b101975"}, - {file = "lxml-4.9.2-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:a08cff61517ee26cb56f1e949cca38caabe9ea9fbb4b1e10a805dc39844b7d5c"}, - {file = "lxml-4.9.2-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:85cabf64adec449132e55616e7ca3e1000ab449d1d0f9d7f83146ed5bdcb6d8a"}, - {file = "lxml-4.9.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:8340225bd5e7a701c0fa98284c849c9b9fc9238abf53a0ebd90900f25d39a4e4"}, - {file = "lxml-4.9.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:1ab8f1f932e8f82355e75dda5413a57612c6ea448069d4fb2e217e9a4bed13d4"}, - {file = "lxml-4.9.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:699a9af7dffaf67deeae27b2112aa06b41c370d5e7633e0ee0aea2e0b6c211f7"}, - {file = "lxml-4.9.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b9cc34af337a97d470040f99ba4282f6e6bac88407d021688a5d585e44a23184"}, - {file = "lxml-4.9.2-cp310-cp310-win32.whl", hash = "sha256:d02a5399126a53492415d4906ab0ad0375a5456cc05c3fc0fc4ca11771745cda"}, - {file = "lxml-4.9.2-cp310-cp310-win_amd64.whl", hash = "sha256:a38486985ca49cfa574a507e7a2215c0c780fd1778bb6290c21193b7211702ab"}, - {file = "lxml-4.9.2-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:c83203addf554215463b59f6399835201999b5e48019dc17f182ed5ad87205c9"}, - {file = "lxml-4.9.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:2a87fa548561d2f4643c99cd13131acb607ddabb70682dcf1dff5f71f781a4bf"}, - {file = "lxml-4.9.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:d6b430a9938a5a5d85fc107d852262ddcd48602c120e3dbb02137c83d212b380"}, - {file = "lxml-4.9.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:3efea981d956a6f7173b4659849f55081867cf897e719f57383698af6f618a92"}, - {file = "lxml-4.9.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:df0623dcf9668ad0445e0558a21211d4e9a149ea8f5666917c8eeec515f0a6d1"}, - {file = "lxml-4.9.2-cp311-cp311-win32.whl", hash = "sha256:da248f93f0418a9e9d94b0080d7ebc407a9a5e6d0b57bb30db9b5cc28de1ad33"}, - {file = "lxml-4.9.2-cp311-cp311-win_amd64.whl", hash = "sha256:3818b8e2c4b5148567e1b09ce739006acfaa44ce3156f8cbbc11062994b8e8dd"}, - {file = 
"lxml-4.9.2-cp35-cp35m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ca989b91cf3a3ba28930a9fc1e9aeafc2a395448641df1f387a2d394638943b0"}, - {file = "lxml-4.9.2-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:822068f85e12a6e292803e112ab876bc03ed1f03dddb80154c395f891ca6b31e"}, - {file = "lxml-4.9.2-cp35-cp35m-win32.whl", hash = "sha256:be7292c55101e22f2a3d4d8913944cbea71eea90792bf914add27454a13905df"}, - {file = "lxml-4.9.2-cp35-cp35m-win_amd64.whl", hash = "sha256:998c7c41910666d2976928c38ea96a70d1aa43be6fe502f21a651e17483a43c5"}, - {file = "lxml-4.9.2-cp36-cp36m-macosx_10_15_x86_64.whl", hash = "sha256:b26a29f0b7fc6f0897f043ca366142d2b609dc60756ee6e4e90b5f762c6adc53"}, - {file = "lxml-4.9.2-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:ab323679b8b3030000f2be63e22cdeea5b47ee0abd2d6a1dc0c8103ddaa56cd7"}, - {file = "lxml-4.9.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:689bb688a1db722485e4610a503e3e9210dcc20c520b45ac8f7533c837be76fe"}, - {file = "lxml-4.9.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:f49e52d174375a7def9915c9f06ec4e569d235ad428f70751765f48d5926678c"}, - {file = "lxml-4.9.2-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:36c3c175d34652a35475a73762b545f4527aec044910a651d2bf50de9c3352b1"}, - {file = "lxml-4.9.2-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a35f8b7fa99f90dd2f5dc5a9fa12332642f087a7641289ca6c40d6e1a2637d8e"}, - {file = "lxml-4.9.2-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:58bfa3aa19ca4c0f28c5dde0ff56c520fbac6f0daf4fac66ed4c8d2fb7f22e74"}, - {file = "lxml-4.9.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:bc718cd47b765e790eecb74d044cc8d37d58562f6c314ee9484df26276d36a38"}, - {file = "lxml-4.9.2-cp36-cp36m-win32.whl", hash = "sha256:d5bf6545cd27aaa8a13033ce56354ed9e25ab0e4ac3b5392b763d8d04b08e0c5"}, - {file = "lxml-4.9.2-cp36-cp36m-win_amd64.whl", hash = "sha256:3ab9fa9d6dc2a7f29d7affdf3edebf6ece6fb28a6d80b14c3b2fb9d39b9322c3"}, - {file = "lxml-4.9.2-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:05ca3f6abf5cf78fe053da9b1166e062ade3fa5d4f92b4ed688127ea7d7b1d03"}, - {file = "lxml-4.9.2-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:a5da296eb617d18e497bcf0a5c528f5d3b18dadb3619fbdadf4ed2356ef8d941"}, - {file = "lxml-4.9.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:04876580c050a8c5341d706dd464ff04fd597095cc8c023252566a8826505726"}, - {file = "lxml-4.9.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:c9ec3eaf616d67db0764b3bb983962b4f385a1f08304fd30c7283954e6a7869b"}, - {file = "lxml-4.9.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2a29ba94d065945944016b6b74e538bdb1751a1db6ffb80c9d3c2e40d6fa9894"}, - {file = "lxml-4.9.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a82d05da00a58b8e4c0008edbc8a4b6ec5a4bc1e2ee0fb6ed157cf634ed7fa45"}, - {file = "lxml-4.9.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:223f4232855ade399bd409331e6ca70fb5578efef22cf4069a6090acc0f53c0e"}, - {file = "lxml-4.9.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d17bc7c2ccf49c478c5bdd447594e82692c74222698cfc9b5daae7ae7e90743b"}, - {file = "lxml-4.9.2-cp37-cp37m-win32.whl", hash = "sha256:b64d891da92e232c36976c80ed7ebb383e3f148489796d8d31a5b6a677825efe"}, - {file = 
"lxml-4.9.2-cp37-cp37m-win_amd64.whl", hash = "sha256:a0a336d6d3e8b234a3aae3c674873d8f0e720b76bc1d9416866c41cd9500ffb9"}, - {file = "lxml-4.9.2-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:da4dd7c9c50c059aba52b3524f84d7de956f7fef88f0bafcf4ad7dde94a064e8"}, - {file = "lxml-4.9.2-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:821b7f59b99551c69c85a6039c65b75f5683bdc63270fec660f75da67469ca24"}, - {file = "lxml-4.9.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:e5168986b90a8d1f2f9dc1b841467c74221bd752537b99761a93d2d981e04889"}, - {file = "lxml-4.9.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:8e20cb5a47247e383cf4ff523205060991021233ebd6f924bca927fcf25cf86f"}, - {file = "lxml-4.9.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:13598ecfbd2e86ea7ae45ec28a2a54fb87ee9b9fdb0f6d343297d8e548392c03"}, - {file = "lxml-4.9.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:880bbbcbe2fca64e2f4d8e04db47bcdf504936fa2b33933efd945e1b429bea8c"}, - {file = "lxml-4.9.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:7d2278d59425777cfcb19735018d897ca8303abe67cc735f9f97177ceff8027f"}, - {file = "lxml-4.9.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5344a43228767f53a9df6e5b253f8cdca7dfc7b7aeae52551958192f56d98457"}, - {file = "lxml-4.9.2-cp38-cp38-win32.whl", hash = "sha256:925073b2fe14ab9b87e73f9a5fde6ce6392da430f3004d8b72cc86f746f5163b"}, - {file = "lxml-4.9.2-cp38-cp38-win_amd64.whl", hash = "sha256:9b22c5c66f67ae00c0199f6055705bc3eb3fcb08d03d2ec4059a2b1b25ed48d7"}, - {file = "lxml-4.9.2-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:5f50a1c177e2fa3ee0667a5ab79fdc6b23086bc8b589d90b93b4bd17eb0e64d1"}, - {file = "lxml-4.9.2-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:090c6543d3696cbe15b4ac6e175e576bcc3f1ccfbba970061b7300b0c15a2140"}, - {file = "lxml-4.9.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:63da2ccc0857c311d764e7d3d90f429c252e83b52d1f8f1d1fe55be26827d1f4"}, - {file = "lxml-4.9.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:5b4545b8a40478183ac06c073e81a5ce4cf01bf1734962577cf2bb569a5b3bbf"}, - {file = "lxml-4.9.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2e430cd2824f05f2d4f687701144556646bae8f249fd60aa1e4c768ba7018947"}, - {file = "lxml-4.9.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6804daeb7ef69e7b36f76caddb85cccd63d0c56dedb47555d2fc969e2af6a1a5"}, - {file = "lxml-4.9.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a6e441a86553c310258aca15d1c05903aaf4965b23f3bc2d55f200804e005ee5"}, - {file = "lxml-4.9.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ca34efc80a29351897e18888c71c6aca4a359247c87e0b1c7ada14f0ab0c0fb2"}, - {file = "lxml-4.9.2-cp39-cp39-win32.whl", hash = "sha256:6b418afe5df18233fc6b6093deb82a32895b6bb0b1155c2cdb05203f583053f1"}, - {file = "lxml-4.9.2-cp39-cp39-win_amd64.whl", hash = "sha256:f1496ea22ca2c830cbcbd473de8f114a320da308438ae65abad6bab7867fe38f"}, - {file = "lxml-4.9.2-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:b264171e3143d842ded311b7dccd46ff9ef34247129ff5bf5066123c55c2431c"}, - {file = "lxml-4.9.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = 
"sha256:0dc313ef231edf866912e9d8f5a042ddab56c752619e92dfd3a2c277e6a7299a"}, - {file = "lxml-4.9.2-pp38-pypy38_pp73-macosx_10_15_x86_64.whl", hash = "sha256:16efd54337136e8cd72fb9485c368d91d77a47ee2d42b057564aae201257d419"}, - {file = "lxml-4.9.2-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:0f2b1e0d79180f344ff9f321327b005ca043a50ece8713de61d1cb383fb8ac05"}, - {file = "lxml-4.9.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:7b770ed79542ed52c519119473898198761d78beb24b107acf3ad65deae61f1f"}, - {file = "lxml-4.9.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:efa29c2fe6b4fdd32e8ef81c1528506895eca86e1d8c4657fda04c9b3786ddf9"}, - {file = "lxml-4.9.2-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7e91ee82f4199af8c43d8158024cbdff3d931df350252288f0d4ce656df7f3b5"}, - {file = "lxml-4.9.2-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:b23e19989c355ca854276178a0463951a653309fb8e57ce674497f2d9f208746"}, - {file = "lxml-4.9.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:01d36c05f4afb8f7c20fd9ed5badca32a2029b93b1750f571ccc0b142531caf7"}, - {file = "lxml-4.9.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7b515674acfdcadb0eb5d00d8a709868173acece5cb0be3dd165950cbfdf5409"}, - {file = "lxml-4.9.2.tar.gz", hash = "sha256:2455cfaeb7ac70338b3257f41e21f0724f4b5b0c0e7702da67ee6c3640835b67"}, -] - -[package.extras] -cssselect = ["cssselect (>=0.7)"] -html5 = ["html5lib"] -htmlsoup = ["BeautifulSoup4"] -source = ["Cython (>=0.29.7)"] - -[[package]] -name = "markdown" -version = "3.4.3" -description = "Python implementation of John Gruber's Markdown." -optional = false -python-versions = ">=3.7" -files = [ - {file = "Markdown-3.4.3-py3-none-any.whl", hash = "sha256:065fd4df22da73a625f14890dd77eb8040edcbd68794bcd35943be14490608b2"}, - {file = "Markdown-3.4.3.tar.gz", hash = "sha256:8bf101198e004dc93e84a12a7395e31aac6a9c9942848ae1d99b9d72cf9b3520"}, -] - -[package.dependencies] -importlib-metadata = {version = ">=4.4", markers = "python_version < \"3.10\""} - -[package.extras] -testing = ["coverage", "pyyaml"] - -[[package]] -name = "markdown-it-py" -version = "2.2.0" -description = "Python port of markdown-it. Markdown parsing, done right!" -optional = false -python-versions = ">=3.7" -files = [ - {file = "markdown-it-py-2.2.0.tar.gz", hash = "sha256:7c9a5e412688bc771c67432cbfebcdd686c93ce6484913dccf06cb5a0bea35a1"}, - {file = "markdown_it_py-2.2.0-py3-none-any.whl", hash = "sha256:5a35f8d1870171d9acc47b99612dc146129b631baf04970128b568f190d0cc30"}, -] - -[package.dependencies] -linkify-it-py = {version = ">=1,<3", optional = true, markers = "extra == \"linkify\""} -mdurl = ">=0.1,<1.0" - -[package.extras] -benchmarking = ["psutil", "pytest", "pytest-benchmark"] -code-style = ["pre-commit (>=3.0,<4.0)"] -compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] -linkify = ["linkify-it-py (>=1,<3)"] -plugins = ["mdit-py-plugins"] -profiling = ["gprof2dot"] -rtd = ["attrs", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] -testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] - -[[package]] -name = "markupsafe" -version = "2.1.2" -description = "Safely add untrusted strings to HTML/XML markup." 
-optional = false -python-versions = ">=3.7" -files = [ - {file = "MarkupSafe-2.1.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:665a36ae6f8f20a4676b53224e33d456a6f5a72657d9c83c2aa00765072f31f7"}, - {file = "MarkupSafe-2.1.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:340bea174e9761308703ae988e982005aedf427de816d1afe98147668cc03036"}, - {file = "MarkupSafe-2.1.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22152d00bf4a9c7c83960521fc558f55a1adbc0631fbb00a9471e097b19d72e1"}, - {file = "MarkupSafe-2.1.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28057e985dace2f478e042eaa15606c7efccb700797660629da387eb289b9323"}, - {file = "MarkupSafe-2.1.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca244fa73f50a800cf8c3ebf7fd93149ec37f5cb9596aa8873ae2c1d23498601"}, - {file = "MarkupSafe-2.1.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d9d971ec1e79906046aa3ca266de79eac42f1dbf3612a05dc9368125952bd1a1"}, - {file = "MarkupSafe-2.1.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7e007132af78ea9df29495dbf7b5824cb71648d7133cf7848a2a5dd00d36f9ff"}, - {file = "MarkupSafe-2.1.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7313ce6a199651c4ed9d7e4cfb4aa56fe923b1adf9af3b420ee14e6d9a73df65"}, - {file = "MarkupSafe-2.1.2-cp310-cp310-win32.whl", hash = "sha256:c4a549890a45f57f1ebf99c067a4ad0cb423a05544accaf2b065246827ed9603"}, - {file = "MarkupSafe-2.1.2-cp310-cp310-win_amd64.whl", hash = "sha256:835fb5e38fd89328e9c81067fd642b3593c33e1e17e2fdbf77f5676abb14a156"}, - {file = "MarkupSafe-2.1.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2ec4f2d48ae59bbb9d1f9d7efb9236ab81429a764dedca114f5fdabbc3788013"}, - {file = "MarkupSafe-2.1.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:608e7073dfa9e38a85d38474c082d4281f4ce276ac0010224eaba11e929dd53a"}, - {file = "MarkupSafe-2.1.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:65608c35bfb8a76763f37036547f7adfd09270fbdbf96608be2bead319728fcd"}, - {file = "MarkupSafe-2.1.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2bfb563d0211ce16b63c7cb9395d2c682a23187f54c3d79bfec33e6705473c6"}, - {file = "MarkupSafe-2.1.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:da25303d91526aac3672ee6d49a2f3db2d9502a4a60b55519feb1a4c7714e07d"}, - {file = "MarkupSafe-2.1.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:9cad97ab29dfc3f0249b483412c85c8ef4766d96cdf9dcf5a1e3caa3f3661cf1"}, - {file = "MarkupSafe-2.1.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:085fd3201e7b12809f9e6e9bc1e5c96a368c8523fad5afb02afe3c051ae4afcc"}, - {file = "MarkupSafe-2.1.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1bea30e9bf331f3fef67e0a3877b2288593c98a21ccb2cf29b74c581a4eb3af0"}, - {file = "MarkupSafe-2.1.2-cp311-cp311-win32.whl", hash = "sha256:7df70907e00c970c60b9ef2938d894a9381f38e6b9db73c5be35e59d92e06625"}, - {file = "MarkupSafe-2.1.2-cp311-cp311-win_amd64.whl", hash = "sha256:e55e40ff0cc8cc5c07996915ad367fa47da6b3fc091fdadca7f5403239c5fec3"}, - {file = "MarkupSafe-2.1.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a6e40afa7f45939ca356f348c8e23048e02cb109ced1eb8420961b2f40fb373a"}, - {file = "MarkupSafe-2.1.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf877ab4ed6e302ec1d04952ca358b381a882fbd9d1b07cccbfd61783561f98a"}, - {file = 
"MarkupSafe-2.1.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63ba06c9941e46fa389d389644e2d8225e0e3e5ebcc4ff1ea8506dce646f8c8a"}, - {file = "MarkupSafe-2.1.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f1cd098434e83e656abf198f103a8207a8187c0fc110306691a2e94a78d0abb2"}, - {file = "MarkupSafe-2.1.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:55f44b440d491028addb3b88f72207d71eeebfb7b5dbf0643f7c023ae1fba619"}, - {file = "MarkupSafe-2.1.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:a6f2fcca746e8d5910e18782f976489939d54a91f9411c32051b4aab2bd7c513"}, - {file = "MarkupSafe-2.1.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0b462104ba25f1ac006fdab8b6a01ebbfbce9ed37fd37fd4acd70c67c973e460"}, - {file = "MarkupSafe-2.1.2-cp37-cp37m-win32.whl", hash = "sha256:7668b52e102d0ed87cb082380a7e2e1e78737ddecdde129acadb0eccc5423859"}, - {file = "MarkupSafe-2.1.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6d6607f98fcf17e534162f0709aaad3ab7a96032723d8ac8750ffe17ae5a0666"}, - {file = "MarkupSafe-2.1.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:a806db027852538d2ad7555b203300173dd1b77ba116de92da9afbc3a3be3eed"}, - {file = "MarkupSafe-2.1.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a4abaec6ca3ad8660690236d11bfe28dfd707778e2442b45addd2f086d6ef094"}, - {file = "MarkupSafe-2.1.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f03a532d7dee1bed20bc4884194a16160a2de9ffc6354b3878ec9682bb623c54"}, - {file = "MarkupSafe-2.1.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4cf06cdc1dda95223e9d2d3c58d3b178aa5dacb35ee7e3bbac10e4e1faacb419"}, - {file = "MarkupSafe-2.1.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:22731d79ed2eb25059ae3df1dfc9cb1546691cc41f4e3130fe6bfbc3ecbbecfa"}, - {file = "MarkupSafe-2.1.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f8ffb705ffcf5ddd0e80b65ddf7bed7ee4f5a441ea7d3419e861a12eaf41af58"}, - {file = "MarkupSafe-2.1.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:8db032bf0ce9022a8e41a22598eefc802314e81b879ae093f36ce9ddf39ab1ba"}, - {file = "MarkupSafe-2.1.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2298c859cfc5463f1b64bd55cb3e602528db6fa0f3cfd568d3605c50678f8f03"}, - {file = "MarkupSafe-2.1.2-cp38-cp38-win32.whl", hash = "sha256:50c42830a633fa0cf9e7d27664637532791bfc31c731a87b202d2d8ac40c3ea2"}, - {file = "MarkupSafe-2.1.2-cp38-cp38-win_amd64.whl", hash = "sha256:bb06feb762bade6bf3c8b844462274db0c76acc95c52abe8dbed28ae3d44a147"}, - {file = "MarkupSafe-2.1.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:99625a92da8229df6d44335e6fcc558a5037dd0a760e11d84be2260e6f37002f"}, - {file = "MarkupSafe-2.1.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8bca7e26c1dd751236cfb0c6c72d4ad61d986e9a41bbf76cb445f69488b2a2bd"}, - {file = "MarkupSafe-2.1.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40627dcf047dadb22cd25ea7ecfe9cbf3bbbad0482ee5920b582f3809c97654f"}, - {file = "MarkupSafe-2.1.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40dfd3fefbef579ee058f139733ac336312663c6706d1163b82b3003fb1925c4"}, - {file = "MarkupSafe-2.1.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:090376d812fb6ac5f171e5938e82e7f2d7adc2b629101cec0db8b267815c85e2"}, - {file = "MarkupSafe-2.1.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:2e7821bffe00aa6bd07a23913b7f4e01328c3d5cc0b40b36c0bd81d362faeb65"}, - {file = "MarkupSafe-2.1.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:c0a33bc9f02c2b17c3ea382f91b4db0e6cde90b63b296422a939886a7a80de1c"}, - {file = "MarkupSafe-2.1.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b8526c6d437855442cdd3d87eede9c425c4445ea011ca38d937db299382e6fa3"}, - {file = "MarkupSafe-2.1.2-cp39-cp39-win32.whl", hash = "sha256:137678c63c977754abe9086a3ec011e8fd985ab90631145dfb9294ad09c102a7"}, - {file = "MarkupSafe-2.1.2-cp39-cp39-win_amd64.whl", hash = "sha256:0576fe974b40a400449768941d5d0858cc624e3249dfd1e0c33674e5c7ca7aed"}, - {file = "MarkupSafe-2.1.2.tar.gz", hash = "sha256:abcabc8c2b26036d62d4c746381a6f7cf60aafcc653198ad678306986b09450d"}, -] - -[[package]] -name = "matplotlib" -version = "3.7.1" -description = "Python plotting package" -optional = false -python-versions = ">=3.8" -files = [ - {file = "matplotlib-3.7.1-cp310-cp310-macosx_10_12_universal2.whl", hash = "sha256:95cbc13c1fc6844ab8812a525bbc237fa1470863ff3dace7352e910519e194b1"}, - {file = "matplotlib-3.7.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:08308bae9e91aca1ec6fd6dda66237eef9f6294ddb17f0d0b3c863169bf82353"}, - {file = "matplotlib-3.7.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:544764ba51900da4639c0f983b323d288f94f65f4024dc40ecb1542d74dc0500"}, - {file = "matplotlib-3.7.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:56d94989191de3fcc4e002f93f7f1be5da476385dde410ddafbb70686acf00ea"}, - {file = "matplotlib-3.7.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e99bc9e65901bb9a7ce5e7bb24af03675cbd7c70b30ac670aa263240635999a4"}, - {file = "matplotlib-3.7.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb7d248c34a341cd4c31a06fd34d64306624c8cd8d0def7abb08792a5abfd556"}, - {file = "matplotlib-3.7.1-cp310-cp310-win32.whl", hash = "sha256:ce463ce590f3825b52e9fe5c19a3c6a69fd7675a39d589e8b5fbe772272b3a24"}, - {file = "matplotlib-3.7.1-cp310-cp310-win_amd64.whl", hash = "sha256:3d7bc90727351fb841e4d8ae620d2d86d8ed92b50473cd2b42ce9186104ecbba"}, - {file = "matplotlib-3.7.1-cp311-cp311-macosx_10_12_universal2.whl", hash = "sha256:770a205966d641627fd5cf9d3cb4b6280a716522cd36b8b284a8eb1581310f61"}, - {file = "matplotlib-3.7.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:f67bfdb83a8232cb7a92b869f9355d677bce24485c460b19d01970b64b2ed476"}, - {file = "matplotlib-3.7.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2bf092f9210e105f414a043b92af583c98f50050559616930d884387d0772aba"}, - {file = "matplotlib-3.7.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89768d84187f31717349c6bfadc0e0d8c321e8eb34522acec8a67b1236a66332"}, - {file = "matplotlib-3.7.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:83111e6388dec67822e2534e13b243cc644c7494a4bb60584edbff91585a83c6"}, - {file = "matplotlib-3.7.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a867bf73a7eb808ef2afbca03bcdb785dae09595fbe550e1bab0cd023eba3de0"}, - {file = "matplotlib-3.7.1-cp311-cp311-win32.whl", hash = "sha256:fbdeeb58c0cf0595efe89c05c224e0a502d1aa6a8696e68a73c3efc6bc354304"}, - {file = "matplotlib-3.7.1-cp311-cp311-win_amd64.whl", hash = "sha256:c0bd19c72ae53e6ab979f0ac6a3fafceb02d2ecafa023c5cca47acd934d10be7"}, - {file = "matplotlib-3.7.1-cp38-cp38-macosx_10_12_universal2.whl", hash = "sha256:6eb88d87cb2c49af00d3bbc33a003f89fd9f78d318848da029383bfc08ecfbfb"}, - {file = 
"matplotlib-3.7.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:cf0e4f727534b7b1457898c4f4ae838af1ef87c359b76dcd5330fa31893a3ac7"}, - {file = "matplotlib-3.7.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:46a561d23b91f30bccfd25429c3c706afe7d73a5cc64ef2dfaf2b2ac47c1a5dc"}, - {file = "matplotlib-3.7.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:8704726d33e9aa8a6d5215044b8d00804561971163563e6e6591f9dcf64340cc"}, - {file = "matplotlib-3.7.1-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4cf327e98ecf08fcbb82685acaf1939d3338548620ab8dfa02828706402c34de"}, - {file = "matplotlib-3.7.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:617f14ae9d53292ece33f45cba8503494ee199a75b44de7717964f70637a36aa"}, - {file = "matplotlib-3.7.1-cp38-cp38-win32.whl", hash = "sha256:7c9a4b2da6fac77bcc41b1ea95fadb314e92508bf5493ceff058e727e7ecf5b0"}, - {file = "matplotlib-3.7.1-cp38-cp38-win_amd64.whl", hash = "sha256:14645aad967684e92fc349493fa10c08a6da514b3d03a5931a1bac26e6792bd1"}, - {file = "matplotlib-3.7.1-cp39-cp39-macosx_10_12_universal2.whl", hash = "sha256:81a6b377ea444336538638d31fdb39af6be1a043ca5e343fe18d0f17e098770b"}, - {file = "matplotlib-3.7.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:28506a03bd7f3fe59cd3cd4ceb2a8d8a2b1db41afede01f66c42561b9be7b4b7"}, - {file = "matplotlib-3.7.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8c587963b85ce41e0a8af53b9b2de8dddbf5ece4c34553f7bd9d066148dc719c"}, - {file = "matplotlib-3.7.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8bf26ade3ff0f27668989d98c8435ce9327d24cffb7f07d24ef609e33d582439"}, - {file = "matplotlib-3.7.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:def58098f96a05f90af7e92fd127d21a287068202aa43b2a93476170ebd99e87"}, - {file = "matplotlib-3.7.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f883a22a56a84dba3b588696a2b8a1ab0d2c3d41be53264115c71b0a942d8fdb"}, - {file = "matplotlib-3.7.1-cp39-cp39-win32.whl", hash = "sha256:4f99e1b234c30c1e9714610eb0c6d2f11809c9c78c984a613ae539ea2ad2eb4b"}, - {file = "matplotlib-3.7.1-cp39-cp39-win_amd64.whl", hash = "sha256:3ba2af245e36990facf67fde840a760128ddd71210b2ab6406e640188d69d136"}, - {file = "matplotlib-3.7.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3032884084f541163f295db8a6536e0abb0db464008fadca6c98aaf84ccf4717"}, - {file = "matplotlib-3.7.1-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3a2cb34336110e0ed8bb4f650e817eed61fa064acbefeb3591f1b33e3a84fd96"}, - {file = "matplotlib-3.7.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b867e2f952ed592237a1828f027d332d8ee219ad722345b79a001f49df0936eb"}, - {file = "matplotlib-3.7.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:57bfb8c8ea253be947ccb2bc2d1bb3862c2bccc662ad1b4626e1f5e004557042"}, - {file = "matplotlib-3.7.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:438196cdf5dc8d39b50a45cb6e3f6274edbcf2254f85fa9b895bf85851c3a613"}, - {file = "matplotlib-3.7.1-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:21e9cff1a58d42e74d01153360de92b326708fb205250150018a52c70f43c290"}, - {file = "matplotlib-3.7.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75d4725d70b7c03e082bbb8a34639ede17f333d7247f56caceb3801cb6ff703d"}, - {file = "matplotlib-3.7.1-pp39-pypy39_pp73-win_amd64.whl", hash = 
"sha256:97cc368a7268141afb5690760921765ed34867ffb9655dd325ed207af85c7529"}, - {file = "matplotlib-3.7.1.tar.gz", hash = "sha256:7b73305f25eab4541bd7ee0b96d87e53ae9c9f1823be5659b806cd85786fe882"}, -] - -[package.dependencies] -contourpy = ">=1.0.1" -cycler = ">=0.10" -fonttools = ">=4.22.0" -importlib-resources = {version = ">=3.2.0", markers = "python_version < \"3.10\""} -kiwisolver = ">=1.0.1" -numpy = ">=1.20" -packaging = ">=20.0" -pillow = ">=6.2.0" -pyparsing = ">=2.3.1" -python-dateutil = ">=2.7" - -[[package]] -name = "matplotlib-inline" -version = "0.1.6" -description = "Inline Matplotlib backend for Jupyter" -optional = false -python-versions = ">=3.5" -files = [ - {file = "matplotlib-inline-0.1.6.tar.gz", hash = "sha256:f887e5f10ba98e8d2b150ddcf4702c1e5f8b3a20005eb0f74bfdbd360ee6f304"}, - {file = "matplotlib_inline-0.1.6-py3-none-any.whl", hash = "sha256:f1f41aab5328aa5aaea9b16d083b128102f8712542f819fe7e6a420ff581b311"}, -] - -[package.dependencies] -traitlets = "*" - -[[package]] -name = "mdit-py-plugins" -version = "0.3.3" -description = "Collection of plugins for markdown-it-py" -optional = false -python-versions = ">=3.7" -files = [ - {file = "mdit-py-plugins-0.3.3.tar.gz", hash = "sha256:5cfd7e7ac582a594e23ba6546a2f406e94e42eb33ae596d0734781261c251260"}, - {file = "mdit_py_plugins-0.3.3-py3-none-any.whl", hash = "sha256:36d08a29def19ec43acdcd8ba471d3ebab132e7879d442760d963f19913e04b9"}, -] - -[package.dependencies] -markdown-it-py = ">=1.0.0,<3.0.0" - -[package.extras] -code-style = ["pre-commit"] -rtd = ["attrs", "myst-parser (>=0.16.1,<0.17.0)", "sphinx-book-theme (>=0.1.0,<0.2.0)"] -testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] - -[[package]] -name = "mdurl" -version = "0.1.2" -description = "Markdown URL utilities" -optional = false -python-versions = ">=3.7" -files = [ - {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, - {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, -] - -[[package]] -name = "mpmath" -version = "1.3.0" -description = "Python library for arbitrary-precision floating-point arithmetic" -optional = false -python-versions = "*" -files = [ - {file = "mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c"}, - {file = "mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f"}, -] - -[package.extras] -develop = ["codecov", "pycodestyle", "pytest (>=4.6)", "pytest-cov", "wheel"] -docs = ["sphinx"] -gmpy = ["gmpy2 (>=2.1.0a4)"] -tests = ["pytest (>=4.6)"] - -[[package]] -name = "multidict" -version = "6.0.4" -description = "multidict implementation" -optional = false -python-versions = ">=3.7" -files = [ - {file = "multidict-6.0.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0b1a97283e0c85772d613878028fec909f003993e1007eafa715b24b377cb9b8"}, - {file = "multidict-6.0.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:eeb6dcc05e911516ae3d1f207d4b0520d07f54484c49dfc294d6e7d63b734171"}, - {file = "multidict-6.0.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d6d635d5209b82a3492508cf5b365f3446afb65ae7ebd755e70e18f287b0adf7"}, - {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c048099e4c9e9d615545e2001d3d8a4380bd403e1a0578734e0d31703d1b0c0b"}, - {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", 
hash = "sha256:ea20853c6dbbb53ed34cb4d080382169b6f4554d394015f1bef35e881bf83547"}, - {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:16d232d4e5396c2efbbf4f6d4df89bfa905eb0d4dc5b3549d872ab898451f569"}, - {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36c63aaa167f6c6b04ef2c85704e93af16c11d20de1d133e39de6a0e84582a93"}, - {file = "multidict-6.0.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:64bdf1086b6043bf519869678f5f2757f473dee970d7abf6da91ec00acb9cb98"}, - {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:43644e38f42e3af682690876cff722d301ac585c5b9e1eacc013b7a3f7b696a0"}, - {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7582a1d1030e15422262de9f58711774e02fa80df0d1578995c76214f6954988"}, - {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:ddff9c4e225a63a5afab9dd15590432c22e8057e1a9a13d28ed128ecf047bbdc"}, - {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:ee2a1ece51b9b9e7752e742cfb661d2a29e7bcdba2d27e66e28a99f1890e4fa0"}, - {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a2e4369eb3d47d2034032a26c7a80fcb21a2cb22e1173d761a162f11e562caa5"}, - {file = "multidict-6.0.4-cp310-cp310-win32.whl", hash = "sha256:574b7eae1ab267e5f8285f0fe881f17efe4b98c39a40858247720935b893bba8"}, - {file = "multidict-6.0.4-cp310-cp310-win_amd64.whl", hash = "sha256:4dcbb0906e38440fa3e325df2359ac6cb043df8e58c965bb45f4e406ecb162cc"}, - {file = "multidict-6.0.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0dfad7a5a1e39c53ed00d2dd0c2e36aed4650936dc18fd9a1826a5ae1cad6f03"}, - {file = "multidict-6.0.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:64da238a09d6039e3bd39bb3aee9c21a5e34f28bfa5aa22518581f910ff94af3"}, - {file = "multidict-6.0.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ff959bee35038c4624250473988b24f846cbeb2c6639de3602c073f10410ceba"}, - {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:01a3a55bd90018c9c080fbb0b9f4891db37d148a0a18722b42f94694f8b6d4c9"}, - {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c5cb09abb18c1ea940fb99360ea0396f34d46566f157122c92dfa069d3e0e982"}, - {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:666daae833559deb2d609afa4490b85830ab0dfca811a98b70a205621a6109fe"}, - {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11bdf3f5e1518b24530b8241529d2050014c884cf18b6fc69c0c2b30ca248710"}, - {file = "multidict-6.0.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d18748f2d30f94f498e852c67d61261c643b349b9d2a581131725595c45ec6c"}, - {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:458f37be2d9e4c95e2d8866a851663cbc76e865b78395090786f6cd9b3bbf4f4"}, - {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:b1a2eeedcead3a41694130495593a559a668f382eee0727352b9a41e1c45759a"}, - {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7d6ae9d593ef8641544d6263c7fa6408cc90370c8cb2bbb65f8d43e5b0351d9c"}, - {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = 
"sha256:5979b5632c3e3534e42ca6ff856bb24b2e3071b37861c2c727ce220d80eee9ed"}, - {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:dcfe792765fab89c365123c81046ad4103fcabbc4f56d1c1997e6715e8015461"}, - {file = "multidict-6.0.4-cp311-cp311-win32.whl", hash = "sha256:3601a3cece3819534b11d4efc1eb76047488fddd0c85a3948099d5da4d504636"}, - {file = "multidict-6.0.4-cp311-cp311-win_amd64.whl", hash = "sha256:81a4f0b34bd92df3da93315c6a59034df95866014ac08535fc819f043bfd51f0"}, - {file = "multidict-6.0.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:67040058f37a2a51ed8ea8f6b0e6ee5bd78ca67f169ce6122f3e2ec80dfe9b78"}, - {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:853888594621e6604c978ce2a0444a1e6e70c8d253ab65ba11657659dcc9100f"}, - {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:39ff62e7d0f26c248b15e364517a72932a611a9b75f35b45be078d81bdb86603"}, - {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:af048912e045a2dc732847d33821a9d84ba553f5c5f028adbd364dd4765092ac"}, - {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1e8b901e607795ec06c9e42530788c45ac21ef3aaa11dbd0c69de543bfb79a9"}, - {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62501642008a8b9871ddfccbf83e4222cf8ac0d5aeedf73da36153ef2ec222d2"}, - {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:99b76c052e9f1bc0721f7541e5e8c05db3941eb9ebe7b8553c625ef88d6eefde"}, - {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:509eac6cf09c794aa27bcacfd4d62c885cce62bef7b2c3e8b2e49d365b5003fe"}, - {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:21a12c4eb6ddc9952c415f24eef97e3e55ba3af61f67c7bc388dcdec1404a067"}, - {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:5cad9430ab3e2e4fa4a2ef4450f548768400a2ac635841bc2a56a2052cdbeb87"}, - {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ab55edc2e84460694295f401215f4a58597f8f7c9466faec545093045476327d"}, - {file = "multidict-6.0.4-cp37-cp37m-win32.whl", hash = "sha256:5a4dcf02b908c3b8b17a45fb0f15b695bf117a67b76b7ad18b73cf8e92608775"}, - {file = "multidict-6.0.4-cp37-cp37m-win_amd64.whl", hash = "sha256:6ed5f161328b7df384d71b07317f4d8656434e34591f20552c7bcef27b0ab88e"}, - {file = "multidict-6.0.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5fc1b16f586f049820c5c5b17bb4ee7583092fa0d1c4e28b5239181ff9532e0c"}, - {file = "multidict-6.0.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1502e24330eb681bdaa3eb70d6358e818e8e8f908a22a1851dfd4e15bc2f8161"}, - {file = "multidict-6.0.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b692f419760c0e65d060959df05f2a531945af31fda0c8a3b3195d4efd06de11"}, - {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45e1ecb0379bfaab5eef059f50115b54571acfbe422a14f668fc8c27ba410e7e"}, - {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ddd3915998d93fbcd2566ddf9cf62cdb35c9e093075f862935573d265cf8f65d"}, - {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:59d43b61c59d82f2effb39a93c48b845efe23a3852d201ed2d24ba830d0b4cf2"}, - {file = 
"multidict-6.0.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc8e1d0c705233c5dd0c5e6460fbad7827d5d36f310a0fadfd45cc3029762258"}, - {file = "multidict-6.0.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6aa0418fcc838522256761b3415822626f866758ee0bc6632c9486b179d0b52"}, - {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6748717bb10339c4760c1e63da040f5f29f5ed6e59d76daee30305894069a660"}, - {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:4d1a3d7ef5e96b1c9e92f973e43aa5e5b96c659c9bc3124acbbd81b0b9c8a951"}, - {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4372381634485bec7e46718edc71528024fcdc6f835baefe517b34a33c731d60"}, - {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:fc35cb4676846ef752816d5be2193a1e8367b4c1397b74a565a9d0389c433a1d"}, - {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:4b9d9e4e2b37daddb5c23ea33a3417901fa7c7b3dee2d855f63ee67a0b21e5b1"}, - {file = "multidict-6.0.4-cp38-cp38-win32.whl", hash = "sha256:e41b7e2b59679edfa309e8db64fdf22399eec4b0b24694e1b2104fb789207779"}, - {file = "multidict-6.0.4-cp38-cp38-win_amd64.whl", hash = "sha256:d6c254ba6e45d8e72739281ebc46ea5eb5f101234f3ce171f0e9f5cc86991480"}, - {file = "multidict-6.0.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:16ab77bbeb596e14212e7bab8429f24c1579234a3a462105cda4a66904998664"}, - {file = "multidict-6.0.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc779e9e6f7fda81b3f9aa58e3a6091d49ad528b11ed19f6621408806204ad35"}, - {file = "multidict-6.0.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4ceef517eca3e03c1cceb22030a3e39cb399ac86bff4e426d4fc6ae49052cc60"}, - {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:281af09f488903fde97923c7744bb001a9b23b039a909460d0f14edc7bf59706"}, - {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:52f2dffc8acaba9a2f27174c41c9e57f60b907bb9f096b36b1a1f3be71c6284d"}, - {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b41156839806aecb3641f3208c0dafd3ac7775b9c4c422d82ee2a45c34ba81ca"}, - {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5e3fc56f88cc98ef8139255cf8cd63eb2c586531e43310ff859d6bb3a6b51f1"}, - {file = "multidict-6.0.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8316a77808c501004802f9beebde51c9f857054a0c871bd6da8280e718444449"}, - {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f70b98cd94886b49d91170ef23ec5c0e8ebb6f242d734ed7ed677b24d50c82cf"}, - {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bf6774e60d67a9efe02b3616fee22441d86fab4c6d335f9d2051d19d90a40063"}, - {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:e69924bfcdda39b722ef4d9aa762b2dd38e4632b3641b1d9a57ca9cd18f2f83a"}, - {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:6b181d8c23da913d4ff585afd1155a0e1194c0b50c54fcfe286f70cdaf2b7176"}, - {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:52509b5be062d9eafc8170e53026fbc54cf3b32759a23d07fd935fb04fc22d95"}, - {file = "multidict-6.0.4-cp39-cp39-win32.whl", hash = "sha256:27c523fbfbdfd19c6867af7346332b62b586eed663887392cff78d614f9ec313"}, - {file = 
"multidict-6.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:33029f5734336aa0d4c0384525da0387ef89148dc7191aae00ca5fb23d7aafc2"}, - {file = "multidict-6.0.4.tar.gz", hash = "sha256:3666906492efb76453c0e7b97f2cf459b0682e7402c0489a95484965dbc1da49"}, -] - -[[package]] -name = "networkx" -version = "3.1" -description = "Python package for creating and manipulating graphs and networks" -optional = false -python-versions = ">=3.8" -files = [ - {file = "networkx-3.1-py3-none-any.whl", hash = "sha256:4f33f68cb2afcf86f28a45f43efc27a9386b535d567d2127f8f61d51dec58d36"}, - {file = "networkx-3.1.tar.gz", hash = "sha256:de346335408f84de0eada6ff9fafafff9bcda11f0a0dfaa931133debb146ab61"}, -] - -[package.extras] -default = ["matplotlib (>=3.4)", "numpy (>=1.20)", "pandas (>=1.3)", "scipy (>=1.8)"] -developer = ["mypy (>=1.1)", "pre-commit (>=3.2)"] -doc = ["nb2plots (>=0.6)", "numpydoc (>=1.5)", "pillow (>=9.4)", "pydata-sphinx-theme (>=0.13)", "sphinx (>=6.1)", "sphinx-gallery (>=0.12)", "texext (>=0.6.7)"] -extra = ["lxml (>=4.6)", "pydot (>=1.4.2)", "pygraphviz (>=1.10)", "sympy (>=1.10)"] -test = ["codecov (>=2.1)", "pytest (>=7.2)", "pytest-cov (>=4.0)"] - -[[package]] -name = "numba" -version = "0.56.4" -description = "compiling Python code using LLVM" -optional = false -python-versions = ">=3.7" -files = [ - {file = "numba-0.56.4-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:9f62672145f8669ec08762895fe85f4cf0ead08ce3164667f2b94b2f62ab23c3"}, - {file = "numba-0.56.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c602d015478b7958408d788ba00a50272649c5186ea8baa6cf71d4a1c761bba1"}, - {file = "numba-0.56.4-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:85dbaed7a05ff96492b69a8900c5ba605551afb9b27774f7f10511095451137c"}, - {file = "numba-0.56.4-cp310-cp310-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:f4cfc3a19d1e26448032049c79fc60331b104f694cf570a9e94f4e2c9d0932bb"}, - {file = "numba-0.56.4-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4e08e203b163ace08bad500b0c16f6092b1eb34fd1fce4feaf31a67a3a5ecf3b"}, - {file = "numba-0.56.4-cp310-cp310-win32.whl", hash = "sha256:0611e6d3eebe4cb903f1a836ffdb2bda8d18482bcd0a0dcc56e79e2aa3fefef5"}, - {file = "numba-0.56.4-cp310-cp310-win_amd64.whl", hash = "sha256:fbfb45e7b297749029cb28694abf437a78695a100e7c2033983d69f0ba2698d4"}, - {file = "numba-0.56.4-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:3cb1a07a082a61df80a468f232e452d818f5ae254b40c26390054e4e868556e0"}, - {file = "numba-0.56.4-cp37-cp37m-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d69ad934e13c15684e7887100a8f5f0f61d7a8e57e0fd29d9993210089a5b531"}, - {file = "numba-0.56.4-cp37-cp37m-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:dbcc847bac2d225265d054993a7f910fda66e73d6662fe7156452cac0325b073"}, - {file = "numba-0.56.4-cp37-cp37m-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8a95ca9cc77ea4571081f6594e08bd272b66060634b8324e99cd1843020364f9"}, - {file = "numba-0.56.4-cp37-cp37m-win32.whl", hash = "sha256:fcdf84ba3ed8124eb7234adfbb8792f311991cbf8aed1cad4b1b1a7ee08380c1"}, - {file = "numba-0.56.4-cp37-cp37m-win_amd64.whl", hash = "sha256:42f9e1be942b215df7e6cc9948cf9c15bb8170acc8286c063a9e57994ef82fd1"}, - {file = "numba-0.56.4-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:553da2ce74e8862e18a72a209ed3b6d2924403bdd0fb341fa891c6455545ba7c"}, - {file = "numba-0.56.4-cp38-cp38-macosx_11_0_arm64.whl", hash = 
"sha256:4373da9757049db7c90591e9ec55a2e97b2b36ba7ae3bf9c956a513374077470"}, - {file = "numba-0.56.4-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3a993349b90569518739009d8f4b523dfedd7e0049e6838c0e17435c3e70dcc4"}, - {file = "numba-0.56.4-cp38-cp38-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:720886b852a2d62619ae3900fe71f1852c62db4f287d0c275a60219e1643fc04"}, - {file = "numba-0.56.4-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:e64d338b504c9394a4a34942df4627e1e6cb07396ee3b49fe7b8d6420aa5104f"}, - {file = "numba-0.56.4-cp38-cp38-win32.whl", hash = "sha256:03fe94cd31e96185cce2fae005334a8cc712fc2ba7756e52dff8c9400718173f"}, - {file = "numba-0.56.4-cp38-cp38-win_amd64.whl", hash = "sha256:91f021145a8081f881996818474ef737800bcc613ffb1e618a655725a0f9e246"}, - {file = "numba-0.56.4-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:d0ae9270a7a5cc0ede63cd234b4ff1ce166c7a749b91dbbf45e0000c56d3eade"}, - {file = "numba-0.56.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c75e8a5f810ce80a0cfad6e74ee94f9fde9b40c81312949bf356b7304ef20740"}, - {file = "numba-0.56.4-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:a12ef323c0f2101529d455cfde7f4135eaa147bad17afe10b48634f796d96abd"}, - {file = "numba-0.56.4-cp39-cp39-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:03634579d10a6129181129de293dd6b5eaabee86881369d24d63f8fe352dd6cb"}, - {file = "numba-0.56.4-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0240f9026b015e336069329839208ebd70ec34ae5bfbf402e4fcc8e06197528e"}, - {file = "numba-0.56.4-cp39-cp39-win32.whl", hash = "sha256:14dbbabf6ffcd96ee2ac827389afa59a70ffa9f089576500434c34abf9b054a4"}, - {file = "numba-0.56.4-cp39-cp39-win_amd64.whl", hash = "sha256:0da583c532cd72feefd8e551435747e0e0fbb3c0530357e6845fcc11e38d6aea"}, - {file = "numba-0.56.4.tar.gz", hash = "sha256:32d9fef412c81483d7efe0ceb6cf4d3310fde8b624a9cecca00f790573ac96ee"}, -] - -[package.dependencies] -importlib-metadata = {version = "*", markers = "python_version < \"3.9\""} -llvmlite = "==0.39.*" -numpy = ">=1.18,<1.24" -setuptools = "*" - -[[package]] -name = "numpy" -version = "1.23.5" -description = "NumPy is the fundamental package for array computing with Python." 
-optional = false -python-versions = ">=3.8" -files = [ - {file = "numpy-1.23.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9c88793f78fca17da0145455f0d7826bcb9f37da4764af27ac945488116efe63"}, - {file = "numpy-1.23.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e9f4c4e51567b616be64e05d517c79a8a22f3606499941d97bb76f2ca59f982d"}, - {file = "numpy-1.23.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7903ba8ab592b82014713c491f6c5d3a1cde5b4a3bf116404e08f5b52f6daf43"}, - {file = "numpy-1.23.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e05b1c973a9f858c74367553e236f287e749465f773328c8ef31abe18f691e1"}, - {file = "numpy-1.23.5-cp310-cp310-win32.whl", hash = "sha256:522e26bbf6377e4d76403826ed689c295b0b238f46c28a7251ab94716da0b280"}, - {file = "numpy-1.23.5-cp310-cp310-win_amd64.whl", hash = "sha256:dbee87b469018961d1ad79b1a5d50c0ae850000b639bcb1b694e9981083243b6"}, - {file = "numpy-1.23.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ce571367b6dfe60af04e04a1834ca2dc5f46004ac1cc756fb95319f64c095a96"}, - {file = "numpy-1.23.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:56e454c7833e94ec9769fa0f86e6ff8e42ee38ce0ce1fa4cbb747ea7e06d56aa"}, - {file = "numpy-1.23.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5039f55555e1eab31124a5768898c9e22c25a65c1e0037f4d7c495a45778c9f2"}, - {file = "numpy-1.23.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58f545efd1108e647604a1b5aa809591ccd2540f468a880bedb97247e72db387"}, - {file = "numpy-1.23.5-cp311-cp311-win32.whl", hash = "sha256:b2a9ab7c279c91974f756c84c365a669a887efa287365a8e2c418f8b3ba73fb0"}, - {file = "numpy-1.23.5-cp311-cp311-win_amd64.whl", hash = "sha256:0cbe9848fad08baf71de1a39e12d1b6310f1d5b2d0ea4de051058e6e1076852d"}, - {file = "numpy-1.23.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f063b69b090c9d918f9df0a12116029e274daf0181df392839661c4c7ec9018a"}, - {file = "numpy-1.23.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0aaee12d8883552fadfc41e96b4c82ee7d794949e2a7c3b3a7201e968c7ecab9"}, - {file = "numpy-1.23.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:92c8c1e89a1f5028a4c6d9e3ccbe311b6ba53694811269b992c0b224269e2398"}, - {file = "numpy-1.23.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d208a0f8729f3fb790ed18a003f3a57895b989b40ea4dce4717e9cf4af62c6bb"}, - {file = "numpy-1.23.5-cp38-cp38-win32.whl", hash = "sha256:06005a2ef6014e9956c09ba07654f9837d9e26696a0470e42beedadb78c11b07"}, - {file = "numpy-1.23.5-cp38-cp38-win_amd64.whl", hash = "sha256:ca51fcfcc5f9354c45f400059e88bc09215fb71a48d3768fb80e357f3b457e1e"}, - {file = "numpy-1.23.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8969bfd28e85c81f3f94eb4a66bc2cf1dbdc5c18efc320af34bffc54d6b1e38f"}, - {file = "numpy-1.23.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a7ac231a08bb37f852849bbb387a20a57574a97cfc7b6cabb488a4fc8be176de"}, - {file = "numpy-1.23.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf837dc63ba5c06dc8797c398db1e223a466c7ece27a1f7b5232ba3466aafe3d"}, - {file = "numpy-1.23.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33161613d2269025873025b33e879825ec7b1d831317e68f4f2f0f84ed14c719"}, - {file = "numpy-1.23.5-cp39-cp39-win32.whl", hash = "sha256:af1da88f6bc3d2338ebbf0e22fe487821ea4d8e89053e25fa59d1d79786e7481"}, - {file = "numpy-1.23.5-cp39-cp39-win_amd64.whl", hash = 
"sha256:09b7847f7e83ca37c6e627682f145856de331049013853f344f37b0c9690e3df"}, - {file = "numpy-1.23.5-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:abdde9f795cf292fb9651ed48185503a2ff29be87770c3b8e2a14b0cd7aa16f8"}, - {file = "numpy-1.23.5-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9a909a8bae284d46bbfdefbdd4a262ba19d3bc9921b1e76126b1d21c3c34135"}, - {file = "numpy-1.23.5-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:01dd17cbb340bf0fc23981e52e1d18a9d4050792e8fb8363cecbf066a84b827d"}, - {file = "numpy-1.23.5.tar.gz", hash = "sha256:1b1766d6f397c18153d40015ddfc79ddb715cabadc04d2d228d4e5a8bc4ded1a"}, -] - -[[package]] -name = "nvidia-cublas-cu11" -version = "11.10.3.66" -description = "CUBLAS native runtime libraries" -optional = false -python-versions = ">=3" -files = [ - {file = "nvidia_cublas_cu11-11.10.3.66-py3-none-manylinux1_x86_64.whl", hash = "sha256:d32e4d75f94ddfb93ea0a5dda08389bcc65d8916a25cb9f37ac89edaeed3bded"}, - {file = "nvidia_cublas_cu11-11.10.3.66-py3-none-win_amd64.whl", hash = "sha256:8ac17ba6ade3ed56ab898a036f9ae0756f1e81052a317bf98f8c6d18dc3ae49e"}, -] - -[package.dependencies] -setuptools = "*" -wheel = "*" - -[[package]] -name = "nvidia-cuda-cupti-cu11" -version = "11.7.101" -description = "CUDA profiling tools runtime libs." -optional = false -python-versions = ">=3" -files = [ - {file = "nvidia_cuda_cupti_cu11-11.7.101-py3-none-manylinux1_x86_64.whl", hash = "sha256:e0cfd9854e1f2edaa36ca20d21cd0bdd5dcfca4e3b9e130a082e05b33b6c5895"}, - {file = "nvidia_cuda_cupti_cu11-11.7.101-py3-none-win_amd64.whl", hash = "sha256:7cc5b8f91ae5e1389c3c0ad8866b3b016a175e827ea8f162a672990a402ab2b0"}, -] - -[package.dependencies] -setuptools = "*" -wheel = "*" - -[[package]] -name = "nvidia-cuda-nvrtc-cu11" -version = "11.7.99" -description = "NVRTC native runtime libraries" -optional = false -python-versions = ">=3" -files = [ - {file = "nvidia_cuda_nvrtc_cu11-11.7.99-2-py3-none-manylinux1_x86_64.whl", hash = "sha256:9f1562822ea264b7e34ed5930567e89242d266448e936b85bc97a3370feabb03"}, - {file = "nvidia_cuda_nvrtc_cu11-11.7.99-py3-none-manylinux1_x86_64.whl", hash = "sha256:f7d9610d9b7c331fa0da2d1b2858a4a8315e6d49765091d28711c8946e7425e7"}, - {file = "nvidia_cuda_nvrtc_cu11-11.7.99-py3-none-win_amd64.whl", hash = "sha256:f2effeb1309bdd1b3854fc9b17eaf997808f8b25968ce0c7070945c4265d64a3"}, -] - -[package.dependencies] -setuptools = "*" -wheel = "*" - -[[package]] -name = "nvidia-cuda-runtime-cu11" -version = "11.7.99" -description = "CUDA Runtime native Libraries" -optional = false -python-versions = ">=3" -files = [ - {file = "nvidia_cuda_runtime_cu11-11.7.99-py3-none-manylinux1_x86_64.whl", hash = "sha256:cc768314ae58d2641f07eac350f40f99dcb35719c4faff4bc458a7cd2b119e31"}, - {file = "nvidia_cuda_runtime_cu11-11.7.99-py3-none-win_amd64.whl", hash = "sha256:bc77fa59a7679310df9d5c70ab13c4e34c64ae2124dd1efd7e5474b71be125c7"}, -] - -[package.dependencies] -setuptools = "*" -wheel = "*" - -[[package]] -name = "nvidia-cudnn-cu11" -version = "8.5.0.96" -description = "cuDNN runtime libraries" -optional = false -python-versions = ">=3" -files = [ - {file = "nvidia_cudnn_cu11-8.5.0.96-2-py3-none-manylinux1_x86_64.whl", hash = "sha256:402f40adfc6f418f9dae9ab402e773cfed9beae52333f6d86ae3107a1b9527e7"}, - {file = "nvidia_cudnn_cu11-8.5.0.96-py3-none-manylinux1_x86_64.whl", hash = "sha256:71f8111eb830879ff2836db3cccf03bbd735df9b0d17cd93761732ac50a8a108"}, -] - -[package.dependencies] -setuptools = "*" -wheel = "*" - -[[package]] -name = 
"nvidia-cufft-cu11" -version = "10.9.0.58" -description = "CUFFT native runtime libraries" -optional = false -python-versions = ">=3" -files = [ - {file = "nvidia_cufft_cu11-10.9.0.58-py3-none-manylinux1_x86_64.whl", hash = "sha256:222f9da70c80384632fd6035e4c3f16762d64ea7a843829cb278f98b3cb7dd81"}, - {file = "nvidia_cufft_cu11-10.9.0.58-py3-none-win_amd64.whl", hash = "sha256:c4d316f17c745ec9c728e30409612eaf77a8404c3733cdf6c9c1569634d1ca03"}, -] - -[[package]] -name = "nvidia-curand-cu11" -version = "10.2.10.91" -description = "CURAND native runtime libraries" -optional = false -python-versions = ">=3" -files = [ - {file = "nvidia_curand_cu11-10.2.10.91-py3-none-manylinux1_x86_64.whl", hash = "sha256:eecb269c970fa599a2660c9232fa46aaccbf90d9170b96c462e13bcb4d129e2c"}, - {file = "nvidia_curand_cu11-10.2.10.91-py3-none-win_amd64.whl", hash = "sha256:f742052af0e1e75523bde18895a9ed016ecf1e5aa0ecddfcc3658fd11a1ff417"}, -] - -[package.dependencies] -setuptools = "*" -wheel = "*" - -[[package]] -name = "nvidia-cusolver-cu11" -version = "11.4.0.1" -description = "CUDA solver native runtime libraries" -optional = false -python-versions = ">=3" -files = [ - {file = "nvidia_cusolver_cu11-11.4.0.1-2-py3-none-manylinux1_x86_64.whl", hash = "sha256:72fa7261d755ed55c0074960df5904b65e2326f7adce364cbe4945063c1be412"}, - {file = "nvidia_cusolver_cu11-11.4.0.1-py3-none-manylinux1_x86_64.whl", hash = "sha256:700b781bfefd57d161443aff9ace1878584b93e0b2cfef3d6e9296d96febbf99"}, - {file = "nvidia_cusolver_cu11-11.4.0.1-py3-none-win_amd64.whl", hash = "sha256:00f70b256add65f8c1eb3b6a65308795a93e7740f6df9e273eccbba770d370c4"}, -] - -[package.dependencies] -setuptools = "*" -wheel = "*" - -[[package]] -name = "nvidia-cusparse-cu11" -version = "11.7.4.91" -description = "CUSPARSE native runtime libraries" -optional = false -python-versions = ">=3" -files = [ - {file = "nvidia_cusparse_cu11-11.7.4.91-py3-none-manylinux1_x86_64.whl", hash = "sha256:a3389de714db63321aa11fbec3919271f415ef19fda58aed7f2ede488c32733d"}, - {file = "nvidia_cusparse_cu11-11.7.4.91-py3-none-win_amd64.whl", hash = "sha256:304a01599534f5186a8ed1c3756879282c72c118bc77dd890dc1ff868cad25b9"}, -] - -[package.dependencies] -setuptools = "*" -wheel = "*" - -[[package]] -name = "nvidia-nccl-cu11" -version = "2.14.3" -description = "NVIDIA Collective Communication Library (NCCL) Runtime" -optional = false -python-versions = ">=3" -files = [ - {file = "nvidia_nccl_cu11-2.14.3-py3-none-manylinux1_x86_64.whl", hash = "sha256:5e5534257d1284b8e825bc3a182c6f06acd6eb405e9f89d49340e98cd8f136eb"}, -] - -[[package]] -name = "nvidia-nvtx-cu11" -version = "11.7.91" -description = "NVIDIA Tools Extension" -optional = false -python-versions = ">=3" -files = [ - {file = "nvidia_nvtx_cu11-11.7.91-py3-none-manylinux1_x86_64.whl", hash = "sha256:b22c64eee426a62fc00952b507d6d29cf62b4c9df7a480fcc417e540e05fd5ac"}, - {file = "nvidia_nvtx_cu11-11.7.91-py3-none-win_amd64.whl", hash = "sha256:dfd7fcb2a91742513027d63a26b757f38dd8b07fecac282c4d132a9d373ff064"}, -] - -[package.dependencies] -setuptools = "*" -wheel = "*" - -[[package]] -name = "oauthlib" -version = "3.2.2" -description = "A generic, spec-compliant, thorough implementation of the OAuth request-signing logic" -optional = false -python-versions = ">=3.6" -files = [ - {file = "oauthlib-3.2.2-py3-none-any.whl", hash = "sha256:8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca"}, - {file = "oauthlib-3.2.2.tar.gz", hash = "sha256:9859c40929662bec5d64f34d01c99e093149682a3f38915dc0655d5a633dd918"}, -] - 
-[package.extras] -rsa = ["cryptography (>=3.0.0)"] -signals = ["blinker (>=1.4.0)"] -signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"] - -[[package]] -name = "omegaconf" -version = "2.0.6" -description = "A flexible configuration library" -optional = false -python-versions = ">=3.6" -files = [ - {file = "omegaconf-2.0.6-py3-none-any.whl", hash = "sha256:9e349fd76819b95b47aa628edea1ff83fed5b25108608abdd6c7fdca188e302a"}, - {file = "omegaconf-2.0.6.tar.gz", hash = "sha256:92ca535a788d21651bf4c2eaf5c1ca4c7a8003b2dab4a87cbb09109784268806"}, -] - -[package.dependencies] -PyYAML = ">=5.1" -typing-extensions = "*" - -[[package]] -name = "orjson" -version = "3.8.9" -description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" -optional = false -python-versions = ">=3.7" -files = [ - {file = "orjson-3.8.9-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:5d029843eae7b6cbd6468b63517b8b61471afed6572162171d8b6471b6dbf41f"}, - {file = "orjson-3.8.9-cp310-cp310-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:405933c05490efb209d0f940d8ef1403d2932a97e47010a26d2694e9dd49f84d"}, - {file = "orjson-3.8.9-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:183de66eff4d41c330a3006f210ab0bce7affe398da6f6eda9579b67245a34ff"}, - {file = "orjson-3.8.9-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bb4081fe340ed1df42dddfd055e1d50479cb0ccb976d13e6b5e8667a07fec6f4"}, - {file = "orjson-3.8.9-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d11593a2e736055dd7b9587dbf89cd1cbe4a42a70e70f186e51aee7e1b38902e"}, - {file = "orjson-3.8.9-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e20649359e28f34d01b2570e4650a076f439a959bae3a8bbe7f5923ad80f54e8"}, - {file = "orjson-3.8.9-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:c02ece4f36a160c83efe74adfba5f189c7c7702361f02b809ab73744923ee139"}, - {file = "orjson-3.8.9-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:f0e19801836cf1b30f333d475b05d79051b8ae8639a8e2422fb5f64e82676ae7"}, - {file = "orjson-3.8.9-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d4850fe5650cead3c0f8822192e381cee9d4c3b8162eb082c86c927124572dc6"}, - {file = "orjson-3.8.9-cp310-none-win_amd64.whl", hash = "sha256:5fd4193f260d9d30112b5e379d0870b54dc88040807c93cbe8d67bfea148ba5a"}, - {file = "orjson-3.8.9-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:70eae063ad8d7405dc63873760567b600fc10728ba0da24a69d49c1a5d318d6d"}, - {file = "orjson-3.8.9-cp311-cp311-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:251653437632583d02203e6b118b72b99c04425175853f35340f4bac7034a36e"}, - {file = "orjson-3.8.9-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ea833751f017ba321c277e7425b51c0b1a18a2c60f8c9c0f4c6c4d7e16cbd6c"}, - {file = "orjson-3.8.9-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8563c2cdeb923b82a5cc5bfc76c28c786777428263ee39292d928e9687165fb4"}, - {file = "orjson-3.8.9-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6f33e9ea45b4c9457eedca0c40f38cf5732c91b0fb68f091ac59e6ea68e03eb2"}, - {file = "orjson-3.8.9-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:855dee152daecb7de7b4cd7069d7854e11aa291687bffe8433156af0a224417e"}, - {file = "orjson-3.8.9-cp311-cp311-manylinux_2_28_x86_64.whl", hash = 
"sha256:74fa9e02589339defc9d3662de9e7eef51d8f9f3a7f6304b43b18b39d7bbf10f"}, - {file = "orjson-3.8.9-cp311-none-win_amd64.whl", hash = "sha256:6c5b10ba1e62df8f96cbc37f6d5ae9acb3f6475926dea8b1b6a1a60f201a64f7"}, - {file = "orjson-3.8.9-cp37-cp37m-macosx_10_7_x86_64.whl", hash = "sha256:a651123d01bc399fcd866e56acc2d76512e62aae3673652b13b470ea69faf1f4"}, - {file = "orjson-3.8.9-cp37-cp37m-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:73019b6d2cc998c99556020c6bd8f8bc28420c69583186ca290c66a27916a3b7"}, - {file = "orjson-3.8.9-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f5c3daa8b02786ad5f0e14ae16a59bbb4e02cbae3a41989a25188e5a6c962ff"}, - {file = "orjson-3.8.9-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:598598b7f81f8fda7c3e09c88165f844152b7be223bc4ea929ec8ad59b00ea17"}, - {file = "orjson-3.8.9-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:090b10bdb06baae6d5cd3550d772ecbabd833bfceed7592ff167c0a82f5b4c20"}, - {file = "orjson-3.8.9-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd46f688ddf9c2ea10367446fe9bf3ceba0f7490c15b4f96420491c7f00bb283"}, - {file = "orjson-3.8.9-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:b8ed8d780e9fab01bc404a70d755a8b2b34ea6c0b6604b65de135daaaadaf9a9"}, - {file = "orjson-3.8.9-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:8a32c9fb742868a34346f3c52e12d893a9d27f8e0c0bf3c480db7e6903d8be28"}, - {file = "orjson-3.8.9-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:2ba366009b98ac8899e935eff6fef7672d3ea43d3ce9deb3ee33452134b6cc3a"}, - {file = "orjson-3.8.9-cp37-none-win_amd64.whl", hash = "sha256:236b9313425cb2570626c64dd5cb6caff13882d1717d491da542cff228b96e97"}, - {file = "orjson-3.8.9-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:e8efc7e9ec35336f7cc98b6692536b1262046ff1d2a545295a4d89b8a2495903"}, - {file = "orjson-3.8.9-cp38-cp38-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:8c7eba3610ae69f4aba4032ecb61b0a6fbd1e4537283d1553eb8c1cb136e9118"}, - {file = "orjson-3.8.9-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7742649e4c357d4e7ad483a35ff5f55d519e895de56772cc486913614ee7d23b"}, - {file = "orjson-3.8.9-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b6566fb8daa538c7848fd6822e2409a7e1c41dae8e65e6536598d505f641a318"}, - {file = "orjson-3.8.9-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0ce8a2a667221e2e5160021e26b09e9c13eeedafb5cda1981340c8c0c0bc8f9d"}, - {file = "orjson-3.8.9-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c0399631b88fa4868956badef2561fba07dffcaf050bf53959ee50d26edf6f6"}, - {file = "orjson-3.8.9-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:189ccb16ed140a824d133fa1c55175cf0d2207edaade54f1db0456a526cb5fd8"}, - {file = "orjson-3.8.9-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b707fa4481e1af19b3052ec9352c688bad3f539d7bdd8aa4a451f6dd7e4bae73"}, - {file = "orjson-3.8.9-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:c3d988eb562da1dda7d49e9abd8a64b3cabc632b4299d177fb9e0c0ca9f06b8c"}, - {file = "orjson-3.8.9-cp38-none-win_amd64.whl", hash = "sha256:b30240eb6b22daab604f1595f6aacf92bcdac0d29e2d7ad507dfac68d2b39182"}, - {file = "orjson-3.8.9-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:81869a6de00bc676d10056fa8bb28cbe805b1cf498a45c14cb7b1765eee33fcb"}, - {file = 
"orjson-3.8.9-cp39-cp39-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:a25a5a215b19d414de8d416a3c5414f29165843a06f704cc0345ded9eac34ac1"}, - {file = "orjson-3.8.9-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dec0f2bea52e30ea98ce095f1f42da04535791f9a31b2aab2499caa88307bc49"}, - {file = "orjson-3.8.9-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7b91d88fe96b698b28bb1b95b1fce226f72757ab3ab7d8d97551e23bc629c84f"}, - {file = "orjson-3.8.9-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7629841ccdcccd3c43ebc6a4165abe9844909fcedb2041994c0153470f610801"}, - {file = "orjson-3.8.9-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d875b304e19f4b2758d233bbf2b9d627c66fac50b3150b8d31a35ba6cda3db67"}, - {file = "orjson-3.8.9-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:723ec880c5290fe4de330febb8030e57c1978fbd624fc5b9399969e7d7d74984"}, - {file = "orjson-3.8.9-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b11f8a71c82d19fce11ce487efeec2ca0dc3bcf5b4564445fecfc68d9c268744"}, - {file = "orjson-3.8.9-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b2079bf86dec62731c1b90fdfea3211f993f0c894d9261e0ce9b68ed9c9dfbec"}, - {file = "orjson-3.8.9-cp39-none-win_amd64.whl", hash = "sha256:97d94322a2eaab767ba8d52f6bf9d0ec0f35313fe36287be6e6085dd65d55d37"}, - {file = "orjson-3.8.9.tar.gz", hash = "sha256:c40bece58c11cb09aff17424d21b41f6f767d2b1252b2f745ec3ff29cce6a240"}, -] - -[[package]] -name = "packaging" -version = "23.0" -description = "Core utilities for Python packages" -optional = false -python-versions = ">=3.7" -files = [ - {file = "packaging-23.0-py3-none-any.whl", hash = "sha256:714ac14496c3e68c99c29b00845f7a2b85f3bb6f1078fd9f72fd20f0570002b2"}, - {file = "packaging-23.0.tar.gz", hash = "sha256:b6ad297f8907de0fa2fe1ccbd26fdaf387f5f47c7275fedf8cce89f99446cf97"}, -] - -[[package]] -name = "pandas" -version = "2.0.0" -description = "Powerful data structures for data analysis, time series, and statistics" -optional = false -python-versions = ">=3.8" -files = [ - {file = "pandas-2.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bbb2c5e94d6aa4e632646a3bacd05c2a871c3aa3e85c9bec9be99cb1267279f2"}, - {file = "pandas-2.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b5337c87c4e963f97becb1217965b6b75c6fe5f54c4cf09b9a5ac52fc0bd03d3"}, - {file = "pandas-2.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ded51f7e3dd9b4f8b87f2ceb7bd1a8df2491f7ee72f7074c6927a512607199e"}, - {file = "pandas-2.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:52c858de9e9fc422d25e67e1592a6e6135d7bcf9a19fcaf4d0831a0be496bf21"}, - {file = "pandas-2.0.0-cp310-cp310-win32.whl", hash = "sha256:2d1d138848dd71b37e3cbe7cd952ff84e2ab04d8988972166e18567dcc811245"}, - {file = "pandas-2.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:d08e41d96bc4de6f500afe80936c68fce6099d5a434e2af7c7fd8e7c72a3265d"}, - {file = "pandas-2.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:24472cfc7ced511ac90608728b88312be56edc8f19b9ed885a7d2e47ffaf69c0"}, - {file = "pandas-2.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4ffb14f50c74ee541610668137830bb93e9dfa319b1bef2cedf2814cd5ac9c70"}, - {file = "pandas-2.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c24c7d12d033a372a9daf9ff2c80f8b0af6f98d14664dbb0a4f6a029094928a7"}, - {file = 
"pandas-2.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8318de0f886e4dcb8f9f36e45a3d6a6c3d1cfdc508354da85e739090f0222991"}, - {file = "pandas-2.0.0-cp311-cp311-win32.whl", hash = "sha256:57c34b79c13249505e850d0377b722961b99140f81dafbe6f19ef10239f6284a"}, - {file = "pandas-2.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:8f987ec26e96a8490909bc5d98c514147236e49830cba7df8690f6087c12bbae"}, - {file = "pandas-2.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b3ba8f5dd470d8bfbc4259829589f4a32881151c49e36384d9eb982b35a12020"}, - {file = "pandas-2.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:fcd471c9d9f60926ab2f15c6c29164112f458acb42280365fbefa542d0c2fc74"}, - {file = "pandas-2.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9253edfd015520ce77a9343eb7097429479c039cd3ebe81d7810ea11b4b24695"}, - {file = "pandas-2.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:977326039bd1ded620001a1889e2ed4798460a6bc5a24fbaebb5f07a41c32a55"}, - {file = "pandas-2.0.0-cp38-cp38-win32.whl", hash = "sha256:78425ca12314b23356c28b16765639db10ebb7d8983f705d6759ff7fe41357fa"}, - {file = "pandas-2.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:d93b7fcfd9f3328072b250d6d001dcfeec5d3bb66c1b9c8941e109a46c0c01a8"}, - {file = "pandas-2.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:425705cee8be54db2504e8dd2a730684790b15e5904b750c367611ede49098ab"}, - {file = "pandas-2.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a4f789b7c012a608c08cda4ff0872fd979cb18907a37982abe884e6f529b8793"}, - {file = "pandas-2.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3bb9d840bf15656805f6a3d87eea9dcb7efdf1314a82adcf7f00b820427c5570"}, - {file = "pandas-2.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0778ab54c8f399d83d98ffb674d11ec716449956bc6f6821891ab835848687f2"}, - {file = "pandas-2.0.0-cp39-cp39-win32.whl", hash = "sha256:70db5c278bbec0306d32bf78751ff56b9594c05a5098386f6c8a563659124f91"}, - {file = "pandas-2.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:4f3320bb55f34af4193020158ef8118ee0fb9aec7cc47d2084dbfdd868a0a24f"}, - {file = "pandas-2.0.0.tar.gz", hash = "sha256:cda9789e61b44463c1c4fe17ef755de77bcd13b09ba31c940d20f193d63a5dc8"}, -] - -[package.dependencies] -numpy = [ - {version = ">=1.20.3", markers = "python_version < \"3.10\""}, - {version = ">=1.21.0", markers = "python_version >= \"3.10\""}, - {version = ">=1.23.2", markers = "python_version >= \"3.11\""}, -] -python-dateutil = ">=2.8.2" -pytz = ">=2020.1" -tzdata = ">=2022.1" - -[package.extras] -all = ["PyQt5 (>=5.15.1)", "SQLAlchemy (>=1.4.16)", "beautifulsoup4 (>=4.9.3)", "bottleneck (>=1.3.2)", "brotlipy (>=0.7.0)", "fastparquet (>=0.6.3)", "fsspec (>=2021.07.0)", "gcsfs (>=2021.07.0)", "html5lib (>=1.1)", "hypothesis (>=6.34.2)", "jinja2 (>=3.0.0)", "lxml (>=4.6.3)", "matplotlib (>=3.6.1)", "numba (>=0.53.1)", "numexpr (>=2.7.3)", "odfpy (>=1.4.1)", "openpyxl (>=3.0.7)", "pandas-gbq (>=0.15.0)", "psycopg2 (>=2.8.6)", "pyarrow (>=7.0.0)", "pymysql (>=1.0.2)", "pyreadstat (>=1.1.2)", "pytest (>=7.0.0)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)", "python-snappy (>=0.6.0)", "pyxlsb (>=1.0.8)", "qtpy (>=2.2.0)", "s3fs (>=2021.08.0)", "scipy (>=1.7.1)", "tables (>=3.6.1)", "tabulate (>=0.8.9)", "xarray (>=0.21.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=1.4.3)", "zstandard (>=0.15.2)"] -aws = ["s3fs (>=2021.08.0)"] -clipboard = ["PyQt5 (>=5.15.1)", "qtpy (>=2.2.0)"] -compression = ["brotlipy 
(>=0.7.0)", "python-snappy (>=0.6.0)", "zstandard (>=0.15.2)"] -computation = ["scipy (>=1.7.1)", "xarray (>=0.21.0)"] -excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.0.7)", "pyxlsb (>=1.0.8)", "xlrd (>=2.0.1)", "xlsxwriter (>=1.4.3)"] -feather = ["pyarrow (>=7.0.0)"] -fss = ["fsspec (>=2021.07.0)"] -gcp = ["gcsfs (>=2021.07.0)", "pandas-gbq (>=0.15.0)"] -hdf5 = ["tables (>=3.6.1)"] -html = ["beautifulsoup4 (>=4.9.3)", "html5lib (>=1.1)", "lxml (>=4.6.3)"] -mysql = ["SQLAlchemy (>=1.4.16)", "pymysql (>=1.0.2)"] -output-formatting = ["jinja2 (>=3.0.0)", "tabulate (>=0.8.9)"] -parquet = ["pyarrow (>=7.0.0)"] -performance = ["bottleneck (>=1.3.2)", "numba (>=0.53.1)", "numexpr (>=2.7.1)"] -plot = ["matplotlib (>=3.6.1)"] -postgresql = ["SQLAlchemy (>=1.4.16)", "psycopg2 (>=2.8.6)"] -spss = ["pyreadstat (>=1.1.2)"] -sql-other = ["SQLAlchemy (>=1.4.16)"] -test = ["hypothesis (>=6.34.2)", "pytest (>=7.0.0)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)"] -xml = ["lxml (>=4.6.3)"] - -[[package]] -name = "pillow" -version = "9.3.0" -description = "Python Imaging Library (Fork)" -optional = false -python-versions = ">=3.7" -files = [ - {file = "Pillow-9.3.0-1-cp37-cp37m-win32.whl", hash = "sha256:e6ea6b856a74d560d9326c0f5895ef8050126acfdc7ca08ad703eb0081e82b74"}, - {file = "Pillow-9.3.0-1-cp37-cp37m-win_amd64.whl", hash = "sha256:32a44128c4bdca7f31de5be641187367fe2a450ad83b833ef78910397db491aa"}, - {file = "Pillow-9.3.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:0b7257127d646ff8676ec8a15520013a698d1fdc48bc2a79ba4e53df792526f2"}, - {file = "Pillow-9.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b90f7616ea170e92820775ed47e136208e04c967271c9ef615b6fbd08d9af0e3"}, - {file = "Pillow-9.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68943d632f1f9e3dce98908e873b3a090f6cba1cbb1b892a9e8d97c938871fbe"}, - {file = "Pillow-9.3.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:be55f8457cd1eac957af0c3f5ece7bc3f033f89b114ef30f710882717670b2a8"}, - {file = "Pillow-9.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d77adcd56a42d00cc1be30843d3426aa4e660cab4a61021dc84467123f7a00c"}, - {file = "Pillow-9.3.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:829f97c8e258593b9daa80638aee3789b7df9da5cf1336035016d76f03b8860c"}, - {file = "Pillow-9.3.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:801ec82e4188e935c7f5e22e006d01611d6b41661bba9fe45b60e7ac1a8f84de"}, - {file = "Pillow-9.3.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:871b72c3643e516db4ecf20efe735deb27fe30ca17800e661d769faab45a18d7"}, - {file = "Pillow-9.3.0-cp310-cp310-win32.whl", hash = "sha256:655a83b0058ba47c7c52e4e2df5ecf484c1b0b0349805896dd350cbc416bdd91"}, - {file = "Pillow-9.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:9f47eabcd2ded7698106b05c2c338672d16a6f2a485e74481f524e2a23c2794b"}, - {file = "Pillow-9.3.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:57751894f6618fd4308ed8e0c36c333e2f5469744c34729a27532b3db106ee20"}, - {file = "Pillow-9.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7db8b751ad307d7cf238f02101e8e36a128a6cb199326e867d1398067381bff4"}, - {file = "Pillow-9.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3033fbe1feb1b59394615a1cafaee85e49d01b51d54de0cbf6aa8e64182518a1"}, - {file = "Pillow-9.3.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:22b012ea2d065fd163ca096f4e37e47cd8b59cf4b0fd47bfca6abb93df70b34c"}, - {file = 
"Pillow-9.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b9a65733d103311331875c1dca05cb4606997fd33d6acfed695b1232ba1df193"}, - {file = "Pillow-9.3.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:502526a2cbfa431d9fc2a079bdd9061a2397b842bb6bc4239bb176da00993812"}, - {file = "Pillow-9.3.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:90fb88843d3902fe7c9586d439d1e8c05258f41da473952aa8b328d8b907498c"}, - {file = "Pillow-9.3.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:89dca0ce00a2b49024df6325925555d406b14aa3efc2f752dbb5940c52c56b11"}, - {file = "Pillow-9.3.0-cp311-cp311-win32.whl", hash = "sha256:3168434d303babf495d4ba58fc22d6604f6e2afb97adc6a423e917dab828939c"}, - {file = "Pillow-9.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:18498994b29e1cf86d505edcb7edbe814d133d2232d256db8c7a8ceb34d18cef"}, - {file = "Pillow-9.3.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:772a91fc0e03eaf922c63badeca75e91baa80fe2f5f87bdaed4280662aad25c9"}, - {file = "Pillow-9.3.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:afa4107d1b306cdf8953edde0534562607fe8811b6c4d9a486298ad31de733b2"}, - {file = "Pillow-9.3.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b4012d06c846dc2b80651b120e2cdd787b013deb39c09f407727ba90015c684f"}, - {file = "Pillow-9.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77ec3e7be99629898c9a6d24a09de089fa5356ee408cdffffe62d67bb75fdd72"}, - {file = "Pillow-9.3.0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:6c738585d7a9961d8c2821a1eb3dcb978d14e238be3d70f0a706f7fa9316946b"}, - {file = "Pillow-9.3.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:828989c45c245518065a110434246c44a56a8b2b2f6347d1409c787e6e4651ee"}, - {file = "Pillow-9.3.0-cp37-cp37m-win32.whl", hash = "sha256:82409ffe29d70fd733ff3c1025a602abb3e67405d41b9403b00b01debc4c9a29"}, - {file = "Pillow-9.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:41e0051336807468be450d52b8edd12ac60bebaa97fe10c8b660f116e50b30e4"}, - {file = "Pillow-9.3.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:b03ae6f1a1878233ac620c98f3459f79fd77c7e3c2b20d460284e1fb370557d4"}, - {file = "Pillow-9.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4390e9ce199fc1951fcfa65795f239a8a4944117b5935a9317fb320e7767b40f"}, - {file = "Pillow-9.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40e1ce476a7804b0fb74bcfa80b0a2206ea6a882938eaba917f7a0f004b42502"}, - {file = "Pillow-9.3.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a0a06a052c5f37b4ed81c613a455a81f9a3a69429b4fd7bb913c3fa98abefc20"}, - {file = "Pillow-9.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03150abd92771742d4a8cd6f2fa6246d847dcd2e332a18d0c15cc75bf6703040"}, - {file = "Pillow-9.3.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:15c42fb9dea42465dfd902fb0ecf584b8848ceb28b41ee2b58f866411be33f07"}, - {file = "Pillow-9.3.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:51e0e543a33ed92db9f5ef69a0356e0b1a7a6b6a71b80df99f1d181ae5875636"}, - {file = "Pillow-9.3.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:3dd6caf940756101205dffc5367babf288a30043d35f80936f9bfb37f8355b32"}, - {file = "Pillow-9.3.0-cp38-cp38-win32.whl", hash = "sha256:f1ff2ee69f10f13a9596480335f406dd1f70c3650349e2be67ca3139280cade0"}, - {file = "Pillow-9.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:276a5ca930c913f714e372b2591a22c4bd3b81a418c0f6635ba832daec1cbcfc"}, - {file = 
"Pillow-9.3.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:73bd195e43f3fadecfc50c682f5055ec32ee2c933243cafbfdec69ab1aa87cad"}, - {file = "Pillow-9.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1c7c8ae3864846fc95f4611c78129301e203aaa2af813b703c55d10cc1628535"}, - {file = "Pillow-9.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e0918e03aa0c72ea56edbb00d4d664294815aa11291a11504a377ea018330d3"}, - {file = "Pillow-9.3.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b0915e734b33a474d76c28e07292f196cdf2a590a0d25bcc06e64e545f2d146c"}, - {file = "Pillow-9.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af0372acb5d3598f36ec0914deed2a63f6bcdb7b606da04dc19a88d31bf0c05b"}, - {file = "Pillow-9.3.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:ad58d27a5b0262c0c19b47d54c5802db9b34d38bbf886665b626aff83c74bacd"}, - {file = "Pillow-9.3.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:97aabc5c50312afa5e0a2b07c17d4ac5e865b250986f8afe2b02d772567a380c"}, - {file = "Pillow-9.3.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9aaa107275d8527e9d6e7670b64aabaaa36e5b6bd71a1015ddd21da0d4e06448"}, - {file = "Pillow-9.3.0-cp39-cp39-win32.whl", hash = "sha256:bac18ab8d2d1e6b4ce25e3424f709aceef668347db8637c2296bcf41acb7cf48"}, - {file = "Pillow-9.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:b472b5ea442148d1c3e2209f20f1e0bb0eb556538690fa70b5e1f79fa0ba8dc2"}, - {file = "Pillow-9.3.0-pp37-pypy37_pp73-macosx_10_10_x86_64.whl", hash = "sha256:ab388aaa3f6ce52ac1cb8e122c4bd46657c15905904b3120a6248b5b8b0bc228"}, - {file = "Pillow-9.3.0-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dbb8e7f2abee51cef77673be97760abff1674ed32847ce04b4af90f610144c7b"}, - {file = "Pillow-9.3.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bca31dd6014cb8b0b2db1e46081b0ca7d936f856da3b39744aef499db5d84d02"}, - {file = "Pillow-9.3.0-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c7025dce65566eb6e89f56c9509d4f628fddcedb131d9465cacd3d8bac337e7e"}, - {file = "Pillow-9.3.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:ebf2029c1f464c59b8bdbe5143c79fa2045a581ac53679733d3a91d400ff9efb"}, - {file = "Pillow-9.3.0-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b59430236b8e58840a0dfb4099a0e8717ffb779c952426a69ae435ca1f57210c"}, - {file = "Pillow-9.3.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:12ce4932caf2ddf3e41d17fc9c02d67126935a44b86df6a206cf0d7161548627"}, - {file = "Pillow-9.3.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ae5331c23ce118c53b172fa64a4c037eb83c9165aba3a7ba9ddd3ec9fa64a699"}, - {file = "Pillow-9.3.0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:0b07fffc13f474264c336298d1b4ce01d9c5a011415b79d4ee5527bb69ae6f65"}, - {file = "Pillow-9.3.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:073adb2ae23431d3b9bcbcff3fe698b62ed47211d0716b067385538a1b0f28b8"}, - {file = "Pillow-9.3.0.tar.gz", hash = "sha256:c935a22a557a560108d780f9a0fc426dd7459940dc54faa49d83249c8d3e760f"}, -] - -[package.extras] -docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-issues (>=3.0.1)", "sphinx-removed-in", "sphinxext-opengraph"] -tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] - -[[package]] -name = "pkgutil-resolve-name" -version = "1.3.10" -description = "Resolve a name to 
an object." -optional = false -python-versions = ">=3.6" -files = [ - {file = "pkgutil_resolve_name-1.3.10-py3-none-any.whl", hash = "sha256:ca27cc078d25c5ad71a9de0a7a330146c4e014c2462d9af19c6b828280649c5e"}, - {file = "pkgutil_resolve_name-1.3.10.tar.gz", hash = "sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174"}, -] - -[[package]] -name = "platformdirs" -version = "3.2.0" -description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." -optional = false -python-versions = ">=3.7" -files = [ - {file = "platformdirs-3.2.0-py3-none-any.whl", hash = "sha256:ebe11c0d7a805086e99506aa331612429a72ca7cd52a1f0d277dc4adc20cb10e"}, - {file = "platformdirs-3.2.0.tar.gz", hash = "sha256:d5b638ca397f25f979350ff789db335903d7ea010ab28903f57b27e1b16c2b08"}, -] - -[package.extras] -docs = ["furo (>=2022.12.7)", "proselint (>=0.13)", "sphinx (>=6.1.3)", "sphinx-autodoc-typehints (>=1.22,!=1.23.4)"] -test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.2.2)", "pytest-cov (>=4)", "pytest-mock (>=3.10)"] - -[[package]] -name = "pooch" -version = "1.7.0" -description = "\"Pooch manages your Python library's sample data files: it automatically downloads and stores them in a local directory, with support for versioning and corruption checks.\"" -optional = false -python-versions = ">=3.7" -files = [ - {file = "pooch-1.7.0-py3-none-any.whl", hash = "sha256:74258224fc33d58f53113cf955e8d51bf01386b91492927d0d1b6b341a765ad7"}, - {file = "pooch-1.7.0.tar.gz", hash = "sha256:f174a1041b6447f0eef8860f76d17f60ed2f857dc0efa387a7f08228af05d998"}, -] - -[package.dependencies] -packaging = ">=20.0" -platformdirs = ">=2.5.0" -requests = ">=2.19.0" - -[package.extras] -progress = ["tqdm (>=4.41.0,<5.0.0)"] -sftp = ["paramiko (>=2.7.0)"] -xxhash = ["xxhash (>=1.4.3)"] - -[[package]] -name = "portalocker" -version = "2.7.0" -description = "Wraps the portalocker recipe for easy usage" -optional = false -python-versions = ">=3.5" -files = [ - {file = "portalocker-2.7.0-py2.py3-none-any.whl", hash = "sha256:a07c5b4f3985c3cf4798369631fb7011adb498e2a46d8440efc75a8f29a0f983"}, - {file = "portalocker-2.7.0.tar.gz", hash = "sha256:032e81d534a88ec1736d03f780ba073f047a06c478b06e2937486f334e955c51"}, -] - -[package.dependencies] -pywin32 = {version = ">=226", markers = "platform_system == \"Windows\""} - -[package.extras] -docs = ["sphinx (>=1.7.1)"] -redis = ["redis"] -tests = ["pytest (>=5.4.1)", "pytest-cov (>=2.8.1)", "pytest-mypy (>=0.8.0)", "pytest-timeout (>=2.1.0)", "redis", "sphinx (>=6.0.0)"] - -[[package]] -name = "praat-parselmouth" -version = "0.4.3" -description = "Praat in Python, the Pythonic way" -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" -files = [ - {file = "praat-parselmouth-0.4.3.tar.gz", hash = "sha256:93538d0ba06444b68d18b793efb436b0d645c62c0397c4977c1d27b679aee168"}, - {file = "praat_parselmouth-0.4.3-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:124925f3e40a6d626d65789d449bdabe43078528efbee6f3a1df6e67db60c971"}, - {file = "praat_parselmouth-0.4.3-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:0d3023d9b625c6b0a3cbe8a4f09cc23f666f9b9df40c59e33c4c9ca5b8ea1dac"}, - {file = "praat_parselmouth-0.4.3-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:6841b9d9d2a614382cf186311610d663f0170ba20824296878eb98905b04899a"}, - {file = "praat_parselmouth-0.4.3-cp27-cp27m-win32.whl", hash = "sha256:4fee56603cb57326457c6af779b89f96e7b2745114baa996659e1d52e5f245a3"}, - {file = 
"praat_parselmouth-0.4.3-cp27-cp27m-win_amd64.whl", hash = "sha256:dc688749a0db4144936d3ed5180996500eb927bbf321192019ddee535fb97f3d"}, - {file = "praat_parselmouth-0.4.3-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:c0ccf73de16c0f69162952b0d1865d4dbc929de0f9b88a9d7aea57f454de3cb8"}, - {file = "praat_parselmouth-0.4.3-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:87fa2dd7f8b5dd5e3127af82e97b229ae2db8e1656525329224df4c0bffa024c"}, - {file = "praat_parselmouth-0.4.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2bc111055efccf2bb25039a7891ec9ef106b13ddc5680293659ff0b4c5f4353f"}, - {file = "praat_parselmouth-0.4.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cd38542210b1f381086b4a9424832b2330c42712e0fb7ea6c28c9200119c294b"}, - {file = "praat_parselmouth-0.4.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a536b37411c52171500984c97bfd66dc000701a7dc0807e11061b85a653a600a"}, - {file = "praat_parselmouth-0.4.3-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6ea1ab0632eff129516f147041aaf7874e50770561a2e9b9c81913b6de243f2a"}, - {file = "praat_parselmouth-0.4.3-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:543ba3deb32502e93074b76b1cfb3f09e598e5d9f74a0345fa5b3928fedb5a51"}, - {file = "praat_parselmouth-0.4.3-cp310-cp310-win32.whl", hash = "sha256:e0addf774a57d57a54df2b06de04ad0de34e81a3abfda03f744c732776c779ec"}, - {file = "praat_parselmouth-0.4.3-cp310-cp310-win_amd64.whl", hash = "sha256:fc497357aeea2e3cbca2fb308d66b9de9739dc6b320ca2661ca6250f7a7489bd"}, - {file = "praat_parselmouth-0.4.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:afac52cb7a72cda7fe2ec1d9573d8f402786abcb06bd7a22f2ca240f95e33263"}, - {file = "praat_parselmouth-0.4.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b2261a79c2dc5387a7a678ec304ef8dd00ed93d9e028148bbb064fd0ac222a3a"}, - {file = "praat_parselmouth-0.4.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:de31b458d3c1ca7ee45506871a38fdc3aec44526c065552adf8bec2876e816bd"}, - {file = "praat_parselmouth-0.4.3-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:63ff24e045bed7c44f140fb7bab910d89fd3a45b7e8afe5b5e936aa2eea62904"}, - {file = "praat_parselmouth-0.4.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a40c51c628235c54c8956306fc58fd14cd04127d85359134ef73ef35ff19d651"}, - {file = "praat_parselmouth-0.4.3-cp311-cp311-win32.whl", hash = "sha256:f8ad9ee3be60d33f1ad593ec5f99466b1c266e00d29a5ec5787f969c618a7a9a"}, - {file = "praat_parselmouth-0.4.3-cp311-cp311-win_amd64.whl", hash = "sha256:c32b1f3632e69ed67f501c635fff37ad72e1eae4ddd1c2c0827c4690c06ee990"}, - {file = "praat_parselmouth-0.4.3-cp35-cp35m-macosx_10_9_x86_64.whl", hash = "sha256:1dcb6f55376f193c83d123953a55de471bcadd756af3b157c13d455b0c052999"}, - {file = "praat_parselmouth-0.4.3-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:0970facd26b771f5799a396a0e54d12a69fbf8904a4f6ae0442f3831175e4508"}, - {file = "praat_parselmouth-0.4.3-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:5c1104f41d9fef48cd44247738b9c8735e10a12ba0a1860e478e0bd69201813e"}, - {file = "praat_parselmouth-0.4.3-cp35-cp35m-win32.whl", hash = "sha256:3d12469e301d9a25f29f6cb5427aa9a1276e7f2f1edf1a3caede69a84c46170f"}, - {file = "praat_parselmouth-0.4.3-cp35-cp35m-win_amd64.whl", hash = "sha256:c4142faf664dd6c7f1773d04331b278d92e17064eaaef09132954f72a9041ea0"}, - {file = "praat_parselmouth-0.4.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = 
"sha256:5ea2079d519e8d42ed8d2de3c4f68803110060a8ae5d1c56df795c600aa1c3be"}, - {file = "praat_parselmouth-0.4.3-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:2e88f00b740548cf3de5768b2d06e296e525164ea71ccc991920f41f2e277ad2"}, - {file = "praat_parselmouth-0.4.3-cp36-cp36m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:2da226bccd52fd81223eb94a7ea43a1a7588e4384ea65ce0818329b73ef8df6d"}, - {file = "praat_parselmouth-0.4.3-cp36-cp36m-win32.whl", hash = "sha256:0f3af0413992398ac613b0eefdfbcb8cad064c36a28b972300a2bb760523c109"}, - {file = "praat_parselmouth-0.4.3-cp36-cp36m-win_amd64.whl", hash = "sha256:e0ed79941b6e37a440860511767eedd85ec003060870d10ff1f98773b2a268ae"}, - {file = "praat_parselmouth-0.4.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:10f3113ad4f5f6df5fe81d4080ca3ad46de2fe0fdb8ebbcad1ba884b1cae3b9d"}, - {file = "praat_parselmouth-0.4.3-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6e9338f7a1b304390014bb2eec619e5a306527a4df438e68439c92aa968627dc"}, - {file = "praat_parselmouth-0.4.3-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:cb3798b2ca8163444662b6ae84a74b1add38b2c04e5af8d07bde55cf0335300a"}, - {file = "praat_parselmouth-0.4.3-cp37-cp37m-win32.whl", hash = "sha256:d947f9d1fb092b91acca1259ce4dd62ff4f456338958fd1fd41ee65efc53ca2c"}, - {file = "praat_parselmouth-0.4.3-cp37-cp37m-win_amd64.whl", hash = "sha256:2f3e026f590aeec8f68921359f56a42efa43076942f271244bee57fd22db8eef"}, - {file = "praat_parselmouth-0.4.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:28844229dab2a9335629b4526188b9540d02208856f48b1a46776279c022f937"}, - {file = "praat_parselmouth-0.4.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:410748af84eb8c2eb69e408e300694a45090ed7c4f31375c4ec75a8c18f87169"}, - {file = "praat_parselmouth-0.4.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:30ff6f17babad25b9d6ab086465a54494eef9d1b4368b0722230c5282be2bf94"}, - {file = "praat_parselmouth-0.4.3-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:ff7096bc3e87a8f719e66f5e16a90e2f6de445612abd234f86837d390b947421"}, - {file = "praat_parselmouth-0.4.3-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f41d121c4d2322ff12808bb2c4490609f750f89064170e327dfd74fca13cc212"}, - {file = "praat_parselmouth-0.4.3-cp38-cp38-win32.whl", hash = "sha256:9af9945db11fab0e1ed29ad20f7c97a3e7a8d016328ad6d7237a0d7819db075e"}, - {file = "praat_parselmouth-0.4.3-cp38-cp38-win_amd64.whl", hash = "sha256:ae0c63c432e8216d7c70da44131f51c845fb81d48ac04eb5f39ebcfae34624be"}, - {file = "praat_parselmouth-0.4.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8e25658af5a87ed502753de6924c51bf3400d4078e67a611b5874ab08b478fdb"}, - {file = "praat_parselmouth-0.4.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7aa2ffd0c6e47feda35343a9d6722b2558f3677a4a51bf5ec864f27ab80e2f42"}, - {file = "praat_parselmouth-0.4.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3b245d9457ab39f12142da160cda12c4c2a58d9b916e5bb33e6b3ac267882d46"}, - {file = "praat_parselmouth-0.4.3-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:da9779a694941074bc5b199dd3cb41ad4af3306552f06af8dbfdea6ab0a87dec"}, - {file = "praat_parselmouth-0.4.3-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:cfa141c04fd8a0477f91c168878112098a25cbac7ac4a20de627bc9293ee4667"}, - {file = "praat_parselmouth-0.4.3-cp39-cp39-win32.whl", hash = "sha256:6941fe602802fd57ecbedecd612b41493b7d1c6bf722ac0cbf3f47f805fbbd43"}, 
- {file = "praat_parselmouth-0.4.3-cp39-cp39-win_amd64.whl", hash = "sha256:5252496e0391754a642973837670c56ecd39c8e0a1f7ec6e6b60b0cd2cc9f51d"}, - {file = "praat_parselmouth-0.4.3-pp27-pypy_73-macosx_10_9_x86_64.whl", hash = "sha256:fd7c143c6511807b67c92b3ab94733746c0ae3a7b4ba52d6763585c4d459061d"}, - {file = "praat_parselmouth-0.4.3-pp27-pypy_73-manylinux2010_x86_64.whl", hash = "sha256:7ad0739ad6c102817c7d43b67b7270f78cb431eb72b6ecd9a17e354d1b379deb"}, - {file = "praat_parselmouth-0.4.3-pp27-pypy_73-win32.whl", hash = "sha256:f5e98ec1f41efba90bedab358cff8e6a3c6473978e1f42b55d0977e580efe673"}, - {file = "praat_parselmouth-0.4.3-pp36-pypy36_pp73-macosx_10_9_x86_64.whl", hash = "sha256:7b58c1c8fd967446f6d74775b5d9bceadfe35a928fa5f192d4d03d80cb005d92"}, - {file = "praat_parselmouth-0.4.3-pp36-pypy36_pp73-manylinux2010_x86_64.whl", hash = "sha256:d217df07c770156fa284aff3e7a5c11eb43e37f0226730d729d6b45be8a7c4d7"}, - {file = "praat_parselmouth-0.4.3-pp36-pypy36_pp73-win32.whl", hash = "sha256:29cb47438989f8155c3b3dca987afd48999dec71e4b79564aa7e922c3c5c1f9a"}, - {file = "praat_parselmouth-0.4.3-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:5f772b4a097654883f4bba41efae419f9ebdd5e83ef7a857e547100d26663e2c"}, - {file = "praat_parselmouth-0.4.3-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:bf9634a6986732dc43a88b3a16a0000cff903da1db6556b7959a6a4897f25570"}, - {file = "praat_parselmouth-0.4.3-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:fab1bbb6a88f47cb5d0db07a4fd6d88b9294d2775a7556aeb459e96ac372e29f"}, - {file = "praat_parselmouth-0.4.3-pp37-pypy37_pp73-win32.whl", hash = "sha256:261f03f95f25943da2cf746599e47acfcf79b7fc823c871571901d6c97bad948"}, - {file = "praat_parselmouth-0.4.3-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:199b8df2659a1e6f30e9ae3064b0a28a661d834d2bccb56d22051c40cc348817"}, - {file = "praat_parselmouth-0.4.3-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ef1f3f6bd08cc410d0d595f6a9c7dd72558e30ad3bd7949c94ea4e07a2de2605"}, - {file = "praat_parselmouth-0.4.3-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:28a61b7a3cf95a53554dd3ebb4f48e991d4b913ae2d2fbc3868a4e864d69794f"}, - {file = "praat_parselmouth-0.4.3-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:488833ee33690fa1a57a3c429d286e42e6882748f5c3d28dc50889abec12b8c2"}, - {file = "praat_parselmouth-0.4.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:10f181e199c47fa90fe7cad065275f7f3ccda2de6febf86394cf96aa48531079"}, - {file = "praat_parselmouth-0.4.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:52702bc5cdf59b2b4db87448fe9042307e5ebce6b67ee5ea55c2b8627ce803e0"}, - {file = "praat_parselmouth-0.4.3-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7d4f5d7c701517986654365f0a41b8b4a610a2ddc0365da60e48c098774259b"}, - {file = "praat_parselmouth-0.4.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4dc013608a536ad74efdc3242421cabfcb8cb2e9cd1259ec9de9aeaa141c2d14"}, - {file = "praat_parselmouth-0.4.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:d593065ed1500d305d9cf3d20f5ac7e3671061c3c073ef6e94e97817a664d399"}, -] - -[package.dependencies] -numpy = ">=1.7.0" - -[[package]] -name = "protobuf" -version = "3.20.3" -description = "Protocol Buffers" -optional = false -python-versions = ">=3.7" -files = [ - {file = "protobuf-3.20.3-cp310-cp310-manylinux2014_aarch64.whl", hash = 
"sha256:f4bd856d702e5b0d96a00ec6b307b0f51c1982c2bf9c0052cf9019e9a544ba99"}, - {file = "protobuf-3.20.3-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:9aae4406ea63d825636cc11ffb34ad3379335803216ee3a856787bcf5ccc751e"}, - {file = "protobuf-3.20.3-cp310-cp310-win32.whl", hash = "sha256:28545383d61f55b57cf4df63eebd9827754fd2dc25f80c5253f9184235db242c"}, - {file = "protobuf-3.20.3-cp310-cp310-win_amd64.whl", hash = "sha256:67a3598f0a2dcbc58d02dd1928544e7d88f764b47d4a286202913f0b2801c2e7"}, - {file = "protobuf-3.20.3-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:899dc660cd599d7352d6f10d83c95df430a38b410c1b66b407a6b29265d66469"}, - {file = "protobuf-3.20.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e64857f395505ebf3d2569935506ae0dfc4a15cb80dc25261176c784662cdcc4"}, - {file = "protobuf-3.20.3-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:d9e4432ff660d67d775c66ac42a67cf2453c27cb4d738fc22cb53b5d84c135d4"}, - {file = "protobuf-3.20.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:74480f79a023f90dc6e18febbf7b8bac7508420f2006fabd512013c0c238f454"}, - {file = "protobuf-3.20.3-cp37-cp37m-win32.whl", hash = "sha256:b6cc7ba72a8850621bfec987cb72623e703b7fe2b9127a161ce61e61558ad905"}, - {file = "protobuf-3.20.3-cp37-cp37m-win_amd64.whl", hash = "sha256:8c0c984a1b8fef4086329ff8dd19ac77576b384079247c770f29cc8ce3afa06c"}, - {file = "protobuf-3.20.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:de78575669dddf6099a8a0f46a27e82a1783c557ccc38ee620ed8cc96d3be7d7"}, - {file = "protobuf-3.20.3-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:f4c42102bc82a51108e449cbb32b19b180022941c727bac0cfd50170341f16ee"}, - {file = "protobuf-3.20.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:44246bab5dd4b7fbd3c0c80b6f16686808fab0e4aca819ade6e8d294a29c7050"}, - {file = "protobuf-3.20.3-cp38-cp38-win32.whl", hash = "sha256:c02ce36ec760252242a33967d51c289fd0e1c0e6e5cc9397e2279177716add86"}, - {file = "protobuf-3.20.3-cp38-cp38-win_amd64.whl", hash = "sha256:447d43819997825d4e71bf5769d869b968ce96848b6479397e29fc24c4a5dfe9"}, - {file = "protobuf-3.20.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:398a9e0c3eaceb34ec1aee71894ca3299605fa8e761544934378bbc6c97de23b"}, - {file = "protobuf-3.20.3-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:bf01b5720be110540be4286e791db73f84a2b721072a3711efff6c324cdf074b"}, - {file = "protobuf-3.20.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:daa564862dd0d39c00f8086f88700fdbe8bc717e993a21e90711acfed02f2402"}, - {file = "protobuf-3.20.3-cp39-cp39-win32.whl", hash = "sha256:819559cafa1a373b7096a482b504ae8a857c89593cf3a25af743ac9ecbd23480"}, - {file = "protobuf-3.20.3-cp39-cp39-win_amd64.whl", hash = "sha256:03038ac1cfbc41aa21f6afcbcd357281d7521b4157926f30ebecc8d4ea59dcb7"}, - {file = "protobuf-3.20.3-py2.py3-none-any.whl", hash = "sha256:a7ca6d488aa8ff7f329d4c545b2dbad8ac31464f1d8b1c87ad1346717731e4db"}, - {file = "protobuf-3.20.3.tar.gz", hash = "sha256:2e3427429c9cffebf259491be0af70189607f365c2f41c7c3764af6f337105f2"}, -] - -[[package]] -name = "pyasn1" -version = "0.4.8" -description = "ASN.1 types and codecs" -optional = false -python-versions = "*" -files = [ - {file = "pyasn1-0.4.8-py2.py3-none-any.whl", hash = "sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d"}, - {file = "pyasn1-0.4.8.tar.gz", hash = "sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba"}, -] - -[[package]] -name = "pyasn1-modules" 
-version = "0.2.8" -description = "A collection of ASN.1-based protocols modules." -optional = false -python-versions = "*" -files = [ - {file = "pyasn1-modules-0.2.8.tar.gz", hash = "sha256:905f84c712230b2c592c19470d3ca8d552de726050d1d1716282a1f6146be65e"}, - {file = "pyasn1_modules-0.2.8-py2.py3-none-any.whl", hash = "sha256:a50b808ffeb97cb3601dd25981f6b016cbb3d31fbf57a8b8a87428e6158d0c74"}, -] - -[package.dependencies] -pyasn1 = ">=0.4.6,<0.5.0" - -[[package]] -name = "pycparser" -version = "2.21" -description = "C parser in Python" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -files = [ - {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, - {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"}, -] - -[[package]] -name = "pydantic" -version = "1.10.7" -description = "Data validation and settings management using python type hints" -optional = false -python-versions = ">=3.7" -files = [ - {file = "pydantic-1.10.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e79e999e539872e903767c417c897e729e015872040e56b96e67968c3b918b2d"}, - {file = "pydantic-1.10.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:01aea3a42c13f2602b7ecbbea484a98169fb568ebd9e247593ea05f01b884b2e"}, - {file = "pydantic-1.10.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:516f1ed9bc2406a0467dd777afc636c7091d71f214d5e413d64fef45174cfc7a"}, - {file = "pydantic-1.10.7-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae150a63564929c675d7f2303008d88426a0add46efd76c3fc797cd71cb1b46f"}, - {file = "pydantic-1.10.7-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:ecbbc51391248116c0a055899e6c3e7ffbb11fb5e2a4cd6f2d0b93272118a209"}, - {file = "pydantic-1.10.7-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f4a2b50e2b03d5776e7f21af73e2070e1b5c0d0df255a827e7c632962f8315af"}, - {file = "pydantic-1.10.7-cp310-cp310-win_amd64.whl", hash = "sha256:a7cd2251439988b413cb0a985c4ed82b6c6aac382dbaff53ae03c4b23a70e80a"}, - {file = "pydantic-1.10.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:68792151e174a4aa9e9fc1b4e653e65a354a2fa0fed169f7b3d09902ad2cb6f1"}, - {file = "pydantic-1.10.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dfe2507b8ef209da71b6fb5f4e597b50c5a34b78d7e857c4f8f3115effaef5fe"}, - {file = "pydantic-1.10.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10a86d8c8db68086f1e30a530f7d5f83eb0685e632e411dbbcf2d5c0150e8dcd"}, - {file = "pydantic-1.10.7-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d75ae19d2a3dbb146b6f324031c24f8a3f52ff5d6a9f22f0683694b3afcb16fb"}, - {file = "pydantic-1.10.7-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:464855a7ff7f2cc2cf537ecc421291b9132aa9c79aef44e917ad711b4a93163b"}, - {file = "pydantic-1.10.7-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:193924c563fae6ddcb71d3f06fa153866423ac1b793a47936656e806b64e24ca"}, - {file = "pydantic-1.10.7-cp311-cp311-win_amd64.whl", hash = "sha256:b4a849d10f211389502059c33332e91327bc154acc1845f375a99eca3afa802d"}, - {file = "pydantic-1.10.7-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:cc1dde4e50a5fc1336ee0581c1612215bc64ed6d28d2c7c6f25d2fe3e7c3e918"}, - {file = "pydantic-1.10.7-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:e0cfe895a504c060e5d36b287ee696e2fdad02d89e0d895f83037245218a87fe"}, - {file = "pydantic-1.10.7-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:670bb4683ad1e48b0ecb06f0cfe2178dcf74ff27921cdf1606e527d2617a81ee"}, - {file = "pydantic-1.10.7-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:950ce33857841f9a337ce07ddf46bc84e1c4946d2a3bba18f8280297157a3fd1"}, - {file = "pydantic-1.10.7-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:c15582f9055fbc1bfe50266a19771bbbef33dd28c45e78afbe1996fd70966c2a"}, - {file = "pydantic-1.10.7-cp37-cp37m-win_amd64.whl", hash = "sha256:82dffb306dd20bd5268fd6379bc4bfe75242a9c2b79fec58e1041fbbdb1f7914"}, - {file = "pydantic-1.10.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8c7f51861d73e8b9ddcb9916ae7ac39fb52761d9ea0df41128e81e2ba42886cd"}, - {file = "pydantic-1.10.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6434b49c0b03a51021ade5c4daa7d70c98f7a79e95b551201fff682fc1661245"}, - {file = "pydantic-1.10.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64d34ab766fa056df49013bb6e79921a0265204c071984e75a09cbceacbbdd5d"}, - {file = "pydantic-1.10.7-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:701daea9ffe9d26f97b52f1d157e0d4121644f0fcf80b443248434958fd03dc3"}, - {file = "pydantic-1.10.7-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:cf135c46099ff3f919d2150a948ce94b9ce545598ef2c6c7bf55dca98a304b52"}, - {file = "pydantic-1.10.7-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b0f85904f73161817b80781cc150f8b906d521fa11e3cdabae19a581c3606209"}, - {file = "pydantic-1.10.7-cp38-cp38-win_amd64.whl", hash = "sha256:9f6f0fd68d73257ad6685419478c5aece46432f4bdd8d32c7345f1986496171e"}, - {file = "pydantic-1.10.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c230c0d8a322276d6e7b88c3f7ce885f9ed16e0910354510e0bae84d54991143"}, - {file = "pydantic-1.10.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:976cae77ba6a49d80f461fd8bba183ff7ba79f44aa5cfa82f1346b5626542f8e"}, - {file = "pydantic-1.10.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d45fc99d64af9aaf7e308054a0067fdcd87ffe974f2442312372dfa66e1001d"}, - {file = "pydantic-1.10.7-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d2a5ebb48958754d386195fe9e9c5106f11275867051bf017a8059410e9abf1f"}, - {file = "pydantic-1.10.7-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:abfb7d4a7cd5cc4e1d1887c43503a7c5dd608eadf8bc615413fc498d3e4645cd"}, - {file = "pydantic-1.10.7-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:80b1fab4deb08a8292d15e43a6edccdffa5377a36a4597bb545b93e79c5ff0a5"}, - {file = "pydantic-1.10.7-cp39-cp39-win_amd64.whl", hash = "sha256:d71e69699498b020ea198468e2480a2f1e7433e32a3a99760058c6520e2bea7e"}, - {file = "pydantic-1.10.7-py3-none-any.whl", hash = "sha256:0cd181f1d0b1d00e2b705f1bf1ac7799a2d938cce3376b8007df62b29be3c2c6"}, - {file = "pydantic-1.10.7.tar.gz", hash = "sha256:cfc83c0678b6ba51b0532bea66860617c4cd4251ecf76e9846fa5a9f3454e97e"}, -] - -[package.dependencies] -typing-extensions = ">=4.2.0" - -[package.extras] -dotenv = ["python-dotenv (>=0.10.4)"] -email = ["email-validator (>=1.0.3)"] - -[[package]] -name = "pydub" -version = "0.25.1" -description = "Manipulate audio with an simple and easy high level interface" -optional = false -python-versions = "*" -files = [ - {file = "pydub-0.25.1-py2.py3-none-any.whl", hash = 
"sha256:65617e33033874b59d87db603aa1ed450633288aefead953b30bded59cb599a6"}, - {file = "pydub-0.25.1.tar.gz", hash = "sha256:980a33ce9949cab2a569606b65674d748ecbca4f0796887fd6f46173a7b0d30f"}, -] - -[[package]] -name = "pygments" -version = "2.15.1" -description = "Pygments is a syntax highlighting package written in Python." -optional = false -python-versions = ">=3.7" -files = [ - {file = "Pygments-2.15.1-py3-none-any.whl", hash = "sha256:db2db3deb4b4179f399a09054b023b6a586b76499d36965813c71aa8ed7b5fd1"}, - {file = "Pygments-2.15.1.tar.gz", hash = "sha256:8ace4d3c1dd481894b2005f560ead0f9f19ee64fe983366be1a21e171d12775c"}, -] - -[package.extras] -plugins = ["importlib-metadata"] - -[[package]] -name = "pyparsing" -version = "3.0.9" -description = "pyparsing module - Classes and methods to define and execute parsing grammars" -optional = false -python-versions = ">=3.6.8" -files = [ - {file = "pyparsing-3.0.9-py3-none-any.whl", hash = "sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc"}, - {file = "pyparsing-3.0.9.tar.gz", hash = "sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb"}, -] - -[package.extras] -diagrams = ["jinja2", "railroad-diagrams"] - -[[package]] -name = "pyrsistent" -version = "0.19.3" -description = "Persistent/Functional/Immutable data structures" -optional = false -python-versions = ">=3.7" -files = [ - {file = "pyrsistent-0.19.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:20460ac0ea439a3e79caa1dbd560344b64ed75e85d8703943e0b66c2a6150e4a"}, - {file = "pyrsistent-0.19.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4c18264cb84b5e68e7085a43723f9e4c1fd1d935ab240ce02c0324a8e01ccb64"}, - {file = "pyrsistent-0.19.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4b774f9288dda8d425adb6544e5903f1fb6c273ab3128a355c6b972b7df39dcf"}, - {file = "pyrsistent-0.19.3-cp310-cp310-win32.whl", hash = "sha256:5a474fb80f5e0d6c9394d8db0fc19e90fa540b82ee52dba7d246a7791712f74a"}, - {file = "pyrsistent-0.19.3-cp310-cp310-win_amd64.whl", hash = "sha256:49c32f216c17148695ca0e02a5c521e28a4ee6c5089f97e34fe24163113722da"}, - {file = "pyrsistent-0.19.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f0774bf48631f3a20471dd7c5989657b639fd2d285b861237ea9e82c36a415a9"}, - {file = "pyrsistent-0.19.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ab2204234c0ecd8b9368dbd6a53e83c3d4f3cab10ecaf6d0e772f456c442393"}, - {file = "pyrsistent-0.19.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e42296a09e83028b3476f7073fcb69ffebac0e66dbbfd1bd847d61f74db30f19"}, - {file = "pyrsistent-0.19.3-cp311-cp311-win32.whl", hash = "sha256:64220c429e42a7150f4bfd280f6f4bb2850f95956bde93c6fda1b70507af6ef3"}, - {file = "pyrsistent-0.19.3-cp311-cp311-win_amd64.whl", hash = "sha256:016ad1afadf318eb7911baa24b049909f7f3bb2c5b1ed7b6a8f21db21ea3faa8"}, - {file = "pyrsistent-0.19.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c4db1bd596fefd66b296a3d5d943c94f4fac5bcd13e99bffe2ba6a759d959a28"}, - {file = "pyrsistent-0.19.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aeda827381f5e5d65cced3024126529ddc4289d944f75e090572c77ceb19adbf"}, - {file = "pyrsistent-0.19.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:42ac0b2f44607eb92ae88609eda931a4f0dfa03038c44c772e07f43e738bcac9"}, - {file = 
"pyrsistent-0.19.3-cp37-cp37m-win32.whl", hash = "sha256:e8f2b814a3dc6225964fa03d8582c6e0b6650d68a232df41e3cc1b66a5d2f8d1"}, - {file = "pyrsistent-0.19.3-cp37-cp37m-win_amd64.whl", hash = "sha256:c9bb60a40a0ab9aba40a59f68214eed5a29c6274c83b2cc206a359c4a89fa41b"}, - {file = "pyrsistent-0.19.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:a2471f3f8693101975b1ff85ffd19bb7ca7dd7c38f8a81701f67d6b4f97b87d8"}, - {file = "pyrsistent-0.19.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc5d149f31706762c1f8bda2e8c4f8fead6e80312e3692619a75301d3dbb819a"}, - {file = "pyrsistent-0.19.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3311cb4237a341aa52ab8448c27e3a9931e2ee09561ad150ba94e4cfd3fc888c"}, - {file = "pyrsistent-0.19.3-cp38-cp38-win32.whl", hash = "sha256:f0e7c4b2f77593871e918be000b96c8107da48444d57005b6a6bc61fb4331b2c"}, - {file = "pyrsistent-0.19.3-cp38-cp38-win_amd64.whl", hash = "sha256:c147257a92374fde8498491f53ffa8f4822cd70c0d85037e09028e478cababb7"}, - {file = "pyrsistent-0.19.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b735e538f74ec31378f5a1e3886a26d2ca6351106b4dfde376a26fc32a044edc"}, - {file = "pyrsistent-0.19.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99abb85579e2165bd8522f0c0138864da97847875ecbd45f3e7e2af569bfc6f2"}, - {file = "pyrsistent-0.19.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3a8cb235fa6d3fd7aae6a4f1429bbb1fec1577d978098da1252f0489937786f3"}, - {file = "pyrsistent-0.19.3-cp39-cp39-win32.whl", hash = "sha256:c74bed51f9b41c48366a286395c67f4e894374306b197e62810e0fdaf2364da2"}, - {file = "pyrsistent-0.19.3-cp39-cp39-win_amd64.whl", hash = "sha256:878433581fc23e906d947a6814336eee031a00e6defba224234169ae3d3d6a98"}, - {file = "pyrsistent-0.19.3-py3-none-any.whl", hash = "sha256:ccf0d6bd208f8111179f0c26fdf84ed7c3891982f2edaeae7422575f47e66b64"}, - {file = "pyrsistent-0.19.3.tar.gz", hash = "sha256:1a2994773706bbb4995c31a97bc94f1418314923bd1048c6d964837040376440"}, -] - -[[package]] -name = "python-dateutil" -version = "2.8.2" -description = "Extensions to the standard Python datetime module" -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" -files = [ - {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, - {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, -] - -[package.dependencies] -six = ">=1.5" - -[[package]] -name = "python-multipart" -version = "0.0.6" -description = "A streaming multipart parser for Python" -optional = false -python-versions = ">=3.7" -files = [ - {file = "python_multipart-0.0.6-py3-none-any.whl", hash = "sha256:ee698bab5ef148b0a760751c261902cd096e57e10558e11aca17646b74ee1c18"}, - {file = "python_multipart-0.0.6.tar.gz", hash = "sha256:e9925a80bb668529f1b67c7fdb0a5dacdd7cbfc6fb0bff3ea443fe22bdd62132"}, -] - -[package.extras] -dev = ["atomicwrites (==1.2.1)", "attrs (==19.2.0)", "coverage (==6.5.0)", "hatch", "invoke (==1.7.3)", "more-itertools (==4.3.0)", "pbr (==4.3.0)", "pluggy (==1.0.0)", "py (==1.11.0)", "pytest (==7.2.0)", "pytest-cov (==4.0.0)", "pytest-timeout (==2.1.0)", "pyyaml (==5.1)"] - -[[package]] -name = "pytz" -version = "2023.3" -description = "World timezone definitions, modern and historical" -optional = false -python-versions = "*" -files = [ - {file = 
"pytz-2023.3-py2.py3-none-any.whl", hash = "sha256:a151b3abb88eda1d4e34a9814df37de2a80e301e68ba0fd856fb9b46bfbbbffb"}, - {file = "pytz-2023.3.tar.gz", hash = "sha256:1d8ce29db189191fb55338ee6d0387d82ab59f3d00eac103412d64e0ebd0c588"}, -] - -[[package]] -name = "pywin32" -version = "306" -description = "Python for Window Extensions" -optional = false -python-versions = "*" -files = [ - {file = "pywin32-306-cp310-cp310-win32.whl", hash = "sha256:06d3420a5155ba65f0b72f2699b5bacf3109f36acbe8923765c22938a69dfc8d"}, - {file = "pywin32-306-cp310-cp310-win_amd64.whl", hash = "sha256:84f4471dbca1887ea3803d8848a1616429ac94a4a8d05f4bc9c5dcfd42ca99c8"}, - {file = "pywin32-306-cp311-cp311-win32.whl", hash = "sha256:e65028133d15b64d2ed8f06dd9fbc268352478d4f9289e69c190ecd6818b6407"}, - {file = "pywin32-306-cp311-cp311-win_amd64.whl", hash = "sha256:a7639f51c184c0272e93f244eb24dafca9b1855707d94c192d4a0b4c01e1100e"}, - {file = "pywin32-306-cp311-cp311-win_arm64.whl", hash = "sha256:70dba0c913d19f942a2db25217d9a1b726c278f483a919f1abfed79c9cf64d3a"}, - {file = "pywin32-306-cp312-cp312-win32.whl", hash = "sha256:383229d515657f4e3ed1343da8be101000562bf514591ff383ae940cad65458b"}, - {file = "pywin32-306-cp312-cp312-win_amd64.whl", hash = "sha256:37257794c1ad39ee9be652da0462dc2e394c8159dfd913a8a4e8eb6fd346da0e"}, - {file = "pywin32-306-cp312-cp312-win_arm64.whl", hash = "sha256:5821ec52f6d321aa59e2db7e0a35b997de60c201943557d108af9d4ae1ec7040"}, - {file = "pywin32-306-cp37-cp37m-win32.whl", hash = "sha256:1c73ea9a0d2283d889001998059f5eaaba3b6238f767c9cf2833b13e6a685f65"}, - {file = "pywin32-306-cp37-cp37m-win_amd64.whl", hash = "sha256:72c5f621542d7bdd4fdb716227be0dd3f8565c11b280be6315b06ace35487d36"}, - {file = "pywin32-306-cp38-cp38-win32.whl", hash = "sha256:e4c092e2589b5cf0d365849e73e02c391c1349958c5ac3e9d5ccb9a28e017b3a"}, - {file = "pywin32-306-cp38-cp38-win_amd64.whl", hash = "sha256:e8ac1ae3601bee6ca9f7cb4b5363bf1c0badb935ef243c4733ff9a393b1690c0"}, - {file = "pywin32-306-cp39-cp39-win32.whl", hash = "sha256:e25fd5b485b55ac9c057f67d94bc203f3f6595078d1fb3b458c9c28b7153a802"}, - {file = "pywin32-306-cp39-cp39-win_amd64.whl", hash = "sha256:39b61c15272833b5c329a2989999dcae836b1eed650252ab1b7bfbe1d59f30f4"}, -] - -[[package]] -name = "pyworld" -version = "0.3.2" -description = "PyWorld is a Python wrapper for WORLD vocoder." 
-optional = false -python-versions = "*" -files = [ - {file = "pyworld-0.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:688730fa5394709a185061e5a58e7a614b4548d814eeecc1dc825f73af53a9aa"}, - {file = "pyworld-0.3.2-cp36-cp36m-win32.whl", hash = "sha256:1e110e2f95d45b0765f4ba4e49b389f9b931c9c438cd69774dce20699cc6dc7d"}, - {file = "pyworld-0.3.2-cp36-cp36m-win_amd64.whl", hash = "sha256:e858668185a177e9e30c0ff12de3e166b39124c14b424ba3be31418694dcb2b7"}, - {file = "pyworld-0.3.2-cp37-cp37m-win32.whl", hash = "sha256:b5325e7a08f104a9bf533d54423546bd3ef05953b80b79a8ced34efbb892862b"}, - {file = "pyworld-0.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:fddd503ac264810221d9460bfdc1454c5c1313214e1c58a4ddd9417699f99bc8"}, - {file = "pyworld-0.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:502fbf577f4e56a497b3ad8c29434ec423eabc4674b93fa11046837d297c97be"}, - {file = "pyworld-0.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:a8ea62219b9bce0e514ff05ee80cfbc1248b165d8d802f00b9b8754510701f3e"}, - {file = "pyworld-0.3.2.tar.gz", hash = "sha256:668d09842c3cfa74b1f6edabdb0058a64c04f9cf17b93883e6da811e1204ad4d"}, -] - -[package.dependencies] -cython = "*" -numpy = "*" - -[package.extras] -sdist = ["cython", "numpy"] -test = ["nose"] - -[[package]] -name = "pyyaml" -version = "6.0" -description = "YAML parser and emitter for Python" -optional = false -python-versions = ">=3.6" -files = [ - {file = "PyYAML-6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53"}, - {file = "PyYAML-6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c"}, - {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc"}, - {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b"}, - {file = "PyYAML-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5"}, - {file = "PyYAML-6.0-cp310-cp310-win32.whl", hash = "sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513"}, - {file = "PyYAML-6.0-cp310-cp310-win_amd64.whl", hash = "sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a"}, - {file = "PyYAML-6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d4b0ba9512519522b118090257be113b9468d804b19d63c71dbcf4a48fa32358"}, - {file = "PyYAML-6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:81957921f441d50af23654aa6c5e5eaf9b06aba7f0a19c18a538dc7ef291c5a1"}, - {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:afa17f5bc4d1b10afd4466fd3a44dc0e245382deca5b3c353d8b757f9e3ecb8d"}, - {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dbad0e9d368bb989f4515da330b88a057617d16b6a8245084f1b05400f24609f"}, - {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:432557aa2c09802be39460360ddffd48156e30721f5e8d917f01d31694216782"}, - {file = "PyYAML-6.0-cp311-cp311-win32.whl", hash = "sha256:bfaef573a63ba8923503d27530362590ff4f576c626d86a9fed95822a8255fd7"}, - {file = "PyYAML-6.0-cp311-cp311-win_amd64.whl", hash = "sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf"}, - {file = 
"PyYAML-6.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86"}, - {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f"}, - {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92"}, - {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4"}, - {file = "PyYAML-6.0-cp36-cp36m-win32.whl", hash = "sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293"}, - {file = "PyYAML-6.0-cp36-cp36m-win_amd64.whl", hash = "sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57"}, - {file = "PyYAML-6.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c"}, - {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0"}, - {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4"}, - {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9"}, - {file = "PyYAML-6.0-cp37-cp37m-win32.whl", hash = "sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737"}, - {file = "PyYAML-6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d"}, - {file = "PyYAML-6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b"}, - {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba"}, - {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34"}, - {file = "PyYAML-6.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287"}, - {file = "PyYAML-6.0-cp38-cp38-win32.whl", hash = "sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78"}, - {file = "PyYAML-6.0-cp38-cp38-win_amd64.whl", hash = "sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07"}, - {file = "PyYAML-6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b"}, - {file = "PyYAML-6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174"}, - {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803"}, - {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3"}, - {file = "PyYAML-6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = 
"sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0"}, - {file = "PyYAML-6.0-cp39-cp39-win32.whl", hash = "sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb"}, - {file = "PyYAML-6.0-cp39-cp39-win_amd64.whl", hash = "sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c"}, - {file = "PyYAML-6.0.tar.gz", hash = "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2"}, -] - -[[package]] -name = "regex" -version = "2023.3.23" -description = "Alternative regular expression module, to replace re." -optional = false -python-versions = ">=3.8" -files = [ - {file = "regex-2023.3.23-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:845a5e2d84389c4ddada1a9b95c055320070f18bb76512608374aca00d22eca8"}, - {file = "regex-2023.3.23-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:87d9951f5a538dd1d016bdc0dcae59241d15fa94860964833a54d18197fcd134"}, - {file = "regex-2023.3.23-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37ae17d3be44c0b3f782c28ae9edd8b47c1f1776d4cabe87edc0b98e1f12b021"}, - {file = "regex-2023.3.23-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0b8eb1e3bca6b48dc721818a60ae83b8264d4089a4a41d62be6d05316ec38e15"}, - {file = "regex-2023.3.23-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:df45fac182ebc3c494460c644e853515cc24f5ad9da05f8ffb91da891bfee879"}, - {file = "regex-2023.3.23-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7006105b10b59971d3b248ad75acc3651c7e4cf54d81694df5a5130a3c3f7ea"}, - {file = "regex-2023.3.23-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:93f3f1aa608380fe294aa4cb82e2afda07a7598e828d0341e124b8fd9327c715"}, - {file = "regex-2023.3.23-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:787954f541ab95d8195d97b0b8cf1dc304424adb1e07365967e656b92b38a699"}, - {file = "regex-2023.3.23-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:20abe0bdf03630fe92ccafc45a599bca8b3501f48d1de4f7d121153350a2f77d"}, - {file = "regex-2023.3.23-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:11d00c31aeab9a6e0503bc77e73ed9f4527b3984279d997eb145d7c7be6268fd"}, - {file = "regex-2023.3.23-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:d5bbe0e1511b844794a3be43d6c145001626ba9a6c1db8f84bdc724e91131d9d"}, - {file = "regex-2023.3.23-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:ea3c0cb56eadbf4ab2277e7a095676370b3e46dbfc74d5c383bd87b0d6317910"}, - {file = "regex-2023.3.23-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d895b4c863059a4934d3e874b90998df774644a41b349ebb330f85f11b4ef2c0"}, - {file = "regex-2023.3.23-cp310-cp310-win32.whl", hash = "sha256:9d764514d19b4edcc75fd8cb1423448ef393e8b6cbd94f38cab983ab1b75855d"}, - {file = "regex-2023.3.23-cp310-cp310-win_amd64.whl", hash = "sha256:11d1f2b7a0696dc0310de0efb51b1f4d813ad4401fe368e83c0c62f344429f98"}, - {file = "regex-2023.3.23-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8a9c63cde0eaa345795c0fdeb19dc62d22e378c50b0bc67bf4667cd5b482d98b"}, - {file = "regex-2023.3.23-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dd7200b4c27b68cf9c9646da01647141c6db09f48cc5b51bc588deaf8e98a797"}, - {file = "regex-2023.3.23-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22720024b90a6ba673a725dcc62e10fb1111b889305d7c6b887ac7466b74bedb"}, - {file = 
"regex-2023.3.23-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6b190a339090e6af25f4a5fd9e77591f6d911cc7b96ecbb2114890b061be0ac1"}, - {file = "regex-2023.3.23-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e76b6fc0d8e9efa39100369a9b3379ce35e20f6c75365653cf58d282ad290f6f"}, - {file = "regex-2023.3.23-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7868b8f218bf69a2a15402fde08b08712213a1f4b85a156d90473a6fb6b12b09"}, - {file = "regex-2023.3.23-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2472428efc4127374f494e570e36b30bb5e6b37d9a754f7667f7073e43b0abdd"}, - {file = "regex-2023.3.23-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c37df2a060cb476d94c047b18572ee2b37c31f831df126c0da3cd9227b39253d"}, - {file = "regex-2023.3.23-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4479f9e2abc03362df4045b1332d4a2b7885b245a30d4f4b051c4083b97d95d8"}, - {file = "regex-2023.3.23-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:e2396e0678167f2d0c197da942b0b3fb48fee2f0b5915a0feb84d11b6686afe6"}, - {file = "regex-2023.3.23-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:75f288c60232a5339e0ff2fa05779a5e9c74e9fc085c81e931d4a264501e745b"}, - {file = "regex-2023.3.23-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c869260aa62cee21c5eb171a466c0572b5e809213612ef8d495268cd2e34f20d"}, - {file = "regex-2023.3.23-cp311-cp311-win32.whl", hash = "sha256:25f0532fd0c53e96bad84664171969de9673b4131f2297f1db850d3918d58858"}, - {file = "regex-2023.3.23-cp311-cp311-win_amd64.whl", hash = "sha256:5ccfafd98473e007cebf7da10c1411035b7844f0f204015efd050601906dbb53"}, - {file = "regex-2023.3.23-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6572ff287176c0fb96568adb292674b421fa762153ed074d94b1d939ed92c253"}, - {file = "regex-2023.3.23-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a610e0adfcb0fc84ea25f6ea685e39e74cbcd9245a72a9a7aab85ff755a5ed27"}, - {file = "regex-2023.3.23-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:086afe222d58b88b62847bdbd92079b4699350b4acab892f88a935db5707c790"}, - {file = "regex-2023.3.23-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:79e29fd62fa2f597a6754b247356bda14b866131a22444d67f907d6d341e10f3"}, - {file = "regex-2023.3.23-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c07ce8e9eee878a48ebeb32ee661b49504b85e164b05bebf25420705709fdd31"}, - {file = "regex-2023.3.23-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86b036f401895e854de9fefe061518e78d506d8a919cc250dc3416bca03f6f9a"}, - {file = "regex-2023.3.23-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:78ac8dd8e18800bb1f97aad0d73f68916592dddf233b99d2b5cabc562088503a"}, - {file = "regex-2023.3.23-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:539dd010dc35af935b32f248099e38447bbffc10b59c2b542bceead2bed5c325"}, - {file = "regex-2023.3.23-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:9bf4a5626f2a0ea006bf81e8963f498a57a47d58907eaa58f4b3e13be68759d8"}, - {file = "regex-2023.3.23-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:cf86b4328c204c3f315074a61bc1c06f8a75a8e102359f18ce99fbcbbf1951f0"}, - {file = "regex-2023.3.23-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:2848bf76673c83314068241c8d5b7fa9ad9bed866c979875a0e84039349e8fa7"}, - {file = 
"regex-2023.3.23-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:c125a02d22c555e68f7433bac8449992fa1cead525399f14e47c2d98f2f0e467"}, - {file = "regex-2023.3.23-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:cd1671e9d5ac05ce6aa86874dd8dfa048824d1dbe73060851b310c6c1a201a96"}, - {file = "regex-2023.3.23-cp38-cp38-win32.whl", hash = "sha256:fffe57312a358be6ec6baeb43d253c36e5790e436b7bf5b7a38df360363e88e9"}, - {file = "regex-2023.3.23-cp38-cp38-win_amd64.whl", hash = "sha256:dbb3f87e15d3dd76996d604af8678316ad2d7d20faa394e92d9394dfd621fd0c"}, - {file = "regex-2023.3.23-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c88e8c226473b5549fe9616980ea7ca09289246cfbdf469241edf4741a620004"}, - {file = "regex-2023.3.23-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6560776ec19c83f3645bbc5db64a7a5816c9d8fb7ed7201c5bcd269323d88072"}, - {file = "regex-2023.3.23-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b1fc2632c01f42e06173d8dd9bb2e74ab9b0afa1d698058c867288d2c7a31f3"}, - {file = "regex-2023.3.23-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fdf7ad455f1916b8ea5cdbc482d379f6daf93f3867b4232d14699867a5a13af7"}, - {file = "regex-2023.3.23-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5fc33b27b1d800fc5b78d7f7d0f287e35079ecabe68e83d46930cf45690e1c8c"}, - {file = "regex-2023.3.23-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c49552dc938e3588f63f8a78c86f3c9c75301e813bca0bef13bdb4b87ccf364"}, - {file = "regex-2023.3.23-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e152461e9a0aedec7d37fc66ec0fa635eca984777d3d3c3e36f53bf3d3ceb16e"}, - {file = "regex-2023.3.23-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:db034255e72d2995cf581b14bb3fc9c00bdbe6822b49fcd4eef79e1d5f232618"}, - {file = "regex-2023.3.23-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:55ae114da21b7a790b90255ea52d2aa3a0d121a646deb2d3c6a3194e722fc762"}, - {file = "regex-2023.3.23-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ef3f528fe1cc3d139508fe1b22523745aa77b9d6cb5b0bf277f48788ee0b993f"}, - {file = "regex-2023.3.23-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:a81c9ec59ca2303acd1ccd7b9ac409f1e478e40e96f8f79b943be476c5fdb8bb"}, - {file = "regex-2023.3.23-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:cde09c4fdd070772aa2596d97e942eb775a478b32459e042e1be71b739d08b77"}, - {file = "regex-2023.3.23-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3cd9f5dd7b821f141d3a6ca0d5d9359b9221e4f051ca3139320adea9f1679691"}, - {file = "regex-2023.3.23-cp39-cp39-win32.whl", hash = "sha256:7304863f3a652dab5e68e6fb1725d05ebab36ec0390676d1736e0571ebb713ef"}, - {file = "regex-2023.3.23-cp39-cp39-win_amd64.whl", hash = "sha256:54c3fa855a3f7438149de3211738dd9b5f0c733f48b54ae05aa7fce83d48d858"}, - {file = "regex-2023.3.23.tar.gz", hash = "sha256:dc80df325b43ffea5cdea2e3eaa97a44f3dd298262b1c7fe9dbb2a9522b956a7"}, -] - -[[package]] -name = "requests" -version = "2.31.0" -description = "Python HTTP for Humans." 
-optional = false -python-versions = ">=3.7" -files = [ - {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, - {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, -] - -[package.dependencies] -certifi = ">=2017.4.17" -charset-normalizer = ">=2,<4" -idna = ">=2.5,<4" -urllib3 = ">=1.21.1,<3" - -[package.extras] -socks = ["PySocks (>=1.5.6,!=1.5.7)"] -use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] - -[[package]] -name = "requests-oauthlib" -version = "1.3.1" -description = "OAuthlib authentication support for Requests." -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -files = [ - {file = "requests-oauthlib-1.3.1.tar.gz", hash = "sha256:75beac4a47881eeb94d5ea5d6ad31ef88856affe2332b9aafb52c6452ccf0d7a"}, - {file = "requests_oauthlib-1.3.1-py2.py3-none-any.whl", hash = "sha256:2577c501a2fb8d05a304c09d090d6e47c306fef15809d102b327cf8364bddab5"}, -] - -[package.dependencies] -oauthlib = ">=3.0.0" -requests = ">=2.0.0" - -[package.extras] -rsa = ["oauthlib[signedtoken] (>=3.0.0)"] - -[[package]] -name = "resampy" -version = "0.4.2" -description = "Efficient signal resampling" -optional = false -python-versions = "*" -files = [ - {file = "resampy-0.4.2-py3-none-any.whl", hash = "sha256:4340b6c4e685a865621dfcf016e2a3dd49d865446b6025e30fe88567f22e052e"}, - {file = "resampy-0.4.2.tar.gz", hash = "sha256:0a469e6ddb89956f4fd6c88728300e4bbd186fae569dd4fd17dae51a91cbaa15"}, -] - -[package.dependencies] -numba = ">=0.53" -numpy = ">=1.17" - -[package.extras] -design = ["optuna (>=2.10.0)"] -docs = ["numpydoc", "sphinx (!=1.3.1)"] -tests = ["pytest (<8)", "pytest-cov", "scipy (>=1.0)"] - -[[package]] -name = "rfc3986" -version = "1.5.0" -description = "Validating URI References per RFC 3986" -optional = false -python-versions = "*" -files = [ - {file = "rfc3986-1.5.0-py2.py3-none-any.whl", hash = "sha256:a86d6e1f5b1dc238b218b012df0aa79409667bb209e58da56d0b94704e712a97"}, - {file = "rfc3986-1.5.0.tar.gz", hash = "sha256:270aaf10d87d0d4e095063c65bf3ddbc6ee3d0b226328ce21e036f946e421835"}, -] - -[package.dependencies] -idna = {version = "*", optional = true, markers = "extra == \"idna2008\""} - -[package.extras] -idna2008 = ["idna"] - -[[package]] -name = "rsa" -version = "4.9" -description = "Pure-Python RSA implementation" -optional = false -python-versions = ">=3.6,<4" -files = [ - {file = "rsa-4.9-py3-none-any.whl", hash = "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7"}, - {file = "rsa-4.9.tar.gz", hash = "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21"}, -] - -[package.dependencies] -pyasn1 = ">=0.1.3" - -[[package]] -name = "sacrebleu" -version = "2.3.1" -description = "Hassle-free computation of shareable, comparable, and reproducible BLEU, chrF, and TER scores" -optional = false -python-versions = ">=3.6" -files = [ - {file = "sacrebleu-2.3.1-py3-none-any.whl", hash = "sha256:352227b8ca9e04ed509266d1fee6c8cff0ea1417c429f8c684645ad2db8b02e7"}, - {file = "sacrebleu-2.3.1.tar.gz", hash = "sha256:7969b294f15dae84d80fb2b76d30c83b245f49f4ecb1cac79acb553eb93cb537"}, -] - -[package.dependencies] -colorama = "*" -lxml = "*" -numpy = ">=1.17" -portalocker = "*" -regex = "*" -tabulate = ">=0.8.9" - -[package.extras] -ja = ["ipadic (>=1.0,<2.0)", "mecab-python3 (==1.0.5)"] -ko = ["mecab-ko (==1.0.0)", "mecab-ko-dic (>=1.0,<2.0)"] - -[[package]] -name = "scikit-learn" -version = "1.2.2" 
-description = "A set of python modules for machine learning and data mining" -optional = false -python-versions = ">=3.8" -files = [ - {file = "scikit-learn-1.2.2.tar.gz", hash = "sha256:8429aea30ec24e7a8c7ed8a3fa6213adf3814a6efbea09e16e0a0c71e1a1a3d7"}, - {file = "scikit_learn-1.2.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:99cc01184e347de485bf253d19fcb3b1a3fb0ee4cea5ee3c43ec0cc429b6d29f"}, - {file = "scikit_learn-1.2.2-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:e6e574db9914afcb4e11ade84fab084536a895ca60aadea3041e85b8ac963edb"}, - {file = "scikit_learn-1.2.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6fe83b676f407f00afa388dd1fdd49e5c6612e551ed84f3b1b182858f09e987d"}, - {file = "scikit_learn-1.2.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e2642baa0ad1e8f8188917423dd73994bf25429f8893ddbe115be3ca3183584"}, - {file = "scikit_learn-1.2.2-cp310-cp310-win_amd64.whl", hash = "sha256:ad66c3848c0a1ec13464b2a95d0a484fd5b02ce74268eaa7e0c697b904f31d6c"}, - {file = "scikit_learn-1.2.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:dfeaf8be72117eb61a164ea6fc8afb6dfe08c6f90365bde2dc16456e4bc8e45f"}, - {file = "scikit_learn-1.2.2-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:fe0aa1a7029ed3e1dcbf4a5bc675aa3b1bc468d9012ecf6c6f081251ca47f590"}, - {file = "scikit_learn-1.2.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:065e9673e24e0dc5113e2dd2b4ca30c9d8aa2fa90f4c0597241c93b63130d233"}, - {file = "scikit_learn-1.2.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf036ea7ef66115e0d49655f16febfa547886deba20149555a41d28f56fd6d3c"}, - {file = "scikit_learn-1.2.2-cp311-cp311-win_amd64.whl", hash = "sha256:8b0670d4224a3c2d596fd572fb4fa673b2a0ccfb07152688ebd2ea0b8c61025c"}, - {file = "scikit_learn-1.2.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9c710ff9f9936ba8a3b74a455ccf0dcf59b230caa1e9ba0223773c490cab1e51"}, - {file = "scikit_learn-1.2.2-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:2dd3ffd3950e3d6c0c0ef9033a9b9b32d910c61bd06cb8206303fb4514b88a49"}, - {file = "scikit_learn-1.2.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:44b47a305190c28dd8dd73fc9445f802b6ea716669cfc22ab1eb97b335d238b1"}, - {file = "scikit_learn-1.2.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:953236889928d104c2ef14027539f5f2609a47ebf716b8cbe4437e85dce42744"}, - {file = "scikit_learn-1.2.2-cp38-cp38-win_amd64.whl", hash = "sha256:7f69313884e8eb311460cc2f28676d5e400bd929841a2c8eb8742ae78ebf7c20"}, - {file = "scikit_learn-1.2.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8156db41e1c39c69aa2d8599ab7577af53e9e5e7a57b0504e116cc73c39138dd"}, - {file = "scikit_learn-1.2.2-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:fe175ee1dab589d2e1033657c5b6bec92a8a3b69103e3dd361b58014729975c3"}, - {file = "scikit_learn-1.2.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7d5312d9674bed14f73773d2acf15a3272639b981e60b72c9b190a0cffed5bad"}, - {file = "scikit_learn-1.2.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea061bf0283bf9a9f36ea3c5d3231ba2176221bbd430abd2603b1c3b2ed85c89"}, - {file = "scikit_learn-1.2.2-cp39-cp39-win_amd64.whl", hash = "sha256:6477eed40dbce190f9f9e9d0d37e020815825b300121307942ec2110302b66a3"}, -] - -[package.dependencies] -joblib = ">=1.1.1" -numpy = ">=1.17.3" -scipy = ">=1.3.2" -threadpoolctl = ">=2.0.0" - -[package.extras] -benchmark = 
["matplotlib (>=3.1.3)", "memory-profiler (>=0.57.0)", "pandas (>=1.0.5)"] -docs = ["Pillow (>=7.1.2)", "matplotlib (>=3.1.3)", "memory-profiler (>=0.57.0)", "numpydoc (>=1.2.0)", "pandas (>=1.0.5)", "plotly (>=5.10.0)", "pooch (>=1.6.0)", "scikit-image (>=0.16.2)", "seaborn (>=0.9.0)", "sphinx (>=4.0.1)", "sphinx-gallery (>=0.7.0)", "sphinx-prompt (>=1.3.0)", "sphinxext-opengraph (>=0.4.2)"] -examples = ["matplotlib (>=3.1.3)", "pandas (>=1.0.5)", "plotly (>=5.10.0)", "pooch (>=1.6.0)", "scikit-image (>=0.16.2)", "seaborn (>=0.9.0)"] -tests = ["black (>=22.3.0)", "flake8 (>=3.8.2)", "matplotlib (>=3.1.3)", "mypy (>=0.961)", "numpydoc (>=1.2.0)", "pandas (>=1.0.5)", "pooch (>=1.6.0)", "pyamg (>=4.0.0)", "pytest (>=5.3.1)", "pytest-cov (>=2.9.0)", "scikit-image (>=0.16.2)"] - -[[package]] -name = "scipy" -version = "1.9.3" -description = "Fundamental algorithms for scientific computing in Python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "scipy-1.9.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1884b66a54887e21addf9c16fb588720a8309a57b2e258ae1c7986d4444d3bc0"}, - {file = "scipy-1.9.3-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:83b89e9586c62e787f5012e8475fbb12185bafb996a03257e9675cd73d3736dd"}, - {file = "scipy-1.9.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a72d885fa44247f92743fc20732ae55564ff2a519e8302fb7e18717c5355a8b"}, - {file = "scipy-1.9.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d01e1dd7b15bd2449c8bfc6b7cc67d630700ed655654f0dfcf121600bad205c9"}, - {file = "scipy-1.9.3-cp310-cp310-win_amd64.whl", hash = "sha256:68239b6aa6f9c593da8be1509a05cb7f9efe98b80f43a5861cd24c7557e98523"}, - {file = "scipy-1.9.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b41bc822679ad1c9a5f023bc93f6d0543129ca0f37c1ce294dd9d386f0a21096"}, - {file = "scipy-1.9.3-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:90453d2b93ea82a9f434e4e1cba043e779ff67b92f7a0e85d05d286a3625df3c"}, - {file = "scipy-1.9.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83c06e62a390a9167da60bedd4575a14c1f58ca9dfde59830fc42e5197283dab"}, - {file = "scipy-1.9.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:abaf921531b5aeaafced90157db505e10345e45038c39e5d9b6c7922d68085cb"}, - {file = "scipy-1.9.3-cp311-cp311-win_amd64.whl", hash = "sha256:06d2e1b4c491dc7d8eacea139a1b0b295f74e1a1a0f704c375028f8320d16e31"}, - {file = "scipy-1.9.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5a04cd7d0d3eff6ea4719371cbc44df31411862b9646db617c99718ff68d4840"}, - {file = "scipy-1.9.3-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:545c83ffb518094d8c9d83cce216c0c32f8c04aaf28b92cc8283eda0685162d5"}, - {file = "scipy-1.9.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d54222d7a3ba6022fdf5773931b5d7c56efe41ede7f7128c7b1637700409108"}, - {file = "scipy-1.9.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cff3a5295234037e39500d35316a4c5794739433528310e117b8a9a0c76d20fc"}, - {file = "scipy-1.9.3-cp38-cp38-win_amd64.whl", hash = "sha256:2318bef588acc7a574f5bfdff9c172d0b1bf2c8143d9582e05f878e580a3781e"}, - {file = "scipy-1.9.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d644a64e174c16cb4b2e41dfea6af722053e83d066da7343f333a54dae9bc31c"}, - {file = "scipy-1.9.3-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:da8245491d73ed0a994ed9c2e380fd058ce2fa8a18da204681f2fe1f57f98f95"}, - {file = 
"scipy-1.9.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4db5b30849606a95dcf519763dd3ab6fe9bd91df49eba517359e450a7d80ce2e"}, - {file = "scipy-1.9.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c68db6b290cbd4049012990d7fe71a2abd9ffbe82c0056ebe0f01df8be5436b0"}, - {file = "scipy-1.9.3-cp39-cp39-win_amd64.whl", hash = "sha256:5b88e6d91ad9d59478fafe92a7c757d00c59e3bdc3331be8ada76a4f8d683f58"}, - {file = "scipy-1.9.3.tar.gz", hash = "sha256:fbc5c05c85c1a02be77b1ff591087c83bc44579c6d2bd9fb798bb64ea5e1a027"}, -] - -[package.dependencies] -numpy = ">=1.18.5,<1.26.0" - -[package.extras] -dev = ["flake8", "mypy", "pycodestyle", "typing_extensions"] -doc = ["matplotlib (>2)", "numpydoc", "pydata-sphinx-theme (==0.9.0)", "sphinx (!=4.1.0)", "sphinx-panels (>=0.5.2)", "sphinx-tabs"] -test = ["asv", "gmpy2", "mpmath", "pytest", "pytest-cov", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] - -[[package]] -name = "semantic-version" -version = "2.10.0" -description = "A library implementing the 'SemVer' scheme." -optional = false -python-versions = ">=2.7" -files = [ - {file = "semantic_version-2.10.0-py2.py3-none-any.whl", hash = "sha256:de78a3b8e0feda74cabc54aab2da702113e33ac9d9eb9d2389bcf1f58b7d9177"}, - {file = "semantic_version-2.10.0.tar.gz", hash = "sha256:bdabb6d336998cbb378d4b9db3a4b56a1e3235701dc05ea2690d9a997ed5041c"}, -] - -[package.extras] -dev = ["Django (>=1.11)", "check-manifest", "colorama (<=0.4.1)", "coverage", "flake8", "nose2", "readme-renderer (<25.0)", "tox", "wheel", "zest.releaser[recommended]"] -doc = ["Sphinx", "sphinx-rtd-theme"] - -[[package]] -name = "setuptools" -version = "67.7.2" -description = "Easily download, build, install, upgrade, and uninstall Python packages" -optional = false -python-versions = ">=3.7" -files = [ - {file = "setuptools-67.7.2-py3-none-any.whl", hash = "sha256:23aaf86b85ca52ceb801d32703f12d77517b2556af839621c641fca11287952b"}, - {file = "setuptools-67.7.2.tar.gz", hash = "sha256:f104fa03692a2602fa0fec6c6a9e63b6c8a968de13e17c026957dd1f53d80990"}, -] - -[package.extras] -docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"] -testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8 (<5)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] -testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] - -[[package]] -name = "six" -version = "1.16.0" -description = "Python 2 and 3 compatibility utilities" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" -files = [ - {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, - {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, -] - -[[package]] -name = "sniffio" -version = "1.3.0" 
-description = "Sniff out which async library your code is running under" -optional = false -python-versions = ">=3.7" -files = [ - {file = "sniffio-1.3.0-py3-none-any.whl", hash = "sha256:eecefdce1e5bbfb7ad2eeaabf7c1eeb404d7757c379bd1f7e5cce9d8bf425384"}, - {file = "sniffio-1.3.0.tar.gz", hash = "sha256:e60305c5e5d314f5389259b7f22aaa33d8f7dee49763119234af3755c55b9101"}, -] - -[[package]] -name = "soundfile" -version = "0.12.1" -description = "An audio library based on libsndfile, CFFI and NumPy" -optional = false -python-versions = "*" -files = [ - {file = "soundfile-0.12.1-py2.py3-none-any.whl", hash = "sha256:828a79c2e75abab5359f780c81dccd4953c45a2c4cd4f05ba3e233ddf984b882"}, - {file = "soundfile-0.12.1-py2.py3-none-macosx_10_9_x86_64.whl", hash = "sha256:d922be1563ce17a69582a352a86f28ed8c9f6a8bc951df63476ffc310c064bfa"}, - {file = "soundfile-0.12.1-py2.py3-none-macosx_11_0_arm64.whl", hash = "sha256:bceaab5c4febb11ea0554566784bcf4bc2e3977b53946dda2b12804b4fe524a8"}, - {file = "soundfile-0.12.1-py2.py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:2dc3685bed7187c072a46ab4ffddd38cef7de9ae5eb05c03df2ad569cf4dacbc"}, - {file = "soundfile-0.12.1-py2.py3-none-manylinux_2_31_x86_64.whl", hash = "sha256:074247b771a181859d2bc1f98b5ebf6d5153d2c397b86ee9e29ba602a8dfe2a6"}, - {file = "soundfile-0.12.1-py2.py3-none-win32.whl", hash = "sha256:59dfd88c79b48f441bbf6994142a19ab1de3b9bb7c12863402c2bc621e49091a"}, - {file = "soundfile-0.12.1-py2.py3-none-win_amd64.whl", hash = "sha256:0d86924c00b62552b650ddd28af426e3ff2d4dc2e9047dae5b3d8452e0a49a77"}, - {file = "soundfile-0.12.1.tar.gz", hash = "sha256:e8e1017b2cf1dda767aef19d2fd9ee5ebe07e050d430f77a0a7c66ba08b8cdae"}, -] - -[package.dependencies] -cffi = ">=1.0" - -[package.extras] -numpy = ["numpy"] - -[[package]] -name = "starlette" -version = "0.27.0" -description = "The little ASGI library that shines." 
-optional = false -python-versions = ">=3.7" -files = [ - {file = "starlette-0.27.0-py3-none-any.whl", hash = "sha256:918416370e846586541235ccd38a474c08b80443ed31c578a418e2209b3eef91"}, - {file = "starlette-0.27.0.tar.gz", hash = "sha256:6a6b0d042acb8d469a01eba54e9cda6cbd24ac602c4cd016723117d6a7e73b75"}, -] - -[package.dependencies] -anyio = ">=3.4.0,<5" -typing-extensions = {version = ">=3.10.0", markers = "python_version < \"3.10\""} - -[package.extras] -full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart", "pyyaml"] - -[[package]] -name = "sympy" -version = "1.11.1" -description = "Computer algebra system (CAS) in Python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "sympy-1.11.1-py3-none-any.whl", hash = "sha256:938f984ee2b1e8eae8a07b884c8b7a1146010040fccddc6539c54f401c8f6fcf"}, - {file = "sympy-1.11.1.tar.gz", hash = "sha256:e32380dce63cb7c0108ed525570092fd45168bdae2faa17e528221ef72e88658"}, -] - -[package.dependencies] -mpmath = ">=0.19" - -[[package]] -name = "tabulate" -version = "0.9.0" -description = "Pretty-print tabular data" -optional = false -python-versions = ">=3.7" -files = [ - {file = "tabulate-0.9.0-py3-none-any.whl", hash = "sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f"}, - {file = "tabulate-0.9.0.tar.gz", hash = "sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c"}, -] - -[package.extras] -widechars = ["wcwidth"] - -[[package]] -name = "tensorboard" -version = "2.12.1" -description = "TensorBoard lets you watch Tensors Flow" -optional = false -python-versions = ">=3.8" -files = [ - {file = "tensorboard-2.12.1-py3-none-any.whl", hash = "sha256:58f1c2a25b4829b9c48d2b1ec951dedc9325dcd1ea4b0f601d241d2887d0ed65"}, -] - -[package.dependencies] -absl-py = ">=0.4" -google-auth = ">=1.6.3,<3" -google-auth-oauthlib = ">=0.5,<1.1" -grpcio = ">=1.48.2" -markdown = ">=2.6.8" -numpy = ">=1.12.0" -protobuf = ">=3.19.6" -requests = ">=2.21.0,<3" -setuptools = ">=41.0.0" -tensorboard-data-server = ">=0.7.0,<0.8.0" -tensorboard-plugin-wit = ">=1.6.0" -werkzeug = ">=1.0.1" -wheel = ">=0.26" - -[[package]] -name = "tensorboard-data-server" -version = "0.7.0" -description = "Fast data loading for TensorBoard" -optional = false -python-versions = ">=3.7" -files = [ - {file = "tensorboard_data_server-0.7.0-py3-none-any.whl", hash = "sha256:753d4214799b31da7b6d93837959abebbc6afa86e69eacf1e9a317a48daa31eb"}, - {file = "tensorboard_data_server-0.7.0-py3-none-macosx_10_9_x86_64.whl", hash = "sha256:eb7fa518737944dbf4f0cf83c2e40a7ac346bf91be2e6a0215de98be74e85454"}, - {file = "tensorboard_data_server-0.7.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:64aa1be7c23e80b1a42c13b686eb0875bb70f5e755f4d2b8de5c1d880cf2267f"}, -] - -[[package]] -name = "tensorboard-plugin-wit" -version = "1.8.1" -description = "What-If Tool TensorBoard plugin." 
-optional = false -python-versions = "*" -files = [ - {file = "tensorboard_plugin_wit-1.8.1-py3-none-any.whl", hash = "sha256:ff26bdd583d155aa951ee3b152b3d0cffae8005dc697f72b44a8e8c2a77a8cbe"}, -] - -[[package]] -name = "tensorboardx" -version = "2.6" -description = "TensorBoardX lets you watch Tensors Flow without Tensorflow" -optional = false -python-versions = "*" -files = [ - {file = "tensorboardX-2.6-py2.py3-none-any.whl", hash = "sha256:24a7cd076488de1e9d15ef25371b8ebf90c4f8f622af2477c611198f03f4a606"}, - {file = "tensorboardX-2.6.tar.gz", hash = "sha256:d4c036964dd2deb075a1909832b276daa383eab3f9db519ad90b99f5aea06b0c"}, -] - -[package.dependencies] -numpy = "*" -packaging = "*" -protobuf = ">=3.8.0,<4" - -[[package]] -name = "threadpoolctl" -version = "3.1.0" -description = "threadpoolctl" -optional = false -python-versions = ">=3.6" -files = [ - {file = "threadpoolctl-3.1.0-py3-none-any.whl", hash = "sha256:8b99adda265feb6773280df41eece7b2e6561b772d21ffd52e372f999024907b"}, - {file = "threadpoolctl-3.1.0.tar.gz", hash = "sha256:a335baacfaa4400ae1f0d8e3a58d6674d2f8828e3716bb2802c44955ad391380"}, -] - -[[package]] -name = "toolz" -version = "0.12.0" -description = "List processing tools and functional utilities" -optional = false -python-versions = ">=3.5" -files = [ - {file = "toolz-0.12.0-py3-none-any.whl", hash = "sha256:2059bd4148deb1884bb0eb770a3cde70e7f954cfbbdc2285f1f2de01fd21eb6f"}, - {file = "toolz-0.12.0.tar.gz", hash = "sha256:88c570861c440ee3f2f6037c4654613228ff40c93a6c25e0eba70d17282c6194"}, -] - -[[package]] -name = "torch" -version = "2.0.0" -description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" -optional = false -python-versions = ">=3.8.0" -files = [ - {file = "torch-2.0.0-1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:c9090bda7d2eeeecd74f51b721420dbeb44f838d4536cc1b284e879417e3064a"}, - {file = "torch-2.0.0-1-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:bd42db2a48a20574d2c33489e120e9f32789c4dc13c514b0c44272972d14a2d7"}, - {file = "torch-2.0.0-1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:8969aa8375bcbc0c2993e7ede0a7f889df9515f18b9b548433f412affed478d9"}, - {file = "torch-2.0.0-1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:ab2da16567cb55b67ae39e32d520d68ec736191d88ac79526ca5874754c32203"}, - {file = "torch-2.0.0-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:7a9319a67294ef02459a19738bbfa8727bb5307b822dadd708bc2ccf6c901aca"}, - {file = "torch-2.0.0-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:9f01fe1f6263f31bd04e1757946fd63ad531ae37f28bb2dbf66f5c826ee089f4"}, - {file = "torch-2.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:527f4ae68df7b8301ee6b1158ca56350282ea633686537b30dbb5d7b4a52622a"}, - {file = "torch-2.0.0-cp310-none-macosx_10_9_x86_64.whl", hash = "sha256:ce9b5a49bd513dff7950a5a07d6e26594dd51989cee05ba388b03e8e366fd5d5"}, - {file = "torch-2.0.0-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:53e1c33c6896583cdb9a583693e22e99266444c4a43392dddc562640d39e542b"}, - {file = "torch-2.0.0-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:09651bff72e439d004c991f15add0c397c66f98ab36fe60d5514b44e4da722e8"}, - {file = "torch-2.0.0-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:d439aec349c98f12819e8564b8c54008e4613dd4428582af0e6e14c24ca85870"}, - {file = "torch-2.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:2802f84f021907deee7e9470ed10c0e78af7457ac9a08a6cd7d55adef835fede"}, - {file = "torch-2.0.0-cp311-none-macosx_10_9_x86_64.whl", hash = 
"sha256:01858620f25f25e7a9ec4b547ff38e5e27c92d38ec4ccba9cfbfb31d7071ed9c"}, - {file = "torch-2.0.0-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:9a2e53b5783ef5896a6af338b36d782f28e83c8ddfc2ac44b67b066d9d76f498"}, - {file = "torch-2.0.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:ec5fff2447663e369682838ff0f82187b4d846057ef4d119a8dea7772a0b17dd"}, - {file = "torch-2.0.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:11b0384fe3c18c01b8fc5992e70fc519cde65e44c51cc87be1838c1803daf42f"}, - {file = "torch-2.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:e54846aa63855298cfb1195487f032e413e7ac9cbfa978fda32354cc39551475"}, - {file = "torch-2.0.0-cp38-none-macosx_10_9_x86_64.whl", hash = "sha256:cc788cbbbbc6eb4c90e52c550efd067586c2693092cf367c135b34893a64ae78"}, - {file = "torch-2.0.0-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:d292640f0fd72b7a31b2a6e3b635eb5065fcbedd4478f9cad1a1e7a9ec861d35"}, - {file = "torch-2.0.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:6befaad784004b7af357e3d87fa0863c1f642866291f12a4c2af2de435e8ac5c"}, - {file = "torch-2.0.0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:a83b26bd6ae36fbf5fee3d56973d9816e2002e8a3b7d9205531167c28aaa38a7"}, - {file = "torch-2.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:c7e67195e1c3e33da53954b026e89a8e1ff3bc1aeb9eb32b677172d4a9b5dcbf"}, - {file = "torch-2.0.0-cp39-none-macosx_10_9_x86_64.whl", hash = "sha256:6e0b97beb037a165669c312591f242382e9109a240e20054d5a5782d9236cad0"}, - {file = "torch-2.0.0-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:297a4919aff1c0f98a58ebe969200f71350a1d4d4f986dbfd60c02ffce780e99"}, -] - -[package.dependencies] -filelock = "*" -jinja2 = "*" -networkx = "*" -nvidia-cublas-cu11 = {version = "11.10.3.66", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} -nvidia-cuda-cupti-cu11 = {version = "11.7.101", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} -nvidia-cuda-nvrtc-cu11 = {version = "11.7.99", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} -nvidia-cuda-runtime-cu11 = {version = "11.7.99", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} -nvidia-cudnn-cu11 = {version = "8.5.0.96", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} -nvidia-cufft-cu11 = {version = "10.9.0.58", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} -nvidia-curand-cu11 = {version = "10.2.10.91", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} -nvidia-cusolver-cu11 = {version = "11.4.0.1", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} -nvidia-cusparse-cu11 = {version = "11.7.4.91", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} -nvidia-nccl-cu11 = {version = "2.14.3", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} -nvidia-nvtx-cu11 = {version = "11.7.91", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} -sympy = "*" -triton = {version = "2.0.0", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} -typing-extensions = "*" - -[package.extras] -opt-einsum = ["opt-einsum (>=3.3)"] - -[[package]] -name = "torchaudio" -version = "2.0.1" -description = "An audio package for PyTorch" -optional = false -python-versions = "*" -files = [ - {file = "torchaudio-2.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5d21ebbb55e7040d418d5062b0e882f9660d68b477b38fd436fa6c92ccbb52a"}, - {file = 
"torchaudio-2.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6dbcd93b29d71a2f500f36a34ea5e467f510f773da85322098e6bdd8c9dc9948"}, - {file = "torchaudio-2.0.1-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:5fdaba10ff06d098d603d9eb8d2ff541c3f3fe28ba178a78787190cec0d5187f"}, - {file = "torchaudio-2.0.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:6419199c773c5045c594ff950d5e5dbbfa6c830892ec09721d4ed8704b702bfd"}, - {file = "torchaudio-2.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:a5c81e480e5dcdcba065af1e3e31678ac29518991f00260094d37a39e63d76e5"}, - {file = "torchaudio-2.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e2a047675493c0aa258fec621ef40e8b01abe3d8dbc872152e4b5998418aa3c5"}, - {file = "torchaudio-2.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:91a28e587f708a03320eddbcc4a7dd1ad7150b3d4846b6c1557d85cc89a8d06c"}, - {file = "torchaudio-2.0.1-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:ba7740d98f601218ff667598ab3d9dab5f326878374fcb52d656f4ff033b9e96"}, - {file = "torchaudio-2.0.1-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:f401b192921c8b77cc5e478ede589b256dba463f1cee91172ecb376fea45a288"}, - {file = "torchaudio-2.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:0ef6754cf75ca5fd5117cb6243a6cf33552d67e9af0075aa6954b2c34bbf1036"}, - {file = "torchaudio-2.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:022ca1baa4bb819b78343bd47b57ff6dc6f9fc19fa4ef269946aadf7e62db3c0"}, - {file = "torchaudio-2.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a153ad5cdb62de8ec9fd1360a0d080bbaf39d578ae04e788db211571e675b7e0"}, - {file = "torchaudio-2.0.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:aa7897774ab4156d0b72f7078b823ebc1371ee24c50df965447782889552367a"}, - {file = "torchaudio-2.0.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:48d133593cddfe0424a350b566d54065bf6fe7469654de7add2f11b3ef03c5d9"}, - {file = "torchaudio-2.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:ac65eb067feee435debba81adfe8337fa007a06de6508c0d80261c5562b6d098"}, - {file = "torchaudio-2.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e3c6c8f9ea9f0e2df7a0b9375b0dcf955906e38fc12fab542b72a861564af8e7"}, - {file = "torchaudio-2.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1d0cf0779a334ec1861e9fa28bceb66a633c42e8f6b3322e2e37ff9f20d0ae81"}, - {file = "torchaudio-2.0.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:ab7acd2b5d351a2c65e4d935bb90b9256382bed93df57ee177bdbbe31c3cc984"}, - {file = "torchaudio-2.0.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:77b953fd7278773269a9477315b8998ae7e5011cc4b2907e0df18162327482f1"}, - {file = "torchaudio-2.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:c01bcea9d4c4a6616452e6cbd44d55913d8e6dee58191b925f35d46a2bf6e71b"}, -] - -[package.dependencies] -torch = "2.0.0" - -[[package]] -name = "torchgen" -version = "0.0.1" -description = "Ready to use implementations of state-of-the-art generative models in PyTorch" -optional = false -python-versions = ">=3.7, <4" -files = [ - {file = "torchgen-0.0.1-py3-none-any.whl", hash = "sha256:78d02b5e4ea0231ce46b4262564a05a9cb2047fcfcdcf4a4ab56230a0f21be66"}, -] - -[[package]] -name = "tornado" -version = "6.3.2" -description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed." 
-optional = false -python-versions = ">= 3.8" -files = [ - {file = "tornado-6.3.2-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:c367ab6c0393d71171123ca5515c61ff62fe09024fa6bf299cd1339dc9456829"}, - {file = "tornado-6.3.2-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:b46a6ab20f5c7c1cb949c72c1994a4585d2eaa0be4853f50a03b5031e964fc7c"}, - {file = "tornado-6.3.2-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c2de14066c4a38b4ecbbcd55c5cc4b5340eb04f1c5e81da7451ef555859c833f"}, - {file = "tornado-6.3.2-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:05615096845cf50a895026f749195bf0b10b8909f9be672f50b0fe69cba368e4"}, - {file = "tornado-6.3.2-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5b17b1cf5f8354efa3d37c6e28fdfd9c1c1e5122f2cb56dac121ac61baa47cbe"}, - {file = "tornado-6.3.2-cp38-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:29e71c847a35f6e10ca3b5c2990a52ce38b233019d8e858b755ea6ce4dcdd19d"}, - {file = "tornado-6.3.2-cp38-abi3-musllinux_1_1_i686.whl", hash = "sha256:834ae7540ad3a83199a8da8f9f2d383e3c3d5130a328889e4cc991acc81e87a0"}, - {file = "tornado-6.3.2-cp38-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:6a0848f1aea0d196a7c4f6772197cbe2abc4266f836b0aac76947872cd29b411"}, - {file = "tornado-6.3.2-cp38-abi3-win32.whl", hash = "sha256:7efcbcc30b7c654eb6a8c9c9da787a851c18f8ccd4a5a3a95b05c7accfa068d2"}, - {file = "tornado-6.3.2-cp38-abi3-win_amd64.whl", hash = "sha256:0c325e66c8123c606eea33084976c832aa4e766b7dff8aedd7587ea44a604cdf"}, - {file = "tornado-6.3.2.tar.gz", hash = "sha256:4b927c4f19b71e627b13f3db2324e4ae660527143f9e1f2e2fb404f3a187e2ba"}, -] - -[[package]] -name = "tqdm" -version = "4.65.0" -description = "Fast, Extensible Progress Meter" -optional = false -python-versions = ">=3.7" -files = [ - {file = "tqdm-4.65.0-py3-none-any.whl", hash = "sha256:c4f53a17fe37e132815abceec022631be8ffe1b9381c2e6e30aa70edc99e9671"}, - {file = "tqdm-4.65.0.tar.gz", hash = "sha256:1871fb68a86b8fb3b59ca4cdd3dcccbc7e6d613eeed31f4c332531977b89beb5"}, -] - -[package.dependencies] -colorama = {version = "*", markers = "platform_system == \"Windows\""} - -[package.extras] -dev = ["py-make (>=0.1.0)", "twine", "wheel"] -notebook = ["ipywidgets (>=6)"] -slack = ["slack-sdk"] -telegram = ["requests"] - -[[package]] -name = "traitlets" -version = "5.9.0" -description = "Traitlets Python configuration system" -optional = false -python-versions = ">=3.7" -files = [ - {file = "traitlets-5.9.0-py3-none-any.whl", hash = "sha256:9e6ec080259b9a5940c797d58b613b5e31441c2257b87c2e795c5228ae80d2d8"}, - {file = "traitlets-5.9.0.tar.gz", hash = "sha256:f6cde21a9c68cf756af02035f72d5a723bf607e862e7be33ece505abf4a3bad9"}, -] - -[package.extras] -docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] -test = ["argcomplete (>=2.0)", "pre-commit", "pytest", "pytest-mock"] - -[[package]] -name = "triton" -version = "2.0.0" -description = "A language and compiler for custom Deep Learning operations" -optional = false -python-versions = "*" -files = [ - {file = "triton-2.0.0-1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:38806ee9663f4b0f7cd64790e96c579374089e58f49aac4a6608121aa55e2505"}, - {file = "triton-2.0.0-1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:226941c7b8595219ddef59a1fdb821e8c744289a132415ddd584facedeb475b1"}, - {file = "triton-2.0.0-1-cp36-cp36m-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", 
hash = "sha256:4c9fc8c89874bc48eb7e7b2107a9b8d2c0bf139778637be5bfccb09191685cfd"}, - {file = "triton-2.0.0-1-cp37-cp37m-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d2684b6a60b9f174f447f36f933e9a45f31db96cb723723ecd2dcfd1c57b778b"}, - {file = "triton-2.0.0-1-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9d4978298b74fcf59a75fe71e535c092b023088933b2f1df933ec32615e4beef"}, - {file = "triton-2.0.0-1-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:74f118c12b437fb2ca25e1a04759173b517582fcf4c7be11913316c764213656"}, - {file = "triton-2.0.0-1-pp37-pypy37_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9618815a8da1d9157514f08f855d9e9ff92e329cd81c0305003eb9ec25cc5add"}, - {file = "triton-2.0.0-1-pp38-pypy38_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:1aca3303629cd3136375b82cb9921727f804e47ebee27b2677fef23005c3851a"}, - {file = "triton-2.0.0-1-pp39-pypy39_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:e3e13aa8b527c9b642e3a9defcc0fbd8ffbe1c80d8ac8c15a01692478dc64d8a"}, - {file = "triton-2.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f05a7e64e4ca0565535e3d5d3405d7e49f9d308505bb7773d21fb26a4c008c2"}, - {file = "triton-2.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb4b99ca3c6844066e516658541d876c28a5f6e3a852286bbc97ad57134827fd"}, - {file = "triton-2.0.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47b4d70dc92fb40af553b4460492c31dc7d3a114a979ffb7a5cdedb7eb546c08"}, - {file = "triton-2.0.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fedce6a381901b1547e0e7e1f2546e4f65dca6d91e2d8a7305a2d1f5551895be"}, - {file = "triton-2.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75834f27926eab6c7f00ce73aaf1ab5bfb9bec6eb57ab7c0bfc0a23fac803b4c"}, - {file = "triton-2.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0117722f8c2b579cd429e0bee80f7731ae05f63fe8e9414acd9a679885fcbf42"}, - {file = "triton-2.0.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bcd9be5d0c2e45d2b7e6ddc6da20112b6862d69741576f9c3dbaf941d745ecae"}, - {file = "triton-2.0.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42a0d2c3fc2eab4ba71384f2e785fbfd47aa41ae05fa58bf12cb31dcbd0aeceb"}, - {file = "triton-2.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:52c47b72c72693198163ece9d90a721299e4fb3b8e24fd13141e384ad952724f"}, -] - -[package.dependencies] -cmake = "*" -filelock = "*" -lit = "*" -torch = "*" - -[package.extras] -tests = ["autopep8", "flake8", "isort", "numpy", "pytest", "scipy (>=1.7.1)"] -tutorials = ["matplotlib", "pandas", "tabulate"] - -[[package]] -name = "typing-extensions" -version = "4.5.0" -description = "Backported and Experimental Type Hints for Python 3.7+" -optional = false -python-versions = ">=3.7" -files = [ - {file = "typing_extensions-4.5.0-py3-none-any.whl", hash = "sha256:fb33085c39dd998ac16d1431ebc293a8b3eedd00fd4a32de0ff79002c19511b4"}, - {file = "typing_extensions-4.5.0.tar.gz", hash = "sha256:5cb5f4a79139d699607b3ef622a1dedafa84e115ab0024e0d9c044a9479ca7cb"}, -] - -[[package]] -name = "tzdata" -version = "2023.3" -description = "Provider of IANA time zone data" -optional = false -python-versions = ">=2" -files = [ - {file = "tzdata-2023.3-py2.py3-none-any.whl", hash = 
"sha256:7e65763eef3120314099b6939b5546db7adce1e7d6f2e179e3df563c70511eda"}, - {file = "tzdata-2023.3.tar.gz", hash = "sha256:11ef1e08e54acb0d4f95bdb1be05da659673de4acbd21bf9c69e94cc5e907a3a"}, -] - -[[package]] -name = "uc-micro-py" -version = "1.0.1" -description = "Micro subset of unicode data files for linkify-it-py projects." -optional = false -python-versions = ">=3.6" -files = [ - {file = "uc-micro-py-1.0.1.tar.gz", hash = "sha256:b7cdf4ea79433043ddfe2c82210208f26f7962c0cfbe3bacb05ee879a7fdb596"}, - {file = "uc_micro_py-1.0.1-py3-none-any.whl", hash = "sha256:316cfb8b6862a0f1d03540f0ae6e7b033ff1fa0ddbe60c12cbe0d4cec846a69f"}, -] - -[package.extras] -test = ["coverage", "pytest", "pytest-cov"] - -[[package]] -name = "urllib3" -version = "1.26.15" -description = "HTTP library with thread-safe connection pooling, file post, and more." -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" -files = [ - {file = "urllib3-1.26.15-py2.py3-none-any.whl", hash = "sha256:aa751d169e23c7479ce47a0cb0da579e3ede798f994f5816a74e4f4500dcea42"}, - {file = "urllib3-1.26.15.tar.gz", hash = "sha256:8a388717b9476f934a21484e8c8e61875ab60644d29b9b39e11e4b9dc1c6b305"}, -] - -[package.extras] -brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"] -secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] -socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] - -[[package]] -name = "uvicorn" -version = "0.21.1" -description = "The lightning-fast ASGI server." -optional = false -python-versions = ">=3.7" -files = [ - {file = "uvicorn-0.21.1-py3-none-any.whl", hash = "sha256:e47cac98a6da10cd41e6fd036d472c6f58ede6c5dbee3dbee3ef7a100ed97742"}, - {file = "uvicorn-0.21.1.tar.gz", hash = "sha256:0fac9cb342ba099e0d582966005f3fdba5b0290579fed4a6266dc702ca7bb032"}, -] - -[package.dependencies] -click = ">=7.0" -h11 = ">=0.8" - -[package.extras] -standard = ["colorama (>=0.4)", "httptools (>=0.5.0)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1)", "watchfiles (>=0.13)", "websockets (>=10.4)"] - -[[package]] -name = "websockets" -version = "11.0" -description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)" -optional = false -python-versions = ">=3.7" -files = [ - {file = "websockets-11.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:269e3547877a6ca55f62acdf291b256b01bc3469535e892af36afd3e17de284a"}, - {file = "websockets-11.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:70a4e03d2416c1dad16ccfab97c975192337c6481b07167c90221f1926893e1e"}, - {file = "websockets-11.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4100dc8566ea3b9c0528dee73284be524ab053aebd77e3fc7439a90e0d57745b"}, - {file = "websockets-11.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a8e0505c556b2b48078291b300d930f2fb8ba81d1e36379b637c060cfa561ae4"}, - {file = "websockets-11.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d5bc68cec8269b4b52ab6d1d8690f56dba35f7bcb83a5487518406300f81cf1"}, - {file = "websockets-11.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:067ac1f6153fc5218afc4563491dcbdb7384895cfc588a0afee962ca77fe0b58"}, - {file = "websockets-11.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:910c84c0cfe4f872905b6ebe1866c579582070331abcb7a58621935eca95c18a"}, - {file = 
"websockets-11.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:df0f7769450ca67a53182f917910e2b0b6dd3f8268f88cbfe54ee6be96812889"}, - {file = "websockets-11.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fe23605f5c351773b6fb82fcf680549980d63e126fab5213ed875686c0cec25d"}, - {file = "websockets-11.0-cp310-cp310-win32.whl", hash = "sha256:eb2e7cd654a05c36fccf726385c64a0e1027997d05ba0859f4d84c3d87db1623"}, - {file = "websockets-11.0-cp310-cp310-win_amd64.whl", hash = "sha256:fb26c333751a1e3805ecc416a85dcfa3657676b185acd515fd6992f0cea898ef"}, - {file = "websockets-11.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:b4a939963bae1055f14976ef2cf53e797c1997f8835ca9cf23060afc3e7d6718"}, - {file = "websockets-11.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d7fc189fb632f8b31af8a5b32105919662a1bbaac20912320482415b7fed9c96"}, - {file = "websockets-11.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6e3cfc890f1326c95fd7d4cc50f2bd496d3f014fb2da36b4525a10f226be565d"}, - {file = "websockets-11.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9093f4c72c23ed5e475970c6a37e77c4f3a8856223421b9eb405b9fb2170629f"}, - {file = "websockets-11.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5c56547f97bc76293522ccfcfbdde12442420f1a2c0218ff45d733a0030046df"}, - {file = "websockets-11.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffb406b4449d4fa41ebc47faa3b9153a082f6fe0e4a0891f596a5ddb69fdeccd"}, - {file = "websockets-11.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:8fad76be2c5e36fb3620ad507ac8004e9f358f5c4a9a1b756dbe7918d58884a0"}, - {file = "websockets-11.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:17eb1988d320e2f1f20e4a3523f1068a0bb08318ab123962fc99fd90c90ab0d6"}, - {file = "websockets-11.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9873288db9c673a2ba9c0f7b59a130576c50fc75f3336a706fff686009c41631"}, - {file = "websockets-11.0-cp311-cp311-win32.whl", hash = "sha256:cf4ef6343478bf63098d3060fe06baf54d9c011b4b1b05e65e7957091cc87ef4"}, - {file = "websockets-11.0-cp311-cp311-win_amd64.whl", hash = "sha256:713cd5fc1fd40436495c90a259274e1a4a39416c65447a256434941ddaf2f424"}, - {file = "websockets-11.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:649ddddcbafd318d427b843425c92b1c035660c32507645c472c77356226cf07"}, - {file = "websockets-11.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:564c53d84b95da527e96778f2cc873ef186038924abee601f9e8f12ebda9ad46"}, - {file = "websockets-11.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:66d8df2db9801063e4093efe01458b1705c9f76382ad32617c005eeeb201a730"}, - {file = "websockets-11.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbcd876ed166a82d250fcf012b729315489e9d653cb659c2e013c19daba2eb8f"}, - {file = "websockets-11.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:cb00963b49d343210ebbdbe69a35004fbecad73da2158e83d481cd2a6716cf19"}, - {file = "websockets-11.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:3d6f7c2f822e439f47f3492ee3e48c87c7d134d619a42c6dba1a318504501bfb"}, - {file = "websockets-11.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:c4b2ae9c0f1acec5d2f8000eb496eebb9db19055a63716ee166cf0694b945982"}, - {file = "websockets-11.0-cp37-cp37m-win32.whl", hash = 
"sha256:2b363e0f9b4247a0c7482e22c70ef39fb3259a14f7c0791c9200b93145f60b4b"}, - {file = "websockets-11.0-cp37-cp37m-win_amd64.whl", hash = "sha256:3d372c3426f165a0a22be9250526b1cd12e3556e80b4b2afaa6fd6649c99b086"}, - {file = "websockets-11.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:7eb914d37e0574246c63b995f9ca8d7bb7c2f2d53a8d4e9b00200ea856aa43c4"}, - {file = "websockets-11.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a8717a5f3a00cde308e2971064bd5fcb14e0cc08f8234b97f4eb92b505ea95d4"}, - {file = "websockets-11.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a17151722349c4af221616cca2f28e79237738bfbc53e7155240e2a8a7cc02f4"}, - {file = "websockets-11.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4b60686d9b2ba500847c045595eb5887f4cca7102b4615773b6f490aa611107"}, - {file = "websockets-11.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eededf25ef6b838e650eeeb1511804b82e9ece566fe6cdc11aa909d2992dcdaf"}, - {file = "websockets-11.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7587f339f016f0e1b0b6f013e98c83e382c5929774f2b8234c1b2d3f01dd1339"}, - {file = "websockets-11.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:26369646078e16e7364729ed3e3b1a4315ab1a22ca3c48b4e25dea48fcc1a881"}, - {file = "websockets-11.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:92f51fbe87381ff76c1791dd44d599152b400f1adfa8453613f1ff6857200ee7"}, - {file = "websockets-11.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b5bb04a77c326d727c0b986c37a76147916d79db95629267307d1be47788a020"}, - {file = "websockets-11.0-cp38-cp38-win32.whl", hash = "sha256:50ac95111009178e58b9a25aa51702cdaad4ed843b98eb9b58d69b323ccb224e"}, - {file = "websockets-11.0-cp38-cp38-win_amd64.whl", hash = "sha256:7a4076cd6a3678def988668fc4b1779da598e1e5c9fa26319af5499f00c23e1c"}, - {file = "websockets-11.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:26559e8a385f71ce2a58f3bb1d005ddd3db7d3328ddbfbff1034f4039d46c4ec"}, - {file = "websockets-11.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f10d283697dec8d91fa983eb8e217c9cac27bc1032057768129b89780009318e"}, - {file = "websockets-11.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f74efe229e078bf5595e207e9a7b135ff37a10858263ed86be66003c4c98d47b"}, - {file = "websockets-11.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f048c88bfcc5bf0e038630cfb970b2c479f913819fd9653db920eef3b105a2b1"}, - {file = "websockets-11.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ceab6c1827fa14ad10c6b0806941d577b21d17012a3648787ac2b946182285b4"}, - {file = "websockets-11.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:817227e23897808c4bb621da7f57b1f83ee18345bdc44f5c9c1bbd3a094a73f6"}, - {file = "websockets-11.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6fdcc17348d8697c1f88bba38680cca94131f2a9db727a61fe067284e1e59e8d"}, - {file = "websockets-11.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:8b21ad915b747075f29fe2fa5590111d98988d6730d2cd212acfe52bbe6a2545"}, - {file = "websockets-11.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9ae401ad881d5329062b9b2d8160f0b2a147430974f2a3f32e6cedadddc2d634"}, - {file = "websockets-11.0-cp39-cp39-win32.whl", hash = "sha256:ee84660927293f449760badfe010e06409edb99d72e1910e2e404d2eeff6990f"}, - {file = 
"websockets-11.0-cp39-cp39-win_amd64.whl", hash = "sha256:2b4e704a9dac1faf4994e63dceae9e2f504913ff0f865bd3e5a097cbd5874a8f"}, - {file = "websockets-11.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:c2d6429c9bcd70ed8126a1f9ca6069e4ab95c96a3cc141fc84ce02917f7b45ec"}, - {file = "websockets-11.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ff3f67567862a853af2c0db362ede8249be50c576cd9eaf380736c6fce840414"}, - {file = "websockets-11.0-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b86ce3d17bcc4b6556b2a2e1277beed74ff6b1de23f002f9763e9875e8ba361d"}, - {file = "websockets-11.0-pp37-pypy37_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:59c4b458cc09ea6470a5eee98b06ccaa84f2a193b92e337a879612614df0f8eb"}, - {file = "websockets-11.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:e5e21aeb350906dfcff321bfa6c60541a1d05cadb6d431ecf9d6376365be60d4"}, - {file = "websockets-11.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:8731189f6985b239a6c34a353c36b45cb3c9fed1c287fbcf7f61df9e4a7ac392"}, - {file = "websockets-11.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee3aa7660ae0d3a4e47517bb5a545b9a02ff7b9632a640f617e755990ef65f66"}, - {file = "websockets-11.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:027aabfd053715ce0f5f6fc5107e5093e05b3c94fa555fb65375aa09cb845a66"}, - {file = "websockets-11.0-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1e8c729aa179ef105f096cad12070aef230be9e2ae509eb47c3cdd9257213c14"}, - {file = "websockets-11.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:ff607c6e16409ac83f1ae59cc96167fead577bc652e8dff48f7458ce082372ff"}, - {file = "websockets-11.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ca3d7c08f472c40f28bb9fb99610d28dc97137612ab5308f80dac7ce79f87fe1"}, - {file = "websockets-11.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3f179deef8288dd8ec227d644ba5b711609093b634008643561f6d9c74938c3c"}, - {file = "websockets-11.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:269d33f1573a31130da9afd63a2558f60131522d3fe86d0aa2d1612ad065d27c"}, - {file = "websockets-11.0-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb0b306c1180d0268341447982b415aca7c072c84b4a59688dbc1d7d2ec25df9"}, - {file = "websockets-11.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:6ae209f11e433575e17d5d6e61a2f77ceda53b4bce07df55af614aa1d618e2e7"}, - {file = "websockets-11.0-py3-none-any.whl", hash = "sha256:6ebd971b9b2c0aaa2188c472016e4dad93108b3db425a33ad584bdc41b22026d"}, - {file = "websockets-11.0.tar.gz", hash = "sha256:19d638549c470f5fd3b67b52b2a08f2edba5a04e05323a706937e35f5f19d056"}, -] - -[[package]] -name = "werkzeug" -version = "2.2.3" -description = "The comprehensive WSGI web application library." 
-optional = false -python-versions = ">=3.7" -files = [ - {file = "Werkzeug-2.2.3-py3-none-any.whl", hash = "sha256:56433961bc1f12533306c624f3be5e744389ac61d722175d543e1751285da612"}, - {file = "Werkzeug-2.2.3.tar.gz", hash = "sha256:2e1ccc9417d4da358b9de6f174e3ac094391ea1d4fbef2d667865d819dfd0afe"}, -] - -[package.dependencies] -MarkupSafe = ">=2.1.1" - -[package.extras] -watchdog = ["watchdog"] - -[[package]] -name = "wheel" -version = "0.40.0" -description = "A built-package format for Python" -optional = false -python-versions = ">=3.7" -files = [ - {file = "wheel-0.40.0-py3-none-any.whl", hash = "sha256:d236b20e7cb522daf2390fa84c55eea81c5c30190f90f29ae2ca1ad8355bf247"}, - {file = "wheel-0.40.0.tar.gz", hash = "sha256:cd1196f3faee2b31968d626e1731c94f99cbdb67cf5a46e4f5656cbee7738873"}, -] - -[package.extras] -test = ["pytest (>=6.0.0)"] - -[[package]] -name = "yarl" -version = "1.8.2" -description = "Yet another URL library" -optional = false -python-versions = ">=3.7" -files = [ - {file = "yarl-1.8.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:bb81f753c815f6b8e2ddd2eef3c855cf7da193b82396ac013c661aaa6cc6b0a5"}, - {file = "yarl-1.8.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:47d49ac96156f0928f002e2424299b2c91d9db73e08c4cd6742923a086f1c863"}, - {file = "yarl-1.8.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3fc056e35fa6fba63248d93ff6e672c096f95f7836938241ebc8260e062832fe"}, - {file = "yarl-1.8.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:58a3c13d1c3005dbbac5c9f0d3210b60220a65a999b1833aa46bd6677c69b08e"}, - {file = "yarl-1.8.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:10b08293cda921157f1e7c2790999d903b3fd28cd5c208cf8826b3b508026996"}, - {file = "yarl-1.8.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:de986979bbd87272fe557e0a8fcb66fd40ae2ddfe28a8b1ce4eae22681728fef"}, - {file = "yarl-1.8.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c4fcfa71e2c6a3cb568cf81aadc12768b9995323186a10827beccf5fa23d4f8"}, - {file = "yarl-1.8.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae4d7ff1049f36accde9e1ef7301912a751e5bae0a9d142459646114c70ecba6"}, - {file = "yarl-1.8.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:bf071f797aec5b96abfc735ab97da9fd8f8768b43ce2abd85356a3127909d146"}, - {file = "yarl-1.8.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:74dece2bfc60f0f70907c34b857ee98f2c6dd0f75185db133770cd67300d505f"}, - {file = "yarl-1.8.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:df60a94d332158b444301c7f569659c926168e4d4aad2cfbf4bce0e8fb8be826"}, - {file = "yarl-1.8.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:63243b21c6e28ec2375f932a10ce7eda65139b5b854c0f6b82ed945ba526bff3"}, - {file = "yarl-1.8.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:cfa2bbca929aa742b5084fd4663dd4b87c191c844326fcb21c3afd2d11497f80"}, - {file = "yarl-1.8.2-cp310-cp310-win32.whl", hash = "sha256:b05df9ea7496df11b710081bd90ecc3a3db6adb4fee36f6a411e7bc91a18aa42"}, - {file = "yarl-1.8.2-cp310-cp310-win_amd64.whl", hash = "sha256:24ad1d10c9db1953291f56b5fe76203977f1ed05f82d09ec97acb623a7976574"}, - {file = "yarl-1.8.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2a1fca9588f360036242f379bfea2b8b44cae2721859b1c56d033adfd5893634"}, - {file = "yarl-1.8.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:f37db05c6051eff17bc832914fe46869f8849de5b92dc4a3466cd63095d23dfd"}, - {file = "yarl-1.8.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:77e913b846a6b9c5f767b14dc1e759e5aff05502fe73079f6f4176359d832581"}, - {file = "yarl-1.8.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0978f29222e649c351b173da2b9b4665ad1feb8d1daa9d971eb90df08702668a"}, - {file = "yarl-1.8.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:388a45dc77198b2460eac0aca1efd6a7c09e976ee768b0d5109173e521a19daf"}, - {file = "yarl-1.8.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2305517e332a862ef75be8fad3606ea10108662bc6fe08509d5ca99503ac2aee"}, - {file = "yarl-1.8.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42430ff511571940d51e75cf42f1e4dbdded477e71c1b7a17f4da76c1da8ea76"}, - {file = "yarl-1.8.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3150078118f62371375e1e69b13b48288e44f6691c1069340081c3fd12c94d5b"}, - {file = "yarl-1.8.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c15163b6125db87c8f53c98baa5e785782078fbd2dbeaa04c6141935eb6dab7a"}, - {file = "yarl-1.8.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4d04acba75c72e6eb90745447d69f84e6c9056390f7a9724605ca9c56b4afcc6"}, - {file = "yarl-1.8.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:e7fd20d6576c10306dea2d6a5765f46f0ac5d6f53436217913e952d19237efc4"}, - {file = "yarl-1.8.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:75c16b2a900b3536dfc7014905a128a2bea8fb01f9ee26d2d7d8db0a08e7cb2c"}, - {file = "yarl-1.8.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6d88056a04860a98341a0cf53e950e3ac9f4e51d1b6f61a53b0609df342cc8b2"}, - {file = "yarl-1.8.2-cp311-cp311-win32.whl", hash = "sha256:fb742dcdd5eec9f26b61224c23baea46c9055cf16f62475e11b9b15dfd5c117b"}, - {file = "yarl-1.8.2-cp311-cp311-win_amd64.whl", hash = "sha256:8c46d3d89902c393a1d1e243ac847e0442d0196bbd81aecc94fcebbc2fd5857c"}, - {file = "yarl-1.8.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:ceff9722e0df2e0a9e8a79c610842004fa54e5b309fe6d218e47cd52f791d7ef"}, - {file = "yarl-1.8.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3f6b4aca43b602ba0f1459de647af954769919c4714706be36af670a5f44c9c1"}, - {file = "yarl-1.8.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1684a9bd9077e922300ecd48003ddae7a7474e0412bea38d4631443a91d61077"}, - {file = "yarl-1.8.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ebb78745273e51b9832ef90c0898501006670d6e059f2cdb0e999494eb1450c2"}, - {file = "yarl-1.8.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3adeef150d528ded2a8e734ebf9ae2e658f4c49bf413f5f157a470e17a4a2e89"}, - {file = "yarl-1.8.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57a7c87927a468e5a1dc60c17caf9597161d66457a34273ab1760219953f7f4c"}, - {file = "yarl-1.8.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:efff27bd8cbe1f9bd127e7894942ccc20c857aa8b5a0327874f30201e5ce83d0"}, - {file = "yarl-1.8.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:a783cd344113cb88c5ff7ca32f1f16532a6f2142185147822187913eb989f739"}, - {file = "yarl-1.8.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:705227dccbe96ab02c7cb2c43e1228e2826e7ead880bb19ec94ef279e9555b5b"}, - {file = "yarl-1.8.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = 
"sha256:34c09b43bd538bf6c4b891ecce94b6fa4f1f10663a8d4ca589a079a5018f6ed7"}, - {file = "yarl-1.8.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:a48f4f7fea9a51098b02209d90297ac324241bf37ff6be6d2b0149ab2bd51b37"}, - {file = "yarl-1.8.2-cp37-cp37m-win32.whl", hash = "sha256:0414fd91ce0b763d4eadb4456795b307a71524dbacd015c657bb2a39db2eab89"}, - {file = "yarl-1.8.2-cp37-cp37m-win_amd64.whl", hash = "sha256:d881d152ae0007809c2c02e22aa534e702f12071e6b285e90945aa3c376463c5"}, - {file = "yarl-1.8.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5df5e3d04101c1e5c3b1d69710b0574171cc02fddc4b23d1b2813e75f35a30b1"}, - {file = "yarl-1.8.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7a66c506ec67eb3159eea5096acd05f5e788ceec7b96087d30c7d2865a243918"}, - {file = "yarl-1.8.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2b4fa2606adf392051d990c3b3877d768771adc3faf2e117b9de7eb977741229"}, - {file = "yarl-1.8.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e21fb44e1eff06dd6ef971d4bdc611807d6bd3691223d9c01a18cec3677939e"}, - {file = "yarl-1.8.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:93202666046d9edadfe9f2e7bf5e0782ea0d497b6d63da322e541665d65a044e"}, - {file = "yarl-1.8.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fc77086ce244453e074e445104f0ecb27530d6fd3a46698e33f6c38951d5a0f1"}, - {file = "yarl-1.8.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64dd68a92cab699a233641f5929a40f02a4ede8c009068ca8aa1fe87b8c20ae3"}, - {file = "yarl-1.8.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1b372aad2b5f81db66ee7ec085cbad72c4da660d994e8e590c997e9b01e44901"}, - {file = "yarl-1.8.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:e6f3515aafe0209dd17fb9bdd3b4e892963370b3de781f53e1746a521fb39fc0"}, - {file = "yarl-1.8.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:dfef7350ee369197106805e193d420b75467b6cceac646ea5ed3049fcc950a05"}, - {file = "yarl-1.8.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:728be34f70a190566d20aa13dc1f01dc44b6aa74580e10a3fb159691bc76909d"}, - {file = "yarl-1.8.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:ff205b58dc2929191f68162633d5e10e8044398d7a45265f90a0f1d51f85f72c"}, - {file = "yarl-1.8.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:baf211dcad448a87a0d9047dc8282d7de59473ade7d7fdf22150b1d23859f946"}, - {file = "yarl-1.8.2-cp38-cp38-win32.whl", hash = "sha256:272b4f1599f1b621bf2aabe4e5b54f39a933971f4e7c9aa311d6d7dc06965165"}, - {file = "yarl-1.8.2-cp38-cp38-win_amd64.whl", hash = "sha256:326dd1d3caf910cd26a26ccbfb84c03b608ba32499b5d6eeb09252c920bcbe4f"}, - {file = "yarl-1.8.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:f8ca8ad414c85bbc50f49c0a106f951613dfa5f948ab69c10ce9b128d368baf8"}, - {file = "yarl-1.8.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:418857f837347e8aaef682679f41e36c24250097f9e2f315d39bae3a99a34cbf"}, - {file = "yarl-1.8.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ae0eec05ab49e91a78700761777f284c2df119376e391db42c38ab46fd662b77"}, - {file = "yarl-1.8.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:009a028127e0a1755c38b03244c0bea9d5565630db9c4cf9572496e947137a87"}, - {file = "yarl-1.8.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3edac5d74bb3209c418805bda77f973117836e1de7c000e9755e572c1f7850d0"}, - {file = 
"yarl-1.8.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:da65c3f263729e47351261351b8679c6429151ef9649bba08ef2528ff2c423b2"}, - {file = "yarl-1.8.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ef8fb25e52663a1c85d608f6dd72e19bd390e2ecaf29c17fb08f730226e3a08"}, - {file = "yarl-1.8.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bcd7bb1e5c45274af9a1dd7494d3c52b2be5e6bd8d7e49c612705fd45420b12d"}, - {file = "yarl-1.8.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:44ceac0450e648de86da8e42674f9b7077d763ea80c8ceb9d1c3e41f0f0a9951"}, - {file = "yarl-1.8.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:97209cc91189b48e7cfe777237c04af8e7cc51eb369004e061809bcdf4e55220"}, - {file = "yarl-1.8.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:48dd18adcf98ea9cd721a25313aef49d70d413a999d7d89df44f469edfb38a06"}, - {file = "yarl-1.8.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:e59399dda559688461762800d7fb34d9e8a6a7444fd76ec33220a926c8be1516"}, - {file = "yarl-1.8.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d617c241c8c3ad5c4e78a08429fa49e4b04bedfc507b34b4d8dceb83b4af3588"}, - {file = "yarl-1.8.2-cp39-cp39-win32.whl", hash = "sha256:cb6d48d80a41f68de41212f3dfd1a9d9898d7841c8f7ce6696cf2fd9cb57ef83"}, - {file = "yarl-1.8.2-cp39-cp39-win_amd64.whl", hash = "sha256:6604711362f2dbf7160df21c416f81fac0de6dbcf0b5445a2ef25478ecc4c778"}, - {file = "yarl-1.8.2.tar.gz", hash = "sha256:49d43402c6e3013ad0978602bf6bf5328535c48d192304b91b97a3c6790b1562"}, -] - -[package.dependencies] -idna = ">=2.0" -multidict = ">=4.0" - -[[package]] -name = "zipp" -version = "3.15.0" -description = "Backport of pathlib-compatible object wrapper for zip files" -optional = false -python-versions = ">=3.7" -files = [ - {file = "zipp-3.15.0-py3-none-any.whl", hash = "sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556"}, - {file = "zipp-3.15.0.tar.gz", hash = "sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b"}, -] - -[package.extras] -docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] - -[metadata] -lock-version = "2.0" -python-versions = "^3.8" -content-hash = "400ac506bf8f14333fa2e073fd39cc765a1941aab895d5ed6f9dd264146fc726" diff --git a/pretrained/.gitignore b/pretrained/.gitignore deleted file mode 100644 index d6b7ef32c..000000000 --- a/pretrained/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -* -!.gitignore diff --git a/pretrained_v2/.gitignore b/pretrained_v2/.gitignore deleted file mode 100644 index d6b7ef32c..000000000 --- a/pretrained_v2/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -* -!.gitignore diff --git a/pyproject.toml b/pyproject.toml deleted file mode 100644 index 5b1525835..000000000 --- a/pyproject.toml +++ /dev/null @@ -1,62 +0,0 @@ -[tool.poetry] -name = "rvc-beta" -version = "0.1.0" -description = "" -authors = ["lj1995"] -license = "MIT" - -[tool.poetry.dependencies] -python = "^3.8" -torch = "^2.0.0" -torchaudio = "^2.0.1" -Cython = "^0.29.34" -gradio = "^3.34.0" -future = "^0.18.3" -pydub = "^0.25.1" -soundfile = "^0.12.1" -ffmpeg-python = "^0.2.0" -tensorboardX = "^2.6" -functorch = "^2.0.0" -fairseq = "^0.12.2" -faiss-cpu = 
"^1.7.2" -Jinja2 = "^3.1.2" -json5 = "^0.9.11" -librosa = "0.9.2" -llvmlite = "0.39.0" -Markdown = "^3.4.3" -matplotlib = "^3.7.1" -matplotlib-inline = "^0.1.6" -numba = "0.56.4" -numpy = "1.23.5" -scipy = "1.9.3" -praat-parselmouth = "^0.4.3" -Pillow = "9.3.0" -pyworld = "^0.3.2" -resampy = "^0.4.2" -scikit-learn = "^1.2.2" -starlette = "^0.27.0" -tensorboard = "^2.12.1" -tensorboard-data-server = "^0.7.0" -tensorboard-plugin-wit = "^1.8.1" -torchgen = "^0.0.1" -tqdm = "^4.65.0" -tornado = "^6.3" -Werkzeug = "^2.2.3" -uc-micro-py = "^1.0.1" -sympy = "^1.11.1" -tabulate = "^0.9.0" -PyYAML = "^6.0" -pyasn1 = "^0.4.8" -pyasn1-modules = "^0.2.8" -fsspec = "^2023.3.0" -absl-py = "^1.4.0" -audioread = "^3.0.0" -uvicorn = "^0.21.1" -colorama = "^0.4.6" -torchcrepe = "0.0.20" - -[tool.poetry.dev-dependencies] - -[build-system] -requires = ["poetry-core>=1.0.0"] -build-backend = "poetry.core.masonry.api" diff --git a/requirements-gpu.txt b/requirements-gpu.txt deleted file mode 100644 index dc86f099e..000000000 --- a/requirements-gpu.txt +++ /dev/null @@ -1,46 +0,0 @@ -tornado>=6.1 -setuptools -pydantic -wheel -google-auth-oauthlib -httpx==0.23.0 -fairseq==0.12.2 -tensorboardX -faiss_cpu==1.7.3 -ffmpeg_python==0.2.0 -ffmpy==0.3.1 -websockets>=10.0 -gradio==3.34.0 -librosa==0.9.1 -elevenlabs -gTTS==2.3.2 -wget -matplotlib==3.7.2 -mega.py==1.0.8 -gdown -noisereduce==2.0.1 -unidecode -numba==0.57.1 -numpy==1.23.5 -onnxruntime -onnxruntime_gpu==1.15.1 -opencv_python==4.8.0.74 -opencv_python_headless==4.8.0.74 -pandas==2.0.3 -praat-parselmouth==0.4.2 -PySimpleGUI==4.60.5 -pyworld==0.3.3 -requests==2.31.0 -resampy==0.4.2 -scikit_learn==1.3.0 -scipy==1.11.1 -yt_dlp==2023.7.6 -pyngrok==4.1.12 -sounddevice==0.4.6 -soundfile==0.12.1 -tb_nightly==2.14.0a20230803 -tensorboard==2.13.0 -torchcrepe -torch_directml==0.2.0.dev230426 -torchgen>=0.0.1 -tqdm==4.65.0 diff --git a/requirements-win-for-realtime_vc_gui.txt b/requirements-win-for-realtime_vc_gui.txt deleted file mode 100644 index 9d6935bfe..000000000 --- a/requirements-win-for-realtime_vc_gui.txt +++ /dev/null @@ -1,29 +0,0 @@ -#1.Install torch from pytorch.org: -#torch 2.0 with cuda 11.8 -#pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118 -#torch 1.11.0 with cuda 11.3 -#pip install torch==1.11.0+cu113 torchvision==0.12.0+cu113 torchaudio==0.11.0 --extra-index-url https://download.pytorch.org/whl/cu113 -einops -fairseq -flask -flask_cors -gin -gin_config -librosa -local_attention -matplotlib -praat-parselmouth -pyworld -PyYAML -resampy -scikit_learn -scipy -SoundFile -tensorboard -tqdm -wave -PySimpleGUI -sounddevice -gradio -noisereduce -torchcrepe==0.0.20 diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index c12d5f6e2..000000000 --- a/requirements.txt +++ /dev/null @@ -1,49 +0,0 @@ -tornado>=6.1 -setuptools -pydantic -wheel -google-auth-oauthlib -httpx==0.23.0 -fairseq==0.12.2 -tensorboardX -faiss_cpu==1.7.3 -ffmpeg_python==0.2.0 -ffmpy==0.3.1 -websockets>=10.0 -gradio==3.34.0 -librosa==0.9.1 -elevenlabs -gTTS==2.3.2 -wget -matplotlib==3.7.2 -mega.py==1.0.8 -gdown -noisereduce==2.0.1 -unidecode -numba==0.57.1 -numpy==1.23.5 -onnxruntime -onnxruntime_gpu==1.15.1 -opencv_python==4.8.0.74 -opencv_python_headless==4.8.0.74 -pandas==2.0.3 -praat-parselmouth==0.4.2 -PySimpleGUI==4.60.5 -pyworld==0.3.3 -requests==2.31.0 -resampy==0.4.2 -scikit_learn==1.3.0 -scipy==1.11.1 -yt_dlp==2023.7.6 -pyngrok==4.1.12 -sounddevice==0.4.6 -soundfile==0.12.1 -tensorboard==2.13.0 -tb_nightly==2.14.0a20230803 
-torch==2.0.0 -torchcrepe==0.0.21 -torch_directml==0.2.0.dev230426 -torchaudio==2.0.1 -torchvision==0.15.1 -torchgen>=0.0.1 -tqdm==4.65.0 diff --git a/rmvpe.py b/rmvpe.py deleted file mode 100644 index 8ad0f4e2f..000000000 --- a/rmvpe.py +++ /dev/null @@ -1,434 +0,0 @@ -import sys, torch, numpy as np, traceback, pdb -import torch.nn as nn -from time import time as ttime -import torch.nn.functional as F - - -class BiGRU(nn.Module): - def __init__(self, input_features, hidden_features, num_layers): - super(BiGRU, self).__init__() - self.gru = nn.GRU( - input_features, - hidden_features, - num_layers=num_layers, - batch_first=True, - bidirectional=True, - ) - - def forward(self, x): - return self.gru(x)[0] - - -class ConvBlockRes(nn.Module): - def __init__(self, in_channels, out_channels, momentum=0.01): - super(ConvBlockRes, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - in_channels=in_channels, - out_channels=out_channels, - kernel_size=(3, 3), - stride=(1, 1), - padding=(1, 1), - bias=False, - ), - nn.BatchNorm2d(out_channels, momentum=momentum), - nn.ReLU(), - nn.Conv2d( - in_channels=out_channels, - out_channels=out_channels, - kernel_size=(3, 3), - stride=(1, 1), - padding=(1, 1), - bias=False, - ), - nn.BatchNorm2d(out_channels, momentum=momentum), - nn.ReLU(), - ) - if in_channels != out_channels: - self.shortcut = nn.Conv2d(in_channels, out_channels, (1, 1)) - self.is_shortcut = True - else: - self.is_shortcut = False - - def forward(self, x): - if self.is_shortcut: - return self.conv(x) + self.shortcut(x) - else: - return self.conv(x) + x - - -class Encoder(nn.Module): - def __init__( - self, - in_channels, - in_size, - n_encoders, - kernel_size, - n_blocks, - out_channels=16, - momentum=0.01, - ): - super(Encoder, self).__init__() - self.n_encoders = n_encoders - self.bn = nn.BatchNorm2d(in_channels, momentum=momentum) - self.layers = nn.ModuleList() - self.latent_channels = [] - for i in range(self.n_encoders): - self.layers.append( - ResEncoderBlock( - in_channels, out_channels, kernel_size, n_blocks, momentum=momentum - ) - ) - self.latent_channels.append([out_channels, in_size]) - in_channels = out_channels - out_channels *= 2 - in_size //= 2 - self.out_size = in_size - self.out_channel = out_channels - - def forward(self, x): - concat_tensors = [] - x = self.bn(x) - for i in range(self.n_encoders): - _, x = self.layers[i](x) - concat_tensors.append(_) - return x, concat_tensors - - -class ResEncoderBlock(nn.Module): - def __init__( - self, in_channels, out_channels, kernel_size, n_blocks=1, momentum=0.01 - ): - super(ResEncoderBlock, self).__init__() - self.n_blocks = n_blocks - self.conv = nn.ModuleList() - self.conv.append(ConvBlockRes(in_channels, out_channels, momentum)) - for i in range(n_blocks - 1): - self.conv.append(ConvBlockRes(out_channels, out_channels, momentum)) - self.kernel_size = kernel_size - if self.kernel_size is not None: - self.pool = nn.AvgPool2d(kernel_size=kernel_size) - - def forward(self, x): - for i in range(self.n_blocks): - x = self.conv[i](x) - if self.kernel_size is not None: - return x, self.pool(x) - else: - return x - - -class Intermediate(nn.Module): # - def __init__(self, in_channels, out_channels, n_inters, n_blocks, momentum=0.01): - super(Intermediate, self).__init__() - self.n_inters = n_inters - self.layers = nn.ModuleList() - self.layers.append( - ResEncoderBlock(in_channels, out_channels, None, n_blocks, momentum) - ) - for i in range(self.n_inters - 1): - self.layers.append( - ResEncoderBlock(out_channels, 
out_channels, None, n_blocks, momentum) - ) - - def forward(self, x): - for i in range(self.n_inters): - x = self.layers[i](x) - return x - - -class ResDecoderBlock(nn.Module): - def __init__(self, in_channels, out_channels, stride, n_blocks=1, momentum=0.01): - super(ResDecoderBlock, self).__init__() - out_padding = (0, 1) if stride == (1, 2) else (1, 1) - self.n_blocks = n_blocks - self.conv1 = nn.Sequential( - nn.ConvTranspose2d( - in_channels=in_channels, - out_channels=out_channels, - kernel_size=(3, 3), - stride=stride, - padding=(1, 1), - output_padding=out_padding, - bias=False, - ), - nn.BatchNorm2d(out_channels, momentum=momentum), - nn.ReLU(), - ) - self.conv2 = nn.ModuleList() - self.conv2.append(ConvBlockRes(out_channels * 2, out_channels, momentum)) - for i in range(n_blocks - 1): - self.conv2.append(ConvBlockRes(out_channels, out_channels, momentum)) - - def forward(self, x, concat_tensor): - x = self.conv1(x) - x = torch.cat((x, concat_tensor), dim=1) - for i in range(self.n_blocks): - x = self.conv2[i](x) - return x - - -class Decoder(nn.Module): - def __init__(self, in_channels, n_decoders, stride, n_blocks, momentum=0.01): - super(Decoder, self).__init__() - self.layers = nn.ModuleList() - self.n_decoders = n_decoders - for i in range(self.n_decoders): - out_channels = in_channels // 2 - self.layers.append( - ResDecoderBlock(in_channels, out_channels, stride, n_blocks, momentum) - ) - in_channels = out_channels - - def forward(self, x, concat_tensors): - for i in range(self.n_decoders): - x = self.layers[i](x, concat_tensors[-1 - i]) - return x - - -class DeepUnet(nn.Module): - def __init__( - self, - kernel_size, - n_blocks, - en_de_layers=5, - inter_layers=4, - in_channels=1, - en_out_channels=16, - ): - super(DeepUnet, self).__init__() - self.encoder = Encoder( - in_channels, 128, en_de_layers, kernel_size, n_blocks, en_out_channels - ) - self.intermediate = Intermediate( - self.encoder.out_channel // 2, - self.encoder.out_channel, - inter_layers, - n_blocks, - ) - self.decoder = Decoder( - self.encoder.out_channel, en_de_layers, kernel_size, n_blocks - ) - - def forward(self, x): - x, concat_tensors = self.encoder(x) - x = self.intermediate(x) - x = self.decoder(x, concat_tensors) - return x - - -class E2E(nn.Module): - def __init__( - self, - n_blocks, - n_gru, - kernel_size, - en_de_layers=5, - inter_layers=4, - in_channels=1, - en_out_channels=16, - ): - super(E2E, self).__init__() - self.unet = DeepUnet( - kernel_size, - n_blocks, - en_de_layers, - inter_layers, - in_channels, - en_out_channels, - ) - self.cnn = nn.Conv2d(en_out_channels, 3, (3, 3), padding=(1, 1)) - if n_gru: - self.fc = nn.Sequential( - BiGRU(3 * 128, 256, n_gru), - nn.Linear(512, 360), - nn.Dropout(0.25), - nn.Sigmoid(), - ) - else: - self.fc = nn.Sequential( - nn.Linear(3 * N_MELS, N_CLASS), nn.Dropout(0.25), nn.Sigmoid() - ) - - def forward(self, mel): - mel = mel.transpose(-1, -2).unsqueeze(1) - x = self.cnn(self.unet(mel)).transpose(1, 2).flatten(-2) - x = self.fc(x) - return x - - -from librosa.filters import mel - - -class MelSpectrogram(torch.nn.Module): - def __init__( - self, - is_half, - n_mel_channels, - sampling_rate, - win_length, - hop_length, - n_fft=None, - mel_fmin=0, - mel_fmax=None, - clamp=1e-5, - ): - super().__init__() - n_fft = win_length if n_fft is None else n_fft - self.hann_window = {} - mel_basis = mel( - sr=sampling_rate, - n_fft=n_fft, - n_mels=n_mel_channels, - fmin=mel_fmin, - fmax=mel_fmax, - htk=True, - ) - mel_basis = torch.from_numpy(mel_basis).float() - 
self.register_buffer("mel_basis", mel_basis) - self.n_fft = win_length if n_fft is None else n_fft - self.hop_length = hop_length - self.win_length = win_length - self.sampling_rate = sampling_rate - self.n_mel_channels = n_mel_channels - self.clamp = clamp - self.is_half = is_half - - def forward(self, audio, keyshift=0, speed=1, center=True): - factor = 2 ** (keyshift / 12) - n_fft_new = int(np.round(self.n_fft * factor)) - win_length_new = int(np.round(self.win_length * factor)) - hop_length_new = int(np.round(self.hop_length * speed)) - keyshift_key = str(keyshift) + "_" + str(audio.device) - if keyshift_key not in self.hann_window: - self.hann_window[keyshift_key] = torch.hann_window(win_length_new).to( - audio.device - ) - fft = torch.stft( - audio, - n_fft=n_fft_new, - hop_length=hop_length_new, - win_length=win_length_new, - window=self.hann_window[keyshift_key], - center=center, - return_complex=True, - ) - magnitude = torch.sqrt(fft.real.pow(2) + fft.imag.pow(2)) - if keyshift != 0: - size = self.n_fft // 2 + 1 - resize = magnitude.size(1) - if resize < size: - magnitude = F.pad(magnitude, (0, 0, 0, size - resize)) - magnitude = magnitude[:, :size, :] * self.win_length / win_length_new - mel_output = torch.matmul(self.mel_basis, magnitude) - if self.is_half == True: - mel_output = mel_output.half() - log_mel_spec = torch.log(torch.clamp(mel_output, min=self.clamp)) - return log_mel_spec - - -class RMVPE: - def __init__(self, model_path, is_half, device=None): - self.resample_kernel = {} - model = E2E(4, 1, (2, 2)) - ckpt = torch.load(model_path, map_location="cpu") - model.load_state_dict(ckpt) - model.eval() - if is_half == True: - model = model.half() - self.model = model - self.resample_kernel = {} - self.is_half = is_half - if device is None: - device = "cuda" if torch.cuda.is_available() else "cpu" - self.device = device - self.mel_extractor = MelSpectrogram( - is_half, 128, 16000, 1024, 160, None, 30, 8000 - ).to(device) - self.model = self.model.to(device) - cents_mapping = 20 * np.arange(360) + 1997.3794084376191 - self.cents_mapping = np.pad(cents_mapping, (4, 4)) # 368 - - def mel2hidden(self, mel): - with torch.no_grad(): - n_frames = mel.shape[-1] - mel = F.pad( - mel, (0, 32 * ((n_frames - 1) // 32 + 1) - n_frames), mode="reflect" - ) - hidden = self.model(mel) - return hidden[:, :n_frames] - - def decode(self, hidden, thred=0.03): - cents_pred = self.to_local_average_cents(hidden, thred=thred) - f0 = 10 * (2 ** (cents_pred / 1200)) - f0[f0 == 10] = 0 - # f0 = np.array([10 * (2 ** (cent_pred / 1200)) if cent_pred else 0 for cent_pred in cents_pred]) - return f0 - - def infer_from_audio(self, audio, thred=0.03): - audio = torch.from_numpy(audio).float().to(self.device).unsqueeze(0) - mel = self.mel_extractor(audio, center=True) - hidden = self.mel2hidden(mel) - hidden = hidden.squeeze(0).cpu().numpy() - if self.is_half == True: - hidden = hidden.astype("float32") - f0 = self.decode(hidden, thred=thred) - return f0 - - def infer_from_audio_with_pitch(self, audio, thred=0.03, f0_min=50, f0_max=1100): - audio = torch.from_numpy(audio).float().to(self.device).unsqueeze(0) - mel = self.mel_extractor(audio, center=True) - hidden = self.mel2hidden(mel) - hidden = hidden.squeeze(0).cpu().numpy() - if self.is_half == True: - hidden = hidden.astype("float32") - f0 = self.decode(hidden, thred=thred) - f0[(f0 < f0_min) | (f0 > f0_max)] = 0 - return f0 - - def to_local_average_cents(self, salience, thred=0.05): - # t0 = ttime() - center = np.argmax(salience, axis=1) # 帧长#index - 
salience = np.pad(salience, ((0, 0), (4, 4))) # 帧长,368 - # t1 = ttime() - center += 4 - todo_salience = [] - todo_cents_mapping = [] - starts = center - 4 - ends = center + 5 - for idx in range(salience.shape[0]): - todo_salience.append(salience[:, starts[idx] : ends[idx]][idx]) - todo_cents_mapping.append(self.cents_mapping[starts[idx] : ends[idx]]) - # t2 = ttime() - todo_salience = np.array(todo_salience) # 帧长,9 - todo_cents_mapping = np.array(todo_cents_mapping) # 帧长,9 - product_sum = np.sum(todo_salience * todo_cents_mapping, 1) - weight_sum = np.sum(todo_salience, 1) # 帧长 - devided = product_sum / weight_sum # 帧长 - # t3 = ttime() - maxx = np.max(salience, axis=1) # 帧长 - devided[maxx <= thred] = 0 - # t4 = ttime() - # print("decode:%s\t%s\t%s\t%s" % (t1 - t0, t2 - t1, t3 - t2, t4 - t3)) - return devided - - -# if __name__ == '__main__': -# audio, sampling_rate = sf.read("卢本伟语录~1.wav") -# if len(audio.shape) > 1: -# audio = librosa.to_mono(audio.transpose(1, 0)) -# audio_bak = audio.copy() -# if sampling_rate != 16000: -# audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000) -# model_path = "/bili-coeus/jupyter/jupyterhub-liujing04/vits_ch/test-RMVPE/weights/rmvpe_llc_half.pt" -# thred = 0.03 # 0.01 -# device = 'cuda' if torch.cuda.is_available() else 'cpu' -# rmvpe = RMVPE(model_path,is_half=False, device=device) -# t0=ttime() -# f0 = rmvpe.infer_from_audio(audio, thred=thred) -# f0 = rmvpe.infer_from_audio(audio, thred=thred) -# f0 = rmvpe.infer_from_audio(audio, thred=thred) -# f0 = rmvpe.infer_from_audio(audio, thred=thred) -# f0 = rmvpe.infer_from_audio(audio, thred=thred) -# t1=ttime() -# print(f0.shape,t1-t0) diff --git a/run.sh b/run.sh deleted file mode 100644 index 61169ba0f..000000000 --- a/run.sh +++ /dev/null @@ -1,153 +0,0 @@ -#!/bin/bash - -# Define common paths for Homebrew -BREW_PATHS=( - "/usr/local/bin" - "/opt/homebrew/bin" -) - -if [[ "$(uname)" == "Darwin" ]]; then - # macOS specific env: - export PYTORCH_ENABLE_MPS_FALLBACK=1 - export PYTORCH_MPS_HIGH_WATERMARK_RATIO=0.0 -elif [[ "$(uname)" != "Linux" ]]; then - echo "Unsupported operating system." - exit 1 -fi - -requirements_file="requirements.txt" - -# Function to add a path to PATH -add_to_path() { - echo "Homebrew found in $1, which is not in your PATH." - read -p "Do you want to add this path to your PATH? (y/n) " -n 1 -r - echo - if [[ $REPLY =~ ^[Yy]$ ]]; then - echo "Adding $1 to PATH..." - - # Detect the shell and choose the right profile file - local shell_profile - if [[ $SHELL == *"/bash"* ]]; then - shell_profile="$HOME/.bashrc" - [[ ! -f "$shell_profile" ]] && shell_profile="$HOME/.bash_profile" - elif [[ $SHELL == *"/zsh"* ]]; then - shell_profile="$HOME/.zshrc" - else - echo "Unsupported shell. Please add the following line to your shell profile file manually:" - echo "export PATH=\"$PATH:$1\"" - return - fi - - # Add the export line to the shell profile file - echo "export PATH=\"$PATH:$1\"" >> "$shell_profile" - - # Source the shell profile file - source "$shell_profile" - - # Verify that the new PATH includes Homebrew - if ! command -v brew &> /dev/null; then - echo "Failed to add Homebrew to the PATH." - fi - fi -} - -# Check if Homebrew is in PATH -if command -v brew &> /dev/null; then - echo "Homebrew is already in your PATH." -else - # If not, check common paths for Homebrew - echo "Homebrew not found in PATH. Checking common paths..." 
- for path in "${BREW_PATHS[@]}"; do - if [[ -x "$path/brew" ]]; then - add_to_path "$path" - break - fi - done -fi - -# Check again if Homebrew is in PATH -if ! command -v brew &> /dev/null; then - echo "Homebrew still not found. Attempting to install..." - /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" - - # Check again if Homebrew is in PATH - if ! command -v brew &> /dev/null; then - echo "Homebrew not found in PATH even after installation. Checking common paths again..." - for path in "${BREW_PATHS[@]}"; do - if [[ -x "$path/brew" ]]; then - echo "Found post-install homebrew, adding to PATH...." - add_to_path "$path" - break - fi - done - fi -fi - -# Verifying if Homebrew has been installed successfully -if command -v brew &> /dev/null; then - echo "Homebrew installed successfully." -else - echo "Homebrew installation failed." - exit 1 -fi - -# Installing ffmpeg with Homebrew -if [[ "$(uname)" == "Darwin" ]]; then - echo "Installing ffmpeg..." - brew install ffmpeg -fi - -# Check if Python 3.8 is installed -if ! command -v python3.8 &> /dev/null; then - echo "Python 3.8 not found. Attempting to install..." - if [[ "$(uname)" == "Darwin" ]] && command -v brew &> /dev/null; then - brew install python@3.8 - elif [[ "$(uname)" == "Linux" ]] && command -v apt-get &> /dev/null; then - sudo apt-get update - sudo apt-get install python3.8 - else - echo "Please install Python 3.8 manually." - exit 1 - fi -fi - -# Check if required packages are installed and install them if not -if [ -f "${requirements_file}" ]; then - installed_packages=$(python3.8 -m pip list --format=freeze) - while IFS= read -r package; do - [[ "${package}" =~ ^#.* ]] && continue - package_name=$(echo "${package}" | sed 's/[<>=!].*//') - if ! echo "${installed_packages}" | grep -q "${package_name}"; then - echo "${package_name} not found. Attempting to install..." - python3.8 -m pip install --upgrade "${package}" - fi - done < "${requirements_file}" -else - echo "${requirements_file} not found. Please ensure the requirements file with required packages exists." - exit 1 -fi - -# Install onnxruntime package -echo "Installing onnxruntime..." -python3.8 -m pip install onnxruntime - -download_if_not_exists() { - local filename=$1 - local url=$2 - if [ ! -f "$filename" ]; then - echo "$filename does not exist, downloading..." - curl -# -L -o "$filename" "$url" - echo "Download finished." - else - echo "$filename already exists." 
- fi -} - -# Check and download hubert_base.pt -download_if_not_exists "hubert_base.pt" "https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/hubert_base.pt" - -# Check and download rmvpe.pt -download_if_not_exists "rmvpe.pt" "https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/rmvpe.pt" - -# Run the main script -python3.8 infer-web.py --pycmd python3.8 diff --git a/rvc_for_realtime.py b/rvc_for_realtime.py deleted file mode 100644 index 74af22b54..000000000 --- a/rvc_for_realtime.py +++ /dev/null @@ -1,297 +0,0 @@ -import faiss, torch, traceback, parselmouth, numpy as np, torchcrepe, torch.nn as nn, pyworld -from fairseq import checkpoint_utils -from lib.infer_pack.models import ( - SynthesizerTrnMs256NSFsid, - SynthesizerTrnMs256NSFsid_nono, - SynthesizerTrnMs768NSFsid, - SynthesizerTrnMs768NSFsid_nono, -) -import os, sys -from time import time as ttime -import torch.nn.functional as F -import scipy.signal as signal - -now_dir = os.getcwd() -sys.path.append(now_dir) -from config import Config -from multiprocessing import Manager as M - -mm = M() -config = Config() - - -class RVC: - def __init__( - self, key, pth_path, index_path, index_rate, n_cpu, inp_q, opt_q, device - ) -> None: - """ - 初始化 - """ - try: - global config - self.inp_q = inp_q - self.opt_q = opt_q - self.device = device - self.f0_up_key = key - self.time_step = 160 / 16000 * 1000 - self.f0_min = 50 - self.f0_max = 1100 - self.f0_mel_min = 1127 * np.log(1 + self.f0_min / 700) - self.f0_mel_max = 1127 * np.log(1 + self.f0_max / 700) - self.sr = 16000 - self.window = 160 - self.n_cpu = n_cpu - if index_rate != 0: - self.index = faiss.read_index(index_path) - self.big_npy = self.index.reconstruct_n(0, self.index.ntotal) - print("index search enabled") - self.index_rate = index_rate - models, _, _ = checkpoint_utils.load_model_ensemble_and_task( - ["hubert_base.pt"], - suffix="", - ) - hubert_model = models[0] - hubert_model = hubert_model.to(config.device) - if config.is_half: - hubert_model = hubert_model.half() - else: - hubert_model = hubert_model.float() - hubert_model.eval() - self.model = hubert_model - cpt = torch.load(pth_path, map_location="cpu") - self.tgt_sr = cpt["config"][-1] - cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] - self.if_f0 = cpt.get("f0", 1) - self.version = cpt.get("version", "v1") - if self.version == "v1": - if self.if_f0 == 1: - self.net_g = SynthesizerTrnMs256NSFsid( - *cpt["config"], is_half=config.is_half - ) - else: - self.net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"]) - elif self.version == "v2": - if self.if_f0 == 1: - self.net_g = SynthesizerTrnMs768NSFsid( - *cpt["config"], is_half=config.is_half - ) - else: - self.net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"]) - del self.net_g.enc_q - print(self.net_g.load_state_dict(cpt["weight"], strict=False)) - self.net_g.eval().to(device) - if config.is_half: - self.net_g = self.net_g.half() - else: - self.net_g = self.net_g.float() - self.is_half = config.is_half - except: - print(traceback.format_exc()) - - def get_f0_post(self, f0): - f0_min = self.f0_min - f0_max = self.f0_max - f0_mel_min = 1127 * np.log(1 + f0_min / 700) - f0_mel_max = 1127 * np.log(1 + f0_max / 700) - f0bak = f0.copy() - f0_mel = 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / ( - f0_mel_max - f0_mel_min - ) + 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > 255] = 255 - f0_coarse = np.rint(f0_mel).astype(np.int_) - return f0_coarse, f0bak - - def get_f0(self, x, f0_up_key, n_cpu, 
method="harvest"): - n_cpu = int(n_cpu) - if method == "crepe": - return self.get_f0_crepe(x, f0_up_key) - if method == "rmvpe": - return self.get_f0_rmvpe(x, f0_up_key) - if method == "pm": - p_len = x.shape[0] // 160 - f0 = ( - parselmouth.Sound(x, 16000) - .to_pitch_ac( - time_step=0.01, - voicing_threshold=0.6, - pitch_floor=50, - pitch_ceiling=1100, - ) - .selected_array["frequency"] - ) - - pad_size = (p_len - len(f0) + 1) // 2 - if pad_size > 0 or p_len - len(f0) - pad_size > 0: - print(pad_size, p_len - len(f0) - pad_size) - f0 = np.pad( - f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant" - ) - - f0 *= pow(2, f0_up_key / 12) - return self.get_f0_post(f0) - if n_cpu == 1: - f0, t = pyworld.harvest( - x.astype(np.double), - fs=16000, - f0_ceil=1100, - f0_floor=50, - frame_period=10, - ) - f0 = signal.medfilt(f0, 3) - f0 *= pow(2, f0_up_key / 12) - return self.get_f0_post(f0) - f0bak = np.zeros(x.shape[0] // 160, dtype=np.float64) - length = len(x) - part_length = int(length / n_cpu / 160) * 160 - ts = ttime() - res_f0 = mm.dict() - for idx in range(n_cpu): - tail = part_length * (idx + 1) + 320 - if idx == 0: - self.inp_q.put((idx, x[:tail], res_f0, n_cpu, ts)) - else: - self.inp_q.put( - (idx, x[part_length * idx - 320 : tail], res_f0, n_cpu, ts) - ) - while 1: - res_ts = self.opt_q.get() - if res_ts == ts: - break - f0s = [i[1] for i in sorted(res_f0.items(), key=lambda x: x[0])] - for idx, f0 in enumerate(f0s): - if idx == 0: - f0 = f0[:-3] - elif idx != n_cpu - 1: - f0 = f0[2:-3] - else: - f0 = f0[2:-1] - f0bak[ - part_length * idx // 160 : part_length * idx // 160 + f0.shape[0] - ] = f0 - f0bak = signal.medfilt(f0bak, 3) - f0bak *= pow(2, f0_up_key / 12) - return self.get_f0_post(f0bak) - - def get_f0_crepe(self, x, f0_up_key): - audio = torch.tensor(np.copy(x))[None].float() - f0, pd = torchcrepe.predict( - audio, - self.sr, - 160, - self.f0_min, - self.f0_max, - "full", - batch_size=512, - device=self.device, - return_periodicity=True, - ) - pd = torchcrepe.filter.median(pd, 3) - f0 = torchcrepe.filter.mean(f0, 3) - f0[pd < 0.1] = 0 - f0 = f0[0].cpu().numpy() - f0 *= pow(2, f0_up_key / 12) - return self.get_f0_post(f0) - - def get_f0_rmvpe(self, x, f0_up_key): - if hasattr(self, "model_rmvpe") == False: - from rmvpe import RMVPE - - print("loading rmvpe model") - self.model_rmvpe = RMVPE( - "rmvpe.pt", is_half=self.is_half, device=self.device - ) - # self.model_rmvpe = RMVPE("aug2_58000_half.pt", is_half=self.is_half, device=self.device) - f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03) - f0 *= pow(2, f0_up_key / 12) - return self.get_f0_post(f0) - - def infer( - self, - feats: torch.Tensor, - indata: np.ndarray, - rate1, - rate2, - cache_pitch, - cache_pitchf, - f0method, - ) -> np.ndarray: - feats = feats.view(1, -1) - if config.is_half: - feats = feats.half() - else: - feats = feats.float() - feats = feats.to(self.device) - t1 = ttime() - with torch.no_grad(): - padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False) - inputs = { - "source": feats, - "padding_mask": padding_mask, - "output_layer": 9 if self.version == "v1" else 12, - } - logits = self.model.extract_features(**inputs) - feats = ( - self.model.final_proj(logits[0]) if self.version == "v1" else logits[0] - ) - t2 = ttime() - try: - if hasattr(self, "index") and self.index_rate != 0: - leng_replace_head = int(rate1 * feats[0].shape[0]) - npy = feats[0][-leng_replace_head:].cpu().numpy().astype("float32") - score, ix = self.index.search(npy, k=8) - weight = np.square(1 / 
score) - weight /= weight.sum(axis=1, keepdims=True) - npy = np.sum(self.big_npy[ix] * np.expand_dims(weight, axis=2), axis=1) - if config.is_half: - npy = npy.astype("float16") - feats[0][-leng_replace_head:] = ( - torch.from_numpy(npy).unsqueeze(0).to(self.device) * self.index_rate - + (1 - self.index_rate) * feats[0][-leng_replace_head:] - ) - else: - print("index search FAIL or disabled") - except: - traceback.print_exc() - print("index search FAIL") - feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1) - t3 = ttime() - if self.if_f0 == 1: - pitch, pitchf = self.get_f0(indata, self.f0_up_key, self.n_cpu, f0method) - cache_pitch[:] = np.append(cache_pitch[pitch[:-1].shape[0] :], pitch[:-1]) - cache_pitchf[:] = np.append( - cache_pitchf[pitchf[:-1].shape[0] :], pitchf[:-1] - ) - p_len = min(feats.shape[1], 13000, cache_pitch.shape[0]) - else: - cache_pitch, cache_pitchf = None, None - p_len = min(feats.shape[1], 13000) - t4 = ttime() - feats = feats[:, :p_len, :] - if self.if_f0 == 1: - cache_pitch = cache_pitch[:p_len] - cache_pitchf = cache_pitchf[:p_len] - cache_pitch = torch.LongTensor(cache_pitch).unsqueeze(0).to(self.device) - cache_pitchf = torch.FloatTensor(cache_pitchf).unsqueeze(0).to(self.device) - p_len = torch.LongTensor([p_len]).to(self.device) - ii = 0 # sid - sid = torch.LongTensor([ii]).to(self.device) - with torch.no_grad(): - if self.if_f0 == 1: - infered_audio = ( - self.net_g.infer( - feats, p_len, cache_pitch, cache_pitchf, sid, rate2 - )[0][0, 0] - .data.cpu() - .float() - ) - else: - infered_audio = ( - self.net_g.infer(feats, p_len, sid, rate2)[0][0, 0] - .data.cpu() - .float() - ) - t5 = ttime() - print("time->fea-index-f0-model:", t2 - t1, t3 - t2, t4 - t3, t5 - t4) - return infered_audio diff --git a/slicer2.py b/slicer2.py deleted file mode 100644 index 5b29ee262..000000000 --- a/slicer2.py +++ /dev/null @@ -1,260 +0,0 @@ -import numpy as np - - -# This function is obtained from librosa. 
-def get_rms( - y, - frame_length=2048, - hop_length=512, - pad_mode="constant", -): - padding = (int(frame_length // 2), int(frame_length // 2)) - y = np.pad(y, padding, mode=pad_mode) - - axis = -1 - # put our new within-frame axis at the end for now - out_strides = y.strides + tuple([y.strides[axis]]) - # Reduce the shape on the framing axis - x_shape_trimmed = list(y.shape) - x_shape_trimmed[axis] -= frame_length - 1 - out_shape = tuple(x_shape_trimmed) + tuple([frame_length]) - xw = np.lib.stride_tricks.as_strided(y, shape=out_shape, strides=out_strides) - if axis < 0: - target_axis = axis - 1 - else: - target_axis = axis + 1 - xw = np.moveaxis(xw, -1, target_axis) - # Downsample along the target axis - slices = [slice(None)] * xw.ndim - slices[axis] = slice(0, None, hop_length) - x = xw[tuple(slices)] - - # Calculate power - power = np.mean(np.abs(x) ** 2, axis=-2, keepdims=True) - - return np.sqrt(power) - - -class Slicer: - def __init__( - self, - sr: int, - threshold: float = -40.0, - min_length: int = 5000, - min_interval: int = 300, - hop_size: int = 20, - max_sil_kept: int = 5000, - ): - if not min_length >= min_interval >= hop_size: - raise ValueError( - "The following condition must be satisfied: min_length >= min_interval >= hop_size" - ) - if not max_sil_kept >= hop_size: - raise ValueError( - "The following condition must be satisfied: max_sil_kept >= hop_size" - ) - min_interval = sr * min_interval / 1000 - self.threshold = 10 ** (threshold / 20.0) - self.hop_size = round(sr * hop_size / 1000) - self.win_size = min(round(min_interval), 4 * self.hop_size) - self.min_length = round(sr * min_length / 1000 / self.hop_size) - self.min_interval = round(min_interval / self.hop_size) - self.max_sil_kept = round(sr * max_sil_kept / 1000 / self.hop_size) - - def _apply_slice(self, waveform, begin, end): - if len(waveform.shape) > 1: - return waveform[ - :, begin * self.hop_size : min(waveform.shape[1], end * self.hop_size) - ] - else: - return waveform[ - begin * self.hop_size : min(waveform.shape[0], end * self.hop_size) - ] - - # @timeit - def slice(self, waveform): - if len(waveform.shape) > 1: - samples = waveform.mean(axis=0) - else: - samples = waveform - if samples.shape[0] <= self.min_length: - return [waveform] - rms_list = get_rms( - y=samples, frame_length=self.win_size, hop_length=self.hop_size - ).squeeze(0) - sil_tags = [] - silence_start = None - clip_start = 0 - for i, rms in enumerate(rms_list): - # Keep looping while frame is silent. - if rms < self.threshold: - # Record start of silent frames. - if silence_start is None: - silence_start = i - continue - # Keep looping while frame is not silent and silence start has not been recorded. - if silence_start is None: - continue - # Clear recorded silence start if interval is not enough or clip is too short - is_leading_silence = silence_start == 0 and i > self.max_sil_kept - need_slice_middle = ( - i - silence_start >= self.min_interval - and i - clip_start >= self.min_length - ) - if not is_leading_silence and not need_slice_middle: - silence_start = None - continue - # Need slicing. Record the range of silent frames to be removed. 
- if i - silence_start <= self.max_sil_kept: - pos = rms_list[silence_start : i + 1].argmin() + silence_start - if silence_start == 0: - sil_tags.append((0, pos)) - else: - sil_tags.append((pos, pos)) - clip_start = pos - elif i - silence_start <= self.max_sil_kept * 2: - pos = rms_list[ - i - self.max_sil_kept : silence_start + self.max_sil_kept + 1 - ].argmin() - pos += i - self.max_sil_kept - pos_l = ( - rms_list[ - silence_start : silence_start + self.max_sil_kept + 1 - ].argmin() - + silence_start - ) - pos_r = ( - rms_list[i - self.max_sil_kept : i + 1].argmin() - + i - - self.max_sil_kept - ) - if silence_start == 0: - sil_tags.append((0, pos_r)) - clip_start = pos_r - else: - sil_tags.append((min(pos_l, pos), max(pos_r, pos))) - clip_start = max(pos_r, pos) - else: - pos_l = ( - rms_list[ - silence_start : silence_start + self.max_sil_kept + 1 - ].argmin() - + silence_start - ) - pos_r = ( - rms_list[i - self.max_sil_kept : i + 1].argmin() - + i - - self.max_sil_kept - ) - if silence_start == 0: - sil_tags.append((0, pos_r)) - else: - sil_tags.append((pos_l, pos_r)) - clip_start = pos_r - silence_start = None - # Deal with trailing silence. - total_frames = rms_list.shape[0] - if ( - silence_start is not None - and total_frames - silence_start >= self.min_interval - ): - silence_end = min(total_frames, silence_start + self.max_sil_kept) - pos = rms_list[silence_start : silence_end + 1].argmin() + silence_start - sil_tags.append((pos, total_frames + 1)) - # Apply and return slices. - if len(sil_tags) == 0: - return [waveform] - else: - chunks = [] - if sil_tags[0][0] > 0: - chunks.append(self._apply_slice(waveform, 0, sil_tags[0][0])) - for i in range(len(sil_tags) - 1): - chunks.append( - self._apply_slice(waveform, sil_tags[i][1], sil_tags[i + 1][0]) - ) - if sil_tags[-1][1] < total_frames: - chunks.append( - self._apply_slice(waveform, sil_tags[-1][1], total_frames) - ) - return chunks - - -def main(): - import os.path - from argparse import ArgumentParser - - import librosa - import soundfile - - parser = ArgumentParser() - parser.add_argument("audio", type=str, help="The audio to be sliced") - parser.add_argument( - "--out", type=str, help="Output directory of the sliced audio clips" - ) - parser.add_argument( - "--db_thresh", - type=float, - required=False, - default=-40, - help="The dB threshold for silence detection", - ) - parser.add_argument( - "--min_length", - type=int, - required=False, - default=5000, - help="The minimum milliseconds required for each sliced audio clip", - ) - parser.add_argument( - "--min_interval", - type=int, - required=False, - default=300, - help="The minimum milliseconds for a silence part to be sliced", - ) - parser.add_argument( - "--hop_size", - type=int, - required=False, - default=10, - help="Frame length in milliseconds", - ) - parser.add_argument( - "--max_sil_kept", - type=int, - required=False, - default=500, - help="The maximum silence length kept around the sliced clip, presented in milliseconds", - ) - args = parser.parse_args() - out = args.out - if out is None: - out = os.path.dirname(os.path.abspath(args.audio)) - audio, sr = librosa.load(args.audio, sr=None, mono=False) - slicer = Slicer( - sr=sr, - threshold=args.db_thresh, - min_length=args.min_length, - min_interval=args.min_interval, - hop_size=args.hop_size, - max_sil_kept=args.max_sil_kept, - ) - chunks = slicer.slice(audio) - if not os.path.exists(out): - os.makedirs(out) - for i, chunk in enumerate(chunks): - if len(chunk.shape) > 1: - chunk = chunk.T - soundfile.write( - 
os.path.join( - out, - f"%s_%d.wav" - % (os.path.basename(args.audio).rsplit(".", maxsplit=1)[0], i), - ), - chunk, - sr, - ) - - -if __name__ == "__main__": - main() diff --git a/stftpitchshift b/stftpitchshift deleted file mode 100644 index 310e89c61..000000000 Binary files a/stftpitchshift and /dev/null differ diff --git a/stftpitchshift.exe b/stftpitchshift.exe deleted file mode 100644 index 39c73ad88..000000000 Binary files a/stftpitchshift.exe and /dev/null differ diff --git a/tensorlowest.py b/tensorlowest.py deleted file mode 100644 index eccd4dbf3..000000000 --- a/tensorlowest.py +++ /dev/null @@ -1,123 +0,0 @@ -from tensorboard.backend.event_processing import event_accumulator - -import os -from shutil import copy2 -from re import search as RSearch -import pandas as pd -from ast import literal_eval as LEval - -weights_dir = 'weights/' - -def find_biggest_tensorboard(tensordir): - try: - files = [f for f in os.listdir(tensordir) if f.endswith('.0')] - if not files: - print("No files with the '.0' extension found!") - return - - max_size = 0 - biggest_file = "" - - for file in files: - file_path = os.path.join(tensordir, file) - if os.path.isfile(file_path): - file_size = os.path.getsize(file_path) - if file_size > max_size: - max_size = file_size - biggest_file = file - - return biggest_file - - except FileNotFoundError: - print("Couldn't find your model!") - return - -def main(model_name, save_freq, lastmdls): - global lowestval_weight_dir, scl - - tensordir = os.path.join('logs', model_name) - lowestval_weight_dir = os.path.join(tensordir, "lowestvals") - - latest_file = find_biggest_tensorboard(tensordir) - - if latest_file is None: - print("Couldn't find a valid tensorboard file!") - return - - tfile = os.path.join(tensordir, latest_file) - - ea = event_accumulator.EventAccumulator(tfile, - size_guidance={ - event_accumulator.COMPRESSED_HISTOGRAMS: 500, - event_accumulator.IMAGES: 4, - event_accumulator.AUDIO: 4, - event_accumulator.SCALARS: 0, - event_accumulator.HISTOGRAMS: 1, - }) - - ea.Reload() - ea.Tags() - - scl = ea.Scalars('loss/g/total') - - listwstep = {} - - for val in scl: - if (val.step // save_freq) * save_freq in [val.step for val in scl]: - listwstep[float(val.value)] = (val.step // save_freq) * save_freq - - lowest_vals = sorted(listwstep.keys())[:lastmdls] - - sorted_dict = {value: step for value, step in listwstep.items() if value in lowest_vals} - - return sorted_dict - -def selectweights(model_name, file_dict, weights_dir, lowestval_weight_dir): - os.makedirs(lowestval_weight_dir, exist_ok=True) - logdir = [] - files = [] - lbldict = { - 'Values': {}, - 'Names': {} - } - weights_dir_path = os.path.join(weights_dir, "") - low_val_path = os.path.join(os.getcwd(), os.path.join(lowestval_weight_dir, "")) - - try: - file_dict = LEval(file_dict) - except Exception as e: - print(f"Error! {e}") - return f"Couldn't load tensorboard file! 
{e}" - - weights = [f for f in os.scandir(weights_dir)] - for key, value in file_dict.items(): - pattern = fr"^{model_name}_.*_s{value}\.pth$" - matching_weights = [f.name for f in weights if f.is_file() and RSearch(pattern, f.name)] - for weight in matching_weights: - source_path = weights_dir_path + weight - destination_path = os.path.join(lowestval_weight_dir, weight) - - copy2(source_path, destination_path) - - logdir.append(f"File = {weight} Value: {key}, Step: {value}") - - lbldict['Names'][weight] = weight - lbldict['Values'][weight] = key - - files.append(low_val_path + weight) - - print(f"File = {weight} Value: {key}, Step: {value}") - - yield ('\n'.join(logdir), files, pd.DataFrame(lbldict)) - - - return ''.join(logdir), files, pd.DataFrame(lbldict) - - -if __name__ == "__main__": - model = str(input("Enter the name of the model: ")) - sav_freq = int(input("Enter save frequency of the model: ")) - ds = main(model, sav_freq) - - if ds: selectweights(model, ds, weights_dir, lowestval_weight_dir) - \ No newline at end of file diff --git a/tools/dlmodels.bat b/tools/dlmodels.bat deleted file mode 100644 index 547f2aef8..000000000 --- a/tools/dlmodels.bat +++ /dev/null @@ -1,348 +0,0 @@ -@echo off && chcp 65001 - -echo working dir is %cd% -echo downloading requirement aria2 check. -echo= -dir /a:d/b | findstr "aria2" > flag.txt -findstr "aria2" flag.txt >nul -if %errorlevel% ==0 ( - echo aria2 checked. - echo= -) else ( - echo failed. please downloading aria2 from webpage! - echo unzip it and put in this directory! - timeout /T 5 - start https://github.com/aria2/aria2/releases/tag/release-1.36.0 - echo= - goto end -) - -echo envfiles checking start. -echo= - -for /f %%x in ('findstr /i /c:"aria2" "flag.txt"') do (set aria2=%%x)&goto endSch -:endSch - -set d32=f0D32k.pth -set d40=f0D40k.pth -set d48=f0D48k.pth -set g32=f0G32k.pth -set g40=f0G40k.pth -set g48=f0G48k.pth - -set d40v2=f0D40k.pth -set g40v2=f0G40k.pth - -set dld32=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D32k.pth -set dld40=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D40k.pth -set dld48=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D48k.pth -set dlg32=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G32k.pth -set dlg40=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G40k.pth -set dlg48=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G48k.pth - -set dld40v2=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0D40k.pth -set dlg40v2=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0G40k.pth - -set hp2_all=HP2_all_vocals.pth -set hp3_all=HP3_all_vocals.pth -set hp5_only=HP5_only_main_vocal.pth -set VR_DeEchoAggressive=VR-DeEchoAggressive.pth -set VR_DeEchoDeReverb=VR-DeEchoDeReverb.pth -set VR_DeEchoNormal=VR-DeEchoNormal.pth -set onnx_dereverb=vocals.onnx - -set dlhp2_all=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP2_all_vocals.pth -set dlhp3_all=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP3_all_vocals.pth -set dlhp5_only=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP5_only_main_vocal.pth -set dlVR_DeEchoAggressive=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/VR-DeEchoAggressive.pth -set 
dlVR_DeEchoDeReverb=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/VR-DeEchoDeReverb.pth -set dlVR_DeEchoNormal=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/VR-DeEchoNormal.pth -set dlonnx_dereverb=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/onnx_dereverb_By_FoxJoy/vocals.onnx - -set hb=hubert_base.pt - -set dlhb=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/hubert_base.pt - -echo dir check start. -echo= - -if exist "%~dp0pretrained" ( - echo dir .\pretrained checked. - ) else ( - echo failed. generating dir .\pretrained. - mkdir pretrained - ) -if exist "%~dp0pretrained_v2" ( - echo dir .\pretrained_v2 checked. - ) else ( - echo failed. generating dir .\pretrained_v2. - mkdir pretrained_v2 - ) -if exist "%~dp0uvr5_weights" ( - echo dir .\uvr5_weights checked. - ) else ( - echo failed. generating dir .\uvr5_weights. - mkdir uvr5_weights - ) -if exist "%~dp0uvr5_weights\onnx_dereverb_By_FoxJoy" ( - echo dir .\uvr5_weights\onnx_dereverb_By_FoxJoy checked. - ) else ( - echo failed. generating dir .\uvr5_weights\onnx_dereverb_By_FoxJoy. - mkdir uvr5_weights\onnx_dereverb_By_FoxJoy - ) - -echo= -echo dir check finished. - -echo= -echo required files check start. - -echo checking D32k.pth -if exist "%~dp0pretrained\D32k.pth" ( - echo D32k.pth in .\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D32k.pth -d %~dp0pretrained -o D32k.pth - if exist "%~dp0pretrained\D32k.pth" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking D40k.pth -if exist "%~dp0pretrained\D40k.pth" ( - echo D40k.pth in .\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D40k.pth -d %~dp0pretrained -o D40k.pth - if exist "%~dp0pretrained\D40k.pth" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking D40k.pth -if exist "%~dp0pretrained_v2\D40k.pth" ( - echo D40k.pth in .\pretrained_v2 checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/D40k.pth -d %~dp0pretrained_v2 -o D40k.pth - if exist "%~dp0pretrained_v2\D40k.pth" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking D48k.pth -if exist "%~dp0pretrained\D48k.pth" ( - echo D48k.pth in .\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D48k.pth -d %~dp0pretrained -o D48k.pth - if exist "%~dp0pretrained\D48k.pth" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking G32k.pth -if exist "%~dp0pretrained\G32k.pth" ( - echo G32k.pth in .\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. 
- %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G32k.pth -d %~dp0pretrained -o G32k.pth - if exist "%~dp0pretrained\G32k.pth" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking G40k.pth -if exist "%~dp0pretrained\G40k.pth" ( - echo G40k.pth in .\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G40k.pth -d %~dp0pretrained -o G40k.pth - if exist "%~dp0pretrained\G40k.pth" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking G40k.pth -if exist "%~dp0pretrained_v2\G40k.pth" ( - echo G40k.pth in .\pretrained_v2 checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/G40k.pth -d %~dp0pretrained_v2 -o G40k.pth - if exist "%~dp0pretrained_v2\G40k.pth" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking G48k.pth -if exist "%~dp0pretrained\G48k.pth" ( - echo G48k.pth in .\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G48k.pth -d %~dp0pretrained -o G48k.pth - if exist "%~dp0pretrained\G48k.pth" (echo download successful.) else (echo please try again! - echo=) - ) - -echo checking %d32% -if exist "%~dp0pretrained\%d32%" ( - echo %d32% in .\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dld32% -d %~dp0pretrained -o %d32% - if exist "%~dp0pretrained\%d32%" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking %d40% -if exist "%~dp0pretrained\%d40%" ( - echo %d40% in .\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dld40% -d %~dp0pretrained -o %d40% - if exist "%~dp0pretrained\%d40%" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking %d40v2% -if exist "%~dp0pretrained_v2\%d40v2%" ( - echo %d40v2% in .\pretrained_v2 checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dld40v2% -d %~dp0pretrained_v2 -o %d40v2% - if exist "%~dp0pretrained_v2\%d40v2%" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking %d48% -if exist "%~dp0pretrained\%d48%" ( - echo %d48% in .\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dld48% -d %~dp0pretrained -o %d48% - if exist "%~dp0pretrained\%d48%" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking %g32% -if exist "%~dp0pretrained\%g32%" ( - echo %g32% in .\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. 
- %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlg32% -d %~dp0pretrained -o %g32% - if exist "%~dp0pretrained\%g32%" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking %g40% -if exist "%~dp0pretrained\%g40%" ( - echo %g40% in .\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlg40% -d %~dp0pretrained -o %g40% - if exist "%~dp0pretrained\%g40%" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking %g40v2% -if exist "%~dp0pretrained_v2\%g40v2%" ( - echo %g40v2% in .\pretrained_v2 checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlg40v2% -d %~dp0pretrained_v2 -o %g40v2% - if exist "%~dp0pretrained_v2\%g40v2%" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking %g48% -if exist "%~dp0pretrained\%g48%" ( - echo %g48% in .\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlg48% -d %~dp0\pretrained -o %g48% - if exist "%~dp0pretrained\%g48%" (echo download successful.) else (echo please try again! - echo=) - ) - -echo checking %hp2_all% -if exist "%~dp0uvr5_weights\%hp2_all%" ( - echo %hp2_all% in .\uvr5_weights checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlhp2_all% -d %~dp0\uvr5_weights -o %hp2_all% - if exist "%~dp0uvr5_weights\%hp2_all%" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking %hp3_all% -if exist "%~dp0uvr5_weights\%hp3_all%" ( - echo %hp3_all% in .\uvr5_weights checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlhp3_all% -d %~dp0\uvr5_weights -o %hp3_all% - if exist "%~dp0uvr5_weights\%hp3_all%" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking %hp5_only% -if exist "%~dp0uvr5_weights\%hp5_only%" ( - echo %hp5_only% in .\uvr5_weights checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlhp5_only% -d %~dp0\uvr5_weights -o %hp5_only% - if exist "%~dp0uvr5_weights\%hp5_only%" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking %VR_DeEchoAggressive% -if exist "%~dp0uvr5_weights\%VR_DeEchoAggressive%" ( - echo %VR_DeEchoAggressive% in .\uvr5_weights checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlVR_DeEchoAggressive% -d %~dp0\uvr5_weights -o %VR_DeEchoAggressive% - if exist "%~dp0uvr5_weights\%VR_DeEchoAggressive%" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking %VR_DeEchoDeReverb% -if exist "%~dp0uvr5_weights\%VR_DeEchoDeReverb%" ( - echo %VR_DeEchoDeReverb% in .\uvr5_weights checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlVR_DeEchoDeReverb% -d %~dp0\uvr5_weights -o %VR_DeEchoDeReverb% - if exist "%~dp0uvr5_weights\%VR_DeEchoDeReverb%" (echo download successful.) else (echo please try again! 
- echo=) - ) -echo checking %VR_DeEchoNormal% -if exist "%~dp0uvr5_weights\%VR_DeEchoNormal%" ( - echo %VR_DeEchoNormal% in .\uvr5_weights checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlVR_DeEchoNormal% -d %~dp0\uvr5_weights -o %VR_DeEchoNormal% - if exist "%~dp0uvr5_weights\%VR_DeEchoNormal%" (echo download successful.) else (echo please try again! - echo=) - ) -echo checking %onnx_dereverb% -if exist "%~dp0uvr5_weights\onnx_dereverb_By_FoxJoy\%onnx_dereverb%" ( - echo %onnx_dereverb% in .\uvr5_weights\onnx_dereverb_By_FoxJoy checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlonnx_dereverb% -d %~dp0\uvr5_weights\onnx_dereverb_By_FoxJoy -o %onnx_dereverb% - if exist "%~dp0uvr5_weights\onnx_dereverb_By_FoxJoy\%onnx_dereverb%" (echo download successful.) else (echo please try again! - echo=) - ) - -echo checking %hb% -if exist "%~dp0%hb%" ( - echo %hb% in .\pretrained checked. - echo= - ) else ( - echo failed. starting download from huggingface. - %~dp0%aria2%\aria2c --console-log-level=error -c -x 16 -s 16 -k 1M %dlhb% -d %~dp0 -o %hb% - if exist "%~dp0%hb%" (echo download successful.) else (echo please try again! - echo=) - ) - -echo required files check finished. -echo envfiles check complete. -pause -:end -del flag.txt diff --git a/tools/dlmodels.sh b/tools/dlmodels.sh deleted file mode 100644 index 0ae7f7eb8..000000000 --- a/tools/dlmodels.sh +++ /dev/null @@ -1,546 +0,0 @@ -#!/bin/bash - -echo working dir is $(pwd) -echo downloading requirement aria2 check. - -if command -v aria2c &> /dev/null -then - echo "aria2c command found" -else - echo failed. 
please install aria2 - sleep 5 - exit 1 -fi - -d32="f0D32k.pth" -d40="f0D40k.pth" -d48="f0D48k.pth" -g32="f0G32k.pth" -g40="f0G40k.pth" -g48="f0G48k.pth" - -d40v2="f0D40k.pth" -g40v2="f0G40k.pth" - -dld32="https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D32k.pth" -dld40="https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D40k.pth" -dld48="https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D48k.pth" -dlg32="https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G32k.pth" -dlg40="https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G40k.pth" -dlg48="https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G48k.pth" - -dld40v2="https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0D40k.pth" -dlg40v2="https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0G40k.pth" - -hp2_all="HP2_all_vocals.pth" -hp3_all="HP3_all_vocals.pth" -hp5_only="HP5_only_main_vocal.pth" -VR_DeEchoAggressive="VR-DeEchoAggressive.pth" -VR_DeEchoDeReverb="VR-DeEchoDeReverb.pth" -VR_DeEchoNormal="VR-DeEchoNormal.pth" -onnx_dereverb="vocals.onnx" - -dlhp2_all="https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP2_all_vocals.pth" -dlhp3_all="https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP3_all_vocals.pth" -dlhp5_only="https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP5_only_main_vocal.pth" -dlVR_DeEchoAggressive="https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/VR-DeEchoAggressive.pth" -dlVR_DeEchoDeReverb="https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/VR-DeEchoDeReverb.pth" -dlVR_DeEchoNormal="https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/VR-DeEchoNormal.pth" -dlonnx_dereverb="https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/onnx_dereverb_By_FoxJoy/vocals.onnx" - -hb="hubert_base.pt" - -dlhb="https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/hubert_base.pt" - -echo dir check start. - -if [ -d "./pretrained" ]; then - echo dir ./pretrained checked. -else - echo failed. generating dir ./pretrained. - mkdir pretrained -fi - -if [ -d "./pretrained_v2" ]; then - echo dir ./pretrained_v2 checked. -else - echo failed. generating dir ./pretrained_v2. - mkdir pretrained_v2 -fi - -if [ -d "./uvr5_weights" ]; then - echo dir ./uvr5_weights checked. -else - echo failed. generating dir ./uvr5_weights. - mkdir uvr5_weights -fi - -if [ -d "./uvr5_weights/onnx_dereverb_By_FoxJoy" ]; then - echo dir ./uvr5_weights/onnx_dereverb_By_FoxJoy checked. -else - echo failed. generating dir ./uvr5_weights/onnx_dereverb_By_FoxJoy. - mkdir uvr5_weights/onnx_dereverb_By_FoxJoy -fi - -echo dir check finished. - -echo required files check start. - -echo checking D32k.pth -if [ -f "./pretrained/D32k.pth" ]; then - echo D32k.pth in ./pretrained checked. -else - echo failed. starting download from huggingface. - if command -v aria2c &> /dev/null; then - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D32k.pth -d ./pretrained -o D32k.pth - if [ -f "./pretrained/D32k.pth" ]; then - echo download successful. - else - echo please try again! - exit 1 - fi - else - echo aria2c command not found. Please install aria2c and try again. 
- exit 1 - fi -fi - -echo checking D40k.pth -if [ -f "./pretrained/D40k.pth" ]; then - echo D40k.pth in ./pretrained checked. -else - echo failed. starting download from huggingface. - if command -v aria2c &> /dev/null; then - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D40k.pth -d ./pretrained -o D40k.pth - if [ -f "./pretrained/D40k.pth" ]; then - echo download successful. - else - echo please try again! - exit 1 - fi - else - echo aria2c command not found. Please install aria2c and try again. - exit 1 - fi -fi - -echo checking D40k.pth -if [ -f "./pretrained_v2/D40k.pth" ]; then - echo D40k.pth in ./pretrained_v2 checked. -else - echo failed. starting download from huggingface. - if command -v aria2c &> /dev/null; then - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/D40k.pth -d ./pretrained_v2 -o D40k.pth - if [ -f "./pretrained_v2/D40k.pth" ]; then - echo download successful. - else - echo please try again! - exit 1 - fi - else - echo aria2c command not found. Please install aria2c and try again. - exit 1 - fi -fi - -echo checking D48k.pth -if [ -f "./pretrained/D48k.pth" ]; then - echo D48k.pth in ./pretrained checked. -else - echo failed. starting download from huggingface. - if command -v aria2c &> /dev/null; then - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D48k.pth -d ./pretrained -o D48k.pth - if [ -f "./pretrained/D48k.pth" ]; then - echo download successful. - else - echo please try again! - exit 1 - fi - else - echo aria2c command not found. Please install aria2c and try again. - exit 1 - fi -fi - -echo checking G32k.pth -if [ -f "./pretrained/G32k.pth" ]; then - echo G32k.pth in ./pretrained checked. -else - echo failed. starting download from huggingface. - if command -v aria2c &> /dev/null; then - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G32k.pth -d ./pretrained -o G32k.pth - if [ -f "./pretrained/G32k.pth" ]; then - echo download successful. - else - echo please try again! - exit 1 - fi - else - echo aria2c command not found. Please install aria2c and try again. - exit 1 - fi -fi - -echo checking G40k.pth -if [ -f "./pretrained/G40k.pth" ]; then - echo G40k.pth in ./pretrained checked. -else - echo failed. starting download from huggingface. - if command -v aria2c &> /dev/null; then - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G40k.pth -d ./pretrained -o G40k.pth - if [ -f "./pretrained/G40k.pth" ]; then - echo download successful. - else - echo please try again! - exit 1 - fi - else - echo aria2c command not found. Please install aria2c and try again. - exit 1 - fi -fi - -echo checking G40k.pth -if [ -f "./pretrained_v2/G40k.pth" ]; then - echo G40k.pth in ./pretrained_v2 checked. -else - echo failed. starting download from huggingface. - if command -v aria2c &> /dev/null; then - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/G40k.pth -d ./pretrained_v2 -o G40k.pth - if [ -f "./pretrained_v2/G40k.pth" ]; then - echo download successful. - else - echo please try again! - exit 1 - fi - else - echo aria2c command not found. Please install aria2c and try again. 
- exit 1 - fi -fi - -echo checking G48k.pth -if [ -f "./pretrained/G48k.pth" ]; then - echo G48k.pth in ./pretrained checked. -else - echo failed. starting download from huggingface. - if command -v aria2c &> /dev/null; then - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G48k.pth -d ./pretrained -o G48k.pth - if [ -f "./pretrained/G48k.pth" ]; then - echo download successful. - else - echo please try again! - exit 1 - fi - else - echo aria2c command not found. Please install aria2c and try again. - exit 1 - fi -fi - -echo checking $d32 -if [ -f "./pretrained/$d32" ]; then - echo $d32 in ./pretrained checked. -else - echo failed. starting download from huggingface. - if command -v aria2c &> /dev/null; then - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dld32 -d ./pretrained -o $d32 - if [ -f "./pretrained/$d32" ]; then - echo download successful. - else - echo please try again! - exit 1 - fi - else - echo aria2c command not found. Please install aria2c and try again. - exit 1 - fi -fi - -echo checking $d40 -if [ -f "./pretrained/$d40" ]; then - echo $d40 in ./pretrained checked. -else - echo failed. starting download from huggingface. - if command -v aria2c &> /dev/null; then - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dld40 -d ./pretrained -o $d40 - if [ -f "./pretrained/$d40" ]; then - echo download successful. - else - echo please try again! - exit 1 - fi - else - echo aria2c command not found. Please install aria2c and try again. - exit 1 - fi -fi - -echo checking $d40v2 -if [ -f "./pretrained_v2/$d40v2" ]; then - echo $d40v2 in ./pretrained_v2 checked. -else - echo failed. starting download from huggingface. - if command -v aria2c &> /dev/null; then - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dld40v2 -d ./pretrained_v2 -o $d40v2 - if [ -f "./pretrained_v2/$d40v2" ]; then - echo download successful. - else - echo please try again! - exit 1 - fi - else - echo aria2c command not found. Please install aria2c and try again. - exit 1 - fi -fi - -echo checking $d48 -if [ -f "./pretrained/$d48" ]; then - echo $d48 in ./pretrained checked. -else - echo failed. starting download from huggingface. - if command -v aria2c &> /dev/null; then - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dld48 -d ./pretrained -o $d48 - if [ -f "./pretrained/$d48" ]; then - echo download successful. - else - echo please try again! - exit 1 - fi - else - echo aria2c command not found. Please install aria2c and try again. - exit 1 - fi -fi - -echo checking $g32 -if [ -f "./pretrained/$g32" ]; then - echo $g32 in ./pretrained checked. -else - echo failed. starting download from huggingface. - if command -v aria2c &> /dev/null; then - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dlg32 -d ./pretrained -o $g32 - if [ -f "./pretrained/$g32" ]; then - echo download successful. - else - echo please try again! - exit 1 - fi - else - echo aria2c command not found. Please install aria2c and try again. - exit 1 - fi -fi - -echo checking $g40 -if [ -f "./pretrained/$g40" ]; then - echo $g40 in ./pretrained checked. -else - echo failed. starting download from huggingface. - if command -v aria2c &> /dev/null; then - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dlg40 -d ./pretrained -o $g40 - if [ -f "./pretrained/$g40" ]; then - echo download successful. - else - echo please try again! - exit 1 - fi - else - echo aria2c command not found. Please install aria2c and try again. 
- exit 1 - fi -fi - -echo checking $g40v2 -if [ -f "./pretrained_v2/$g40v2" ]; then - echo $g40v2 in ./pretrained_v2 checked. -else - echo failed. starting download from huggingface. - if command -v aria2c &> /dev/null; then - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dlg40v2 -d ./pretrained_v2 -o $g40v2 - if [ -f "./pretrained_v2/$g40v2" ]; then - echo download successful. - else - echo please try again! - exit 1 - fi - else - echo aria2c command not found. Please install aria2c and try again. - exit 1 - fi -fi - -echo checking $g48 -if [ -f "./pretrained/$g48" ]; then - echo $g48 in ./pretrained checked. -else - echo failed. starting download from huggingface. - if command -v aria2c &> /dev/null; then - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dlg48 -d ./pretrained -o $g48 - if [ -f "./pretrained/$g48" ]; then - echo download successful. - else - echo please try again! - exit 1 - fi - else - echo aria2c command not found. Please install aria2c and try again. - exit 1 - fi -fi - -echo checking $hp2_all -if [ -f "./uvr5_weights/$hp2_all" ]; then - echo $hp2_all in ./uvr5_weights checked. -else - echo failed. starting download from huggingface. - if command -v aria2c &> /dev/null; then - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dlhp2_all -d ./uvr5_weights -o $hp2_all - if [ -f "./uvr5_weights/$hp2_all" ]; then - echo download successful. - else - echo please try again! - exit 1 - fi - else - echo aria2c command not found. Please install aria2c and try again. - exit 1 - fi -fi - -echo checking $hp3_all -if [ -f "./uvr5_weights/$hp3_all" ]; then - echo $hp3_all in ./uvr5_weights checked. -else - echo failed. starting download from huggingface. - if command -v aria2c &> /dev/null; then - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dlhp3_all -d ./uvr5_weights -o $hp3_all - if [ -f "./uvr5_weights/$hp3_all" ]; then - echo download successful. - else - echo please try again! - exit 1 - fi - else - echo aria2c command not found. Please install aria2c and try again. - exit 1 - fi -fi - -echo checking $hp5_only -if [ -f "./uvr5_weights/$hp5_only" ]; then - echo $hp5_only in ./uvr5_weights checked. -else - echo failed. starting download from huggingface. - if command -v aria2c &> /dev/null; then - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dlhp5_only -d ./uvr5_weights -o $hp5_only - if [ -f "./uvr5_weights/$hp5_only" ]; then - echo download successful. - else - echo please try again! - exit 1 - fi - else - echo aria2c command not found. Please install aria2c and try again. - exit 1 - fi -fi - -echo checking $VR_DeEchoAggressive -if [ -f "./uvr5_weights/$VR_DeEchoAggressive" ]; then - echo $VR_DeEchoAggressive in ./uvr5_weights checked. -else - echo failed. starting download from huggingface. - if command -v aria2c &> /dev/null; then - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dlVR_DeEchoAggressive -d ./uvr5_weights -o $VR_DeEchoAggressive - if [ -f "./uvr5_weights/$VR_DeEchoAggressive" ]; then - echo download successful. - else - echo please try again! - exit 1 - fi - else - echo aria2c command not found. Please install aria2c and try again. - exit 1 - fi -fi - -echo checking $VR_DeEchoDeReverb -if [ -f "./uvr5_weights/$VR_DeEchoDeReverb" ]; then - echo $VR_DeEchoDeReverb in ./uvr5_weights checked. -else - echo failed. starting download from huggingface. 
- if command -v aria2c &> /dev/null; then - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dlVR_DeEchoDeReverb -d ./uvr5_weights -o $VR_DeEchoDeReverb - if [ -f "./uvr5_weights/$VR_DeEchoDeReverb" ]; then - echo download successful. - else - echo please try again! - exit 1 - fi - else - echo aria2c command not found. Please install aria2c and try again. - exit 1 - fi -fi - -echo checking $VR_DeEchoNormal -if [ -f "./uvr5_weights/$VR_DeEchoNormal" ]; then - echo $VR_DeEchoNormal in ./uvr5_weights checked. -else - echo failed. starting download from huggingface. - if command -v aria2c &> /dev/null; then - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dlVR_DeEchoNormal -d ./uvr5_weights -o $VR_DeEchoNormal - if [ -f "./uvr5_weights/$VR_DeEchoNormal" ]; then - echo download successful. - else - echo please try again! - exit 1 - fi - else - echo aria2c command not found. Please install aria2c and try again. - exit 1 - fi -fi - -echo checking $onnx_dereverb -if [ -f "./uvr5_weights/onnx_dereverb_By_FoxJoy/$onnx_dereverb" ]; then - echo $onnx_dereverb in ./uvr5_weights/onnx_dereverb_By_FoxJoy checked. -else - echo failed. starting download from huggingface. - if command -v aria2c &> /dev/null; then - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dlonnx_dereverb -d ./uvr5_weights/onnx_dereverb_By_FoxJoy -o $onnx_dereverb - if [ -f "./uvr5_weights/onnx_dereverb_By_FoxJoy/$onnx_dereverb" ]; then - echo download successful. - else - echo please try again! - exit 1 - fi - else - echo aria2c command not found. Please install aria2c and try again. - exit 1 - fi -fi - -echo checking $hb -if [ -f "./pretrained/$hb" ]; then - echo $hb in ./pretrained checked. -else - echo failed. starting download from huggingface. - if command -v aria2c &> /dev/null; then - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M $dlhb -d ./ -o $hb - if [ -f "./$hb" ]; then - echo download successful. - else - echo please try again! - exit 1 - fi - else - echo aria2c command not found. Please install aria2c and try again. - exit 1 - fi -fi - -echo required files check finished. -read -p "Press any key to continue..." 
-n1 -s diff --git a/tools/infer/infer-pm-index256.py b/tools/infer/infer-pm-index256.py deleted file mode 100644 index ead4dcb56..000000000 --- a/tools/infer/infer-pm-index256.py +++ /dev/null @@ -1,199 +0,0 @@ -""" - -对源特征进行检索 -""" -import torch, pdb, os, parselmouth - -os.environ["CUDA_VISIBLE_DEVICES"] = "0" -import numpy as np -import soundfile as sf - -# from models import SynthesizerTrn256#hifigan_nonsf -# from lib.infer_pack.models import SynthesizerTrn256NSF as SynthesizerTrn256#hifigan_nsf -from lib.infer_pack.models import ( - SynthesizerTrnMs256NSFsid as SynthesizerTrn256, -) # hifigan_nsf - -# from lib.infer_pack.models import SynthesizerTrnMs256NSFsid_sim as SynthesizerTrn256#hifigan_nsf -# from models import SynthesizerTrn256NSFsim as SynthesizerTrn256#hifigan_nsf -# from models import SynthesizerTrn256NSFsimFlow as SynthesizerTrn256#hifigan_nsf - - -from scipy.io import wavfile -from fairseq import checkpoint_utils - -# import pyworld -import librosa -import torch.nn.functional as F -import scipy.signal as signal - -# import torchcrepe -from time import time as ttime - -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") -model_path = r"E:\codes\py39\vits_vc_gpu_train\hubert_base.pt" # -print("load model(s) from {}".format(model_path)) -models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task( - [model_path], - suffix="", -) -model = models[0] -model = model.to(device) -model = model.half() -model.eval() - -# net_g = SynthesizerTrn256(1025,32,192,192,768,2,6,3,0.1,"1", [3,7,11],[[1,3,5], [1,3,5], [1,3,5]],[10,10,2,2],512,[16,16,4,4],183,256,is_half=True)#hifigan#512#256 -# net_g = SynthesizerTrn256(1025,32,192,192,768,2,6,3,0.1,"1", [3,7,11],[[1,3,5], [1,3,5], [1,3,5]],[10,10,2,2],512,[16,16,4,4],109,256,is_half=True)#hifigan#512#256 -net_g = SynthesizerTrn256( - 1025, - 32, - 192, - 192, - 768, - 2, - 6, - 3, - 0, - "1", - [3, 7, 11], - [[1, 3, 5], [1, 3, 5], [1, 3, 5]], - [10, 10, 2, 2], - 512, - [16, 16, 4, 4], - 183, - 256, - is_half=True, -) # hifigan#512#256#no_dropout -# net_g = SynthesizerTrn256(1025,32,192,192,768,2,3,3,0.1,"1", [3,7,11],[[1,3,5], [1,3,5], [1,3,5]],[10,10,2,2],512,[16,16,4,4],0)#ts3 -# net_g = SynthesizerTrn256(1025,32,192,192,768,2,6,3,0.1,"1", [3,7,11],[[1,3,5], [1,3,5], [1,3,5]],[10,10,2],512,[16,16,4],0)#hifigan-ps-sr -# -# net_g = SynthesizerTrn(1025, 32, 192, 192, 768, 2, 6, 3, 0.1, "1", [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]], [5,5], 512, [15,15], 0)#ms -# net_g = SynthesizerTrn(1025, 32, 192, 192, 768, 2, 6, 3, 0.1, "1", [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]], [10,10], 512, [16,16], 0)#idwt2 - -# weights=torch.load("infer/ft-mi_1k-noD.pt") -# weights=torch.load("infer/ft-mi-freeze-vocoder-flow-enc_q_1k.pt") -# weights=torch.load("infer/ft-mi-freeze-vocoder_true_1k.pt") -# weights=torch.load("infer/ft-mi-sim1k.pt") -weights = torch.load("infer/ft-mi-no_opt-no_dropout.pt") -print(net_g.load_state_dict(weights, strict=True)) - -net_g.eval().to(device) -net_g.half() - - -def get_f0(x, p_len, f0_up_key=0): - time_step = 160 / 16000 * 1000 - f0_min = 50 - f0_max = 1100 - f0_mel_min = 1127 * np.log(1 + f0_min / 700) - f0_mel_max = 1127 * np.log(1 + f0_max / 700) - - f0 = ( - parselmouth.Sound(x, 16000) - .to_pitch_ac( - time_step=time_step / 1000, - voicing_threshold=0.6, - pitch_floor=f0_min, - pitch_ceiling=f0_max, - ) - .selected_array["frequency"] - ) - - pad_size = (p_len - len(f0) + 1) // 2 - if pad_size > 0 or p_len - len(f0) - pad_size > 0: - f0 = np.pad(f0, [[pad_size, p_len - len(f0) - 
pad_size]], mode="constant") - f0 *= pow(2, f0_up_key / 12) - f0bak = f0.copy() - - f0_mel = 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / ( - f0_mel_max - f0_mel_min - ) + 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > 255] = 255 - # f0_mel[f0_mel > 188] = 188 - f0_coarse = np.rint(f0_mel).astype(np.int) - return f0_coarse, f0bak - - -import faiss - -index = faiss.read_index("infer/added_IVF512_Flat_mi_baseline_src_feat.index") -big_npy = np.load("infer/big_src_feature_mi.npy") -ta0 = ta1 = ta2 = 0 -for idx, name in enumerate( - [ - "冬之花clip1.wav", - ] -): ## - wav_path = "todo-songs/%s" % name # - f0_up_key = -2 # - audio, sampling_rate = sf.read(wav_path) - if len(audio.shape) > 1: - audio = librosa.to_mono(audio.transpose(1, 0)) - if sampling_rate != 16000: - audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000) - - feats = torch.from_numpy(audio).float() - if feats.dim() == 2: # double channels - feats = feats.mean(-1) - assert feats.dim() == 1, feats.dim() - feats = feats.view(1, -1) - padding_mask = torch.BoolTensor(feats.shape).fill_(False) - inputs = { - "source": feats.half().to(device), - "padding_mask": padding_mask.to(device), - "output_layer": 9, # layer 9 - } - if torch.cuda.is_available(): - torch.cuda.synchronize() - t0 = ttime() - with torch.no_grad(): - logits = model.extract_features(**inputs) - feats = model.final_proj(logits[0]) - - ####索引优化 - npy = feats[0].cpu().numpy().astype("float32") - D, I = index.search(npy, 1) - feats = ( - torch.from_numpy(big_npy[I.squeeze()].astype("float16")).unsqueeze(0).to(device) - ) - - feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1) - if torch.cuda.is_available(): - torch.cuda.synchronize() - t1 = ttime() - # p_len = min(feats.shape[1],10000,pitch.shape[0])#太大了爆显存 - p_len = min(feats.shape[1], 10000) # - pitch, pitchf = get_f0(audio, p_len, f0_up_key) - p_len = min(feats.shape[1], 10000, pitch.shape[0]) # 太大了爆显存 - if torch.cuda.is_available(): - torch.cuda.synchronize() - t2 = ttime() - feats = feats[:, :p_len, :] - pitch = pitch[:p_len] - pitchf = pitchf[:p_len] - p_len = torch.LongTensor([p_len]).to(device) - pitch = torch.LongTensor(pitch).unsqueeze(0).to(device) - sid = torch.LongTensor([0]).to(device) - pitchf = torch.FloatTensor(pitchf).unsqueeze(0).to(device) - with torch.no_grad(): - audio = ( - net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0] - .data.cpu() - .float() - .numpy() - ) # nsf - if torch.cuda.is_available(): - torch.cuda.synchronize() - t3 = ttime() - ta0 += t1 - t0 - ta1 += t2 - t1 - ta2 += t3 - t2 - # wavfile.write("ft-mi_1k-index256-noD-%s.wav"%name, 40000, audio)## - # wavfile.write("ft-mi-freeze-vocoder-flow-enc_q_1k-%s.wav"%name, 40000, audio)## - # wavfile.write("ft-mi-sim1k-%s.wav"%name, 40000, audio)## - wavfile.write("ft-mi-no_opt-no_dropout-%s.wav" % name, 40000, audio) ## - - -print(ta0, ta1, ta2) # diff --git a/tools/infer/train-index-v2.py b/tools/infer/train-index-v2.py deleted file mode 100644 index 67b6162b4..000000000 --- a/tools/infer/train-index-v2.py +++ /dev/null @@ -1,72 +0,0 @@ -""" -格式:直接cid为自带的index位;aid放不下了,通过字典来查,反正就5w个 -""" -import faiss, numpy as np, os -from sklearn.cluster import MiniBatchKMeans -import traceback -from multiprocessing import cpu_count - -# ###########如果是原始特征要先写save -n_cpu = 0 -if n_cpu == 0: - n_cpu = cpu_count() -inp_root = r"./logs/anz/3_feature768" -npys = [] -listdir_res = list(os.listdir(inp_root)) -for name in sorted(listdir_res): - phone = np.load("%s/%s" % 
(inp_root, name)) - npys.append(phone) -big_npy = np.concatenate(npys, 0) -big_npy_idx = np.arange(big_npy.shape[0]) -np.random.shuffle(big_npy_idx) -big_npy = big_npy[big_npy_idx] -print(big_npy.shape) # (6196072, 192)#fp32#4.43G -if big_npy.shape[0] > 2e5: - # if(1): - info = "Trying doing kmeans %s shape to 10k centers." % big_npy.shape[0] - print(info) - try: - big_npy = ( - MiniBatchKMeans( - n_clusters=10000, - verbose=True, - batch_size=256 * n_cpu, - compute_labels=False, - init="random", - ) - .fit(big_npy) - .cluster_centers_ - ) - except: - info = traceback.format_exc() - print(info) - -np.save("tools/infer/big_src_feature_mi.npy", big_npy) - -##################train+add -# big_npy=np.load("/bili-coeus/jupyter/jupyterhub-liujing04/vits_ch/inference_f0/big_src_feature_mi.npy") -n_ivf = min(int(16 * np.sqrt(big_npy.shape[0])), big_npy.shape[0] // 39) -index = faiss.index_factory(768, "IVF%s,Flat" % n_ivf) # mi -print("training") -index_ivf = faiss.extract_index_ivf(index) # -index_ivf.nprobe = 1 -index.train(big_npy) -faiss.write_index( - index, "tools/infer/trained_IVF%s_Flat_baseline_src_feat_v2.index" % (n_ivf) -) -print("adding") -batch_size_add = 8192 -for i in range(0, big_npy.shape[0], batch_size_add): - index.add(big_npy[i : i + batch_size_add]) -faiss.write_index( - index, "tools/infer/added_IVF%s_Flat_mi_baseline_src_feat.index" % (n_ivf) -) -""" -大小(都是FP32) -big_src_feature 2.95G - (3098036, 256) -big_emb 4.43G - (6196072, 192) -big_emb双倍是因为求特征要repeat后再加pitch - -""" diff --git a/tools/infer/train-index.py b/tools/infer/train-index.py deleted file mode 100644 index 04396a224..000000000 --- a/tools/infer/train-index.py +++ /dev/null @@ -1,36 +0,0 @@ -""" -格式:直接cid为自带的index位;aid放不下了,通过字典来查,反正就5w个 -""" -import faiss, numpy as np, os - -# ###########如果是原始特征要先写save -inp_root = r"E:\codes\py39\dataset\mi\2-co256" -npys = [] -for name in sorted(list(os.listdir(inp_root))): - phone = np.load("%s/%s" % (inp_root, name)) - npys.append(phone) -big_npy = np.concatenate(npys, 0) -print(big_npy.shape) # (6196072, 192)#fp32#4.43G -np.save("infer/big_src_feature_mi.npy", big_npy) - -##################train+add -# big_npy=np.load("/bili-coeus/jupyter/jupyterhub-liujing04/vits_ch/inference_f0/big_src_feature_mi.npy") -print(big_npy.shape) -index = faiss.index_factory(256, "IVF512,Flat") # mi -print("training") -index_ivf = faiss.extract_index_ivf(index) # -index_ivf.nprobe = 9 -index.train(big_npy) -faiss.write_index(index, "infer/trained_IVF512_Flat_mi_baseline_src_feat.index") -print("adding") -index.add(big_npy) -faiss.write_index(index, "infer/added_IVF512_Flat_mi_baseline_src_feat.index") -""" -大小(都是FP32) -big_src_feature 2.95G - (3098036, 256) -big_emb 4.43G - (6196072, 192) -big_emb双倍是因为求特征要repeat后再加pitch - -""" diff --git a/tools/infer/trans_weights.py b/tools/infer/trans_weights.py deleted file mode 100644 index da0759627..000000000 --- a/tools/infer/trans_weights.py +++ /dev/null @@ -1,16 +0,0 @@ -import torch, pdb - -# a=torch.load(r"E:\codes\py39\vits_vc_gpu_train\logs\ft-mi-suc\G_1000.pth")["model"]#sim_nsf# -# a=torch.load(r"E:\codes\py39\vits_vc_gpu_train\logs\ft-mi-freeze-vocoder-flow-enc_q\G_1000.pth")["model"]#sim_nsf# -# a=torch.load(r"E:\codes\py39\vits_vc_gpu_train\logs\ft-mi-freeze-vocoder\G_1000.pth")["model"]#sim_nsf# -# a=torch.load(r"E:\codes\py39\vits_vc_gpu_train\logs\ft-mi-test\G_1000.pth")["model"]#sim_nsf# -a = torch.load( - r"E:\codes\py39\vits_vc_gpu_train\logs\ft-mi-no_opt-no_dropout\G_1000.pth" -)[ - "model" -] # sim_nsf# -for key in a.keys(): - a[key] = 
a[key].half() -# torch.save(a,"ft-mi-freeze-vocoder_true_1k.pt")# -# torch.save(a,"ft-mi-sim1k.pt")# -torch.save(a, "ft-mi-no_opt-no_dropout.pt") # diff --git a/train/cmd.txt b/train/cmd.txt deleted file mode 100644 index e4b895e54..000000000 --- a/train/cmd.txt +++ /dev/null @@ -1 +0,0 @@ -python train_nsf_sim_cache_sid.py -c configs/mi_mix40k_nsf_co256_cs1sid_ms2048.json -m ft-mi \ No newline at end of file diff --git a/train/data_utils.py b/train/data_utils.py deleted file mode 100644 index 71c0eff18..000000000 --- a/train/data_utils.py +++ /dev/null @@ -1,512 +0,0 @@ -import os, traceback -import numpy as np -import torch -import torch.utils.data - -from mel_processing import spectrogram_torch -from utils import load_wav_to_torch, load_filepaths_and_text - - -class TextAudioLoaderMultiNSFsid(torch.utils.data.Dataset): - """ - 1) loads audio, text pairs - 2) normalizes text and converts them to sequences of integers - 3) computes spectrograms from audio files. - """ - - def __init__(self, audiopaths_and_text, hparams): - self.audiopaths_and_text = load_filepaths_and_text(audiopaths_and_text) - self.max_wav_value = hparams.max_wav_value - self.sampling_rate = hparams.sampling_rate - self.filter_length = hparams.filter_length - self.hop_length = hparams.hop_length - self.win_length = hparams.win_length - self.sampling_rate = hparams.sampling_rate - self.min_text_len = getattr(hparams, "min_text_len", 1) - self.max_text_len = getattr(hparams, "max_text_len", 5000) - self._filter() - - def _filter(self): - """ - Filter text & store spec lengths - """ - # Store spectrogram lengths for Bucketing - # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2) - # spec_length = wav_length // hop_length - audiopaths_and_text_new = [] - lengths = [] - for audiopath, text, pitch, pitchf, dv in self.audiopaths_and_text: - if self.min_text_len <= len(text) and len(text) <= self.max_text_len: - audiopaths_and_text_new.append([audiopath, text, pitch, pitchf, dv]) - lengths.append(os.path.getsize(audiopath) // (3 * self.hop_length)) - self.audiopaths_and_text = audiopaths_and_text_new - self.lengths = lengths - - def get_sid(self, sid): - sid = torch.LongTensor([int(sid)]) - return sid - - def get_audio_text_pair(self, audiopath_and_text): - # separate filename and text - file = audiopath_and_text[0] - phone = audiopath_and_text[1] - pitch = audiopath_and_text[2] - pitchf = audiopath_and_text[3] - dv = audiopath_and_text[4] - - phone, pitch, pitchf = self.get_labels(phone, pitch, pitchf) - spec, wav = self.get_audio(file) - dv = self.get_sid(dv) - - len_phone = phone.size()[0] - len_spec = spec.size()[-1] - # print(123,phone.shape,pitch.shape,spec.shape) - if len_phone != len_spec: - len_min = min(len_phone, len_spec) - # amor - len_wav = len_min * self.hop_length - - spec = spec[:, :len_min] - wav = wav[:, :len_wav] - - phone = phone[:len_min, :] - pitch = pitch[:len_min] - pitchf = pitchf[:len_min] - - return (spec, wav, phone, pitch, pitchf, dv) - - def get_labels(self, phone, pitch, pitchf): - phone = np.load(phone) - phone = np.repeat(phone, 2, axis=0) - pitch = np.load(pitch) - pitchf = np.load(pitchf) - n_num = min(phone.shape[0], 900) # DistributedBucketSampler - # print(234,phone.shape,pitch.shape) - phone = phone[:n_num, :] - pitch = pitch[:n_num] - pitchf = pitchf[:n_num] - phone = torch.FloatTensor(phone) - pitch = torch.LongTensor(pitch) - pitchf = torch.FloatTensor(pitchf) - return phone, pitch, pitchf - - def get_audio(self, filename): - audio, sampling_rate = 
load_wav_to_torch(filename) - if sampling_rate != self.sampling_rate: - raise ValueError( - "{} SR doesn't match target {} SR".format( - sampling_rate, self.sampling_rate - ) - ) - audio_norm = audio - # audio_norm = audio / self.max_wav_value - # audio_norm = audio / np.abs(audio).max() - - audio_norm = audio_norm.unsqueeze(0) - spec_filename = filename.replace(".wav", ".spec.pt") - if os.path.exists(spec_filename): - try: - spec = torch.load(spec_filename) - except: - print(spec_filename, traceback.format_exc()) - spec = spectrogram_torch( - audio_norm, - self.filter_length, - self.sampling_rate, - self.hop_length, - self.win_length, - center=False, - ) - spec = torch.squeeze(spec, 0) - torch.save(spec, spec_filename, _use_new_zipfile_serialization=False) - else: - spec = spectrogram_torch( - audio_norm, - self.filter_length, - self.sampling_rate, - self.hop_length, - self.win_length, - center=False, - ) - spec = torch.squeeze(spec, 0) - torch.save(spec, spec_filename, _use_new_zipfile_serialization=False) - return spec, audio_norm - - def __getitem__(self, index): - return self.get_audio_text_pair(self.audiopaths_and_text[index]) - - def __len__(self): - return len(self.audiopaths_and_text) - - -class TextAudioCollateMultiNSFsid: - """Zero-pads model inputs and targets""" - - def __init__(self, return_ids=False): - self.return_ids = return_ids - - def __call__(self, batch): - """Collate's training batch from normalized text and aduio - PARAMS - ------ - batch: [text_normalized, spec_normalized, wav_normalized] - """ - # Right zero-pad all one-hot text sequences to max input length - _, ids_sorted_decreasing = torch.sort( - torch.LongTensor([x[0].size(1) for x in batch]), dim=0, descending=True - ) - - max_spec_len = max([x[0].size(1) for x in batch]) - max_wave_len = max([x[1].size(1) for x in batch]) - spec_lengths = torch.LongTensor(len(batch)) - wave_lengths = torch.LongTensor(len(batch)) - spec_padded = torch.FloatTensor(len(batch), batch[0][0].size(0), max_spec_len) - wave_padded = torch.FloatTensor(len(batch), 1, max_wave_len) - spec_padded.zero_() - wave_padded.zero_() - - max_phone_len = max([x[2].size(0) for x in batch]) - phone_lengths = torch.LongTensor(len(batch)) - phone_padded = torch.FloatTensor( - len(batch), max_phone_len, batch[0][2].shape[1] - ) # (spec, wav, phone, pitch) - pitch_padded = torch.LongTensor(len(batch), max_phone_len) - pitchf_padded = torch.FloatTensor(len(batch), max_phone_len) - phone_padded.zero_() - pitch_padded.zero_() - pitchf_padded.zero_() - # dv = torch.FloatTensor(len(batch), 256)#gin=256 - sid = torch.LongTensor(len(batch)) - - for i in range(len(ids_sorted_decreasing)): - row = batch[ids_sorted_decreasing[i]] - - spec = row[0] - spec_padded[i, :, : spec.size(1)] = spec - spec_lengths[i] = spec.size(1) - - wave = row[1] - wave_padded[i, :, : wave.size(1)] = wave - wave_lengths[i] = wave.size(1) - - phone = row[2] - phone_padded[i, : phone.size(0), :] = phone - phone_lengths[i] = phone.size(0) - - pitch = row[3] - pitch_padded[i, : pitch.size(0)] = pitch - pitchf = row[4] - pitchf_padded[i, : pitchf.size(0)] = pitchf - - # dv[i] = row[5] - sid[i] = row[5] - - return ( - phone_padded, - phone_lengths, - pitch_padded, - pitchf_padded, - spec_padded, - spec_lengths, - wave_padded, - wave_lengths, - # dv - sid, - ) - - -class TextAudioLoader(torch.utils.data.Dataset): - """ - 1) loads audio, text pairs - 2) normalizes text and converts them to sequences of integers - 3) computes spectrograms from audio files. 
- """ - - def __init__(self, audiopaths_and_text, hparams): - self.audiopaths_and_text = load_filepaths_and_text(audiopaths_and_text) - self.max_wav_value = hparams.max_wav_value - self.sampling_rate = hparams.sampling_rate - self.filter_length = hparams.filter_length - self.hop_length = hparams.hop_length - self.win_length = hparams.win_length - self.sampling_rate = hparams.sampling_rate - self.min_text_len = getattr(hparams, "min_text_len", 1) - self.max_text_len = getattr(hparams, "max_text_len", 5000) - self._filter() - - def _filter(self): - """ - Filter text & store spec lengths - """ - # Store spectrogram lengths for Bucketing - # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2) - # spec_length = wav_length // hop_length - audiopaths_and_text_new = [] - lengths = [] - for audiopath, text, dv in self.audiopaths_and_text: - if self.min_text_len <= len(text) and len(text) <= self.max_text_len: - audiopaths_and_text_new.append([audiopath, text, dv]) - lengths.append(os.path.getsize(audiopath) // (3 * self.hop_length)) - self.audiopaths_and_text = audiopaths_and_text_new - self.lengths = lengths - - def get_sid(self, sid): - sid = torch.LongTensor([int(sid)]) - return sid - - def get_audio_text_pair(self, audiopath_and_text): - # separate filename and text - file = audiopath_and_text[0] - phone = audiopath_and_text[1] - dv = audiopath_and_text[2] - - phone = self.get_labels(phone) - spec, wav = self.get_audio(file) - dv = self.get_sid(dv) - - len_phone = phone.size()[0] - len_spec = spec.size()[-1] - if len_phone != len_spec: - len_min = min(len_phone, len_spec) - len_wav = len_min * self.hop_length - spec = spec[:, :len_min] - wav = wav[:, :len_wav] - phone = phone[:len_min, :] - return (spec, wav, phone, dv) - - def get_labels(self, phone): - phone = np.load(phone) - phone = np.repeat(phone, 2, axis=0) - n_num = min(phone.shape[0], 900) # DistributedBucketSampler - phone = phone[:n_num, :] - phone = torch.FloatTensor(phone) - return phone - - def get_audio(self, filename): - audio, sampling_rate = load_wav_to_torch(filename) - if sampling_rate != self.sampling_rate: - raise ValueError( - "{} SR doesn't match target {} SR".format( - sampling_rate, self.sampling_rate - ) - ) - audio_norm = audio - # audio_norm = audio / self.max_wav_value - # audio_norm = audio / np.abs(audio).max() - - audio_norm = audio_norm.unsqueeze(0) - spec_filename = filename.replace(".wav", ".spec.pt") - if os.path.exists(spec_filename): - try: - spec = torch.load(spec_filename) - except: - print(spec_filename, traceback.format_exc()) - spec = spectrogram_torch( - audio_norm, - self.filter_length, - self.sampling_rate, - self.hop_length, - self.win_length, - center=False, - ) - spec = torch.squeeze(spec, 0) - torch.save(spec, spec_filename, _use_new_zipfile_serialization=False) - else: - spec = spectrogram_torch( - audio_norm, - self.filter_length, - self.sampling_rate, - self.hop_length, - self.win_length, - center=False, - ) - spec = torch.squeeze(spec, 0) - torch.save(spec, spec_filename, _use_new_zipfile_serialization=False) - return spec, audio_norm - - def __getitem__(self, index): - return self.get_audio_text_pair(self.audiopaths_and_text[index]) - - def __len__(self): - return len(self.audiopaths_and_text) - - -class TextAudioCollate: - """Zero-pads model inputs and targets""" - - def __init__(self, return_ids=False): - self.return_ids = return_ids - - def __call__(self, batch): - """Collate's training batch from normalized text and aduio - PARAMS - ------ - batch: 
[text_normalized, spec_normalized, wav_normalized] - """ - # Right zero-pad all one-hot text sequences to max input length - _, ids_sorted_decreasing = torch.sort( - torch.LongTensor([x[0].size(1) for x in batch]), dim=0, descending=True - ) - - max_spec_len = max([x[0].size(1) for x in batch]) - max_wave_len = max([x[1].size(1) for x in batch]) - spec_lengths = torch.LongTensor(len(batch)) - wave_lengths = torch.LongTensor(len(batch)) - spec_padded = torch.FloatTensor(len(batch), batch[0][0].size(0), max_spec_len) - wave_padded = torch.FloatTensor(len(batch), 1, max_wave_len) - spec_padded.zero_() - wave_padded.zero_() - - max_phone_len = max([x[2].size(0) for x in batch]) - phone_lengths = torch.LongTensor(len(batch)) - phone_padded = torch.FloatTensor( - len(batch), max_phone_len, batch[0][2].shape[1] - ) - phone_padded.zero_() - sid = torch.LongTensor(len(batch)) - - for i in range(len(ids_sorted_decreasing)): - row = batch[ids_sorted_decreasing[i]] - - spec = row[0] - spec_padded[i, :, : spec.size(1)] = spec - spec_lengths[i] = spec.size(1) - - wave = row[1] - wave_padded[i, :, : wave.size(1)] = wave - wave_lengths[i] = wave.size(1) - - phone = row[2] - phone_padded[i, : phone.size(0), :] = phone - phone_lengths[i] = phone.size(0) - - sid[i] = row[3] - - return ( - phone_padded, - phone_lengths, - spec_padded, - spec_lengths, - wave_padded, - wave_lengths, - sid, - ) - - -class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler): - """ - Maintain similar input lengths in a batch. - Length groups are specified by boundaries. - Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}. - - It removes samples which are not included in the boundaries. - Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded. 
- """ - - def __init__( - self, - dataset, - batch_size, - boundaries, - num_replicas=None, - rank=None, - shuffle=True, - ): - super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle) - self.lengths = dataset.lengths - self.batch_size = batch_size - self.boundaries = boundaries - - self.buckets, self.num_samples_per_bucket = self._create_buckets() - self.total_size = sum(self.num_samples_per_bucket) - self.num_samples = self.total_size // self.num_replicas - - def _create_buckets(self): - buckets = [[] for _ in range(len(self.boundaries) - 1)] - for i in range(len(self.lengths)): - length = self.lengths[i] - idx_bucket = self._bisect(length) - if idx_bucket != -1: - buckets[idx_bucket].append(i) - - for i in range(len(buckets) - 1, -1, -1): # - if len(buckets[i]) == 0: - buckets.pop(i) - self.boundaries.pop(i + 1) - - num_samples_per_bucket = [] - for i in range(len(buckets)): - len_bucket = len(buckets[i]) - total_batch_size = self.num_replicas * self.batch_size - rem = ( - total_batch_size - (len_bucket % total_batch_size) - ) % total_batch_size - num_samples_per_bucket.append(len_bucket + rem) - return buckets, num_samples_per_bucket - - def __iter__(self): - # deterministically shuffle based on epoch - g = torch.Generator() - g.manual_seed(self.epoch) - - indices = [] - if self.shuffle: - for bucket in self.buckets: - indices.append(torch.randperm(len(bucket), generator=g).tolist()) - else: - for bucket in self.buckets: - indices.append(list(range(len(bucket)))) - - batches = [] - for i in range(len(self.buckets)): - bucket = self.buckets[i] - len_bucket = len(bucket) - ids_bucket = indices[i] - num_samples_bucket = self.num_samples_per_bucket[i] - - # add extra samples to make it evenly divisible - rem = num_samples_bucket - len_bucket - ids_bucket = ( - ids_bucket - + ids_bucket * (rem // len_bucket) - + ids_bucket[: (rem % len_bucket)] - ) - - # subsample - ids_bucket = ids_bucket[self.rank :: self.num_replicas] - - # batching - for j in range(len(ids_bucket) // self.batch_size): - batch = [ - bucket[idx] - for idx in ids_bucket[ - j * self.batch_size : (j + 1) * self.batch_size - ] - ] - batches.append(batch) - - if self.shuffle: - batch_ids = torch.randperm(len(batches), generator=g).tolist() - batches = [batches[i] for i in batch_ids] - self.batches = batches - - assert len(self.batches) * self.batch_size == self.num_samples - return iter(self.batches) - - def _bisect(self, x, lo=0, hi=None): - if hi is None: - hi = len(self.boundaries) - 1 - - if hi > lo: - mid = (hi + lo) // 2 - if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]: - return mid - elif x <= self.boundaries[mid]: - return self._bisect(x, lo, mid) - else: - return self._bisect(x, mid + 1, hi) - else: - return -1 - - def __len__(self): - return self.num_samples // self.batch_size diff --git a/train/losses.py b/train/losses.py deleted file mode 100644 index b89038f14..000000000 --- a/train/losses.py +++ /dev/null @@ -1,59 +0,0 @@ -import torch -from torch.nn import functional as F - - -def feature_loss(fmap_r, fmap_g): - loss = 0 - for dr, dg in zip(fmap_r, fmap_g): - for rl, gl in zip(dr, dg): - rl = rl.float().detach() - gl = gl.float() - loss += torch.mean(torch.abs(rl - gl)) - - return loss * 2 - - -def discriminator_loss(disc_real_outputs, disc_generated_outputs): - loss = 0 - r_losses = [] - g_losses = [] - for dr, dg in zip(disc_real_outputs, disc_generated_outputs): - dr = dr.float() - dg = dg.float() - r_loss = torch.mean((1 - dr) ** 2) - g_loss = torch.mean(dg**2) - loss 
+= r_loss + g_loss - r_losses.append(r_loss.item()) - g_losses.append(g_loss.item()) - - return loss, r_losses, g_losses - - -def generator_loss(disc_outputs): - loss = 0 - gen_losses = [] - for dg in disc_outputs: - dg = dg.float() - l = torch.mean((1 - dg) ** 2) - gen_losses.append(l) - loss += l - - return loss, gen_losses - - -def kl_loss(z_p, logs_q, m_p, logs_p, z_mask): - """ - z_p, logs_q: [b, h, t_t] - m_p, logs_p: [b, h, t_t] - """ - z_p = z_p.float() - logs_q = logs_q.float() - m_p = m_p.float() - logs_p = logs_p.float() - z_mask = z_mask.float() - - kl = logs_p - logs_q - 0.5 - kl += 0.5 * ((z_p - m_p) ** 2) * torch.exp(-2.0 * logs_p) - kl = torch.sum(kl * z_mask) - l = kl / torch.sum(z_mask) - return l diff --git a/train/mel_processing.py b/train/mel_processing.py deleted file mode 100644 index 1c871ab6b..000000000 --- a/train/mel_processing.py +++ /dev/null @@ -1,130 +0,0 @@ -import torch -import torch.utils.data -from librosa.filters import mel as librosa_mel_fn - - -MAX_WAV_VALUE = 32768.0 - - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - """ - PARAMS - ------ - C: compression factor - """ - return torch.log(torch.clamp(x, min=clip_val) * C) - - -def dynamic_range_decompression_torch(x, C=1): - """ - PARAMS - ------ - C: compression factor used to compress - """ - return torch.exp(x) / C - - -def spectral_normalize_torch(magnitudes): - return dynamic_range_compression_torch(magnitudes) - - -def spectral_de_normalize_torch(magnitudes): - return dynamic_range_decompression_torch(magnitudes) - - -# Reusable banks -mel_basis = {} -hann_window = {} - - -def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False): - """Convert waveform into Linear-frequency Linear-amplitude spectrogram. - - Args: - y :: (B, T) - Audio waveforms - n_fft - sampling_rate - hop_size - win_size - center - Returns: - :: (B, Freq, Frame) - Linear-frequency Linear-amplitude spectrogram - """ - # Validation - if torch.min(y) < -1.07: - print("min value is ", torch.min(y)) - if torch.max(y) > 1.07: - print("max value is ", torch.max(y)) - - # Window - Cache if needed - global hann_window - dtype_device = str(y.dtype) + "_" + str(y.device) - wnsize_dtype_device = str(win_size) + "_" + dtype_device - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to( - dtype=y.dtype, device=y.device - ) - - # Padding - y = torch.nn.functional.pad( - y.unsqueeze(1), - (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), - mode="reflect", - ) - y = y.squeeze(1) - - # Complex Spectrogram :: (B, T) -> (B, Freq, Frame, RealComplex=2) - spec = torch.stft( - y, - n_fft, - hop_length=hop_size, - win_length=win_size, - window=hann_window[wnsize_dtype_device], - center=center, - pad_mode="reflect", - normalized=False, - onesided=True, - return_complex=False, - ) - - # Linear-frequency Linear-amplitude spectrogram :: (B, Freq, Frame, RealComplex=2) -> (B, Freq, Frame) - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - return spec - - -def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): - # MelBasis - Cache if needed - global mel_basis - dtype_device = str(spec.dtype) + "_" + str(spec.device) - fmax_dtype_device = str(fmax) + "_" + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn( - sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax - ) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to( - dtype=spec.dtype, device=spec.device - ) - - # Mel-frequency 
Log-amplitude spectrogram :: (B, Freq=num_mels, Frame) - melspec = torch.matmul(mel_basis[fmax_dtype_device], spec) - melspec = spectral_normalize_torch(melspec) - return melspec - - -def mel_spectrogram_torch( - y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False -): - """Convert waveform into Mel-frequency Log-amplitude spectrogram. - - Args: - y :: (B, T) - Waveforms - Returns: - melspec :: (B, Freq, Frame) - Mel-frequency Log-amplitude spectrogram - """ - # Linear-frequency Linear-amplitude spectrogram :: (B, T) -> (B, Freq, Frame) - spec = spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center) - - # Mel-frequency Log-amplitude spectrogram :: (B, Freq, Frame) -> (B, Freq=num_mels, Frame) - melspec = spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax) - - return melspec diff --git a/train/process_ckpt.py b/train/process_ckpt.py deleted file mode 100644 index 19de5f963..000000000 --- a/train/process_ckpt.py +++ /dev/null @@ -1,259 +0,0 @@ -import torch, traceback, os, pdb, sys - -now_dir = os.getcwd() -sys.path.append(now_dir) -from collections import OrderedDict -from i18n import I18nAuto - -i18n = I18nAuto() - - -def savee(ckpt, sr, if_f0, name, epoch, version, hps): - try: - opt = OrderedDict() - opt["weight"] = {} - for key in ckpt.keys(): - if "enc_q" in key: - continue - opt["weight"][key] = ckpt[key].half() - opt["config"] = [ - hps.data.filter_length // 2 + 1, - 32, - hps.model.inter_channels, - hps.model.hidden_channels, - hps.model.filter_channels, - hps.model.n_heads, - hps.model.n_layers, - hps.model.kernel_size, - hps.model.p_dropout, - hps.model.resblock, - hps.model.resblock_kernel_sizes, - hps.model.resblock_dilation_sizes, - hps.model.upsample_rates, - hps.model.upsample_initial_channel, - hps.model.upsample_kernel_sizes, - hps.model.spk_embed_dim, - hps.model.gin_channels, - hps.data.sampling_rate, - ] - opt["info"] = "%sepoch" % epoch - opt["sr"] = sr - opt["f0"] = if_f0 - opt["version"] = version - torch.save(opt, "weights/%s.pth" % name) - return "Success." 
- except: - return traceback.format_exc() - - -def show_info(path): - try: - a = torch.load(path, map_location="cpu") - return "模型信息:%s\n采样率:%s\n模型是否输入音高引导:%s\n版本:%s" % ( - a.get("info", "None"), - a.get("sr", "None"), - a.get("f0", "None"), - a.get("version", "None"), - ) - except: - return traceback.format_exc() - - -def extract_small_model(path, name, sr, if_f0, info, version): - try: - ckpt = torch.load(path, map_location="cpu") - if "model" in ckpt: - ckpt = ckpt["model"] - opt = OrderedDict() - opt["weight"] = {} - for key in ckpt.keys(): - if "enc_q" in key: - continue - opt["weight"][key] = ckpt[key].half() - if sr == "40k": - opt["config"] = [ - 1025, - 32, - 192, - 192, - 768, - 2, - 6, - 3, - 0, - "1", - [3, 7, 11], - [[1, 3, 5], [1, 3, 5], [1, 3, 5]], - [10, 10, 2, 2], - 512, - [16, 16, 4, 4], - 109, - 256, - 40000, - ] - elif sr == "48k": - if version == "v1": - opt["config"] = [ - 1025, - 32, - 192, - 192, - 768, - 2, - 6, - 3, - 0, - "1", - [3, 7, 11], - [[1, 3, 5], [1, 3, 5], [1, 3, 5]], - [10, 6, 2, 2, 2], - 512, - [16, 16, 4, 4, 4], - 109, - 256, - 48000, - ] - else: - opt["config"] = [ - 1025, - 32, - 192, - 192, - 768, - 2, - 6, - 3, - 0, - "1", - [3, 7, 11], - [[1, 3, 5], [1, 3, 5], [1, 3, 5]], - [12, 10, 2, 2], - 512, - [24, 20, 4, 4], - 109, - 256, - 48000, - ] - elif sr == "32k": - if version == "v1": - opt["config"] = [ - 513, - 32, - 192, - 192, - 768, - 2, - 6, - 3, - 0, - "1", - [3, 7, 11], - [[1, 3, 5], [1, 3, 5], [1, 3, 5]], - [10, 4, 2, 2, 2], - 512, - [16, 16, 4, 4, 4], - 109, - 256, - 32000, - ] - else: - opt["config"] = [ - 513, - 32, - 192, - 192, - 768, - 2, - 6, - 3, - 0, - "1", - [3, 7, 11], - [[1, 3, 5], [1, 3, 5], [1, 3, 5]], - [10, 8, 2, 2], - 512, - [20, 16, 4, 4], - 109, - 256, - 32000, - ] - if info == "": - info = "Extracted model." - opt["info"] = info - opt["version"] = version - opt["sr"] = sr - opt["f0"] = int(if_f0) - torch.save(opt, "weights/%s.pth" % name) - return "Success." - except: - return traceback.format_exc() - - -def change_info(path, info, name): - try: - ckpt = torch.load(path, map_location="cpu") - ckpt["info"] = info - if name == "": - name = os.path.basename(path) - torch.save(ckpt, "weights/%s" % name) - return "Success." - except: - return traceback.format_exc() - - -def merge(path1, path2, alpha1, sr, f0, info, name, version): - try: - - def extract(ckpt): - a = ckpt["model"] - opt = OrderedDict() - opt["weight"] = {} - for key in a.keys(): - if "enc_q" in key: - continue - opt["weight"][key] = a[key] - return opt - - ckpt1 = torch.load(path1, map_location="cpu") - ckpt2 = torch.load(path2, map_location="cpu") - cfg = ckpt1["config"] - if "model" in ckpt1: - ckpt1 = extract(ckpt1) - else: - ckpt1 = ckpt1["weight"] - if "model" in ckpt2: - ckpt2 = extract(ckpt2) - else: - ckpt2 = ckpt2["weight"] - if sorted(list(ckpt1.keys())) != sorted(list(ckpt2.keys())): - return "Fail to merge the models. The model architectures are not the same." 
- opt = OrderedDict() - opt["weight"] = {} - for key in ckpt1.keys(): - # try: - if key == "emb_g.weight" and ckpt1[key].shape != ckpt2[key].shape: - min_shape0 = min(ckpt1[key].shape[0], ckpt2[key].shape[0]) - opt["weight"][key] = ( - alpha1 * (ckpt1[key][:min_shape0].float()) - + (1 - alpha1) * (ckpt2[key][:min_shape0].float()) - ).half() - else: - opt["weight"][key] = ( - alpha1 * (ckpt1[key].float()) + (1 - alpha1) * (ckpt2[key].float()) - ).half() - # except: - # pdb.set_trace() - opt["config"] = cfg - """ - if(sr=="40k"):opt["config"] = [1025, 32, 192, 192, 768, 2, 6, 3, 0, "1", [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]], [10, 10, 2, 2], 512, [16, 16, 4, 4,4], 109, 256, 40000] - elif(sr=="48k"):opt["config"] = [1025, 32, 192, 192, 768, 2, 6, 3, 0, "1", [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]], [10,6,2,2,2], 512, [16, 16, 4, 4], 109, 256, 48000] - elif(sr=="32k"):opt["config"] = [513, 32, 192, 192, 768, 2, 6, 3, 0, "1", [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]], [10, 4, 2, 2, 2], 512, [16, 16, 4, 4,4], 109, 256, 32000] - """ - opt["sr"] = sr - opt["f0"] = 1 if f0 else 0 - opt["version"] = version - opt["info"] = info - torch.save(opt, "weights/%s.pth" % name) - return "Success." - except: - return traceback.format_exc() diff --git a/train/utils.py b/train/utils.py deleted file mode 100644 index aae833b08..000000000 --- a/train/utils.py +++ /dev/null @@ -1,500 +0,0 @@ -import os, traceback -import glob -import sys -import argparse -import logging -import json -import subprocess -import numpy as np -from scipy.io.wavfile import read -import torch - -MATPLOTLIB_FLAG = False - -logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) -logger = logging - - -def load_checkpoint_d(checkpoint_path, combd, sbd, optimizer=None, load_opt=1): - assert os.path.isfile(checkpoint_path) - checkpoint_dict = torch.load(checkpoint_path, map_location="cpu") - - ################## - def go(model, bkey): - saved_state_dict = checkpoint_dict[bkey] - if hasattr(model, "module"): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - new_state_dict = {} - for k, v in state_dict.items(): # 模型需要的shape - try: - new_state_dict[k] = saved_state_dict[k] - if saved_state_dict[k].shape != state_dict[k].shape: - print( - "shape-%s-mismatch|need-%s|get-%s" - % (k, state_dict[k].shape, saved_state_dict[k].shape) - ) # - raise KeyError - except: - # logger.info(traceback.format_exc()) - logger.info("%s is not in the checkpoint" % k) # pretrain缺失的 - new_state_dict[k] = v # 模型自带的随机值 - if hasattr(model, "module"): - model.module.load_state_dict(new_state_dict, strict=False) - else: - model.load_state_dict(new_state_dict, strict=False) - - go(combd, "combd") - go(sbd, "sbd") - ############# - logger.info("Loaded model weights") - - iteration = checkpoint_dict["iteration"] - learning_rate = checkpoint_dict["learning_rate"] - if ( - optimizer is not None and load_opt == 1 - ): ###加载不了,如果是空的的话,重新初始化,可能还会影响lr时间表的更新,因此在train文件最外围catch - # try: - optimizer.load_state_dict(checkpoint_dict["optimizer"]) - # except: - # traceback.print_exc() - logger.info("Loaded checkpoint '{}' (epoch {})".format(checkpoint_path, iteration)) - return model, optimizer, learning_rate, iteration - - -# def load_checkpoint(checkpoint_path, model, optimizer=None): -# assert os.path.isfile(checkpoint_path) -# checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') -# iteration = checkpoint_dict['iteration'] -# learning_rate = checkpoint_dict['learning_rate'] -# if optimizer is not None: -# 
optimizer.load_state_dict(checkpoint_dict['optimizer']) -# # print(1111) -# saved_state_dict = checkpoint_dict['model'] -# # print(1111) -# -# if hasattr(model, 'module'): -# state_dict = model.module.state_dict() -# else: -# state_dict = model.state_dict() -# new_state_dict= {} -# for k, v in state_dict.items(): -# try: -# new_state_dict[k] = saved_state_dict[k] -# except: -# logger.info("%s is not in the checkpoint" % k) -# new_state_dict[k] = v -# if hasattr(model, 'module'): -# model.module.load_state_dict(new_state_dict) -# else: -# model.load_state_dict(new_state_dict) -# logger.info("Loaded checkpoint '{}' (epoch {})" .format( -# checkpoint_path, iteration)) -# return model, optimizer, learning_rate, iteration -def load_checkpoint(checkpoint_path, model, optimizer=None, load_opt=1): - assert os.path.isfile(checkpoint_path) - checkpoint_dict = torch.load(checkpoint_path, map_location="cpu") - - saved_state_dict = checkpoint_dict["model"] - if hasattr(model, "module"): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - new_state_dict = {} - for k, v in state_dict.items(): # 模型需要的shape - try: - new_state_dict[k] = saved_state_dict[k] - if saved_state_dict[k].shape != state_dict[k].shape: - print( - "shape-%s-mismatch|need-%s|get-%s" - % (k, state_dict[k].shape, saved_state_dict[k].shape) - ) # - raise KeyError - except: - # logger.info(traceback.format_exc()) - logger.info("%s is not in the checkpoint" % k) # pretrain缺失的 - new_state_dict[k] = v # 模型自带的随机值 - if hasattr(model, "module"): - model.module.load_state_dict(new_state_dict, strict=False) - else: - model.load_state_dict(new_state_dict, strict=False) - logger.info("Loaded model weights") - - iteration = checkpoint_dict["iteration"] - learning_rate = checkpoint_dict["learning_rate"] - if ( - optimizer is not None and load_opt == 1 - ): ###加载不了,如果是空的的话,重新初始化,可能还会影响lr时间表的更新,因此在train文件最外围catch - # try: - optimizer.load_state_dict(checkpoint_dict["optimizer"]) - # except: - # traceback.print_exc() - logger.info("Loaded checkpoint '{}' (epoch {})".format(checkpoint_path, iteration)) - return model, optimizer, learning_rate, iteration - - -def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path): - logger.info( - "Saving model and optimizer state at epoch {} to {}".format( - iteration, checkpoint_path - ) - ) - if hasattr(model, "module"): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - torch.save( - { - "model": state_dict, - "iteration": iteration, - "optimizer": optimizer.state_dict(), - "learning_rate": learning_rate, - }, - checkpoint_path, - ) - - -def save_checkpoint_d(combd, sbd, optimizer, learning_rate, iteration, checkpoint_path): - logger.info( - "Saving model and optimizer state at epoch {} to {}".format( - iteration, checkpoint_path - ) - ) - if hasattr(combd, "module"): - state_dict_combd = combd.module.state_dict() - else: - state_dict_combd = combd.state_dict() - if hasattr(sbd, "module"): - state_dict_sbd = sbd.module.state_dict() - else: - state_dict_sbd = sbd.state_dict() - torch.save( - { - "combd": state_dict_combd, - "sbd": state_dict_sbd, - "iteration": iteration, - "optimizer": optimizer.state_dict(), - "learning_rate": learning_rate, - }, - checkpoint_path, - ) - - -def summarize( - writer, - global_step, - scalars={}, - histograms={}, - images={}, - audios={}, - audio_sampling_rate=22050, -): - for k, v in scalars.items(): - writer.add_scalar(k, v, global_step) - for k, v in histograms.items(): - 
writer.add_histogram(k, v, global_step) - for k, v in images.items(): - writer.add_image(k, v, global_step, dataformats="HWC") - for k, v in audios.items(): - writer.add_audio(k, v, global_step, audio_sampling_rate) - - -def latest_checkpoint_path(dir_path, regex="G_*.pth"): - f_list = glob.glob(os.path.join(dir_path, regex)) - f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f)))) - x = f_list[-1] - print(x) - return x - - -def plot_spectrogram_to_numpy(spectrogram): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger("matplotlib") - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(10, 2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", interpolation="none") - plt.colorbar(im, ax=ax) - plt.xlabel("Frames") - plt.ylabel("Channels") - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep="") - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def plot_alignment_to_numpy(alignment, info=None): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger("matplotlib") - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(6, 4)) - im = ax.imshow( - alignment.transpose(), aspect="auto", origin="lower", interpolation="none" - ) - fig.colorbar(im, ax=ax) - xlabel = "Decoder timestep" - if info is not None: - xlabel += "\n\n" + info - plt.xlabel(xlabel) - plt.ylabel("Encoder timestep") - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep="") - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def load_wav_to_torch(full_path): - sampling_rate, data = read(full_path) - return torch.FloatTensor(data.astype(np.float32)), sampling_rate - - -def load_filepaths_and_text(filename, split="|"): - with open(filename, encoding='utf-8') as f: - filepaths_and_text = [line.strip().split(split) for line in f] - filepaths_and_text = [item for item in filepaths_and_text if len(item) == 5] # ensure there are 5 items. 
- return filepaths_and_text - - -def get_hparams(init=True): - """ - todo: - 结尾七人组: - 保存频率、总epoch done - bs done - pretrainG、pretrainD done - 卡号:os.en["CUDA_VISIBLE_DEVICES"] done - if_latest done - 模型:if_f0 done - 采样率:自动选择config done - 是否缓存数据集进GPU:if_cache_data_in_gpu done - - -m: - 自动决定training_files路径,改掉train_nsf_load_pretrain.py里的hps.data.training_files done - -c不要了 - """ - parser = argparse.ArgumentParser() - # parser.add_argument('-c', '--config', type=str, default="configs/40k.json",help='JSON file for configuration') - parser.add_argument( - "-se", - "--save_every_epoch", - type=int, - required=True, - help="checkpoint save frequency (epoch)", - ) - parser.add_argument( - "-te", "--total_epoch", type=int, required=True, help="total_epoch" - ) - parser.add_argument( - "-pg", "--pretrainG", type=str, default="", help="Pretrained Discriminator path" - ) - parser.add_argument( - "-pd", "--pretrainD", type=str, default="", help="Pretrained Generator path" - ) - parser.add_argument("-g", "--gpus", type=str, default="0", help="split by -") - parser.add_argument( - "-bs", "--batch_size", type=int, required=True, help="batch size" - ) - parser.add_argument( - "-e", "--experiment_dir", type=str, required=True, help="experiment dir" - ) # -m - parser.add_argument( - "-sr", "--sample_rate", type=str, required=True, help="sample rate, 32k/40k/48k" - ) - parser.add_argument( - "-sw", - "--save_every_weights", - type=str, - default="0", - help="save the extracted model in weights directory when saving checkpoints", - ) - parser.add_argument( - "-v", "--version", type=str, required=True, help="model version" - ) - parser.add_argument( - "-f0", - "--if_f0", - type=int, - required=True, - help="use f0 as one of the inputs of the model, 1 or 0", - ) - parser.add_argument( - "-l", - "--if_latest", - type=int, - required=True, - help="if only save the latest G/D pth file, 1 or 0", - ) - parser.add_argument( - "-c", - "--if_cache_data_in_gpu", - type=int, - required=True, - help="if caching the dataset in GPU memory, 1 or 0", - ) - parser.add_argument( - "-li", "--log_interval", type=int, required=True, help="log interval" - ) - - args = parser.parse_args() - name = args.experiment_dir - experiment_dir = os.path.join("./logs", args.experiment_dir) - - if not os.path.exists(experiment_dir): - os.makedirs(experiment_dir) - - if args.version == "v1" or args.sample_rate == "40k": - config_path = "configs/%s.json" % args.sample_rate - else: - config_path = "configs/%s_v2.json" % args.sample_rate - config_save_path = os.path.join(experiment_dir, "config.json") - if init: - with open(config_path, "r") as f: - data = f.read() - with open(config_save_path, "w") as f: - f.write(data) - else: - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = hparams.experiment_dir = experiment_dir - hparams.save_every_epoch = args.save_every_epoch - hparams.name = name - hparams.total_epoch = args.total_epoch - hparams.pretrainG = args.pretrainG - hparams.pretrainD = args.pretrainD - hparams.version = args.version - hparams.gpus = args.gpus - hparams.train.batch_size = args.batch_size - hparams.sample_rate = args.sample_rate - hparams.if_f0 = args.if_f0 - hparams.if_latest = args.if_latest - hparams.save_every_weights = args.save_every_weights - hparams.if_cache_data_in_gpu = args.if_cache_data_in_gpu - hparams.data.training_files = "%s/filelist.txt" % experiment_dir - - hparams.train.log_interval = args.log_interval - - # Update 
log_interval in the 'train' section of the config dictionary - config["train"]["log_interval"] = args.log_interval - - # Save the updated config back to the config_save_path - with open(config_save_path, "w") as f: - json.dump(config, f, indent=4) - - return hparams - - -def get_hparams_from_dir(model_dir): - config_save_path = os.path.join(model_dir, "config.json") - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_file(config_path): - with open(config_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - return hparams - - -def check_git_hash(model_dir): - source_dir = os.path.dirname(os.path.realpath(__file__)) - if not os.path.exists(os.path.join(source_dir, ".git")): - logger.warn( - "{} is not a git repository, therefore hash value comparison will be ignored.".format( - source_dir - ) - ) - return - - cur_hash = subprocess.getoutput("git rev-parse HEAD") - - path = os.path.join(model_dir, "githash") - if os.path.exists(path): - saved_hash = open(path).read() - if saved_hash != cur_hash: - logger.warn( - "git hash values are different. {}(saved) != {}(current)".format( - saved_hash[:8], cur_hash[:8] - ) - ) - else: - open(path, "w").write(cur_hash) - - -def get_logger(model_dir, filename="train.log"): - global logger - logger = logging.getLogger(os.path.basename(model_dir)) - logger.setLevel(logging.DEBUG) - - formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") - if not os.path.exists(model_dir): - os.makedirs(model_dir) - h = logging.FileHandler(os.path.join(model_dir, filename)) - h.setLevel(logging.DEBUG) - h.setFormatter(formatter) - logger.addHandler(h) - return logger - - -class HParams: - def __init__(self, **kwargs): - for k, v in kwargs.items(): - if type(v) == dict: - v = HParams(**v) - self[k] = v - - def keys(self): - return self.__dict__.keys() - - def items(self): - return self.__dict__.items() - - def values(self): - return self.__dict__.values() - - def __len__(self): - return len(self.__dict__) - - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - return setattr(self, key, value) - - def __contains__(self, key): - return key in self.__dict__ - - def __repr__(self): - return self.__dict__.__repr__() diff --git a/train_nsf_sim_cache_sid_load_pretrain.py b/train_nsf_sim_cache_sid_load_pretrain.py deleted file mode 100644 index 2887a97bb..000000000 --- a/train_nsf_sim_cache_sid_load_pretrain.py +++ /dev/null @@ -1,510 +0,0 @@ -import sys, os - -now_dir = os.getcwd() -sys.path.append(os.path.join(now_dir)) -sys.path.append(os.path.join(now_dir, "train")) -import utils -import datetime - -hps = utils.get_hparams() -os.environ["CUDA_VISIBLE_DEVICES"] = hps.gpus.replace("-", ",") -n_gpus = len(hps.gpus.split("-")) -from random import shuffle, randint -import traceback, json, argparse, itertools, math, torch, pdb - -torch.backends.cudnn.deterministic = False -torch.backends.cudnn.benchmark = False -from torch import nn, optim -from torch.nn import functional as F -from torch.utils.data import DataLoader -from torch.utils.tensorboard import SummaryWriter -import torch.multiprocessing as mp -import torch.distributed as dist -from torch.nn.parallel import DistributedDataParallel as DDP -from torch.cuda.amp import autocast, GradScaler -from lib.infer_pack import commons -from time import sleep -from time import time as 
ttime -from data_utils import ( - TextAudioLoaderMultiNSFsid, - TextAudioLoader, - TextAudioCollateMultiNSFsid, - TextAudioCollate, - DistributedBucketSampler, -) - -import csv - -if hps.version == "v1": - from lib.infer_pack.models import ( - SynthesizerTrnMs256NSFsid as RVC_Model_f0, - SynthesizerTrnMs256NSFsid_nono as RVC_Model_nof0, - MultiPeriodDiscriminator, - ) -else: - from lib.infer_pack.models import ( - SynthesizerTrnMs768NSFsid as RVC_Model_f0, - SynthesizerTrnMs768NSFsid_nono as RVC_Model_nof0, - MultiPeriodDiscriminatorV2 as MultiPeriodDiscriminator, - ) -from losses import generator_loss, discriminator_loss, feature_loss, kl_loss -from mel_processing import mel_spectrogram_torch, spec_to_mel_torch -from process_ckpt import savee - -global global_step -global_step = 0 - - -class EpochRecorder: - def __init__(self): - self.last_time = ttime() - - def record(self): - now_time = ttime() - elapsed_time = now_time - self.last_time - self.last_time = now_time - elapsed_time_str = str(datetime.timedelta(seconds=elapsed_time)) - current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") - return f"[{current_time}] | ({elapsed_time_str})" - - -def main(): - n_gpus = torch.cuda.device_count() - if torch.cuda.is_available() == False and torch.backends.mps.is_available() == True: - n_gpus = 1 - os.environ["MASTER_ADDR"] = "localhost" - os.environ["MASTER_PORT"] = str(randint(20000, 55555)) - children = [] - for i in range(n_gpus): - subproc = mp.Process( - target=run, - args=( - i, - n_gpus, - hps, - ), - ) - children.append(subproc) - subproc.start() - - for i in range(n_gpus): - children[i].join() - -def reset_stop_flag(): - with open("csvdb/stop.csv", "w+", newline="") as STOPCSVwrite: - csv_writer = csv.writer(STOPCSVwrite, delimiter=",") - csv_writer.writerow(["False"]) - -def create_model(hps, model_f0, model_nof0): - filter_length_adjusted = hps.data.filter_length // 2 + 1 - segment_size_adjusted = hps.train.segment_size // hps.data.hop_length - is_half = hps.train.fp16_run - sr = hps.sample_rate - - model = model_f0 if hps.if_f0 == 1 else model_nof0 - - return model( - filter_length_adjusted, - segment_size_adjusted, - **hps.model, - is_half=is_half, - sr=sr - ) - -def move_model_to_cuda_if_available(model, rank): - if torch.cuda.is_available(): - return model.cuda(rank) - else: - return model - -def create_optimizer(model, hps): - return torch.optim.AdamW( - model.parameters(), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps, - ) - -def create_ddp_model(model, rank): - if torch.cuda.is_available(): - return DDP(model, device_ids=[rank]) - else: - return DDP(model) - -def create_dataset(hps, if_f0=True): - return TextAudioLoaderMultiNSFsid(hps.data.training_files, hps.data) if if_f0 else TextAudioLoader(hps.data.training_files, hps.data) - -def create_sampler(dataset, batch_size, n_gpus, rank): - return DistributedBucketSampler( - dataset, - batch_size * n_gpus, - # [100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1200,1400], # 16s - [100, 200, 300, 400, 500, 600, 700, 800, 900], # 16s - num_replicas=n_gpus, - rank=rank, - shuffle=True, - ) - -def set_collate_fn(if_f0=True): - return TextAudioCollateMultiNSFsid() if if_f0 else TextAudioCollate() - -def run(rank, n_gpus, hps): - global global_step - if rank == 0: - logger = utils.get_logger(hps.model_dir) - logger.info(hps) - # utils.check_git_hash(hps.model_dir) - writer = SummaryWriter(log_dir=hps.model_dir) - writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) - - 
dist.init_process_group( - backend="gloo", init_method="env://", world_size=n_gpus, rank=rank - ) - torch.manual_seed(hps.train.seed) - if torch.cuda.is_available(): - torch.cuda.set_device(rank) - - - train_dataset = TextAudioLoaderMultiNSFsid( - hps.data.training_files, hps.data - ) if hps.if_f0 == 1 else TextAudioLoader(hps.data.training_files, hps.data) - - train_sampler = DistributedBucketSampler( - train_dataset, - hps.train.batch_size * n_gpus, - # [100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1200,1400], # 16s - [100, 200, 300, 400, 500, 600, 700, 800, 900], # 16s - num_replicas=n_gpus, - rank=rank, - shuffle=True, - ) - # It is possible that dataloader's workers are out of shared memory. Please try to raise your shared memory limit. - # num_workers=8 -> num_workers=4 - - collate_fn = TextAudioCollateMultiNSFsid() if hps.if_f0 == 1 else TextAudioCollate() - train_loader = DataLoader( - train_dataset, - num_workers=4, - shuffle=False, - pin_memory=True, - collate_fn=collate_fn, - batch_sampler=train_sampler, - persistent_workers=True, - prefetch_factor=8, - ) - - net_g = create_model(hps, RVC_Model_f0, RVC_Model_nof0) - - net_g = move_model_to_cuda_if_available(net_g, rank) - net_d = move_model_to_cuda_if_available(MultiPeriodDiscriminator(hps.model.use_spectral_norm), rank) - - optim_g = create_optimizer(net_g, hps) - optim_d = create_optimizer(net_d, hps) - # net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True) - # net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True) - net_g = create_ddp_model(net_g, rank) - net_d = create_ddp_model(net_d, rank) - - try: # 如果能加载自动resume - _, _, _, epoch_str = utils.load_checkpoint( - utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d - ) # D多半加载没事 - if rank == 0: - logger.info("loaded D") - # _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g,load_opt=0) - _, _, _, epoch_str = utils.load_checkpoint( - utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g - ) - global_step = (epoch_str - 1) * len(train_loader) - # epoch_str = 1 - # global_step = 0 - except: # 如果首次不能加载,加载pretrain - # traceback.print_exc() - epoch_str = 1 - global_step = 0 - if hps.pretrainG != "": - if rank == 0: - logger.info(f"loaded pretrained {hps.pretrainG}") - print( - net_g.module.load_state_dict( - torch.load(hps.pretrainG, map_location="cpu")["model"] - ) - ) ##测试不加载优化器 - if hps.pretrainD != "": - if rank == 0: - logger.info("loaded pretrained %s" % (hps.pretrainD)) - print( - net_d.module.load_state_dict( - torch.load(hps.pretrainD, map_location="cpu")["model"] - ) - ) - - scheduler_g = torch.optim.lr_scheduler.ExponentialLR( - optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 - ) - scheduler_d = torch.optim.lr_scheduler.ExponentialLR( - optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 - ) - - scaler = GradScaler(enabled=hps.train.fp16_run) - - cache = [] - for epoch in range(epoch_str, hps.train.epochs + 1): - if rank == 0: - train_and_evaluate( - rank, - epoch, - hps, - [net_g, net_d], - [optim_g, optim_d], - [scheduler_g, scheduler_d], - scaler, - [train_loader, None], - logger, - [writer, writer_eval], - cache, - ) - else: - train_and_evaluate( - rank, - epoch, - hps, - [net_g, net_d], - [optim_g, optim_d], - [scheduler_g, scheduler_d], - scaler, - [train_loader, None], - None, - None, - cache, - ) - scheduler_g.step() - scheduler_d.step() - - -def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, 
scaler, loaders, logger, writers, cache): - net_g, net_d = nets - optim_g, optim_d = optims - train_loader, eval_loader = loaders - writer, writer_eval = (writers if writers is not None else (None, None)) - - train_loader.batch_sampler.set_epoch(epoch) - global global_step - - nets = [net_g, net_d] - for net in nets: - net.train() - - def save_checkpoint(name): - ckpt = net_g.module.state_dict() if hasattr(net_g, "module") else net_g.state_dict() - result = savee(ckpt, hps.sample_rate, hps.if_f0, name, epoch, hps.version, hps) - logger.info("Saving final ckpt: {}".format(result)) - sleep(1) - - if hps.if_cache_data_in_gpu: - # Use Cache - data_iterator = cache - if len(cache) == 0: - gpu_available = torch.cuda.is_available() - - for batch_idx, info in enumerate(train_loader): - # Unpack - info = list(info) - if hps.if_f0: - tensors = info - else: - # We consider that pitch and pitchf are not included in this case - tensors = info[:2] + info[4:] - - # Load on CUDA - if gpu_available: - tensors = [tensor.cuda(rank, non_blocking=True) for tensor in tensors] - - # Cache on list - cache.extend([(batch_idx, tuple(tensor for tensor in tensors if tensor is not None))]) - else: - shuffle(cache) - else: - data_iterator = enumerate(train_loader) - - def to_gpu_if_available(tensor): - return tensor.cuda(rank, non_blocking=True) if torch.cuda.is_available() else tensor - - # Run steps - gpu_available = torch.cuda.is_available() - epoch_recorder = EpochRecorder() - fp16_run = hps.train.fp16_run - c_mel = hps.train.c_mel - - for batch_idx, info in data_iterator: - # Data - ## Unpack - if hps.if_f0 == 1: - phone, phone_lengths, pitch, pitchf, spec, spec_lengths, wave, wave_lengths, sid = info - else: - phone, phone_lengths, spec, spec_lengths, wave, wave_lengths, sid = info - ## Load on CUDA - if (not hps.if_cache_data_in_gpu) and gpu_available: - phone = to_gpu_if_available(phone) - phone_lengths = to_gpu_if_available(phone_lengths) - sid = to_gpu_if_available(sid) - spec = to_gpu_if_available(spec) - spec_lengths = to_gpu_if_available(spec_lengths) - wave = to_gpu_if_available(wave) - - if hps.if_f0 == 1: - pitch = to_gpu_if_available(pitch) - pitchf = to_gpu_if_available(pitchf) - - # Calculate - with autocast(enabled=fp16_run): - if hps.if_f0 == 1: - y_hat, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q) = \ - net_g(phone, phone_lengths, pitch, pitchf, spec, spec_lengths, sid) - else: - y_hat, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q) = \ - net_g(phone, phone_lengths, spec, spec_lengths, sid) - mel = spec_to_mel_torch(spec, hps.data.filter_length, hps.data.n_mel_channels, - hps.data.sampling_rate, hps.data.mel_fmin, hps.data.mel_fmax) - - y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length) - y_hat_mel = mel_spectrogram_torch( - y_hat.float().squeeze(1), - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.hop_length, - hps.data.win_length, - hps.data.mel_fmin, - hps.data.mel_fmax, - ) - - if fp16_run: y_hat_mel = y_hat_mel.half() - - wave = commons.slice_segments(wave, ids_slice * hps.data.hop_length, - hps.train.segment_size) # slice - - y_d_hat_r, y_d_hat_g, _, _ = net_d(wave, y_hat.detach()) - - loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g) - net_d_params = net_d.parameters() - net_g_params = net_g.parameters() - lr_scalar = optim_g.param_groups[0]["lr"] - - optim_d.zero_grad() - scaler.scale(loss_disc).backward() - scaler.unscale_(optim_d) - 
grad_norm_d = commons.clip_grad_value_(net_d_params, None) - scaler.step(optim_d) - - with autocast(enabled=fp16_run): - y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(wave, y_hat) - - loss_mel = F.l1_loss(y_mel, y_hat_mel) * c_mel - loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl - loss_fm = feature_loss(fmap_r, fmap_g) - loss_gen, losses_gen = generator_loss(y_d_hat_g) - loss_gen_all = loss_gen + loss_fm + loss_mel + loss_kl - - optim_g.zero_grad() - scaler.scale(loss_gen_all).backward() - scaler.unscale_(optim_g) - grad_norm_g = commons.clip_grad_value_(net_g_params, None) - scaler.step(optim_g) - scaler.update() - - if rank == 0 and global_step % hps.train.log_interval == 0: - lr = lr_scalar # use stored lr scalar here - logger.info("Train Epoch: {} [{:.0f}%]".format(epoch, 100.0 * batch_idx / len(train_loader))) - - # Amor For Tensorboard display - loss_mel, loss_kl = min(loss_mel, 75), min(loss_kl, 9) - - scalar_dict = { - "loss/g/total": loss_gen_all, - "loss/d/total": loss_disc, - "learning_rate": lr, - "grad_norm_d": grad_norm_d, - "grad_norm_g": grad_norm_g, - "loss/g/fm": loss_fm, - "loss/g/mel": loss_mel, - "loss/g/kl": loss_kl, - **{"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)}, - **{"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)}, - **{"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)}, - } - - image_dict = { - "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()), - "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()), - "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()), - } - - utils.summarize( - writer=writer, - global_step=global_step, - images=image_dict, - scalars=scalar_dict, - ) - global_step += 1 - - if epoch % hps.save_every_epoch == 0: - if rank == 0: - save_format = str(2333333) if hps.if_latest else str(global_step) - model_dir = hps.model_dir - learning_rate = hps.train.learning_rate - name_epoch = f"{hps.name}_e{epoch}" - models = {'G': net_g, 'D': net_d} - optims = {'G': optim_g, 'D': optim_d} - - for model_name, model in models.items(): - path = os.path.join(model_dir, f"{model_name}_{save_format}.pth") - utils.save_checkpoint(model, optims[model_name], learning_rate, epoch, path) - - if hps.save_every_weights == "1": - ckpt = net_g.module.state_dict() if hasattr(net_g, "module") else net_g.state_dict() - logger.info( - "saving ckpt %s_%s" - % ( - name_epoch, - savee( - ckpt, - hps.sample_rate, - hps.if_f0, - f"{name_epoch}_s{global_step}", - epoch, - hps.version, - hps, - ), - ) - ) - - stopbtn = False - try: - with open("csvdb/stop.csv", 'r') as csv_file: - stopbtn_str = next(csv.reader(csv_file), [None])[0] - if stopbtn_str is not None: stopbtn = stopbtn_str.lower() == 'true' - except (ValueError, TypeError, FileNotFoundError, IndexError) as e: - print(f"Handling exception: {e}") - stopbtn = False - - if stopbtn: - logger.info("Stop Button was pressed. The program is closed.") - ckpt = net_g.module.state_dict() if hasattr(net_g, "module") else net_g.state_dict() - logger.info(f"Saving final ckpt:{savee(ckpt, hps.sample_rate, hps.if_f0, hps.name, epoch, hps.version, hps)}") - sleep(1) - reset_stop_flag() - os._exit(2333333) - - if rank == 0: - logger.info(f"====> Epoch: {epoch} {epoch_recorder.record()}") - - if epoch >= hps.total_epoch: - logger.info("Training is done. 
The program is closed.") - save_checkpoint(hps.name) - os._exit(2333333) - - -if __name__ == "__main__": - torch.multiprocessing.set_start_method("spawn") - main() diff --git a/trainset_preprocess_pipeline_print.py b/trainset_preprocess_pipeline_print.py deleted file mode 100644 index f20845839..000000000 --- a/trainset_preprocess_pipeline_print.py +++ /dev/null @@ -1,155 +0,0 @@ -import sys, os, multiprocessing -from scipy import signal - -now_dir = os.getcwd() -sys.path.append(now_dir) - -inp_root = sys.argv[1] -sr = int(sys.argv[2]) -n_p = int(sys.argv[3]) -exp_dir = sys.argv[4] -noparallel = sys.argv[5] == "True" -import numpy as np, os, traceback -from slicer2 import Slicer -import librosa, traceback -from scipy.io import wavfile -import multiprocessing -from my_utils import load_audio, check_audio_duration -import tqdm - -DoFormant = False -Quefrency = 1.0 -Timbre = 1.0 - -mutex = multiprocessing.Lock() -f = open(f"{exp_dir}/preprocess.log", "a+") - - -def println(strr): - mutex.acquire() - print(strr) - f.write("%s\n" % strr) - f.flush() - mutex.release() - - -class PreProcess: - def __init__(self, sr, exp_dir): - self.slicer = Slicer( - sr=sr, - threshold=-42, - min_length=1500, - min_interval=400, - hop_size=15, - max_sil_kept=500, - ) - self.sr = sr - self.bh, self.ah = signal.butter(N=5, Wn=48, btype="high", fs=self.sr) - self.per = 3.0 - self.overlap = 0.3 - self.tail = self.per + self.overlap - self.max = 0.9 - self.alpha = 0.75 - self.exp_dir = exp_dir - self.gt_wavs_dir = "%s/0_gt_wavs" % exp_dir - self.wavs16k_dir = "%s/1_16k_wavs" % exp_dir - os.makedirs(self.exp_dir, exist_ok=True) - os.makedirs(self.gt_wavs_dir, exist_ok=True) - os.makedirs(self.wavs16k_dir, exist_ok=True) - - def norm_write(self, tmp_audio, idx0, idx1): - tmp_max = np.abs(tmp_audio).max() - if tmp_max > 2.5: - print("%s-%s-%s-filtered" % (idx0, idx1, tmp_max)) - return - tmp_audio = (tmp_audio / tmp_max * (self.max * self.alpha)) + ( - 1 - self.alpha - ) * tmp_audio - wavfile.write( - "%s/%s_%s.wav" % (self.gt_wavs_dir, idx0, idx1), - self.sr, - tmp_audio.astype(np.float32), - ) - tmp_audio = librosa.resample( - tmp_audio, orig_sr=self.sr, target_sr=16000 - ) # , res_type="soxr_vhq" - wavfile.write( - "%s/%s_%s.wav" % (self.wavs16k_dir, idx0, idx1), - 16000, - tmp_audio.astype(np.float32), - ) - - def pipeline(self, path, idx0): - - file_extension = path.split('.')[-1] - supported_file_extensions = {'wav', 'mp3', 'flac', 'ogg', 'opus', - 'm4a', 'mp4', 'aac', 'alac', 'wma', - 'aiff', 'webm', 'ac3'} - - try: - if file_extension in supported_file_extensions: - if not check_audio_duration(path): return - audio = load_audio(path, self.sr, DoFormant=False) - # zero phased digital filter cause pre-ringing noise... - # audio = signal.filtfilt(self.bh, self.ah, audio) - audio = signal.lfilter(self.bh, self.ah, audio) - - idx1 = 0 - for audio in self.slicer.slice(audio): - frame_start_points = range(0, len(audio), int(self.sr * (self.per - self.overlap))) - - for _, start in enumerate(frame_start_points): - if len(audio[start:]) <= self.tail * self.sr: - tmp_audio = audio[start:] - idx1 += 1 - break - - tmp_audio = audio[start : start + int(self.per * self.sr)] - self.norm_write(tmp_audio, idx0, idx1) - idx1 += 1 - self.norm_write(tmp_audio, idx0, idx1) - # println("%s->Suc." % path) - else: - print(f"Unsupported audio format! 
- {path.split('/')[-1]}") - except: - println("%s->%s" % (path, traceback.format_exc())) - - def pipeline_mp(self, infos, thread_n): - for path, idx0 in tqdm.tqdm( - infos, position=thread_n, leave=True, desc="thread:%s" % thread_n - ): - self.pipeline(path, idx0) - - def pipeline_mp_inp_dir(self, inp_root, n_p): - try: - infos = [ - ("%s/%s" % (inp_root, name), idx) - for idx, name in enumerate(sorted(list(os.listdir(inp_root)))) - ] - if noparallel: - for i in range(n_p): - self.pipeline_mp(infos[i::n_p]) - else: - ps = [] - for i in range(n_p): - p = multiprocessing.Process( - target=self.pipeline_mp, args=(infos[i::n_p], i) - ) - ps.append(p) - p.start() - for i in range(n_p): - ps[i].join() - except: - println("Fail. %s" % traceback.format_exc()) - - -def preprocess_trainset(inp_root, sr, n_p, exp_dir): - pp = PreProcess(sr, exp_dir) - println("start preprocess") - println(sys.argv) - pp.pipeline_mp_inp_dir(inp_root, n_p) - println("end preprocess") - - -if __name__ == "__main__": - preprocess_trainset(inp_root, sr, n_p, exp_dir) diff --git a/uvr5_weights/.gitignore b/uvr5_weights/.gitignore deleted file mode 100644 index d6b7ef32c..000000000 --- a/uvr5_weights/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -* -!.gitignore diff --git a/vc_infer_pipeline.py b/vc_infer_pipeline.py deleted file mode 100644 index eed21b3eb..000000000 --- a/vc_infer_pipeline.py +++ /dev/null @@ -1,594 +0,0 @@ -from scipy.io import wavfile -import numpy as np, parselmouth, torch, pdb, sys, os -from time import time as ttime -import torch.nn.functional as F -import torchcrepe # Fork feature. Use the crepe f0 algorithm. New dependency (pip install torchcrepe) -from torch import Tensor -import scipy.signal as signal -import pyworld, os, traceback, faiss, librosa, torchcrepe -from scipy import signal -from functools import lru_cache - -from functools import partial -import re - -from tqdm import tqdm - -now_dir = os.getcwd() -sys.path.append(now_dir) - -from LazyImport import lazyload - -torchcrepe = lazyload("torchcrepe") # Fork Feature. 
Crepe algo for training and preprocess -torch = lazyload("torch") -rmvpe = lazyload("rmvpe") - -bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000) - -input_audio_path2wav = {} - - -@lru_cache -def cache_harvest_f0(input_audio_path, fs, f0max, f0min, frame_period): - audio = input_audio_path2wav[input_audio_path] - f0, t = pyworld.harvest( - audio, - fs=fs, - f0_ceil=f0max, - f0_floor=f0min, - frame_period=frame_period, - ) - f0 = pyworld.stonemask(audio, f0, t, fs) - return f0 - - -def change_rms(data1, sr1, data2, sr2, rate): # 1是输入音频,2是输出音频,rate是2的占比 - # print(data1.max(),data2.max()) - rms1 = librosa.feature.rms( - y=data1, frame_length=sr1 // 2 * 2, hop_length=sr1 // 2 - ) # 每半秒一个点 - rms2 = librosa.feature.rms(y=data2, frame_length=sr2 // 2 * 2, hop_length=sr2 // 2) - rms1 = torch.from_numpy(rms1) - rms1 = F.interpolate( - rms1.unsqueeze(0), size=data2.shape[0], mode="linear" - ).squeeze() - rms2 = torch.from_numpy(rms2) - rms2 = F.interpolate( - rms2.unsqueeze(0), size=data2.shape[0], mode="linear" - ).squeeze() - rms2 = torch.max(rms2, torch.zeros_like(rms2) + 1e-6) - data2 *= ( - torch.pow(rms1, torch.tensor(1 - rate)) - * torch.pow(rms2, torch.tensor(rate - 1)) - ).numpy() - return data2 - - -class VC(object): - def __init__(self, tgt_sr, config): - self.x_pad, self.x_query, self.x_center, self.x_max, self.is_half = ( - config.x_pad, - config.x_query, - config.x_center, - config.x_max, - config.is_half, - ) - - self.sr = 16000 # hubert输入采样率 - self.window = 160 # 每帧点数 - self.t_pad = self.sr * self.x_pad # 每条前后pad时间 - self.t_pad_tgt = tgt_sr * self.x_pad - self.t_pad2 = self.t_pad * 2 - self.t_query = self.sr * self.x_query # 查询切点前后查询时间 - self.t_center = self.sr * self.x_center # 查询切点位置 - self.t_max = self.sr * self.x_max # 免查询时长阈值 - self.device = config.device - self.model_rmvpe = rmvpe.RMVPE("rmvpe.pt", is_half=False, device="cuda:0") - self.f0_method_dict = { - "pm": self.get_pm, - "harvest": self.get_harvest, - "dio": self.get_dio, - "rmvpe": self.get_rmvpe, - "rmvpe+": self.get_pitch_dependant_rmvpe, - "crepe": self.get_f0_official_crepe_computation, - "crepe-tiny": partial(self.get_f0_official_crepe_computation, model='model'), - "mangio-crepe": self.get_f0_crepe_computation, - "mangio-crepe-tiny": partial(self.get_f0_crepe_computation, model='model'), - - } - - # Fork Feature: Get the best torch device to use for f0 algorithms that require a torch device. Will return the type (torch.device) - def get_optimal_torch_device(self, index: int = 0) -> torch.device: - if torch.cuda.is_available(): - return torch.device( - f"cuda:{index % torch.cuda.device_count()}" - ) # Very fast - elif torch.backends.mps.is_available(): - return torch.device("mps") - return torch.device("cpu") - - # Fork Feature: Compute f0 with the crepe method - def get_f0_crepe_computation( - self, - x, - f0_min, - f0_max, - p_len, - *args, # 512 before. Hop length changes the speed that the voice jumps to a different dramatic pitch. Lower hop lengths means more pitch accuracy but longer inference time. - **kwargs, # Either use crepe-tiny "tiny" or crepe "full". Default is full - ): - x = x.astype( - np.float32 - ) # fixes the F.conv2D exception. We needed to convert double to float. 
- x /= np.quantile(np.abs(x), 0.999) - torch_device = self.get_optimal_torch_device() - audio = torch.from_numpy(x).to(torch_device, copy=True) - audio = torch.unsqueeze(audio, dim=0) - if audio.ndim == 2 and audio.shape[0] > 1: - audio = torch.mean(audio, dim=0, keepdim=True).detach() - audio = audio.detach() - hop_length = kwargs.get('crepe_hop_length', 160) - model = kwargs.get('model', 'full') - print("Initiating prediction with a crepe_hop_length of: " + str(hop_length)) - pitch: Tensor = torchcrepe.predict( - audio, - self.sr, - hop_length, - f0_min, - f0_max, - model, - batch_size=hop_length * 2, - device=torch_device, - pad=True, - ) - p_len = p_len or x.shape[0] // hop_length - # Resize the pitch for final f0 - source = np.array(pitch.squeeze(0).cpu().float().numpy()) - source[source < 0.001] = np.nan - target = np.interp( - np.arange(0, len(source) * p_len, len(source)) / p_len, - np.arange(0, len(source)), - source, - ) - f0 = np.nan_to_num(target) - return f0 # Resized f0 - - def get_f0_official_crepe_computation( - self, - x, - f0_min, - f0_max, - *args, - **kwargs - ): - # Pick a batch size that doesn't cause memory errors on your gpu - batch_size = 512 - # Compute pitch using first gpu - audio = torch.tensor(np.copy(x))[None].float() - model = kwargs.get('model', 'full') - f0, pd = torchcrepe.predict( - audio, - self.sr, - self.window, - f0_min, - f0_max, - model, - batch_size=batch_size, - device=self.device, - return_periodicity=True, - ) - pd = torchcrepe.filter.median(pd, 3) - f0 = torchcrepe.filter.mean(f0, 3) - f0[pd < 0.1] = 0 - f0 = f0[0].cpu().numpy() - return f0 - - # Fork Feature: Compute pYIN f0 method - def get_f0_pyin_computation(self, x, f0_min, f0_max): - y, sr = librosa.load("saudio/Sidney.wav", self.sr, mono=True) - f0, _, _ = librosa.pyin(y, sr=self.sr, fmin=f0_min, fmax=f0_max) - f0 = f0[1:] # Get rid of extra first frame - return f0 - - def get_pm(self, x, p_len, *args, **kwargs): - f0 = parselmouth.Sound(x, self.sr).to_pitch_ac( - time_step=160 / 16000, - voicing_threshold=0.6, - pitch_floor=kwargs.get('f0_min'), - pitch_ceiling=kwargs.get('f0_max'), - ).selected_array["frequency"] - - return np.pad( - f0, - [[max(0, (p_len - len(f0) + 1) // 2), max(0, p_len - len(f0) - (p_len - len(f0) + 1) // 2)]], - mode="constant" - ) - - def get_harvest(self, x, *args, **kwargs): - f0_spectral = pyworld.harvest( - x.astype(np.double), - fs=self.sr, - f0_ceil=kwargs.get('f0_max'), - f0_floor=kwargs.get('f0_min'), - frame_period=1000 * kwargs.get('hop_length', 160) / self.sr, - ) - return pyworld.stonemask(x.astype(np.double), *f0_spectral, self.sr) - - def get_dio(self, x, *args, **kwargs): - f0_spectral = pyworld.dio( - x.astype(np.double), - fs=self.sr, - f0_ceil=kwargs.get('f0_max'), - f0_floor=kwargs.get('f0_min'), - frame_period=1000 * kwargs.get('hop_length', 160) / self.sr, - ) - return pyworld.stonemask(x.astype(np.double), *f0_spectral, self.sr) - - - def get_rmvpe(self, x, *args, **kwargs): - return self.model_rmvpe.infer_from_audio(x, thred=0.03) - - def get_pitch_dependant_rmvpe(self, x, f0_min=1, f0_max=40000, *args, **kwargs): - return self.model_rmvpe.infer_from_audio_with_pitch(x, thred=0.03, f0_min=f0_min, f0_max=f0_max) - - # Fork Feature: Acquire median hybrid f0 estimation calculation - def get_f0_hybrid_computation( - self, - methods_str, - input_audio_path, - x, - f0_min, - f0_max, - p_len, - filter_radius, - crepe_hop_length, - time_step - ): - # Get various f0 methods from input to use in the computation stack - params = {'x': x, 'p_len': 
p_len, 'f0_min': f0_min, - 'f0_max': f0_max, 'time_step': time_step, 'filter_radius': filter_radius, - 'crepe_hop_length': crepe_hop_length, 'model': "full" - } - methods_str = re.search('hybrid\[(.+)\]', methods_str) - if methods_str: # Ensure a match was found - methods = [method.strip() for method in methods_str.group(1).split('+')] - f0_computation_stack = [] - - print(f"Calculating f0 pitch estimations for methods: {str(methods)}") - x = x.astype(np.float32) - x /= np.quantile(np.abs(x), 0.999) - # Get f0 calculations for all methods specified - - for method in methods: - if method not in self.f0_method_dict: - print(f"Method {method} not found.") - continue - f0 = self.f0_method_dict[method](**params) - if method == 'harvest' and filter_radius > 2: - f0 = signal.medfilt(f0, 3) - f0 = f0[1:] # Get rid of first frame. - f0_computation_stack.append(f0) - - for fc in f0_computation_stack: - print(len(fc)) - - print(f"Calculating hybrid median f0 from the stack of: {str(methods)}") - f0_median_hybrid = np.nanmedian(f0_computation_stack, axis=0) - return f0_median_hybrid - - def get_f0( - self, - input_audio_path, - x, - p_len, - f0_up_key, - f0_method, - filter_radius, - crepe_hop_length, - inp_f0=None, - f0_min=50, - f0_max=1100, - ): - global input_audio_path2wav - time_step = self.window / self.sr * 1000 - f0_mel_min = 1127 * np.log(1 + f0_min / 700) - f0_mel_max = 1127 * np.log(1 + f0_max / 700) - params = {'x': x, 'p_len': p_len, 'f0_up_key': f0_up_key, 'f0_min': f0_min, - 'f0_max': f0_max, 'time_step': time_step, 'filter_radius': filter_radius, - 'crepe_hop_length': crepe_hop_length, 'model': "full" - } - f0 = self.f0_method_dict[f0_method](**params) - - if "hybrid" in f0_method: - # Perform hybrid median pitch estimation - input_audio_path2wav[input_audio_path] = x.astype(np.double) - f0 = self.get_f0_hybrid_computation( - f0_method,+ - input_audio_path, - x, - f0_min, - f0_max, - p_len, - filter_radius, - crepe_hop_length, - time_step, - ) - - f0 *= pow(2, f0_up_key / 12) - # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) - tf0 = self.sr // self.window # 每秒f0点数 - if inp_f0 is not None: - delta_t = np.round( - (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1 - ).astype("int16") - replace_f0 = np.interp( - list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1] - ) - shape = f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)].shape[0] - f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)] = replace_f0[ - :shape - ] - - f0bak = f0.copy() - f0_mel = 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / ( - f0_mel_max - f0_mel_min - ) + 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > 255] = 255 - f0_coarse = np.rint(f0_mel).astype(np.int) - - return f0_coarse, f0bak # 1-0 - - def vc( - self, - model, - net_g, - sid, - audio0, - pitch, - pitchf, - times, - index, - big_npy, - index_rate, - version, - protect, - ): # ,file_index,file_big_npy - feats = torch.from_numpy(audio0) - if self.is_half: - feats = feats.half() - else: - feats = feats.float() - if feats.dim() == 2: # double channels - feats = feats.mean(-1) - assert feats.dim() == 1, feats.dim() - feats = feats.view(1, -1) - padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False) - - inputs = { - "source": feats.to(self.device), - "padding_mask": padding_mask, - "output_layer": 9 if version == "v1" else 12, - } - t0 = ttime() - with torch.no_grad(): - logits = model.extract_features(**inputs) - feats = model.final_proj(logits[0]) if 
version == "v1" else logits[0] - if protect < 0.5 and pitch != None and pitchf != None: - feats0 = feats.clone() - if ( - isinstance(index, type(None)) == False - and isinstance(big_npy, type(None)) == False - and index_rate != 0 - ): - npy = feats[0].cpu().numpy() - if self.is_half: - npy = npy.astype("float32") - - # _, I = index.search(npy, 1) - # npy = big_npy[I.squeeze()] - - score, ix = index.search(npy, k=8) - weight = np.square(1 / score) - weight /= weight.sum(axis=1, keepdims=True) - npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1) - - if self.is_half: - npy = npy.astype("float16") - feats = ( - torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate - + (1 - index_rate) * feats - ) - - feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1) - if protect < 0.5 and pitch != None and pitchf != None: - feats0 = F.interpolate(feats0.permute(0, 2, 1), scale_factor=2).permute( - 0, 2, 1 - ) - t1 = ttime() - p_len = audio0.shape[0] // self.window - if feats.shape[1] < p_len: - p_len = feats.shape[1] - if pitch != None and pitchf != None: - pitch = pitch[:, :p_len] - pitchf = pitchf[:, :p_len] - - if protect < 0.5 and pitch != None and pitchf != None: - pitchff = pitchf.clone() - pitchff[pitchf > 0] = 1 - pitchff[pitchf < 1] = protect - pitchff = pitchff.unsqueeze(-1) - feats = feats * pitchff + feats0 * (1 - pitchff) - feats = feats.to(feats0.dtype) - p_len = torch.tensor([p_len], device=self.device).long() - with torch.no_grad(): - if pitch != None and pitchf != None: - audio1 = ( - (net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0]) - .data.cpu() - .float() - .numpy() - ) - else: - audio1 = ( - (net_g.infer(feats, p_len, sid)[0][0, 0]).data.cpu().float().numpy() - ) - del feats, p_len, padding_mask - if torch.cuda.is_available(): - torch.cuda.empty_cache() - t2 = ttime() - times[0] += t1 - t0 - times[2] += t2 - t1 - return audio1 - - def process_t(self, t, s, window, audio_pad, pitch, pitchf, times, index, big_npy, index_rate, version, protect, t_pad_tgt, if_f0, sid, model, net_g): - t = t // window * window - if if_f0 == 1: - return self.vc( - model, - net_g, - sid, - audio_pad[s : t + t_pad_tgt + window], - pitch[:, s // window : (t + t_pad_tgt) // window], - pitchf[:, s // window : (t + t_pad_tgt) // window], - times, - index, - big_npy, - index_rate, - version, - protect, - )[t_pad_tgt : -t_pad_tgt] - else: - return self.vc( - model, - net_g, - sid, - audio_pad[s : t + t_pad_tgt + window], - None, - None, - times, - index, - big_npy, - index_rate, - version, - protect, - )[t_pad_tgt : -t_pad_tgt] - - def pipeline(self, model, net_g, sid, audio, input_audio_path, times, f0_up_key, f0_method, - file_index, index_rate, if_f0, filter_radius, tgt_sr, resample_sr, rms_mix_rate, - version, protect, crepe_hop_length, f0_file=None, f0_min=50, f0_max=1100): - - try: - if file_index == "": - print("File index was empty.") - index = None - big_npy = None - else: - if os.path.exists(file_index): - sys.stdout.write(f"Attempting to load {file_index}....\n") - sys.stdout.flush() - else: - sys.stdout.write(f"Attempting to load {file_index}.... 
(despite it not existing)\n") - sys.stdout.flush() - index = faiss.read_index(file_index) - big_npy = index.reconstruct_n(0, index.ntotal) - except Exception: - print("Could not open Faiss index file for reading.") - index = None - big_npy = None - - audio = signal.filtfilt(bh, ah, audio) - audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect") - opt_ts = [] - - if audio_pad.shape[0] > self.t_max: - audio_sum = np.zeros_like(audio) - for i in range(self.window): - audio_sum += audio_pad[i : i - self.window] - - for t in range(self.t_center, audio.shape[0], self.t_center): - abs_audio_sum = np.abs(audio_sum[t - self.t_query : t + self.t_query]) - min_abs_audio_sum = abs_audio_sum.min() - opt_ts.append(t - self.t_query + np.where(abs_audio_sum == min_abs_audio_sum)[0][0]) - - s = 0 - audio_opt = [] - t = None - t1 = ttime() - audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect") - p_len = audio_pad.shape[0] // self.window - inp_f0 = None - - if f0_file is not None: - try: - with open(f0_file.name, "r") as f: - inp_f0 = np.array([list(map(float, line.split(","))) for line in f.read().strip("\n").split("\n")], dtype="float32") - except: - traceback.print_exc() - - sid = torch.tensor(sid, device=self.device).unsqueeze(0).long() - pitch, pitchf = None, None - - if if_f0: - pitch, pitchf = self.get_f0( - input_audio_path, audio_pad, p_len, f0_up_key, f0_method, - filter_radius, crepe_hop_length, inp_f0, f0_min, f0_max) - - pitch = pitch[:p_len].astype(np.int64 if self.device != 'mps' else np.float32) - pitchf = pitchf[:p_len].astype(np.float32) - pitch = torch.from_numpy(pitch).to(self.device).unsqueeze(0) - pitchf = torch.from_numpy(pitchf).to(self.device).unsqueeze(0) - - t2 = ttime() - times[1] += t2 - t1 - - with tqdm(total=len(opt_ts), desc="Processing", unit="window") as pbar: - for i, t in enumerate(opt_ts): - t = t // self.window * self.window - start = s - end = t + self.t_pad2 + self.window - audio_slice = audio_pad[start:end] - pitch_slice = pitch[:, start // self.window:end // self.window] if if_f0 else None - pitchf_slice = pitchf[:, start // self.window:end // self.window] if if_f0 else None - audio_opt.append(self.vc(model, net_g, sid, audio_slice, pitch_slice, pitchf_slice, times, index, big_npy, index_rate, version, protect)[self.t_pad_tgt : -self.t_pad_tgt]) - s = t - pbar.update(1) - pbar.refresh() - - audio_slice = audio_pad[t:] - pitch_slice = pitch[:, t // self.window:] if if_f0 and t is not None else pitch - pitchf_slice = pitchf[:, t // self.window:] if if_f0 and t is not None else pitchf - audio_opt.append(self.vc(model, net_g, sid, audio_slice, pitch_slice, pitchf_slice, times, index, big_npy, index_rate, version, protect)[self.t_pad_tgt : -self.t_pad_tgt]) - - audio_opt = np.concatenate(audio_opt) - if rms_mix_rate != 1: - audio_opt = change_rms(audio, 16000, audio_opt, tgt_sr, rms_mix_rate) - if resample_sr >= 16000 and tgt_sr != resample_sr: - audio_opt = librosa.resample(audio_opt, orig_sr=tgt_sr, target_sr=resample_sr) - - max_int16 = 32768 - audio_max = max(np.abs(audio_opt).max() / 0.99, 1) - audio_opt = (audio_opt * max_int16 / audio_max).astype(np.int16) - - if torch.cuda.is_available(): - torch.cuda.empty_cache() - - print("Returning completed audio...") - print("-------------------") - - output_folder = "audio-outputs" - output_filename = "generated_audio_{}.wav" - output_count = 1 - while True: - current_output_path = os.path.join(output_folder, output_filename.format(output_count)) - if not 
os.path.exists(current_output_path): - break - output_count += 1 - - # Save the generated audio as a WAV file - wavfile.write(current_output_path, tgt_sr, audio_opt) - - print(f"Generated audio saved to: {current_output_path}") - - return audio_opt diff --git a/venv.sh b/venv.sh deleted file mode 100644 index 17f58bf2a..000000000 --- a/venv.sh +++ /dev/null @@ -1 +0,0 @@ -python3 -m venv .venv diff --git a/weights/.gitignore b/weights/.gitignore deleted file mode 100644 index d6b7ef32c..000000000 --- a/weights/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -* -!.gitignore
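For reference, below is a minimal, self-contained sketch of the output stage that the deleted vc_infer_pipeline.py implemented: the float waveform is peak-normalized into the int16 range and written to the first unused generated_audio_N.wav slot under audio-outputs/. The save_generated_audio wrapper, the os.makedirs call, and the synthetic sine input are illustrative additions and were not part of the removed file.

import os
import numpy as np
from scipy.io import wavfile

def save_generated_audio(audio_opt, tgt_sr, output_folder="audio-outputs"):
    # Scale the float signal into int16 range; the 0.99 headroom and the
    # floor of 1 mirror the normalization used by the removed pipeline().
    max_int16 = 32768
    audio_max = max(np.abs(audio_opt).max() / 0.99, 1)
    audio_int16 = (audio_opt * max_int16 / audio_max).astype(np.int16)

    # Added here so the sketch runs standalone; the removed snippet did not
    # create the output folder itself.
    os.makedirs(output_folder, exist_ok=True)

    # Pick the first generated_audio_N.wav name that does not exist yet.
    output_count = 1
    while True:
        current_output_path = os.path.join(
            output_folder, f"generated_audio_{output_count}.wav"
        )
        if not os.path.exists(current_output_path):
            break
        output_count += 1

    wavfile.write(current_output_path, tgt_sr, audio_int16)
    return current_output_path

if __name__ == "__main__":
    # Synthetic one-second 220 Hz tone as stand-in output audio.
    sr = 40000
    t = np.linspace(0, 1.0, sr, endpoint=False)
    demo = (0.5 * np.sin(2 * np.pi * 220.0 * t)).astype(np.float32)
    print("Saved to:", save_generated_audio(demo, sr))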