diff --git a/app.py b/app.py
index 130d71e43..744744ba1 100644
--- a/app.py
+++ b/app.py
@@ -12,6 +12,7 @@ from tabs.download.download import download_tab
 from tabs.tts.tts import tts_tab
 from assets.discord_presence import rich_presence
 
+
 rich_presence()
 
 with gr.Blocks(theme="ParityError/Interstellar", title="Applio") as Applio:
diff --git a/assets/discord_presence.py b/assets/discord_presence.py
index 27c311bc0..4f9815138 100644
--- a/assets/discord_presence.py
+++ b/assets/discord_presence.py
@@ -2,6 +2,7 @@
 import datetime as dt
 import time
 
+
 def rich_presence():
     client_id = "1144714449563955302"
     RPC = Presence(client_id)
@@ -19,6 +20,7 @@ def rich_presence():
     )
     return RPC
 
+
 if __name__ == "__main__":
     rpc = rich_presence()
 
@@ -26,4 +28,4 @@ def rich_presence():
         while True:
             time.sleep(15)
     except KeyboardInterrupt:
-        rpc.close()
\ No newline at end of file
+        rpc.close()
diff --git a/core.py b/core.py
index 88e2b4478..3d01600ec 100644
--- a/core.py
+++ b/core.py
@@ -36,7 +36,7 @@ def run_infer_script(
     output_path,
     pth_file,
     index_path,
-    split_audio
+    split_audio,
 ):
     infer_script_path = os.path.join("rvc", "infer", "infer.py")
     command = [
@@ -51,7 +51,7 @@ def run_infer_script(
         output_path,
         pth_file,
         index_path,
-        str(split_audio)
+        str(split_audio),
     ]
     subprocess.run(command)
     return f"File {input_path} inferred successfully.", output_path
@@ -323,6 +323,7 @@ def run_download_script(model_link):
     subprocess.run(command)
     return f"Model downloaded successfully."
 
+
 # Parse arguments
 def parse_arguments():
     parser = argparse.ArgumentParser(
@@ -670,7 +671,7 @@ def main():
                 args.output_path,
                 args.pth_file,
                 args.index_path,
-                args.split_audio
+                args.split_audio,
             )
         elif args.mode == "batch_infer":
             run_batch_infer_script(
diff --git a/rvc/infer/infer.py b/rvc/infer/infer.py
index 69c645bc5..d9188202e 100644
--- a/rvc/infer/infer.py
+++ b/rvc/infer/infer.py
@@ -83,7 +83,9 @@ def vc_single(
             result, new_dir_path = process_audio(input_audio_path)
             if result == "Error":
                 return "Error with Split Audio", None
-            dir_path = new_dir_path.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
+            dir_path = (
+                new_dir_path.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
+            )
             if dir_path != "":
                 paths = [
                     os.path.join(root, name)
@@ -108,14 +110,17 @@ def vc_single(
                         path,
                         False,
                     )
-                    #new_dir_path
+                    # new_dir_path
             except Exception as error:
                 print(error)
                 return "Error", None
             print("Finished processing segmented audio, now merging audio...")
-            merge_timestamps_file = os.path.join(os.path.dirname(new_dir_path), f"{os.path.basename(input_audio_path).split('.')[0]}_timestamps.txt")
-            tgt_sr, audio_opt = merge_audio(merge_timestamps_file)
-
+            merge_timestamps_file = os.path.join(
+                os.path.dirname(new_dir_path),
+                f"{os.path.basename(input_audio_path).split('.')[0]}_timestamps.txt",
+            )
+            tgt_sr, audio_opt = merge_audio(merge_timestamps_file)
+
         else:
             audio_opt = vc.pipeline(
                 hubert_model,
@@ -137,7 +142,6 @@ def vc_single(
                 hop_length,
                 f0_file=f0_file,
             )
-
 
         if output_path is not None:
             sf.write(output_path, audio_opt, tgt_sr, format="WAV")
@@ -243,7 +247,7 @@ def get_vc(weight_root, sid):
             index_rate=index_rate,
             hop_length=hop_length,
             output_path=output_file,
-            split_audio=split_audio
+            split_audio=split_audio,
         )
 
         if os.path.exists(output_file) and os.path.getsize(output_file) > 0:
diff --git a/rvc/train/train.py b/rvc/train/train.py
index 961db6480..8fb4a14e4 100644
--- a/rvc/train/train.py
+++ b/rvc/train/train.py
@@ -573,7 +573,9 @@ def train_and_evaluate(rank, epoch, hps, nets, optims, scaler, loaders, writers,
             )
 
         if rank == 0:
-            print(f"{hps.name} | epoch={epoch} | step={global_step} | {epoch_recorder.record()} | loss_disc={loss_disc:.3f} | loss_gen={loss_gen:.3f} | loss_fm={loss_fm:.3f} | loss_mel={loss_mel:.3f} | loss_kl={loss_kl:.3f}")
+            print(
+                f"{hps.name} | epoch={epoch} | step={global_step} | {epoch_recorder.record()} | loss_disc={loss_disc:.3f} | loss_gen={loss_gen:.3f} | loss_fm={loss_fm:.3f} | loss_mel={loss_mel:.3f} | loss_kl={loss_kl:.3f}"
+            )
         if epoch >= hps.total_epoch and rank == 0:
             print(
                 f"Training has been successfully completed with {epoch} epoch and {global_step} steps."
diff --git a/tabs/inference/inference.py b/tabs/inference/inference.py
index 145486eb1..e048d7807 100644
--- a/tabs/inference/inference.py
+++ b/tabs/inference/inference.py
@@ -58,8 +58,11 @@
     and "_output" not in name
 ]
 
+
 def output_path_fn(input_audio_path):
-    original_name_without_extension = os.path.basename(input_audio_path).rsplit(".", 1)[0]
+    original_name_without_extension = os.path.basename(input_audio_path).rsplit(".", 1)[
+        0
+    ]
     new_name = original_name_without_extension + "_output.wav"
     output_path = os.path.join(os.path.dirname(input_audio_path), new_name)
     return output_path
@@ -248,7 +251,9 @@ def inference_tab():
                 output_path = gr.Textbox(
                     label=i18n("Output Path"),
                     placeholder=i18n("Enter output path"),
-                    value=output_path_fn(audio_paths[0]) if audio_paths else os.path.join(now_dir, "assets", "audios", "output.wav"),
+                    value=output_path_fn(audio_paths[0])
+                    if audio_paths
+                    else os.path.join(now_dir, "assets", "audios", "output.wav"),
                     interactive=True,
                 )
                 split_audio = gr.Checkbox(
diff --git a/tabs/train/train.py b/tabs/train/train.py
index 4064e1a26..2b8834e9c 100644
--- a/tabs/train/train.py
+++ b/tabs/train/train.py
@@ -162,7 +162,9 @@ def train_tab():
                     label=i18n("Save Only Latest"), value=False, interactive=True
                 )
                 save_every_weights = gr.Checkbox(
-                    label=i18n("Save Every Weights"), value=False, visible=False # Working on fix this - Only saving on final epoch
+                    label=i18n("Save Every Weights"),
+                    value=False,
+                    visible=False,  # Working on fix this - Only saving on final epoch
                 )
                 custom_pretrained = gr.Checkbox(
                     label=i18n("Custom Pretrained"), value=False, interactive=True