Merge pull request #190 from IAHispano/formatter-main
chore(format): run black on main
blaisewf authored Jan 19, 2024
2 parents 994af3d + c53412a commit 02bd34a
Showing 7 changed files with 32 additions and 15 deletions.
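
A commit like this is typically produced by running the black formatter over the whole repository. A minimal sketch of how such a pass might be reproduced or verified, mirroring the subprocess.run pattern core.py already uses (black must be installed; the helper names are illustrative and not part of this PR):

import subprocess


def format_repository(path="."):
    # Rewrite every Python file under `path` in place with black's defaults.
    subprocess.run(["black", path], check=True)


def check_repository(path="."):
    # "black --check" reports files that would change without touching them,
    # which is useful as a CI gate to keep the tree formatted after this commit.
    return subprocess.run(["black", "--check", path]).returncode == 0


if __name__ == "__main__":
    format_repository()
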
1 change: 1 addition & 0 deletions app.py
@@ -12,6 +12,7 @@
from tabs.download.download import download_tab
from tabs.tts.tts import tts_tab
from assets.discord_presence import rich_presence

rich_presence()

with gr.Blocks(theme="ParityError/Interstellar", title="Applio") as Applio:
4 changes: 3 additions & 1 deletion assets/discord_presence.py
@@ -2,6 +2,7 @@
import datetime as dt
import time


def rich_presence():
client_id = "1144714449563955302"
RPC = Presence(client_id)
@@ -19,11 +20,12 @@ def rich_presence():
)
return RPC


if __name__ == "__main__":
rpc = rich_presence()

try:
while True:
time.sleep(15)
except KeyboardInterrupt:
rpc.close()
rpc.close()
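
The blank lines added in app.py and discord_presence.py come from black's spacing rules: two blank lines surround top-level definitions, and every file ends with exactly one trailing newline (which is why rpc.close() shows up as both a removed and a re-added last line). A minimal illustration of the same shape, not code from this repository:

import time


def rich_presence_stub():
    # black separates top-level definitions from surrounding code with two blank lines.
    return time.time()


if __name__ == "__main__":
    # black also guarantees a single newline at the end of the file.
    rich_presence_stub()
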
7 changes: 4 additions & 3 deletions core.py
@@ -36,7 +36,7 @@ def run_infer_script(
output_path,
pth_file,
index_path,
split_audio
split_audio,
):
infer_script_path = os.path.join("rvc", "infer", "infer.py")
command = [
@@ -51,7 +51,7 @@
output_path,
pth_file,
index_path,
str(split_audio)
str(split_audio),
]
subprocess.run(command)
return f"File {input_path} inferred successfully.", output_path
@@ -323,6 +323,7 @@ def run_download_script(model_link):
subprocess.run(command)
return f"Model downloaded successfully."


# Parse arguments
def parse_arguments():
parser = argparse.ArgumentParser(
@@ -670,7 +671,7 @@ def main():
args.output_path,
args.pth_file,
args.index_path,
args.split_audio
args.split_audio,
)
elif args.mode == "batch_infer":
run_batch_infer_script(
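
The core.py changes show black's trailing-comma rule: when a call or signature spans multiple lines, black keeps one argument per line and adds a comma after the last one (the "magic trailing comma", which also keeps the list exploded on later runs). A small illustration with made-up names, not taken from core.py:

def run_example_script(input_path, output_path, split_audio):
    # Stand-in for the real inference entry point; only the call formatting matters here.
    return f"File {input_path} inferred successfully.", output_path


# black keeps a multi-line call exploded and appends a trailing comma to the last argument:
message, path = run_example_script(
    "assets/audios/input.wav",
    "assets/audios/output.wav",
    True,
)
print(message, path)
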
18 changes: 11 additions & 7 deletions rvc/infer/infer.py
@@ -83,7 +83,9 @@ def vc_single(
result, new_dir_path = process_audio(input_audio_path)
if result == "Error":
return "Error with Split Audio", None
dir_path = new_dir_path.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
dir_path = (
new_dir_path.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
)
if dir_path != "":
paths = [
os.path.join(root, name)
@@ -108,14 +110,17 @@
path,
False,
)
#new_dir_path
# new_dir_path
except Exception as error:
print(error)
return "Error", None
print("Finished processing segmented audio, now merging audio...")
merge_timestamps_file = os.path.join(os.path.dirname(new_dir_path), f"{os.path.basename(input_audio_path).split('.')[0]}_timestamps.txt")
tgt_sr, audio_opt = merge_audio(merge_timestamps_file)

merge_timestamps_file = os.path.join(
os.path.dirname(new_dir_path),
f"{os.path.basename(input_audio_path).split('.')[0]}_timestamps.txt",
)
tgt_sr, audio_opt = merge_audio(merge_timestamps_file)

else:
audio_opt = vc.pipeline(
hubert_model,
@@ -137,7 +142,6 @@
hop_length,
f0_file=f0_file,
)


if output_path is not None:
sf.write(output_path, audio_opt, tgt_sr, format="WAV")
@@ -243,7 +247,7 @@ def get_vc(weight_root, sid):
index_rate=index_rate,
hop_length=hop_length,
output_path=output_file,
split_audio=split_audio
split_audio=split_audio,
)

if os.path.exists(output_file) and os.path.getsize(output_file) > 0:
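
The long-line rewrites in infer.py (and the similar ones in train.py and the tabs modules below) are black's line-length handling: statements longer than the default 88-character limit are wrapped in parentheses and broken at argument boundaries instead of being left on one long line. A rough, self-contained rendering of the wrapped form with invented paths (the real values come from the inference pipeline):

import os

input_audio_path = "assets/audios/sample.wav"
new_dir_path = "assets/audios/sample_split"

# black turns the original single-line assignment into a parenthesized, multi-line call:
merge_timestamps_file = os.path.join(
    os.path.dirname(new_dir_path),
    f"{os.path.basename(input_audio_path).split('.')[0]}_timestamps.txt",
)
print(merge_timestamps_file)
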
4 changes: 3 additions & 1 deletion rvc/train/train.py
@@ -573,7 +573,9 @@ def train_and_evaluate(rank, epoch, hps, nets, optims, scaler, loaders, writers,
)

if rank == 0:
print(f"{hps.name} | epoch={epoch} | step={global_step} | {epoch_recorder.record()} | loss_disc={loss_disc:.3f} | loss_gen={loss_gen:.3f} | loss_fm={loss_fm:.3f} | loss_mel={loss_mel:.3f} | loss_kl={loss_kl:.3f}")
print(
f"{hps.name} | epoch={epoch} | step={global_step} | {epoch_recorder.record()} | loss_disc={loss_disc:.3f} | loss_gen={loss_gen:.3f} | loss_fm={loss_fm:.3f} | loss_mel={loss_mel:.3f} | loss_kl={loss_kl:.3f}"
)
if epoch >= hps.total_epoch and rank == 0:
print(
f"Training has been successfully completed with {epoch} epoch and {global_step} steps."
9 changes: 7 additions & 2 deletions tabs/inference/inference.py
@@ -58,8 +58,11 @@
and "_output" not in name
]


def output_path_fn(input_audio_path):
original_name_without_extension = os.path.basename(input_audio_path).rsplit(".", 1)[0]
original_name_without_extension = os.path.basename(input_audio_path).rsplit(".", 1)[
0
]
new_name = original_name_without_extension + "_output.wav"
output_path = os.path.join(os.path.dirname(input_audio_path), new_name)
return output_path
@@ -248,7 +251,9 @@ def inference_tab():
output_path = gr.Textbox(
label=i18n("Output Path"),
placeholder=i18n("Enter output path"),
value=output_path_fn(audio_paths[0]) if audio_paths else os.path.join(now_dir, "assets", "audios", "output.wav"),
value=output_path_fn(audio_paths[0])
if audio_paths
else os.path.join(now_dir, "assets", "audios", "output.wav"),
interactive=True,
)
split_audio = gr.Checkbox(
4 changes: 3 additions & 1 deletion tabs/train/train.py
@@ -162,7 +162,9 @@ def train_tab():
label=i18n("Save Only Latest"), value=False, interactive=True
)
save_every_weights = gr.Checkbox(
label=i18n("Save Every Weights"), value=False, visible=False # Working on fix this - Only saving on final epoch
label=i18n("Save Every Weights"),
value=False,
visible=False, # Working on fix this - Only saving on final epoch
)
custom_pretrained = gr.Checkbox(
label=i18n("Custom Pretrained"), value=False, interactive=True
