diff --git a/AutoSubs-App/src-tauri/resources/AutoSubs V2.lua b/AutoSubs-App/src-tauri/resources/AutoSubs V2.lua
index e1b78ee..236b70a 100644
--- a/AutoSubs-App/src-tauri/resources/AutoSubs V2.lua
+++ b/AutoSubs-App/src-tauri/resources/AutoSubs V2.lua
@@ -363,10 +363,9 @@ function AddSubtitles(filePath, trackIndex, templateName, textFormat, removePunc
         end
     end
 
-    -- If within 1 second, join the subtitles
+    -- If within threshold, join the subtitles
     local clipList = {}
-    local joinThreshold = frame_rate
-    local subtitlesCount = #subtitles
+    local joinThreshold = frame_rate * 4
 
     for i, subtitle in ipairs(subtitles) do
         -- print("Adding subtitle: ", subtitle["text"])
diff --git a/Transcription-Server/server.py b/Transcription-Server/server.py
index 3d25ce1..4424567 100644
--- a/Transcription-Server/server.py
+++ b/Transcription-Server/server.py
@@ -218,9 +218,8 @@ def log_progress(seek, total_duration):
 
 def transcribe_audio(audio_file, kwargs, max_words, max_chars, sensitive_words):
     if (platform.system() == 'Windows'):
-        compute_type = "int8_float16" if kwargs["device"] == "cuda" else "int8"
-        model = stable_whisper.load_faster_whisper(
-            kwargs["model"], device=kwargs["device"], compute_type=compute_type)
+        compute_type = "float16" if kwargs["device"] == "cuda" else "int8"
+        model = stable_whisper.load_faster_whisper(kwargs["model"], device=kwargs["device"], compute_type=compute_type)
         if kwargs["language"] == "auto":
             result = model.transcribe_stable(
                 audio_file, task=kwargs["task"], regroup=True, verbose=True, vad_filter=True, progress_callback=log_progress)
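
For reference, a minimal sketch of the behaviour the server.py hunk lands on, assuming only that kwargs carries "model" and "device" as it does in transcribe_audio (the helper name load_windows_model is hypothetical): on Windows, CUDA now loads the faster-whisper weights in full float16 precision instead of the mixed int8_float16 quantization, while CPU inference keeps int8.

    import platform
    import stable_whisper  # stable-ts wrapper around faster-whisper

    def load_windows_model(kwargs):
        # Hypothetical helper mirroring the Windows branch of transcribe_audio():
        # CUDA devices get full float16 weights (previously int8_float16),
        # CPU devices stay on int8 quantization.
        compute_type = "float16" if kwargs["device"] == "cuda" else "int8"
        return stable_whisper.load_faster_whisper(
            kwargs["model"], device=kwargs["device"], compute_type=compute_type)

float16 trades some extra VRAM for full-precision weights on CUDA; the Lua hunk separately widens the subtitle join window from roughly one second of frames (frame_rate) to four (frame_rate * 4) and drops the unused subtitlesCount local.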