LAP-DEV committed on
Commit aa05b82 · verified · 1 Parent(s): 81f2469

Upload app.py

Files changed (1)
  1. app.py +437 -0
app.py ADDED
@@ -0,0 +1,437 @@
+import os
+import argparse
+import gradio as gr
+import yaml
+
+from modules.utils.paths import (FASTER_WHISPER_MODELS_DIR, DIARIZATION_MODELS_DIR, OUTPUT_DIR, WHISPER_MODELS_DIR,
+                                 INSANELY_FAST_WHISPER_MODELS_DIR, NLLB_MODELS_DIR, DEFAULT_PARAMETERS_CONFIG_PATH,
+                                 UVR_MODELS_DIR)
+from modules.utils.files_manager import load_yaml
+from modules.whisper.whisper_factory import WhisperFactory
+from modules.whisper.faster_whisper_inference import FasterWhisperInference
+from modules.whisper.insanely_fast_whisper_inference import InsanelyFastWhisperInference
+from modules.translation.nllb_inference import NLLBInference
+from modules.ui.htmls import *
+from modules.utils.cli_manager import str2bool
+from modules.utils.youtube_manager import get_ytmetas
+from modules.translation.deepl_api import DeepLAPI
+from modules.whisper.whisper_parameter import *
+
+### Device info ###
+import torch
+import torchaudio
+import torch.cuda as cuda
+import platform
+from transformers import __version__ as transformers_version
+
+device = "cuda" if torch.cuda.is_available() else "cpu"
+num_gpus = cuda.device_count() if torch.cuda.is_available() else 0
+cuda_version = torch.version.cuda if torch.cuda.is_available() else "N/A"
+cudnn_version = torch.backends.cudnn.version() if torch.cuda.is_available() else "N/A"
+os_info = platform.system() + " " + platform.release() + " " + platform.machine()
+
+# Get the available VRAM for each GPU (if available)
+vram_info = []
+if torch.cuda.is_available():
+    for i in range(cuda.device_count()):
+        gpu_properties = cuda.get_device_properties(i)
+        vram_info.append(f"**GPU {i}: {gpu_properties.total_memory / 1024**3:.2f} GB**")
+
+pytorch_version = torch.__version__
+torchaudio_version = torchaudio.__version__ if 'torchaudio' in dir() else "N/A"
+
+device_info = f"""Running on: **{device}**
+
+Number of GPUs available: **{num_gpus}**
+
+CUDA version: **{cuda_version}**
+
+CuDNN version: **{cudnn_version}**
+
+PyTorch version: **{pytorch_version}**
+
+Torchaudio version: **{torchaudio_version}**
+
+Transformers version: **{transformers_version}**
+
+Operating system: **{os_info}**
+
+Available VRAM:
+\t {', '.join(vram_info) if vram_info else '**N/A**'}
+"""
+### End Device info ###
+
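+# App builds the Gradio Blocks UI and wires it to the Whisper, NLLB and DeepL back ends.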
+class App:
+    def __init__(self, args):
+        self.args = args
+        self.app = gr.Blocks(css=CSS, theme=gr.themes.Ocean(), title="Whisper", delete_cache=(60, 3600))
+        self.whisper_inf = WhisperFactory.create_whisper_inference(
+            whisper_type=self.args.whisper_type,
+            whisper_model_dir=self.args.whisper_model_dir,
+            faster_whisper_model_dir=self.args.faster_whisper_model_dir,
+            insanely_fast_whisper_model_dir=self.args.insanely_fast_whisper_model_dir,
+            uvr_model_dir=self.args.uvr_model_dir,
+            output_dir=self.args.output_dir,
+        )
+        self.nllb_inf = NLLBInference(
+            model_dir=self.args.nllb_model_dir,
+            output_dir=os.path.join(self.args.output_dir, "translations")
+        )
+        self.deepl_api = DeepLAPI(
+            output_dir=os.path.join(self.args.output_dir, "translations")
+        )
+        self.default_params = load_yaml(DEFAULT_PARAMETERS_CONFIG_PATH)
+        print(f'Using "{self.args.whisper_type}" implementation')
+        print(f'Detected device: "{self.whisper_inf.device}"')
+
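+    # Build all transcription-related UI components; returns the WhisperParameters
+    # bundle plus the individual controls that launch() wires to the callbacks.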
+    def create_whisper_parameters(self):
+
+        whisper_params = self.default_params["whisper"]
+        diarization_params = self.default_params["diarization"]
+        vad_params = self.default_params["vad"]
+        uvr_params = self.default_params["bgm_separation"]
+
+        # Translation integration
+        translation_params = self.default_params["translation"]
+        nllb_params = translation_params["nllb"]
+
+        with gr.Row():
+            with gr.Column(scale=1):
+                with gr.Row():
+                    input_multi = gr.Radio(["Audio", "Video", "Multiple"], label="Process one or multiple files", value="Audio")
+                with gr.Row():
+                    dd_file_format = gr.Dropdown(choices=["CSV", "SRT", "TXT"], value="CSV", label="Output format", multiselect=True, interactive=True, visible=True)
+                with gr.Row():
+                    cb_timestamp_preview = gr.Checkbox(value=whisper_params["add_timestamp_preview"], label="Show preview with timestamps", interactive=True)
+                    cb_timestamp_file = gr.Checkbox(value=whisper_params["add_timestamp_file"], label="Add timestamp to filenames", interactive=True)
+            with gr.Column(scale=4):
+                input_file_audio = gr.Audio(type='filepath', elem_id="audio_input", show_download_button=True, visible=True, interactive=True)
+                input_file_video = gr.Video(elem_id="audio_input", show_download_button=True, visible=False, interactive=True)
+                input_file_multi = gr.Files(label="Upload one or more audio/video files here", elem_id="audio_input", type='filepath', file_count="multiple", allow_reordering=True, file_types=["audio", "video"], visible=False, interactive=True)
+
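+        # Only one of the three input widgets above is visible at a time;
+        # update_viewer() toggles them when the radio selection changes.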
+        with gr.Row():
+            with gr.Column(scale=4):
+                with gr.Row():
+                    dd_model = gr.Dropdown(choices=self.whisper_inf.available_models, value=whisper_params["model_size"], label="Model", info="Larger models increase transcription quality, but reduce performance", interactive=True)
+                    dd_lang = gr.Dropdown(choices=["Automatic Detection"] + self.whisper_inf.available_langs, value=whisper_params["lang"], label="Language", info="If the language is known upfront, always set it manually", interactive=True)
+                with gr.Row():
+                    dd_translate_model = gr.Dropdown(choices=self.nllb_inf.available_models, value=nllb_params["model_size"], label="Model", info="Model used for translation", interactive=True)
+                    dd_target_lang = gr.Dropdown(choices=["English", "Dutch", "French", "German"], value=nllb_params["target_lang"], label="Language", info="Language used for output translation", interactive=True)
+            with gr.Column(scale=1):
+                with gr.Row():
+                    cb_translate = gr.Checkbox(value=whisper_params["is_translate"], label="Translate to English", info="Translate using OpenAI Whisper's built-in module", interactive=True)
+                    cb_translate_output = gr.Checkbox(value=translation_params["translate_output"], label="Translate to selected language", info="Translate using Facebook's NLLB", interactive=True)
+
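+        # pyannote-based speaker diarization; requires a Hugging Face access token.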
+        with gr.Accordion("Speaker diarization", open=False, visible=True):
+            cb_diarize = gr.Checkbox(value=diarization_params["is_diarize"], label="Use diarization", interactive=True)
+            tb_hf_token = gr.Text(label="Token", value=diarization_params["hf_token"], info="An access token is required to use diarization and can be created [here](https://hf.co/settings/tokens). If you have not done so yet for your account, you also need to accept the terms & conditions of the [diarization](https://huggingface.co/pyannote/speaker-diarization-3.1) & [segmentation](https://huggingface.co/pyannote/segmentation-3.0) models.")
+            dd_diarization_device = gr.Dropdown(label="Device",
+                                                choices=self.whisper_inf.diarizer.get_available_device(),
+                                                value=self.whisper_inf.diarizer.get_device(),
+                                                interactive=True, visible=False)
+
+        with gr.Accordion("Preprocessing options", open=False, visible=True):
+
+            with gr.Accordion("Voice Detection Filter", open=False, visible=True):
+                cb_vad_filter = gr.Checkbox(label="Enable Silero VAD Filter", value=vad_params["vad_filter"],
+                                            info="Enable to transcribe only detected voice parts",
+                                            interactive=True)
+                sd_threshold = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label="Speech Threshold",
+                                         value=vad_params["threshold"],
+                                         info="Lower this to be more sensitive to quiet sounds")
+                nb_min_speech_duration_ms = gr.Number(label="Minimum Speech Duration (ms)", precision=0,
+                                                      value=vad_params["min_speech_duration_ms"],
+                                                      info="Speech chunks shorter than this are discarded")
+                nb_max_speech_duration_s = gr.Number(label="Maximum Speech Duration (s)",
+                                                     value=vad_params["max_speech_duration_s"],
+                                                     info="Maximum duration of speech chunks in seconds")
+                nb_min_silence_duration_ms = gr.Number(label="Minimum Silence Duration (ms)", precision=0,
+                                                       value=vad_params["min_silence_duration_ms"],
+                                                       info="At the end of each speech chunk, wait this long before splitting it off")
+                nb_speech_pad_ms = gr.Number(label="Speech Padding (ms)", precision=0, value=vad_params["speech_pad_ms"],
+                                             info="Speech chunks are padded by this amount on each side")
+
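+            # UVR background music separation, applied before transcription.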
+            with gr.Accordion("Background Music Remover Filter", open=False):
+                cb_bgm_separation = gr.Checkbox(label="Enable Background Music Remover Filter", value=uvr_params["is_separate_bgm"],
+                                                info="Enable to remove background music with a UVR sub-model before transcribing",
+                                                interactive=True)
+                dd_uvr_device = gr.Dropdown(label="Device",
+                                            value=self.whisper_inf.music_separator.device,
+                                            choices=self.whisper_inf.music_separator.available_devices,
+                                            interactive=True, visible=False)
+                dd_uvr_model_size = gr.Dropdown(label="Model", value=uvr_params["model_size"],
+                                                choices=self.whisper_inf.music_separator.available_models,
+                                                interactive=True)
+                nb_uvr_segment_size = gr.Number(label="Segment Size", value=uvr_params["segment_size"], precision=0,
+                                                interactive=True, visible=False)
+                cb_uvr_save_file = gr.Checkbox(label="Save separated files to output", value=uvr_params["save_file"],
+                                               interactive=True, visible=False)
+                cb_uvr_enable_offload = gr.Checkbox(label="Offload sub model after removing background music", value=uvr_params["enable_offload"],
+                                                    interactive=True, visible=False)
+
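+        # Advanced decoding parameters, hidden by default (visible=False).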
+        with gr.Accordion("Advanced processing options", open=False, visible=False):
+            nb_beam_size = gr.Number(label="Beam Size", value=whisper_params["beam_size"], precision=0, interactive=True,
+                                     info="Beam size to use for decoding.")
+            nb_log_prob_threshold = gr.Number(label="Log Probability Threshold", value=whisper_params["log_prob_threshold"], interactive=True,
+                                              info="If the average log probability over sampled tokens is below this value, treat the segment as failed.")
+            nb_no_speech_threshold = gr.Number(label="No Speech Threshold", value=whisper_params["no_speech_threshold"], interactive=True,
+                                               info="If the no-speech probability is higher than this value AND the average log probability over sampled tokens is below 'Log Prob Threshold', consider the segment as silent.")
+            dd_compute_type = gr.Dropdown(label="Compute Type", choices=self.whisper_inf.available_compute_types,
+                                          value=self.whisper_inf.current_compute_type, interactive=True,
+                                          allow_custom_value=True,
+                                          info="Select the type of computation to perform.")
+            nb_best_of = gr.Number(label="Best Of", value=whisper_params["best_of"], interactive=True,
+                                   info="Number of candidates when sampling with non-zero temperature.")
+            nb_patience = gr.Number(label="Patience", value=whisper_params["patience"], interactive=True,
+                                    info="Beam search patience factor.")
+            cb_condition_on_previous_text = gr.Checkbox(label="Condition On Previous Text", value=whisper_params["condition_on_previous_text"],
+                                                        interactive=True,
+                                                        info="Condition on previous text during decoding.")
+            sld_prompt_reset_on_temperature = gr.Slider(label="Prompt Reset On Temperature", value=whisper_params["prompt_reset_on_temperature"],
+                                                        minimum=0, maximum=1, step=0.01, interactive=True,
+                                                        info="Resets the prompt if the temperature is above this value."
+                                                             " Takes effect only if 'Condition On Previous Text' is True.")
+            tb_initial_prompt = gr.Textbox(label="Initial Prompt", value=None, interactive=True,
+                                           info="Initial prompt to use for decoding.")
+            sd_temperature = gr.Slider(label="Temperature", value=whisper_params["temperature"], minimum=0.0,
+                                       step=0.01, maximum=1.0, interactive=True,
+                                       info="Temperature for sampling. It can be a tuple of temperatures, which will be used successively upon failures according to either `Compression Ratio Threshold` or `Log Prob Threshold`.")
+            nb_compression_ratio_threshold = gr.Number(label="Compression Ratio Threshold", value=whisper_params["compression_ratio_threshold"],
+                                                       interactive=True,
+                                                       info="If the gzip compression ratio is above this value, treat the segment as failed.")
+            nb_chunk_length = gr.Number(label="Chunk Length (s)", value=lambda: whisper_params["chunk_length"],
+                                        precision=0,
+                                        info="The length of audio segments. If it is not None, it overrides the default chunk_length of the FeatureExtractor.")
+            with gr.Group(visible=isinstance(self.whisper_inf, FasterWhisperInference)):
+                nb_length_penalty = gr.Number(label="Length Penalty", value=whisper_params["length_penalty"],
+                                              info="Exponential length penalty constant.")
+                nb_repetition_penalty = gr.Number(label="Repetition Penalty", value=whisper_params["repetition_penalty"],
+                                                  info="Penalty applied to the score of previously generated tokens (set > 1 to penalize).")
+                nb_no_repeat_ngram_size = gr.Number(label="No Repeat N-gram Size", value=whisper_params["no_repeat_ngram_size"],
+                                                    precision=0,
+                                                    info="Prevent repetitions of n-grams with this size (set 0 to disable).")
+                tb_prefix = gr.Textbox(label="Prefix", value=lambda: whisper_params["prefix"],
+                                       info="Optional text to provide as a prefix for the first window.")
+                cb_suppress_blank = gr.Checkbox(label="Suppress Blank", value=whisper_params["suppress_blank"],
+                                                info="Suppress blank outputs at the beginning of the sampling.")
+                tb_suppress_tokens = gr.Textbox(label="Suppress Tokens", value=whisper_params["suppress_tokens"],
+                                                info="List of token IDs to suppress. -1 will suppress a default set of symbols as defined in the model's config.json file.")
+                nb_max_initial_timestamp = gr.Number(label="Max Initial Timestamp", value=whisper_params["max_initial_timestamp"],
+                                                     info="The initial timestamp cannot be later than this.")
+                cb_word_timestamps = gr.Checkbox(label="Word Timestamps", value=whisper_params["word_timestamps"],
+                                                 info="Extract word-level timestamps using the cross-attention pattern and dynamic time warping, and include the timestamps for each word in each segment.")
+                tb_prepend_punctuations = gr.Textbox(label="Prepend Punctuations", value=whisper_params["prepend_punctuations"],
+                                                     info="If 'Word Timestamps' is True, merge these punctuation symbols with the next word.")
+                tb_append_punctuations = gr.Textbox(label="Append Punctuations", value=whisper_params["append_punctuations"],
+                                                    info="If 'Word Timestamps' is True, merge these punctuation symbols with the previous word.")
+                nb_max_new_tokens = gr.Number(label="Max New Tokens", value=lambda: whisper_params["max_new_tokens"],
+                                              precision=0,
+                                              info="Maximum number of new tokens to generate per chunk. If not set, the maximum will be set by the default max_length.")
+                nb_hallucination_silence_threshold = gr.Number(label="Hallucination Silence Threshold (sec)",
+                                                               value=lambda: whisper_params["hallucination_silence_threshold"],
+                                                               info="When 'Word Timestamps' is True, skip silent periods longer than this threshold (in seconds) when a possible hallucination is detected.")
+                tb_hotwords = gr.Textbox(label="Hotwords", value=lambda: whisper_params["hotwords"],
+                                         info="Hotwords/hint phrases to provide to the model. Has no effect if 'Prefix' is not None.")
+                nb_language_detection_threshold = gr.Number(label="Language Detection Threshold", value=lambda: whisper_params["language_detection_threshold"],
+                                                            info="If the maximum probability of the language tokens is higher than this value, the language is detected.")
+                nb_language_detection_segments = gr.Number(label="Language Detection Segments", value=lambda: whisper_params["language_detection_segments"],
+                                                           precision=0,
+                                                           info="Number of segments to consider for language detection.")
+            with gr.Group(visible=isinstance(self.whisper_inf, InsanelyFastWhisperInference)):
+                nb_batch_size = gr.Number(label="Batch Size", value=whisper_params["batch_size"], precision=0)
+
+
+        # dd_model.change(fn=self.on_change_models, inputs=[dd_model], outputs=[cb_translate])
+
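+        # Bundle every control so callbacks receive the components in a fixed, known order.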
+        return (
+            WhisperParameters(
+                model_size=dd_model, lang=dd_lang, is_translate=cb_translate, beam_size=nb_beam_size,
+                log_prob_threshold=nb_log_prob_threshold, no_speech_threshold=nb_no_speech_threshold,
+                compute_type=dd_compute_type, best_of=nb_best_of, patience=nb_patience,
+                condition_on_previous_text=cb_condition_on_previous_text, initial_prompt=tb_initial_prompt,
+                temperature=sd_temperature, compression_ratio_threshold=nb_compression_ratio_threshold,
+                vad_filter=cb_vad_filter, threshold=sd_threshold, min_speech_duration_ms=nb_min_speech_duration_ms,
+                max_speech_duration_s=nb_max_speech_duration_s, min_silence_duration_ms=nb_min_silence_duration_ms,
+                speech_pad_ms=nb_speech_pad_ms, chunk_length=nb_chunk_length, batch_size=nb_batch_size,
+                is_diarize=cb_diarize, hf_token=tb_hf_token, diarization_device=dd_diarization_device,
+                length_penalty=nb_length_penalty, repetition_penalty=nb_repetition_penalty,
+                no_repeat_ngram_size=nb_no_repeat_ngram_size, prefix=tb_prefix, suppress_blank=cb_suppress_blank,
+                suppress_tokens=tb_suppress_tokens, max_initial_timestamp=nb_max_initial_timestamp,
+                word_timestamps=cb_word_timestamps, prepend_punctuations=tb_prepend_punctuations,
+                append_punctuations=tb_append_punctuations, max_new_tokens=nb_max_new_tokens,
+                hallucination_silence_threshold=nb_hallucination_silence_threshold, hotwords=tb_hotwords,
+                language_detection_threshold=nb_language_detection_threshold,
+                language_detection_segments=nb_language_detection_segments,
+                prompt_reset_on_temperature=sld_prompt_reset_on_temperature, is_bgm_separate=cb_bgm_separation,
+                uvr_device=dd_uvr_device, uvr_model_size=dd_uvr_model_size, uvr_segment_size=nb_uvr_segment_size,
+                uvr_save_file=cb_uvr_save_file, uvr_enable_offload=cb_uvr_enable_offload
+            ),
+            input_multi,
+            input_file_audio,
+            input_file_video,
+            input_file_multi,
+            dd_file_format,
+            cb_timestamp_file,
+            cb_translate_output,
+            dd_translate_model,
+            dd_target_lang,
+            cb_timestamp_preview,
+            cb_diarize
+        )
+
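+    # Assemble the full page layout and start the Gradio server.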
+    def launch(self):
+        translation_params = self.default_params["translation"]
+        deepl_params = translation_params["deepl"]
+        nllb_params = translation_params["nllb"]
+        uvr_params = self.default_params["bgm_separation"]
+        general_params = self.default_params["general"]
+
+        with self.app:
+
+            website_title = str(general_params["website_title"]).strip()
+            website_subtitle = str(general_params["website_subtitle"]).strip()
+            disclaimer_text = str(general_params["disclaimer_text"]).strip()
+            disclaimer_show = general_params["disclaimer_show"]
+            disclaimer_popup = general_params["disclaimer_popup"]
+
+            with gr.Row():
+                # with gr.Column():
+                #     gr.Markdown(MARKDOWN, elem_id="md_project")
+
+                with gr.Column(scale=3):
+                    gr.Markdown("# " + website_title, elem_id="md_title")
+                    if website_subtitle:
+                        gr.Markdown("### " + website_subtitle, elem_id="md_title")
+
+                with gr.Column(scale=2):
+                    if disclaimer_show:
+                        gr.Markdown("###### ⚠ " + disclaimer_text, elem_id="md_disclaimer")
+                    else:
+                        gr.Markdown("")
+
+            with gr.Tabs():
+                with gr.TabItem("Transcribe audio/video"):  # tab1
+
+                    tb_input_folder = gr.Textbox(label="Input Folder Path (Optional)",
+                                                 info="Optional: specify the folder path where the input files are located if you prefer to use local files instead of uploading them."
+                                                      " Leave this field empty if you do not wish to use a local path.",
+                                                 visible=self.args.colab,
+                                                 value="")
+
+                    whisper_params, input_multi, input_file_audio, input_file_video, input_file_multi, dd_file_format, cb_timestamp_file, cb_translate_output, dd_translate_model, dd_target_lang, cb_timestamp_preview, cb_diarize = self.create_whisper_parameters()
+
+                    with gr.Row():
+                        btn_run = gr.Button("Transcribe", variant="primary")
+                        btn_reset = gr.Button(value="Reset")
+                        btn_reset.click(None, js="window.location.reload()")
+                    with gr.Row():
+                        with gr.Column(scale=4):
+                            # tb_indicator = gr.Textbox(label="Output preview (Always review output generated by AI models)", show_copy_button=True, show_label=True)
+                            tb_indicator = gr.Dataframe(label="Output preview (Always review output generated by AI models)", headers=["Time", "Speaker", "Text"], show_search="search", wrap=True, show_label=True, show_copy_button=True, show_fullscreen_button=True, interactive=False)
+                        with gr.Column(scale=1):
+                            tb_info = gr.Textbox(label="Output info", interactive=False, show_copy_button=True)
+                            files_subtitles = gr.Files(label="Output data", interactive=False, file_count="multiple")
+                            # btn_openfolder = gr.Button('📂', scale=1)
+
+                    params = [input_file_audio, input_file_video, input_file_multi, input_multi, tb_input_folder, dd_file_format, cb_timestamp_file, cb_translate_output, dd_translate_model, dd_target_lang, cb_timestamp_preview, cb_diarize]
+
+                    btn_run.click(fn=self.whisper_inf.transcribe_file,
+                                  inputs=params + whisper_params.as_list(),
+                                  outputs=[tb_indicator, files_subtitles, tb_info])
+                    # btn_run.click(fn=self.update_dataframe, inputs=[cb_timestamp_preview, cb_diarize], outputs=tb_indicator)
+                    # btn_openfolder.click(fn=lambda: self.open_folder("outputs"), inputs=None, outputs=None)
+
+                    input_multi.change(fn=self.update_viewer, inputs=input_multi, outputs=[input_file_audio, input_file_video, input_file_multi])
+
+                with gr.TabItem("Device info"):  # tab2
+                    with gr.Column():
+                        gr.Markdown(device_info, label="Hardware info & installed packages")
+
+        # Launch the app with optional gradio settings
+        args = self.args
+
+        self.app.queue(
+            api_open=args.api_open
+        ).launch(
+            share=args.share,
+            server_name=args.server_name,
+            server_port=args.server_port,
+            auth=(args.username, args.password) if args.username and args.password else None,
+            root_path=args.root_path,
+            inbrowser=args.inbrowser
+        )
+
+    @staticmethod
+    def open_folder(folder_path: str):
+        if os.path.exists(folder_path):
+            os.system(f"start {folder_path}")  # 'start' opens the folder in the file explorer (Windows only)
+        else:
+            os.makedirs(folder_path, exist_ok=True)
+            print(f"The directory path {folder_path} has been newly created.")
+
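+    # Hide the "Translate to English" checkbox for model sizes not listed in translatable_model.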
+    @staticmethod
+    def on_change_models(model_size: str):
+        translatable_model = ["large", "large-v1", "large-v2", "large-v3"]
+        if model_size not in translatable_model:
+            return gr.Checkbox(visible=False, value=False, interactive=False)
+            # return gr.Checkbox(visible=True, value=False, label="Translate to English (large models only)", interactive=False)
+        else:
+            return gr.Checkbox(visible=True, value=False, label="Translate to English", interactive=True)
+
+    @staticmethod
+    def update_viewer(radio_text):
+        if radio_text == "Audio":
+            return gr.update(visible=True), gr.update(visible=False), gr.update(visible=False)
+        elif radio_text == "Video":
+            return gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)
+        else:
+            return gr.update(visible=False), gr.update(visible=False), gr.update(visible=True)
+
+    @staticmethod
+    def update_dataframe(value_cb_timestamp_preview, value_cb_diarize):
+        if value_cb_timestamp_preview and value_cb_diarize:
+            return gr.Dataframe(headers=["Time", "Speaker", "Text"], column_widths=["10%", "10%", "80%"])
+        elif value_cb_timestamp_preview:
+            return gr.Dataframe(headers=["Time", "Text"], column_widths=["10%", "90%"])
+        elif value_cb_diarize:
+            return gr.Dataframe(headers=["Speaker", "Text"], column_widths=["10%", "90%"])
+        else:
+            return gr.Dataframe(headers=["Text"], column_widths=["100%"])
+
+
+# Create the parser for command-line arguments
+parser = argparse.ArgumentParser()
+parser.add_argument('--whisper_type', type=str, default="faster-whisper",
+                    help='Type of whisper implementation, one of: ["whisper", "faster-whisper", "insanely-fast-whisper"]')
+parser.add_argument('--share', type=str2bool, default=False, nargs='?', const=True, help='Gradio share value')
+parser.add_argument('--server_name', type=str, default=None, help='Gradio server host')
+parser.add_argument('--server_port', type=int, default=None, help='Gradio server port')
+parser.add_argument('--root_path', type=str, default=None, help='Gradio root path')
+parser.add_argument('--username', type=str, default=None, help='Gradio authentication username')
+parser.add_argument('--password', type=str, default=None, help='Gradio authentication password')
+parser.add_argument('--theme', type=str, default=None, help='Gradio Blocks theme')
+parser.add_argument('--colab', type=str2bool, default=False, nargs='?', const=True, help='Whether the app is running on Colab')
+parser.add_argument('--api_open', type=str2bool, default=False, nargs='?', const=True, help='Whether to enable the Gradio API')
+parser.add_argument('--inbrowser', type=str2bool, default=True, nargs='?', const=True, help='Whether to automatically open the app in the browser')
+parser.add_argument('--whisper_model_dir', type=str, default=WHISPER_MODELS_DIR,
+                    help='Directory path of the whisper model')
+parser.add_argument('--faster_whisper_model_dir', type=str, default=FASTER_WHISPER_MODELS_DIR,
+                    help='Directory path of the faster-whisper model')
+parser.add_argument('--insanely_fast_whisper_model_dir', type=str,
+                    default=INSANELY_FAST_WHISPER_MODELS_DIR,
+                    help='Directory path of the insanely-fast-whisper model')
+parser.add_argument('--diarization_model_dir', type=str, default=DIARIZATION_MODELS_DIR,
+                    help='Directory path of the diarization model')
+parser.add_argument('--nllb_model_dir', type=str, default=NLLB_MODELS_DIR,
+                    help='Directory path of the Facebook NLLB model')
+parser.add_argument('--uvr_model_dir', type=str, default=UVR_MODELS_DIR,
+                    help='Directory path of the UVR model')
+parser.add_argument('--output_dir', type=str, default=OUTPUT_DIR, help='Directory path of the outputs')
+_args = parser.parse_args()
+
+if __name__ == "__main__":
+    app = App(args=_args)
+    app.launch()
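
A typical local launch might look like the following. This is only a usage sketch: the flag names come from the argparse definitions above, while the host/port values are example choices, and it assumes the repository's modules/ package and the default parameters YAML (resolved via DEFAULT_PARAMETERS_CONFIG_PATH) are present.

python app.py --whisper_type faster-whisper --server_name 0.0.0.0 --server_port 7860

The str2bool flags (--share, --colab, --api_open, --inbrowser) can be passed with an explicit value (e.g. --share false) or as bare switches, in which case nargs='?' with const=True sets them to True.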