jhj0517 committed
Commit 4822ec3 · 1 Parent(s): 7cc2d4f

add debug function
app.py CHANGED
@@ -158,10 +158,9 @@ class App:
                                   hf_token=tb_hf_token,
                                   diarization_device=dd_diarization_device)
 
-                # btn_run.click(fn=self.whisper_inf.transcribe_file,
-                #               inputs=params + whisper_params.as_list(),
-                #               outputs=[tb_indicator, files_subtitles])
-                btn_run.click(fn=self.whisper_inf.test, inputs=None, outputs=None)
+                btn_run.click(fn=self.whisper_inf.transcribe_file,
+                              inputs=params + whisper_params.as_list(),
+                              outputs=[tb_indicator, files_subtitles])
                 dd_model.change(fn=self.on_change_models, inputs=[dd_model], outputs=[cb_translate])
 
             with gr.TabItem("Youtube"): # tab2
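
For reference, the restored wiring follows the standard Gradio event pattern: Button.click takes a callable, a flat list of input components whose current values become the function's positional arguments, and a list of output components that receive the returned values. Below is a minimal, self-contained sketch of that pattern; the component labels and the transcribe_file stand-in are hypothetical illustrations, not the project's actual implementation.

import gradio as gr

# Hypothetical stand-in for self.whisper_inf.transcribe_file; the real handler
# runs Whisper on the uploaded files and returns (status text, subtitle files).
def transcribe_file(files, model_size):
    paths = [f.name for f in files] if files else []
    return f"Processed {len(paths)} file(s) with {model_size}", paths

with gr.Blocks() as demo:
    input_files = gr.Files(label="Upload audio files")
    dd_model = gr.Dropdown(choices=["base", "large-v2"], value="base", label="Model")
    btn_run = gr.Button("GENERATE SUBTITLE FILE")
    tb_indicator = gr.Textbox(label="Output")
    files_subtitles = gr.Files(label="Downloadable output files")

    # Inputs are passed positionally to the handler; the returned tuple fills
    # the outputs, mirroring the btn_run.click call restored in the diff above.
    btn_run.click(fn=transcribe_file,
                  inputs=[input_files, dd_model],
                  outputs=[tb_indicator, files_subtitles])

demo.launch()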
modules/whisper/faster_whisper_inference.py CHANGED
@@ -33,7 +33,7 @@ class FasterWhisperInference(WhisperBase):
         self.available_compute_types = self.get_available_compute_type()
         self.download_model(model_size="large-v2", model_dir=self.model_dir)
 
-    #@spaces.GPU(duration=120)
+    @spaces.GPU(duration=120)
     def transcribe(self,
                    audio: Union[str, BinaryIO, np.ndarray],
                    progress: gr.Progress,
@@ -65,6 +65,7 @@ class FasterWhisperInference(WhisperBase):
         if params.model_size != self.current_model_size or self.model is None or self.current_compute_type != params.compute_type:
             self.update_model(params.model_size, params.compute_type, progress)
 
+        print("transcribe:")
         segments, info = self.model.transcribe(
             audio=audio,
             language=params.lang,
@@ -89,9 +90,10 @@ class FasterWhisperInference(WhisperBase):
                 })
 
         elapsed_time = time.time() - start_time
+        print("transcribe: finished")
         return segments_result, elapsed_time
 
-    #@spaces.GPU(duration=120)
+    @spaces.GPU(duration=120)
     def update_model(self,
                      model_size: str,
                      compute_type: str,
@@ -111,6 +113,7 @@ class FasterWhisperInference(WhisperBase):
             Indicator to show progress directly in gradio.
         """
         progress(0, desc="Initializing Model..")
+        print("update_model:")
         self.current_model_size = self.model_paths[model_size]
         self.current_compute_type = compute_type
         self.model = faster_whisper.WhisperModel(
@@ -119,6 +122,7 @@ class FasterWhisperInference(WhisperBase):
             download_root=self.model_dir,
             compute_type=self.current_compute_type
         )
+        print("update_model: finished")
 
     def get_model_paths(self):
         """
@@ -150,21 +154,18 @@ class FasterWhisperInference(WhisperBase):
             return ['float32', 'int8_float16', 'float16', 'int8', 'int8_float32']
         return ['int16', 'float32', 'int8', 'int8_float32']
 
-    #@spaces.GPU(duration=120)
+    @spaces.GPU(duration=120)
     @staticmethod
     def get_device():
+        print("GET DEVICE:")
         if torch.cuda.is_available():
+            print("GET DEVICE: device is cuda")
             return "cuda"
         elif torch.backends.mps.is_available():
             return "auto"
         else:
             return "cpu"
 
-    @staticmethod
-    @spaces.GPU(duration=120)
-    def test():
-        print("\nWHAT Happend?\n")
-
     @staticmethod
     def download_model(model_size: str, model_dir: str):
         print(f"\nDownloading \"{model_size}\" to \"{model_dir}\"..\n")