Ngoufack committed on
Commit 5fdc54e · 1 Parent(s): 2515efc

hotfix 1.6

Files changed (1): app.py (+2 -2)
app.py CHANGED
@@ -23,7 +23,7 @@ model = WhisperModel(MODEL_NAME, device=device, compute_type="float16" if device
 def transcribe(inputs, task):
     if inputs is None:
         raise gr.Error("No audio file submitted! Please upload or record an audio file before submitting your request.")
-    segments, info = model.transcribe(input, beam_size=5,batch_size=BATCH_SIZE, vad_filter=True, word_timestamps=False)
+    segments, info = model.transcribe(input, beam_size=5, vad_filter=True, word_timestamps=False)
     transcription = " ".join([segment.text for segment in segments])
 
     #text = pipe(inputs, batch_size=BATCH_SIZE, generate_kwargs={"task": task}, return_timestamps=True)["text"]
@@ -84,7 +84,7 @@ def yt_transcribe(yt_url, task, max_filesize=75.0):
     #inputs = {"array": inputs, "sampling_rate": pipe.feature_extractor.sampling_rate}
 
     #text = pipe(inputs, batch_size=BATCH_SIZE, generate_kwargs={"task": task}, return_timestamps=True)["text"]
-    segments, info = model.transcribe(filepath, beam_size=5,batch_size=BATCH_SIZE, vad_filter=True, word_timestamps=False)
+    segments, info = model.transcribe(filepath, beam_size=5, vad_filter=True, word_timestamps=False)
     transcription = " ".join([segment.text for segment in segments])
 
     return html_embed_str, transcription
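
For context, a minimal sketch of how the patched call is typically used with faster-whisper. The model name, the transcribe_file helper, and the sample path below are illustrative assumptions and not part of this commit; note also that the patched line in transcribe still passes input (Python's builtin) rather than the function's inputs parameter, which may be a leftover bug.

# Sketch only: standalone use of WhisperModel.transcribe with the hotfix arguments.
from faster_whisper import WhisperModel

# Assumed model name and device; app.py derives these from MODEL_NAME and `device`.
model = WhisperModel("large-v3", device="cuda", compute_type="float16")

def transcribe_file(path):  # hypothetical helper, not in app.py
    # batch_size is not a parameter of WhisperModel.transcribe (it belongs to
    # BatchedInferencePipeline.transcribe), which is presumably why it was removed here.
    segments, info = model.transcribe(path, beam_size=5, vad_filter=True, word_timestamps=False)
    # segments is a lazy generator of Segment objects; join their text as app.py does.
    return " ".join(segment.text for segment in segments)

print(transcribe_file("sample.wav"))  # example audio path

If batched decoding is still wanted, recent faster-whisper releases expose BatchedInferencePipeline, which wraps a WhisperModel and whose transcribe method does accept a batch_size argument.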