camparchimedes committed
Commit 35a4118 · verified · 1 Parent(s): 1f31e20

Update app.py

Files changed (1)
  1. app.py +7 -5
app.py CHANGED
@@ -48,7 +48,7 @@ import gradio as gr
 #if not torch.cuda.is_available():
 #DESCRIPTION += "\n<p>⚠️Running on CPU, This may not work on CPU.</p>"
 
-CACHE_EXAMPLES = torch.device('cuda') and os.getenv("CACHE_EXAMPLES", "0") == "1"
+#CACHE_EXAMPLES = torch.device('cuda') and os.getenv("CACHE_EXAMPLES", "0") == "1"
 #CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES", "0") == "1"
 #USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "0") == "1"
 #ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
@@ -58,16 +58,18 @@ device = torch.device('cuda')
 
 def transcribe(file_upload, progress=gr.Progress(track_tqdm=True)): # microphone
 
-    file = file_upload # microphone if microphone is not None else
+    file = file_upload
     start_time = time.time()
 
     #--------------____________________________________________--------------"
-
     with torch.no_grad():
-        pipe = pipeline("automatic-speech-recognition", model="NbAiLab/nb-whisper-large", chunk_length_s=30, device=device)
+        pipe = pipeline("automatic-speech-recognition",
+                        model="NbAiLab/nb-whisper-large",
+                        chunk_length_s=30,
+                        forced_decoder_ids=None,  # -- explicitly set to None
+                        device=device)
 
         text = pipe(file)["text"]
-
     #--------------____________________________________________--------------"
 
     end_time = time.time()
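For context, the sketch below shows roughly how the transcribe() helper reads after this commit. It is a minimal reconstruction, not the full app.py: the imports, the return statement, and anything outside the hunks shown above are assumptions filled in for illustration; only the lines inside the diff come from the commit itself.

# Minimal sketch of app.py's transcribe() after this commit; imports and the
# final return value are assumptions -- only the body mirrors the diff above.
import time

import torch
import gradio as gr
from transformers import pipeline

device = torch.device('cuda')

def transcribe(file_upload, progress=gr.Progress(track_tqdm=True)):
    file = file_upload
    start_time = time.time()

    with torch.no_grad():
        # Build the NB-Whisper ASR pipeline; forced_decoder_ids is explicitly
        # set to None (per the commit's inline note), leaving language/task
        # selection to the model's own generation config.
        pipe = pipeline("automatic-speech-recognition",
                        model="NbAiLab/nb-whisper-large",
                        chunk_length_s=30,
                        forced_decoder_ids=None,
                        device=device)

        text = pipe(file)["text"]

    end_time = time.time()
    # What happens with end_time downstream is outside the diff; returning the
    # transcript and elapsed time is an assumption for this sketch.
    return text, end_time - start_time

The first hunk only comments out the old CACHE_EXAMPLES assignment. That line was effectively always true whenever the environment variable was set, because torch.device('cuda') is a plain object and therefore truthy regardless of whether a GPU is present; the commented-out alternative using torch.cuda.is_available() is the variant that actually checks for CUDA.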