github-actions[bot] committed
Commit de6e790 · 1 Parent(s): 0863517

Sync with https://github.com/mozilla-ai/speech-to-text-finetune

Files changed (1)
  1. app.py +4 -28
app.py CHANGED
@@ -1,11 +1,7 @@
 import os
-from pathlib import Path
-from typing import Tuple
 import gradio as gr
 import spaces
 from transformers import pipeline, Pipeline
-from huggingface_hub import repo_exists
-
 
 is_hf_space = os.getenv("IS_HF_SPACE")
 model_ids = [
@@ -20,9 +16,7 @@ model_ids = [
 ]
 
 
-def _load_local_model(model_dir: str) -> Tuple[Pipeline | None, str]:
-    if not Path(model_dir).is_dir():
-        return None, f"⚠️ Couldn't find local model directory: {model_dir}"
+def _load_local_model(model_dir: str) -> Pipeline:
     from transformers import (
         WhisperProcessor,
         WhisperTokenizer,
@@ -41,19 +35,14 @@ def _load_local_model(model_dir: str) -> Tuple[Pipeline | None, str]:
         processor=processor,
         tokenizer=tokenizer,
         feature_extractor=feature_extractor,
-    ), f"✅ Local model has been loaded from {model_dir}."
+    )
 
 
-def _load_hf_model(model_repo_id: str) -> Tuple[Pipeline | None, str]:
-    if not repo_exists(model_repo_id):
-        return (
-            None,
-            f"⚠️ Couldn't find {model_repo_id} on Hugging Face. If its a private repo, make sure you are logged in locally.",
-        )
+def _load_hf_model(model_repo_id: str) -> Pipeline:
     return pipeline(
         "automatic-speech-recognition",
         model=model_repo_id,
-    ), f"✅ HF Model {model_repo_id} has been loaded."
+    )
 
 
 @spaces.GPU
@@ -105,9 +94,6 @@ def setup_gradio_demo():
             placeholder="artifacts/my-whisper-tiny",
         )
 
-        # load_model_button = gr.Button("Load model")
-        model_loaded = gr.Markdown()
-
         ### Transcription ###
         audio_input = gr.Audio(
            sources=["microphone", "upload"],
@@ -119,16 +105,6 @@ def setup_gradio_demo():
         transcribe_button = gr.Button("Transcribe")
         transcribe_output = gr.Text(label="Output")
 
-        ### Event listeners ###
-        """
-        model = gr.State()
-        load_model_button.click(
-            fn=load_model,
-            inputs=[dropdown_model, user_model, local_model],
-            outputs=[model, model_loaded],
-        )
-        """
-
         transcribe_button.click(
             fn=transcribe,
             inputs=[dropdown_model, user_model, local_model, audio_input],
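For reference, the two helpers after this sync likely read as below. This is a sketch reconstructed from the diff's context lines: the middle of _load_local_model falls between hunks, so the WhisperFeatureExtractor import, the from_pretrained calls, and the model=model_dir argument are assumptions, not code shown in the diff.

from transformers import pipeline, Pipeline


def _load_local_model(model_dir: str) -> Pipeline:
    # Local imports kept inside the function, as in the diff's context lines.
    from transformers import (
        WhisperProcessor,
        WhisperTokenizer,
        WhisperFeatureExtractor,  # assumed: the diff truncates this import list
    )

    # Assumed: each component is loaded from the fine-tuned checkpoint
    # directory, matching the kwargs visible in the diff above.
    processor = WhisperProcessor.from_pretrained(model_dir)
    tokenizer = WhisperTokenizer.from_pretrained(model_dir)
    feature_extractor = WhisperFeatureExtractor.from_pretrained(model_dir)

    return pipeline(
        "automatic-speech-recognition",
        model=model_dir,  # assumed; the diff only shows the kwargs below
        processor=processor,
        tokenizer=tokenizer,
        feature_extractor=feature_extractor,
    )


def _load_hf_model(model_repo_id: str) -> Pipeline:
    # The repo_exists() pre-check is gone: an unknown or private repo id now
    # surfaces as the exception pipeline() raises while downloading.
    return pipeline(
        "automatic-speech-recognition",
        model=model_repo_id,
    )

The net effect of the commit is that both helpers return a bare Pipeline and raise on failure, instead of the old (Pipeline | None, status-string) tuple that the UI displayed in a Markdown element.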
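With the model_loaded Markdown and the commented-out load_model_button listener deleted, model loading presumably now happens inside transcribe itself. Only the @spaces.GPU decorator and the click() inputs appear in the diff, so everything in the handler below (its signature, the precedence among the three model inputs, and the assumption that audio_input uses type="filepath") is hypothetical:

import spaces  # provided on Hugging Face Spaces for ZeroGPU scheduling


@spaces.GPU
def transcribe(dropdown_model_id, user_model_id, local_model_dir, audio_path):
    # Assumed precedence: a local directory wins, then a user-entered HF repo
    # id, then the dropdown default from model_ids.
    if local_model_dir:
        pipe = _load_local_model(local_model_dir)
    elif user_model_id:
        pipe = _load_hf_model(user_model_id)
    else:
        pipe = _load_hf_model(dropdown_model_id)
    # Assumes gr.Audio(type="filepath"), so audio_path is a file path the ASR
    # pipeline accepts directly.
    return pipe(audio_path)["text"]

Because the loaders now raise instead of returning (None, warning), a load failure propagates as an exception that Gradio reports in the UI, which is consistent with dropping the dedicated status element.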