ginic committed
Commit 86bc5ce · 1 Parent(s): 66ae0a4

Allow user to select a model from dropdown

Files changed (1)
  1. app.py +47 -14
app.py CHANGED
@@ -2,22 +2,55 @@ import gradio as gr
 
 from transformers import pipeline
 
-#MODEL_NAME="ctaguchi/wav2vec2-large-xlsr-japlmthufielta-ipa1000-ns"
-MODEL_NAME="ginic/wav2vec-large-xlsr-en-ipa"
-
-pipe = pipeline(task="automatic-speech-recognition", model=MODEL_NAME)
-
-def predict(audio_in):
-    return pipe(audio_in)["text"]
-
+DEFAULT_MODEL = "ginic/data_seed_4_wav2vec2-large-xlsr-buckeye-ipa"
+
+VALID_MODELS = [
+    "ctaguchi/wav2vec2-large-xlsr-japlmthufielta-ipa-plus-2000",
+    "ginic/hyperparam_tuning_1_wav2vec2-large-xlsr-buckeye-ipa",
+    "ginic/data_seed_1_wav2vec2-large-xlsr-buckeye-ipa",
+    "ginic/data_seed_2_wav2vec2-large-xlsr-buckeye-ipa",
+    "ginic/data_seed_3_wav2vec2-large-xlsr-buckeye-ipa",
+    "ginic/data_seed_4_wav2vec2-large-xlsr-buckeye-ipa",
+    "ginic/gender_split_30_female_1_wav2vec2-large-xlsr-buckeye-ipa",
+    "ginic/gender_split_30_female_2_wav2vec2-large-xlsr-buckeye-ipa",
+    "ginic/gender_split_30_female_3_wav2vec2-large-xlsr-buckeye-ipa",
+    "ginic/gender_split_30_female_4_wav2vec2-large-xlsr-buckeye-ipa",
+    "ginic/gender_split_30_female_5_wav2vec2-large-xlsr-buckeye-ipa",
+    "ginic/gender_split_70_female_1_wav2vec2-large-xlsr-buckeye-ipa",
+    "ginic/gender_split_70_female_2_wav2vec2-large-xlsr-buckeye-ipa",
+    "ginic/gender_split_70_female_3_wav2vec2-large-xlsr-buckeye-ipa",
+    "ginic/gender_split_70_female_4_wav2vec2-large-xlsr-buckeye-ipa",
+    "ginic/gender_split_70_female_5_wav2vec2-large-xlsr-buckeye-ipa",
+    "ginic/vary_individuals_old_only_1_wav2vec2-large-xlsr-buckeye-ipa",
+    "ginic/vary_individuals_old_only_2_wav2vec2-large-xlsr-buckeye-ipa",
+    "ginic/vary_individuals_old_only_3_wav2vec2-large-xlsr-buckeye-ipa",
+    "ginic/vary_individuals_young_only_1_wav2vec2-large-xlsr-buckeye-ipa",
+    "ginic/vary_individuals_young_only_2_wav2vec2-large-xlsr-buckeye-ipa",
+    "ginic/vary_individuals_young_only_3_wav2vec2-large-xlsr-buckeye-ipa"
+]
+
+def load_model_and_predict(model_name, audio_in, model_state):
+    if model_state["model_name"] != model_name:
+        model_state = {"loaded_model": pipeline(task="automatic-speech-recognition", model=model_name),
+                       "model_name": model_name}
+
+    return model_state["loaded_model"](audio_in)["text"], model_state
 
 def launch_demo():
-    with gr.Blocks() as demo:
-        gr.Markdown(f"""
-        # Automatic International Phonetic Alphabet Transcription
-        This demo allows you to experiment with producing phonetic transcriptions of uploaded or recorded audio using the model '{MODEL_NAME}'.
-        """)
-        gr.Interface(fn=predict, inputs=gr.Audio(type="filepath"), outputs="text", allow_flagging="never")
+    initial_model = {"loaded_model": pipeline(task="automatic-speech-recognition", model=DEFAULT_MODEL),
+                     "model_name": DEFAULT_MODEL}
+    demo = gr.Interface(
+        fn=load_model_and_predict,
+        inputs=[
+            gr.Dropdown(VALID_MODELS, value=DEFAULT_MODEL, label="IPA transcription ASR model", info="Select the model to use for prediction."),
+            gr.Audio(type="filepath"),
+            gr.State(value=initial_model)  # Store the name of the currently loaded model
+        ],
+        outputs=["text", gr.State()],
+        allow_flagging="never",
+        title="Automatic International Phonetic Alphabet Transcription",
+        description="This demo allows you to experiment with producing phonetic transcriptions of uploaded or recorded audio using a selected automatic speech recognition (ASR) model."
+    )
 
     demo.launch()
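For reference, a minimal usage sketch (not part of the commit) of how the new function threads its state between calls: gr.State passes the dict holding the loaded pipeline back into load_model_and_predict on each request and receives the updated dict back, so the pipeline is only rebuilt when the dropdown selection changes. The audio path "sample.wav" below is a placeholder, not a file in this repository.

# Hypothetical local test, assuming it runs next to the new app.py; "sample.wav" is a placeholder.
from transformers import pipeline
from app import load_model_and_predict, DEFAULT_MODEL, VALID_MODELS

# Seed the state the same way launch_demo() builds initial_model.
state = {"loaded_model": pipeline(task="automatic-speech-recognition", model=DEFAULT_MODEL),
         "model_name": DEFAULT_MODEL}

# Same model as the cached state: the existing pipeline is reused, nothing is reloaded.
text, state = load_model_and_predict(DEFAULT_MODEL, "sample.wav", state)

# Different model: the name check fails, so a new pipeline is loaded into the returned state.
text, state = load_model_and_predict(VALID_MODELS[0], "sample.wav", state)
print(text)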