jcvsalinas committed on
Commit 34e9313 · verified · 1 Parent(s): 7170e51

Upload app.py

Files changed (1): app.py (+2 -2)
app.py CHANGED
@@ -5,6 +5,7 @@ import matplotlib.pyplot as plt
 HOME_DIR = ""
 local_config_path = 'config.json'
 local_preprocessor_config_path = 'preprocessor_config.json'
+local_weights_path = 'pytorch_model.bin'
 local_training_args_path = 'training_args.bin'
 
 import torch
@@ -138,8 +139,7 @@ with demo:
         show_label=True
     )
     text_output = gr.Textbox(label="Recognized Emotion")
-    predict_button = gr.Button("Predict Emotion")
 
     # Automatically call the recognize_emotion function when audio is recorded
-    predict_button.click(recognize_emotion, inputs=audio_input, outputs=text_output)
+    audio_input.stop_recording(fn=recognize_emotion, inputs=audio_input, outputs=text_output)
     demo.launch(share=True)
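
In context, the change swaps the manual "Predict Emotion" button for Gradio's Audio.stop_recording event, so recognize_emotion runs as soon as the user finishes recording. Below is a minimal sketch of that wiring, assuming a recognize_emotion stub and gr.Audio arguments other than show_label=True, since the rest of app.py is not shown in this diff.

import gradio as gr

# Stub standing in for the model-inference function defined earlier in app.py
# (assumed signature: takes the recorded audio, returns an emotion label).
def recognize_emotion(audio_path):
    return "neutral"

with gr.Blocks() as demo:
    # gr.Audio arguments other than show_label are assumptions made to keep the sketch runnable.
    audio_input = gr.Audio(type="filepath", show_label=True)
    text_output = gr.Textbox(label="Recognized Emotion")
    # Fires automatically when the user stops recording, replacing the removed
    # predict_button.click(...) wiring from this commit.
    audio_input.stop_recording(fn=recognize_emotion, inputs=audio_input, outputs=text_output)

demo.launch(share=True)

Triggering the handler on stop_recording removes one click from the interaction and makes the button component unnecessary, which is why both the gr.Button and its .click() call are deleted in this commit.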