rohitp1 committed
Commit 5329b4a · 1 Parent(s): b323f3d

Update app.py

Files changed (1)
  1. app.py +12 -6
app.py CHANGED
@@ -44,14 +44,17 @@ def transcribe(mic_input, upl_input, model_type):
     else:
         audio = upl_input
     time.sleep(3)
+    st_time = time.time()
     if model_type == 'NoisyFinetuned':
         text = p2(audio)["text"]
     elif model_type == 'CleanFinetuned':
         text = p3(audio)["text"]
     else:
         text = p1(audio)["text"]
+    end_time = time.time()
     # state = text + " "
-    return text
+    time_taken = (end_time - st_time) / 60
+    return text, time_taken
 
 
 
@@ -79,7 +82,7 @@ def transcribe(mic_input, upl_input, model_type):
 # demo.launch()
 
 def clear_inputs_and_outputs():
-    return [None, None, "CleanFinetuned", None]
+    return [None, None, "CleanFinetuned", None, None]
 
 # Main function
 if __name__ == "__main__":
@@ -111,7 +114,10 @@ if __name__ == "__main__":
 
         # Outputs
         with gr.Column():
-            lbl_output = gr.Label(label="Top Predictions")
+            lbl_output = gr.Label(label="Transcription")
+
+        with gr.Column():
+            time_output = gr.Label(label="Time Taken (in sec)")
             # with gr.Group():
             #     gr.Markdown("<center>Prediction per time slot</center>")
             #     plt_output = gr.Plot(
@@ -126,7 +132,7 @@ if __name__ == "__main__":
                 os.path.join(os.path.dirname(__file__), "audio/sample2.wav"),
             ],
             upl_input,
-            lbl_output,
+            [lbl_output, time_output],
             transcribe
         )
         # Credits
@@ -142,12 +148,12 @@ if __name__ == "__main__":
         clr_btn.click(
             fn=clear_inputs_and_outputs,
             inputs=[],
-            outputs=[mic_input, upl_input, model_type, lbl_output],
+            outputs=[mic_input, upl_input, model_type, lbl_output, time_output],
         )
         prd_btn.click(
             fn=transcribe,
             inputs=[mic_input, upl_input, model_type],
-            outputs=[lbl_output],
+            outputs=[lbl_output, time_output],
         )
 
         demo.launch(debug=True)
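
For reference, the multi-output wiring this commit introduces follows Gradio's general pattern: the handler returns a tuple, and the event listener lists one output component per returned value, in the same order. Below is a minimal, self-contained sketch of that pattern; fake_transcribe and its dummy timing are illustrative stand-ins, not the app's actual p1/p2/p3 pipelines.

import time

import gradio as gr


def fake_transcribe(audio_path):
    # Illustrative stand-in for the app's ASR call; it times the "inference"
    # and returns one value per output component wired up below.
    start = time.time()
    time.sleep(1)  # pretend to run inference
    text = f"transcribed: {audio_path}"
    elapsed = time.time() - start
    return text, f"{elapsed:.2f}"


with gr.Blocks() as demo:
    upl_input = gr.Audio(type="filepath", label="Upload audio")
    with gr.Column():
        lbl_output = gr.Label(label="Transcription")
    with gr.Column():
        time_output = gr.Label(label="Time Taken (in sec)")
    prd_btn = gr.Button("Transcribe")
    # The returned tuple maps element-by-element onto this outputs list.
    prd_btn.click(
        fn=fake_transcribe,
        inputs=[upl_input],
        outputs=[lbl_output, time_output],
    )

if __name__ == "__main__":
    demo.launch()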