Spaces:
Runtime error
Runtime error
Try to fix input error
Browse files
app.py
CHANGED
@@ -102,16 +102,21 @@ def process_func(x: np.ndarray, sampling_rate: int) -> dict:
|
|
102 |
|
103 |
|
104 |
@spaces.GPU
|
105 |
-
def recognize(
|
106 |
-
if
|
|
|
|
|
|
|
|
|
107 |
raise gr.Error(
|
108 |
"No audio file submitted! "
|
109 |
"Please upload or record an audio file "
|
110 |
"before submitting your request."
|
111 |
)
|
112 |
-
|
113 |
target_rate = 16000
|
114 |
signal = audresample.resample(signal, sampling_rate, target_rate)
|
|
|
115 |
age_gender = process_func(signal, target_rate)
|
116 |
age = f"{round(age_gender['age'])} years"
|
117 |
gender = {k: v for k, v in age_gender.items() if k != "age"}
|
@@ -198,9 +203,11 @@ with gr.Blocks() as demo:
|
|
198 |
inputs = [input_microphone, input_file]
|
199 |
outputs = [output_age, output_gender]
|
200 |
|
201 |
-
submit_btn.click(recognize, [input_file], outputs)
|
202 |
input_selection.change(toggle_input, input_selection, inputs)
|
203 |
input_microphone.change(lambda x: x, input_microphone, outputs)
|
204 |
input_file.change(lambda x: x, input_file, outputs)
|
205 |
|
|
|
|
|
|
|
206 |
demo.launch(debug=True)
|
|
|
102 |
|
103 |
|
104 |
@spaces.GPU
|
105 |
+
def recognize(input_microphone, input_file):
|
106 |
+
if input_microphone:
|
107 |
+
sampling_rate, signal = input_microphone
|
108 |
+
elif input_file:
|
109 |
+
signal, sampling_rate = audiofile.read(input_file, duration=duration)
|
110 |
+
else:
|
111 |
raise gr.Error(
|
112 |
"No audio file submitted! "
|
113 |
"Please upload or record an audio file "
|
114 |
"before submitting your request."
|
115 |
)
|
116 |
+
# Resample to sampling rate supported by the models
|
117 |
target_rate = 16000
|
118 |
signal = audresample.resample(signal, sampling_rate, target_rate)
|
119 |
+
|
120 |
age_gender = process_func(signal, target_rate)
|
121 |
age = f"{round(age_gender['age'])} years"
|
122 |
gender = {k: v for k, v in age_gender.items() if k != "age"}
|
|
|
203 |
inputs = [input_microphone, input_file]
|
204 |
outputs = [output_age, output_gender]
|
205 |
|
|
|
206 |
input_selection.change(toggle_input, input_selection, inputs)
|
207 |
input_microphone.change(lambda x: x, input_microphone, outputs)
|
208 |
input_file.change(lambda x: x, input_file, outputs)
|
209 |
|
210 |
+
submit_btn.click(recognize, inputs, outputs)
|
211 |
+
|
212 |
+
|
213 |
demo.launch(debug=True)
|