Commit a2a1f80 · Max Brodeur-Urbas committed · Parent(s): 143df12

:sparkles:

- app.py +209 -0
- ex_adult_female_voice.wav +0 -0
- ex_adult_female_voice2.mp3 +0 -0
- ex_adult_male_voice.wav +0 -0
- ex_kid_voice.mp3 +0 -0
- ex_teen_female_voice.mp3 +0 -0
- ex_teen_female_voice2.mp3 +0 -0
- ex_teen_male_voice.mp3 +0 -0
- ex_teen_male_voice2.mp3 +0 -0
- output1.wav +0 -0
- packages.txt +2 -0
- requirements.txt +5 -0
app.py
ADDED
@@ -0,0 +1,209 @@
import os
import requests
import json
from json.decoder import JSONDecodeError
import time
import sys
from subprocess import call
from pip._internal import main as pip

# Helper to run a shell command (used to install extra packages at startup).
# pip(['install', 'sounddevice'])
# pip(['install', 'scipy'])
def run_cmd(command):
    try:
        print(command)
        call(command, shell=True)
    except KeyboardInterrupt:
        print("Process interrupted")
        sys.exit(1)

run_cmd('pip install gradio==3.16.0')
# run_cmd('pip install git+https://github.com/ricardodeazambuja/colab_utils.git')
# import colab_utils as cu
import gradio as gr
import sounddevice as sd
from scipy.io.wavfile import write

# Scoring endpoint URL and API key are supplied through Space secrets.
scoring_uri = os.environ.get('url')
key = os.environ.get('key')

# Leftover imports from the notebook prototype; not used by the app below.
from IPython.display import Javascript, display
from js2py import eval_js6
from base64 import b64decode
from io import BytesIO

run_cmd('pip -q install pydub')
from pydub import AudioSegment

prediction_count = 0
failed_prediction_count = 0


def predict(audio_file_path):
    # The counters are module-level, so they must be declared global before
    # being incremented here.
    global prediction_count, failed_prediction_count

    with open(audio_file_path, 'rb') as f:
        input_data = f.read()
    print(len(input_data))

    # These exact byte sizes correspond to the empty clips produced by
    # recorders that Gradio does not yet support (iOS/macOS browsers).
    if len(input_data) == 88108 or len(input_data) == 94252:
        failed_prediction_count += 1
        output = ("It appears your recording device isn't supported by Hugging Face/Gradio yet "
                  "(iOS and macOS are causing issues). Windows and Android record properly, "
                  "sorry for the temporary inconvenience!")
        return output, {}, ""
    else:
        prediction_count += 1

    print("Prediction count: " + str(prediction_count))
    print("Failed prediction count: " + str(failed_prediction_count))

    # Set the content type and, since authentication is enabled, the authorization header.
    headers = {'Content-Type': 'application/json'}
    headers['Authorization'] = f'Bearer {key}'

    # Send the raw audio bytes to the scoring endpoint.
    resp = requests.post(scoring_uri, input_data, headers=headers)
    try:
        obj = json.loads(resp.text)
        predictions = obj['agegroup_predictions']
        labels = {
            'child_unknown': 'Child (genderless)',
            'teens_female': 'Teen Female',
            'teens_male': 'Teen Male',
            'twenties+_female': 'Adult Female',
            'twenties+_male': 'Adult Male',
        }
        # Average the per-segment confidences for each label.
        confs = {}
        for label in labels.keys():
            confArray = predictions[label]
            avg = sum(confArray) / len(confArray)
            confs[labels[label]] = avg

        output = "Audio processed successfully."
        return output, confs, obj['whisper'].get('text')
    except JSONDecodeError:
        # The endpoint returns plain text (not JSON) when it rejects the clip.
        if "viable" in resp.text or "detected" in resp.text:
            output = "No viable audio detected within your clip! Make sure the clip you recorded is audible!"
        else:
            output = "Our servers are currently overloaded, try again in a few minutes."
        return output, {}, ""


# Maps the flag-button captions to the label names the scoring service expects.
btn_label_dict = {
    'Child': 'child_unknown',
    'Teen Female': 'teens_female',
    'Teen Male': 'teens_male',
    'Adult Female': 'twenties+_female',
    'Adult Male': 'twenties+_male',
}


def send_flag_correction(btn):
    correct_label = btn_label_dict[btn]

    # Set the content type and authorization header.
    headers = {'Content-Type': 'application/json'}
    headers['Authorization'] = f'Bearer {key}'

    # Format a JSON object containing the corrected label.
    input_data = json.dumps({"correct_label": correct_label})

    resp = requests.post(scoring_uri + "?feedback", input_data, headers=headers)
    print(resp.text)


example_list = [
    ['ex_kid_voice.mp3'],
    ['ex_adult_female_voice2.mp3'],
    ['ex_adult_male_voice.wav'],
    ['ex_teen_female_voice.mp3'],
    ['ex_teen_female_voice2.mp3'],
    ['ex_teen_male_voice.mp3'],
    ['ex_teen_male_voice2.mp3'],
]

with gr.Blocks() as demo:
    with gr.Row():
        gr.Markdown("# Litmus")
    with gr.Row():
        gr.Markdown("A tool for detecting your age group and gender with only a few seconds of audio. "
                    "Record a short clip of your voice (3 or more seconds) or try out some of our examples. "
                    "If the response is incorrect, be sure to flag it so we can improve! "
                    "Leave a comment or PM me on Hugging Face if you have any questions!")
    with gr.Row():
        with gr.Column(scale=1):
            audio = gr.Audio(type="filepath", source="microphone", label="Voice Recording")
            with gr.Row():
                submit_btn = gr.Button("Submit")
        with gr.Column(scale=1):
            resp = gr.Textbox(label="Response")
            labels = gr.Label(num_top_classes=5, label="Prediction confidences")
            words = gr.Textbox(label="Detected words")
            flag_btn = gr.Button("Flag as incorrect", visible=False)
    with gr.Row(visible=False) as flag_options:
        with gr.Row():
            gr.Markdown(
                """
                Thanks for flagging our error!
                Please select the category which best represents you.
                (NOTE: When a submission is flagged it is saved for training purposes. We appreciate you helping us improve!)
                """)
        with gr.Row():
            child_flag_btn = gr.Button("Child")
            teen_f_flag_btn = gr.Button("Teen Female")
            teen_m_flag_btn = gr.Button("Teen Male")
            adult_f_flag_btn = gr.Button("Adult Female")
            adult_m_flag_btn = gr.Button("Adult Male")

    def show_main_flag_btn():
        return gr.update(visible=True)

    def hide_main_flag_btn():
        return gr.update(visible=False)

    def show_flagging_options():
        print("showing flagging options")
        return {
            flag_options: gr.update(visible=True),
            flag_btn: gr.update(visible=False)
        }

    def hide_flagging_options():
        print("hiding flagging options")
        return gr.update(visible=False)

    def send_flagged_feedback(label):
        send_flag_correction(label)
        main_btn = hide_main_flag_btn()
        options = hide_flagging_options()
        return main_btn, options

    def trigger_predict(audio):
        print("triggering prediction")
        # options = hide_flagging_options()
        output, confs, words = predict(audio)
        btn = show_main_flag_btn()
        return output, confs, words, btn

    ex = gr.Examples(
        examples=example_list,
        fn=trigger_predict,
        inputs=audio,
        # trigger_predict returns four values, so flag_btn is listed here as well.
        outputs=[resp, labels, words, flag_btn],
    )
    submit_btn.click(
        fn=trigger_predict,
        inputs=audio,
        outputs=[resp, labels, words, flag_btn]
    )
    child_flag_btn.click(
        fn=send_flagged_feedback,
        inputs=child_flag_btn,
        outputs=[flag_btn, flag_options]
    )
    teen_f_flag_btn.click(
        fn=send_flagged_feedback,
        inputs=teen_f_flag_btn,
        outputs=[flag_btn, flag_options]
    )
    teen_m_flag_btn.click(
        fn=send_flagged_feedback,
        inputs=teen_m_flag_btn,
        outputs=[flag_btn, flag_options]
    )
    adult_f_flag_btn.click(
        fn=send_flagged_feedback,
        inputs=adult_f_flag_btn,
        outputs=[flag_btn, flag_options]
    )
    adult_m_flag_btn.click(
        fn=send_flagged_feedback,
        inputs=adult_m_flag_btn,
        outputs=[flag_btn, flag_options]
    )
    flag_btn.click(
        show_flagging_options,
        outputs=[flag_options, flag_btn]
    )
    # returning a dict with one value crashes the entire app
    # passing in an fn with parentheses calls that function
    # demo2 = gr.Interface(fn=predict,
    #                      inputs=gr.Audio(type="filepath", source="microphone", label="Voice Recording"),
    #                      outputs=[gr.Textbox(label="Response"),
    #                               gr.Label(num_top_classes=5, label="Prediction confidences"),
    #                               gr.Textbox(label="Detected words")],
    #                      examples=example_list,
    #                      cache_examples=False,
    #                      allow_flagging="manual",
    #                      )

demo.launch()
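The commit does not include the scoring service itself, so the response format predict() relies on is only implied by the parsing code above. The sketch below is a reconstruction of that assumed shape: the field names agegroup_predictions and whisper and the label keys come from app.py, while the numbers are made up for illustration. It also reproduces the same per-label averaging step.

# Hypothetical example of the JSON body the scoring endpoint is assumed to return;
# only the field names are taken from app.py, the values are illustrative.
example_response = {
    "agegroup_predictions": {
        "child_unknown":    [0.02, 0.01],
        "teens_female":     [0.10, 0.12],
        "teens_male":       [0.05, 0.04],
        "twenties+_female": [0.70, 0.75],
        "twenties+_male":   [0.13, 0.08],
    },
    "whisper": {"text": "this is what was said in the clip"},
}

# Same averaging as predict(): one confidence per segment, averaged per label.
confs = {label: sum(scores) / len(scores)
         for label, scores in example_response["agegroup_predictions"].items()}
print(confs)                                    # e.g. {'twenties+_female': 0.725, ...}
print(example_response["whisper"].get("text"))  # transcript shown in "Detected words"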
ex_adult_female_voice.wav
ADDED
Binary file (340 kB)

ex_adult_female_voice2.mp3
ADDED
Binary file (35.9 kB)

ex_adult_male_voice.wav
ADDED
Binary file (295 kB)

ex_kid_voice.mp3
ADDED
Binary file (135 kB)

ex_teen_female_voice.mp3
ADDED
Binary file (193 kB)

ex_teen_female_voice2.mp3
ADDED
Binary file (129 kB)

ex_teen_male_voice.mp3
ADDED
Binary file (109 kB)

ex_teen_male_voice2.mp3
ADDED
Binary file (103 kB)

output1.wav
ADDED
Binary file (768 kB)
packages.txt
ADDED
@@ -0,0 +1,2 @@
libportaudio2
python-scipy
requirements.txt
ADDED
@@ -0,0 +1,5 @@
sounddevice
scipy
gradio>=3.16.0
IPython
js2py
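The app reads its endpoint URL and API key from the url and key environment variables (Space secrets). A minimal smoke-test sketch for the two routes app.py calls, assuming those same variables point at a reachable deployment and using one of the example clips added in this commit; the label value is illustrative:

# Quick check of the scoring and feedback routes used by app.py.
import json
import os
import requests

scoring_uri = os.environ["url"]
headers = {"Content-Type": "application/json",
           "Authorization": f"Bearer {os.environ['key']}"}

# Scoring route: raw audio bytes in the request body.
with open("ex_adult_male_voice.wav", "rb") as f:
    print(requests.post(scoring_uri, f.read(), headers=headers).text)

# Feedback route: JSON body carrying the corrected label.
payload = json.dumps({"correct_label": "twenties+_male"})
print(requests.post(scoring_uri + "?feedback", payload, headers=headers).text)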