File size: 7,819 Bytes
a2a1f80
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6912fb9
a2a1f80
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
import os
import requests
import json
from json.decoder import JSONDecodeError
import time
import sys
from subprocess import call
from pip._internal import main as pip

# pip(['install', 'sounddevice'])
# pip(['install', 'scipy'])
def run_cmd(command):
    """Print *command*, then execute it through the system shell.

    A Ctrl-C (KeyboardInterrupt) while the command runs aborts the
    whole process with exit status 1 instead of propagating.
    """
    try:
        print(command)
        call(command, shell=True)
    except KeyboardInterrupt:
        print("Process interrupted")
        sys.exit(1)
# Pin the Gradio version at runtime; the Blocks UI below was written
# against the 3.16 API (source=, gr.update, etc.).
run_cmd('pip install gradio==3.16.0')
# run_cmd('pip install git+https://github.com/ricardodeazambuja/colab_utils.git')
# import colab_utils as cu
import gradio as gr
import sounddevice as sd
from scipy.io.wavfile import write

# Scoring endpoint URL and API key are injected through environment
# variables (e.g. Hugging Face Space secrets); both may be None if unset.
scoring_uri = os.environ.get('url')
key = os.environ.get('key')

from IPython.display import Javascript, display
from js2py import eval_js6
from base64 import b64decode

from io import BytesIO
# pydub is installed at runtime and imported immediately afterwards.
run_cmd('pip -q install pydub')
from pydub import AudioSegment

def predict(audio_file_path):
    """Send a recorded audio clip to the scoring endpoint and parse the result.

    Args:
        audio_file_path: Path to the audio file produced by the Gradio
            Audio widget.

    Returns:
        A 3-tuple of (status message, {display label: confidence} dict,
        transcribed text). On any failure the dict is empty and the
        transcription is "".
    """
    # Read the raw bytes with a context manager so the handle is always
    # closed (the original left the file open).
    with open(audio_file_path, 'rb') as audio_file:
        input_data = audio_file.read()
    print(len(input_data))

    # These exact byte counts correspond to broken recordings produced by
    # unsupported (iOS/macOS) recorders — bail out early.
    # NOTE(review): brittle size-based heuristic; confirm these values are
    # still what unsupported devices emit.
    if len(input_data) == 88108 or len(input_data) == 94252:
        output = "It appears your recording device isn't supported by Hugging Face/Gradio yet (iOS and macOS are causing issues). Windows and android record properly, sorry for the temporary inconvenience!"
        return output, {}, ""

    # Set the content type; if authentication is enabled, also set the
    # bearer-token authorization header.
    headers = {'Content-Type': 'application/json'}
    headers['Authorization'] = f'Bearer {key}'
    # POST the raw audio bytes to the scoring URI.
    resp = requests.post(scoring_uri, input_data, headers=headers)
    try:
        obj = json.loads(resp.text)
        predictions = obj['agegroup_predictions']
        # Map the model's raw label keys to user-facing display names.
        labels = {'child_unknown':'Child (genderless)', 'teens_female':'Teen Female', 'teens_male':'Teen Male', 'twenties+_female':'Adult Female', 'twenties+_male':'Adult Male'}
        confs = {}
        for label in labels.keys():
            confArray = predictions[label]
            # Guard against an empty confidence list (original would raise
            # ZeroDivisionError here).
            avg = sum(confArray) / len(confArray) if confArray else 0.0
            confs[labels[label]] = avg

        output = "Audio processed successfully."
        return output, confs, obj['whisper'].get('text')
    except JSONDecodeError:
        # A non-JSON body means the backend rejected the clip or is down;
        # distinguish "no audible speech" from a generic overload.
        if "viable" in resp.text or "detected" in resp.text:
            output = "No viable audio detected within your clip! Make sure the clip you recorded is audible!"
        else:
            output = "Our servers are currently overloaded, try again in a few minutes."
    return output, {}, ""

btn_label_dict = {'Child': 'child_unknown', 'Teen Female': 'teens_female', 'Teen Male':'teens_male', 'Adult Female':'twenties+_female', 'Adult Male':'twenties+_male'}

def send_flag_correction(btn):
    """Report the user-selected correct label back to the scoring service.

    Args:
        btn: Display label of the flag button the user clicked
            (e.g. "Teen Male"); must be a key of ``btn_label_dict``.

    Raises:
        KeyError: If *btn* is not a known display label.
    """
    # Translate the display label into the model's internal label key.
    # (The original first assigned `btn` and immediately overwrote it —
    # that dead store is removed.)
    correct_label = btn_label_dict[btn]
    # Set the content type; if authentication is enabled, also set the
    # bearer-token authorization header (mirrors predict()).
    headers = {'Content-Type': 'application/json'}
    headers['Authorization'] = f'Bearer {key}'

    # JSON payload carrying the corrected label.
    input_data = json.dumps({"correct_label": correct_label})

    # "?feedback" routes the request to the feedback endpoint.
    resp = requests.post(scoring_uri + "?feedback", input_data, headers=headers)
    print(resp.text)
    
# Bundled sample clips surfaced in the Gradio Examples widget; paths are
# relative to the app's working directory.
example_list = [
    ['ex_kid_voice.mp3'], ["ex_adult_female_voice2.mp3"], ["ex_adult_male_voice.wav"], ["ex_teen_female_voice.mp3"], ["ex_teen_female_voice2.mp3"], ["ex_teen_male_voice.mp3"], ["ex_teen_male_voice2.mp3"]
]

# Gradio Blocks UI: a recorder/submit column on the left, results plus a
# hidden flagging workflow on the right. Event wiring happens at the bottom
# of the block.
with gr.Blocks() as demo:
    with gr.Row():
         gr.Markdown("# Litmus")
    with gr.Row():
        gr.Markdown("A tool for detecting your age group and gender with only a few seconds of audio. Record a short clip of your voice (3 or more seconds) or try out some of our examples. If the response is incorrect be sure to flag it so we can improve! Leave a comment or PM me on hugging face if you have any questions!")
    with gr.Row():
        # Left column: microphone recorder + submit button.
        with gr.Column(scale=1):
            audio = gr.Audio(type="filepath", source="microphone", label="Voice Recording")
            with gr.Row():
                submit_btn = gr.Button("Submit")
        # Right column: response text, confidence label widget, transcript,
        # and the (initially hidden) flagging controls.
        with gr.Column(scale=1):
            resp = gr.Textbox(label="Response")
            labels = gr.Label(num_top_classes=5, label="Prediction confidences")
            words = gr.Textbox(label="Detected words")
            # Revealed only after a prediction has been made.
            flag_btn = gr.Button("Flag as incorrect", visible=False)
            # Category buttons shown once the user clicks "Flag as incorrect".
            with gr.Row(visible=False) as flag_options:
                with gr.Row():
                    gr.Markdown(
                        """
                        Thanks for flagging our error! 
                        Please select the category which best represents you.
                        (NOTE: When a submission is flagged it is saved for training purposes. We appreciate you helping us improve!)
                        """)
                with gr.Row():
                    child_flag_btn = gr.Button("Child")
                    teen_f_flag_btn = gr.Button("Teen Female")
                    teen_m_flag_btn = gr.Button("Teen Male")
                    adult_f_flag_btn = gr.Button("Adult Female")
                    adult_m_flag_btn = gr.Button("Adult Male")

    def show_main_flag_btn():
        """Return a gr.update making the main flag button visible."""
        return gr.update(visible=True)

    def hide_main_flag_btn():
        """Return a gr.update hiding the main flag button."""
        return gr.update(visible=False)

    def show_flagging_options():
        """Reveal the category buttons and hide the main flag button."""
        print("showing flagging options")
        return {
            flag_options: gr.update(visible=True),
            flag_btn: gr.update(visible=False)
        }

    def hide_flagging_options():
        """Return a gr.update hiding the flagging-options row."""
        print("hiding flagging options")
        return gr.update(visible=False)

    def send_flagged_feedback(label):
        """Send the chosen correction to the backend, then hide the flag UI.

        `label` is the clicked button component's value (its display text),
        which Gradio passes when a Button is used as an input.
        """
        send_flag_correction(label)
        main_btn = hide_main_flag_btn()
        options = hide_flagging_options()
        return main_btn, options

    def trigger_predict(audio):
        """Run a prediction and reveal the flag button alongside the results."""
        print("triggering prediction")
        # options = hide_flagging_options()
        output, confs, words = predict(audio)
        btn = show_main_flag_btn()
        return output, confs, words, btn

    # NOTE(review): outputs lists 3 components while trigger_predict returns
    # 4 values — in gradio 3.16 Examples without cache_examples only populate
    # the input, so fn may never run here; confirm before relying on it.
    ex = gr.Examples(
            examples=example_list, 
            fn=trigger_predict,
            inputs=audio, 
            outputs=[resp, labels, words], 
        )
    submit_btn.click(
        fn = trigger_predict,
        inputs=audio,
        outputs=[resp, labels, words, flag_btn]
    )
    # Each category button reports the correction and collapses the flag UI.
    child_flag_btn.click(
        fn=send_flagged_feedback,
        inputs=child_flag_btn,
        outputs=[flag_btn, flag_options]
    )
    teen_f_flag_btn.click(
        fn=send_flagged_feedback,
        inputs=teen_f_flag_btn,
        outputs=[flag_btn, flag_options]
    )
    teen_m_flag_btn.click(
        fn=send_flagged_feedback,
        inputs=teen_m_flag_btn,
        outputs=[flag_btn, flag_options]
    )
    adult_f_flag_btn.click(
        fn=send_flagged_feedback,
        inputs=adult_f_flag_btn,
        outputs=[flag_btn, flag_options]
    )
    adult_m_flag_btn.click(
        fn=send_flagged_feedback,
        inputs=adult_m_flag_btn,
        outputs=[flag_btn, flag_options]
    )
    # Clicking the main flag button swaps it for the category options.
    flag_btn.click(
        show_flagging_options,
        outputs=[flag_options, flag_btn]
    )
# returning a dict with one value crashes the entire app
# passing in an fn with parentheses calls that function
# demo2 = gr.Interface(fn=predict,
#                     inputs=gr.Audio(type="filepath", source="microphone", label="Voice Recording"),
#                     outputs=[gr.Textbox(label="Response"),
#                              gr.Label(num_top_classes=5, label="Prediction confidences"), 
#                              gr.Textbox(label="Detected words")],
#                     examples=example_list,
#                     cache_examples=False,
#                     allow_flagging="manual",
#                     )

# Start the Gradio server (blocking call).
demo.launch()