import os
import requests
import json
from json.decoder import JSONDecodeError
import time
import uuid
import sys
from subprocess import call
from pip._internal import main as pip

# pip(['install', 'sounddevice'])
# pip(['install', 'scipy'])

def run_cmd(command):
    try:
        print(command)
        call(command, shell=True)
    except KeyboardInterrupt:
        print("Process interrupted")
        sys.exit(1)

# run_cmd('pip install git+https://github.com/ricardodeazambuja/colab_utils.git')
# import colab_utils as cu

import gradio as gr
import sounddevice as sd
from scipy.io.wavfile import write

scoring_uri = os.environ.get('url')
key = os.environ.get('key')
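# 'url' and 'key' are read from environment variables (presumably Space secrets):
# 'url' is the remote scoring endpoint the audio bytes are POSTed to, and 'key' is
# sent as a Bearer token in the Authorization header of every request.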
from IPython.display import Javascript, display
from js2py import eval_js6
from base64 import b64decode
from io import BytesIO

run_cmd('pip -q install pydub')
from pydub import AudioSegment

current_session_id = ""
DEMO_APP_ID = "demo_app_id"
DEMO_USER_ID = "demo_user_id"
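# A fresh current_session_id (UUID4) is generated for every prediction and echoed
# back with any flag feedback, so the backend can pair a correction with the clip
# it belongs to. DEMO_APP_ID / DEMO_USER_ID appear to be fixed placeholder ids for
# this public demo.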

def predict(audio_file_path):
    if audio_file_path is None:
        output = "Please record your voice using the record button before submitting :)"
        return output, {}, {}, ""

    with open(audio_file_path, 'rb') as f:
        input_data = f.read()
    print(len(input_data))
    # These specific byte lengths are treated as recordings from unsupported devices
    if len(input_data) == 88108 or len(input_data) == 94252:
        output = "It appears your recording device isn't supported by Hugging Face/Gradio yet (iOS and macOS are causing issues). Windows and Android record properly, sorry for the temporary inconvenience!"
        return output, {}, {}, ""

    # Set the content type
    headers = {'Content-Type': 'application/json'}
    # If authentication is enabled, set the authorization header
    headers['Authorization'] = f'Bearer {key}'

    # Make the request and display the response
    global current_session_id
    current_session_id = str(uuid.uuid4())
    input_data = append_auth_bytes(input_data)
    resp = requests.post(scoring_uri, input_data, headers=headers)

    try:
        obj = json.loads(resp.text)

        predictions = obj['agegroup_predictions']
        labels = {'child_unknown': 'Child (genderless)', 'teens_female': 'Teen Female',
                  'teens_male': 'Teen Male', 'twenties+_female': 'Adult Female',
                  'twenties+_male': 'Adult Male'}
        confs = {}
        for label in labels.keys():
            confArray = predictions[label]
            avg = sum(confArray) / len(confArray)
            confs[labels[label]] = avg

        sentiments = obj['acidity_predictions']
        sentiment_labels = {'toxicity': 'Toxic', 'severe_toxicity': 'Severe Toxicity',
                            'obscene': 'Obscene', 'threat': 'Threat', 'insult': 'Insult',
                            'identity_attack': 'Identity Hate', 'sexual_explicit': 'Sexually Explicit'}
        sentiment_confs = {}
        detected_toxicity = False
        for s in sentiment_labels.keys():
            sentiment_conf = sentiments[s]
            if float(sentiment_conf) > 0.01:
                detected_toxicity = True
            sentiment_confs[sentiment_labels[s]] = sentiment_conf
        # Replace the aggregate 'Toxic' score with an explicit 'Not Toxic' entry
        del sentiment_confs['Toxic']
        if detected_toxicity:
            sentiment_confs['Not Toxic'] = "0.0"
        else:
            sentiment_confs['Not Toxic'] = "0.99"

        output = "Audio processed successfully."
        return output, confs, sentiment_confs, obj['whisper'].get('text')
    except JSONDecodeError:
        if "viable" in resp.text or "detected" in resp.text:
            output = "No viable audio detected within your clip! Make sure the clip you recorded is audible!"
        else:
            output = "Our servers are currently overloaded, try again in a few minutes."
        return output, {}, {}, ""
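
# For reference, the endpoint's JSON response (as parsed above) roughly looks like:
# {
#   "agegroup_predictions": {"child_unknown": [0.1, ...], "teens_female": [...], ...},  # per-label lists of confidences
#   "acidity_predictions": {"toxicity": 0.02, "severe_toxicity": 0.0, ...},             # per-label toxicity scores
#   "whisper": {"text": "transcribed speech"}                                           # speech-to-text output
# }
# The exact schema is inferred from the parsing code and may differ on the server side.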

btn_label_dict = {'Child': 'child_unknown', 'Teen Female': 'teens_female', 'Teen Male': 'teens_male',
                  'Adult Female': 'twenties+_female', 'Adult Male': 'twenties+_male'}

def append_auth_bytes(input_data):
    auth_string = (DEMO_APP_ID + str(len(DEMO_APP_ID))
                   + DEMO_USER_ID + str(len(DEMO_USER_ID))
                   + current_session_id + str(len(current_session_id)))
    print(auth_string)
    auth_bytes = bytes(auth_string, 'utf-8')
    new_input_data = input_data + auth_bytes
    return new_input_data
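# Example of the auth string appended to the audio bytes (each field is followed by
# its own length):
#   demo_app_id11demo_user_id12<36-char uuid4>36
# The server presumably uses those embedded lengths to strip the trailing metadata
# off the payload before decoding the audio itself.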

def send_flag_correction(btn):
    # Map the button label (e.g. 'Teen Female') to the model's internal label
    correct_label = btn_label_dict[btn]
    # Set the content type
    headers = {'Content-Type': 'application/json'}
    # If authentication is enabled, set the authorization header
    headers['Authorization'] = f'Bearer {key}'
    # Format a JSON object containing the corrected label plus session metadata
    input_data = json.dumps({"correct_label": correct_label, "session_id": current_session_id,
                             "app_id": DEMO_APP_ID, "user_id": DEMO_USER_ID})
    resp = requests.post(scoring_uri + "?feedback", input_data, headers=headers)
    print(resp.text)
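# Feedback goes to the same scoring endpoint, distinguished only by the "?feedback"
# query string; the session_id lets the backend match the correction to the clip
# submitted earlier in predict() (this pairing is assumed from the client-side code).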

example_list = [
    ['ex_kid_voice.mp3'], ["ex_adult_female_voice2.mp3"], ["ex_adult_male_voice.wav"],
    ["ex_teen_female_voice.mp3"], ["ex_teen_female_voice2.mp3"], ["ex_teen_male_voice.mp3"]
]

with gr.Blocks() as demo:
    with gr.Row():
        gr.Markdown("# Litmus")
    with gr.Row():
        gr.Markdown("A tool for detecting toxicity in voice chat and user demographics from only a few seconds of audio. Record a short clip of your voice (3 or more seconds) or try out some of our examples. If the response is incorrect, be sure to flag it so we can improve! Leave a comment or PM me on Hugging Face if you have any questions!")
    with gr.Row():
        with gr.Column(scale=1):
            audio = gr.Audio(type="filepath", source="microphone", label="Voice Recording")
            with gr.Row():
                submit_btn = gr.Button("Submit")
        with gr.Column(scale=1):
            resp = gr.Textbox(label="Response")
            words = gr.Textbox(label="Detected words")
            labels2 = gr.Label(num_top_classes=7, label="Sentiment analysis")
            labels = gr.Label(num_top_classes=5, label="Demographic confidences")
            flag_btn = gr.Button("Flag as incorrect", visible=False)

    # Hidden row of demographic buttons, revealed after a prediction is flagged as incorrect
    with gr.Row(visible=False) as flag_options:
        with gr.Row():
            gr.Markdown(
                """
                Thanks for flagging our error!
                Please select the category which best represents you.
                (NOTE: When a submission is flagged it is saved for training purposes. We appreciate you helping us improve!)
                """)
        with gr.Row():
            child_flag_btn = gr.Button("Child")
            teen_f_flag_btn = gr.Button("Teen Female")
            teen_m_flag_btn = gr.Button("Teen Male")
            adult_f_flag_btn = gr.Button("Adult Female")
            adult_m_flag_btn = gr.Button("Adult Male")

    def show_main_flag_btn():
        return gr.update(visible=True)

    def hide_main_flag_btn():
        return gr.update(visible=False)

    def show_flagging_options():
        print("showing flagging options")
        return {
            flag_options: gr.update(visible=True),
            flag_btn: gr.update(visible=False)
        }

    def hide_flagging_options():
        print("hiding flagging options")
        return gr.update(visible=False)

    def send_flagged_feedback(label):
        send_flag_correction(label)
        main_btn = hide_main_flag_btn()
        options = hide_flagging_options()
        return main_btn, options

    def trigger_predict(audio):
        print("triggering prediction")
        # options = hide_flagging_options()
        output, confs, sentiments, words = predict(audio)
        btn = show_main_flag_btn()
        return output, confs, sentiments, words, btn
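    # trigger_predict returns five values, matching the submit_btn outputs below:
    # response text, demographic confidences, sentiment confidences, transcript,
    # and a visibility update that reveals the "Flag as incorrect" button.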

    ex = gr.Examples(
        examples=example_list,
        fn=trigger_predict,
        inputs=audio,
        outputs=[resp, labels, words],
    )

    submit_btn.click(
        fn=trigger_predict,
        inputs=audio,
        outputs=[resp, labels, labels2, words, flag_btn]
    )
    child_flag_btn.click(
        fn=send_flagged_feedback,
        inputs=child_flag_btn,
        outputs=[flag_btn, flag_options]
    )
    teen_f_flag_btn.click(
        fn=send_flagged_feedback,
        inputs=teen_f_flag_btn,
        outputs=[flag_btn, flag_options]
    )
    teen_m_flag_btn.click(
        fn=send_flagged_feedback,
        inputs=teen_m_flag_btn,
        outputs=[flag_btn, flag_options]
    )
    adult_f_flag_btn.click(
        fn=send_flagged_feedback,
        inputs=adult_f_flag_btn,
        outputs=[flag_btn, flag_options]
    )
    adult_m_flag_btn.click(
        fn=send_flagged_feedback,
        inputs=adult_m_flag_btn,
        outputs=[flag_btn, flag_options]
    )
    flag_btn.click(
        show_flagging_options,
        outputs=[flag_options, flag_btn]
    )
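    # Each flag button is passed as its own input, so the button's label string
    # (e.g. "Teen Male") is what send_flagged_feedback receives and maps through
    # btn_label_dict before posting the correction.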

# returning a dict with one value crashes the entire app
# passing in an fn with parentheses calls that function
# demo2 = gr.Interface(fn=predict,
#     inputs=gr.Audio(type="filepath", source="microphone", label="Voice Recording"),
#     outputs=[gr.Textbox(label="Response"),
#              gr.Label(num_top_classes=5, label="Prediction confidences"),
#              gr.Textbox(label="Detected words")],
#     examples=example_list,
#     cache_examples=False,
#     allow_flagging="manual",
# )

demo.launch()