Max Brodeur-Urbas committed
Commit 8a6be38 · 1 Parent(s): cb40d90

sentiment analysis added :4head:

Files changed (1):
  app.py  +25 -9
app.py CHANGED
@@ -37,14 +37,14 @@ from pydub import AudioSegment
 def predict(audio_file_path):
     if(audio_file_path == None):
         output = "Please record your voice using the record button before submitting :)"
-        return output, {}, ""
+        return output, {}, {}, ""

     input_data = open(audio_file_path, 'rb').read()
     print(len(input_data))

     if(len(input_data) == 88108 or len(input_data) == 94252):
         output = "It appears your recording device isn't supported by Hugging Face/Gradio yet (iOS and macOS are causing issues). Windows and android record properly, sorry for the temporary inconvenience!"
-        return output, {}, ""
+        return output, {}, {}, ""

     # Set the content type
     headers = {'Content-Type': 'application/json'}
@@ -61,15 +61,30 @@ def predict(audio_file_path):
             confArray = predictions[label]
             avg = sum(confArray) / len(confArray)
             confs[labels[label]] = avg
-
+
+        sentiments = obj['toxicity_predictions']
+        sentiment_labels = {'toxicity':'Toxic', 'severe_toxicity':'Severe Toxicity', 'obscene':'Obscene', 'threat':'Threat', 'insult':'Insult', 'identity_attack':'Identity Hate', 'sexual_explicit':'Sexually Explicit'}
+        sentiment_confs = {}
+        detected_toxicity = False
+        for s in sentiment_labels.keys():
+            sentiment_conf = sentiments[s]
+            if float(sentiment_conf) > 0.01:
+                detected_toxicity = True
+            sentiment_confs[sentiment_labels[s]] = sentiment_conf
+
+        if detected_toxicity:
+            sentiment_confs['Not Toxic'] = "0.0"
+        else:
+            sentiment_confs['Not Toxic'] = "0.99"
+
         output = "Audio processed successfully."
-        return output, confs, obj['whisper'].get('text')
+        return output, confs, sentiment_confs, obj['whisper'].get('text')
     except JSONDecodeError as e:
         if "viable" in resp.text or "detected" in resp.text:
            output = "No viable audio detected within your clip! Make sure the clip you recorded is audible!"
         else:
            output = "Our servers are currently overloaded, try again in a few minutes."
-        return output, {}, ""
+        return output, {}, {}, ""

 btn_label_dict = {'Child': 'child_unknown', 'Teen Female': 'teens_female', 'Teen Male':'teens_male', 'Adult Female':'twenties+_female', 'Adult Male':'twenties+_male'}
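The block added above is the backend half of the new sentiment feature: it reads the service's toxicity_predictions scores, renames them for display, flags the clip as toxic if any score exceeds 0.01, and appends a synthetic 'Not Toxic' entry so the label widget always has a dominant class. A minimal stand-alone sketch of that post-processing; the helper name and the sample scores are illustrative, not part of app.py:

# Sketch: mirrors the new post-processing in predict(); names and scores are illustrative.
SENTIMENT_LABELS = {'toxicity': 'Toxic', 'severe_toxicity': 'Severe Toxicity',
                    'obscene': 'Obscene', 'threat': 'Threat', 'insult': 'Insult',
                    'identity_attack': 'Identity Hate', 'sexual_explicit': 'Sexually Explicit'}

def build_sentiment_confs(sentiments):
    # Rename each backend key to its display name and remember whether any
    # score crosses the 0.01 threshold used in this commit.
    sentiment_confs = {}
    detected_toxicity = False
    for key, display_name in SENTIMENT_LABELS.items():
        conf = sentiments[key]
        if float(conf) > 0.01:
            detected_toxicity = True
        sentiment_confs[display_name] = conf
    # Synthetic catch-all so the label component always has a top class to show.
    sentiment_confs['Not Toxic'] = "0.0" if detected_toxicity else "0.99"
    return sentiment_confs

# Made-up scores for illustration only:
print(build_sentiment_confs({'toxicity': '0.004', 'severe_toxicity': '0.001',
                             'obscene': '0.002', 'threat': '0.001', 'insult': '0.003',
                             'identity_attack': '0.001', 'sexual_explicit': '0.001'}))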
@@ -103,8 +118,9 @@ with gr.Blocks() as demo:
             submit_btn = gr.Button("Submit")
         with gr.Column(scale=1):
             resp = gr.Textbox(label="Response")
-            labels = gr.Label(num_top_classes=5, label="Prediction confidences")
             words = gr.Textbox(label="Detected words")
+            labels2 = gr.Label(num_top_classes=7, label="Sentiment analysis")
+            labels = gr.Label(num_top_classes=5, label="Demographic confidences")
             flag_btn = gr.Button("Flag as incorrect", visible=False)
     with gr.Row(visible=False) as flag_options:
         with gr.Row():
@@ -147,9 +163,9 @@ with gr.Blocks() as demo:
     def trigger_predict(audio):
         print("triggering prediction")
         # options = hide_flagging_options()
-        output, confs, words = predict(audio)
+        output, confs, sentiments, words = predict(audio)
         btn = show_main_flag_btn()
-        return output, confs, words, btn
+        return output, confs, sentiments, words, btn

     ex = gr.Examples(
         examples=example_list,
@@ -160,7 +176,7 @@ with gr.Blocks() as demo:
     submit_btn.click(
         fn = trigger_predict,
         inputs=audio,
-        outputs=[resp, labels, words, flag_btn]
+        outputs=[resp, labels, labels2, words, flag_btn]
     )
     child_flag_btn.click(
         fn=send_flagged_feedback,
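In the UI wiring above, Gradio matches trigger_predict's return values to the outputs list by position: output → resp, confs → labels, sentiments → labels2, words → words, btn → flag_btn. A minimal stand-alone sketch of that pattern, assuming standard Gradio Blocks behaviour; the fake prediction and component values below are illustrative, not from app.py:

import gradio as gr

def fake_predict(audio_path):
    # gr.Label renders a dict of class name -> confidence; these values are made up.
    demographic_confs = {'Adult Male': 0.61, 'Adult Female': 0.22, 'Teen Male': 0.17}
    sentiment_confs = {'Not Toxic': 0.99, 'Toxic': 0.004}
    return "Audio processed successfully.", demographic_confs, sentiment_confs, "hello world"

with gr.Blocks() as sketch:
    audio = gr.Audio(type="filepath")
    resp = gr.Textbox(label="Response")
    words = gr.Textbox(label="Detected words")
    labels2 = gr.Label(num_top_classes=7, label="Sentiment analysis")
    labels = gr.Label(num_top_classes=5, label="Demographic confidences")
    # Return values map onto outputs positionally, as in the commit's submit_btn.click wiring.
    gr.Button("Submit").click(fn=fake_predict, inputs=audio,
                              outputs=[resp, labels, labels2, words])

# sketch.launch()  # uncomment to try the sketch locally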