echung682 committed
Commit 3e22f18 · verified · 1 Parent(s): b651257

Update app.py

Files changed (1): app.py (+37 −6)
app.py CHANGED
@@ -9,11 +9,15 @@ import matplotlib.pyplot as plt
 import gradio as gr
 from fer import FER
 import cv2
+import os
 
 # Dictionaries to store emotion data over time
 text_dataDict = {"Time": [], "Emotion": [], "Confidence Score": []}
 face_dataDict = {"Time": [], "Emotion": [], "Confidence Score": []}
 
+# List of temporary files to clean up
+temp_files = []
+
 # Load model and tokenizer directly from HuggingFace
 emotionDetectModel = AutoModelForSequenceClassification.from_pretrained("borisn70/bert-43-multilabel-emotion-detection")
 tokenizer = AutoTokenizer.from_pretrained("borisn70/bert-43-multilabel-emotion-detection") # Load tokenizer directly from model
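Note: a module-level list of file names works for a single-user Space, but the saved plots land in the app's working directory. A minimal alternative sketch using Python's standard `tempfile` module (the `plot_path` helper is hypothetical, not part of this commit) would keep them in the system temp directory instead:

```python
import os
import tempfile

temp_files = []

def plot_path(prefix="emotion_plot_"):
    # mkstemp returns an open OS-level descriptor plus an absolute path;
    # close the descriptor so matplotlib can reopen the file by name.
    fd, path = tempfile.mkstemp(prefix=prefix, suffix=".png")
    os.close(fd)
    temp_files.append(path)  # register for later cleanup
    return path
```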
@@ -37,8 +41,13 @@ def emotionAnalysis(message, face):
     Returns:
         tuple: (str, plt) Contains the emotion results text and the updated plot
     """
-    if (message.lower() == "quit"):
-        return "Quitting...", displayResults()
+    if (message.lower() == "finish"):
+        graph = displayResults()
+        filename = "Emotion_Timeline.png"
+        graph.savefig(filename)
+        temp_files.append(filename)
+        download = gr.DownloadButton(label="Download Emotion Timeline", value=filename, visible=True)
+        return "Quitting...", graph, download
 
     # Process text emotion
     result = pipe(message)
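This branch relies on Gradio (4.x) treating a component instance returned from an event handler as an update to the matching output component. A self-contained sketch of just that visibility toggle, under that assumption:

```python
import gradio as gr

def make_report(text):
    path = "report.txt"  # fixed file name, mirroring app.py's approach
    with open(path, "w") as f:
        f.write(text)
    # Returning a DownloadButton with visible=True reveals the button
    # and points it at the freshly written file.
    return gr.DownloadButton(label="Download report", value=path, visible=True)

with gr.Blocks() as demo:
    box = gr.Textbox(label="Type something and press Enter")
    btn = gr.DownloadButton(label="Download report", visible=False)
    box.submit(make_report, inputs=box, outputs=btn)

demo.launch()
```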
@@ -77,8 +86,12 @@ def emotionAnalysis(message, face):
     face_dataDict["Emotion"].append(face_emotion) # Now face_emotion will always be a string
     face_dataDict["Confidence Score"].append(face_score)
 
+    data = displayResults()
+    file_name = "unfinishedPLT.png"
+    data.savefig(file_name)
+    dL = gr.DownloadButton(label="Download Emotion Timeline", value=file_name, visible=False)
     # Return both the text result and the updated plot
+    return f"Text: {text_emotion} | Face: {face_emotion}", data, dL
 
 def displayResults():
     """
@@ -131,6 +144,18 @@ def process_webcam(img):
         print(f"Error processing image: {str(e)}")
         return img
 
+def cleanUp_Files():
+    """
+    Removes temporary plot files created during the application's runtime
+    """
+    for file in temp_files:
+        try:
+            if os.path.exists(file):
+                os.remove(file)
+                print(f"Cleaned up {file}")
+        except Exception as e:
+            print(f"Error cleaning up {file}: {str(e)}")
+
 '''
 2 rows, 2 columns
 column 1: inputs
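`cleanUp_Files` only runs when the download button is clicked, so files from sessions that never reach the "finish" step are left behind. A small sketch using the standard-library `atexit` hook would also remove them when the server process shuts down:

```python
import atexit

# Run the same cleanup when the Python process exits, so temporary
# plots are removed even if the user never clicks the download button.
atexit.register(cleanUp_Files)
```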
@@ -158,13 +183,13 @@ with gr.Blocks(title="Emotion Reader", theme=gr.themes.Ocean()) as emotion_reade
 
    5. In the "Emotion Results" box, you will see something like "Text: (emotion) | Face: (emotion)" and the timeline will update
 
-    6. You can press "Stop" to turn off the camera or type "quit" as your message to stop the program
+    6. You can press "Stop" to turn off the camera or type "finish" as your message to be able to download your results
    """
    )
    with gr.Row():
        with gr.Column(): #user text input
            text_input = gr.Textbox(
-                label="Type your thoughts here. Type 'quit' to see final results.",
+                label="Type your thoughts here. Type 'finish' to see final results.",
                placeholder="Enter text"
            )
            examples = gr.Examples(
@@ -177,6 +202,7 @@ with gr.Blocks(title="Emotion Reader", theme=gr.themes.Ocean()) as emotion_reade
            )
        with gr.Column(): #emotion results
            emotion_result = gr.Textbox(label="Emotion Results")
+            download_button = gr.DownloadButton(label="Download Emotion Timeline", visible=False)
 
    with gr.Row():
        with gr.Column(): #camera live feed
@@ -199,7 +225,12 @@ with gr.Blocks(title="Emotion Reader", theme=gr.themes.Ocean()) as emotion_reade
    text_input.submit(
        emotionAnalysis,
        inputs=[text_input, output_img],
-        outputs=[emotion_result, plot_output]
+        outputs=[emotion_result, plot_output, download_button]
+    )
+
+    #cleanup files
+    download_button.click(
+        cleanUp_Files
    )
 
    # Launch the interface
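`download_button.click(cleanUp_Files)` wires the cleanup with no inputs or outputs, which Gradio accepts for a no-argument function. Depending on timing, deleting the file in the click handler may race with the browser actually fetching it, so it may be worth keeping `Emotion_Timeline.png` out of the immediate cleanup. If the button should also hide itself again after a download, event chaining with `.then()` would do it; a sketch, assuming Gradio 4.x:

```python
def hide_download_button():
    # Return an update that hides the button again after cleanup runs.
    return gr.DownloadButton(visible=False)

# .click() returns an event object; .then() chains a follow-up step
# that runs after cleanUp_Files has finished.
download_button.click(cleanUp_Files).then(hide_download_button, outputs=download_button)
```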
 