Update app.py
app.py
CHANGED
@@ -55,17 +55,25 @@ def emotionAnalysis(message, face):
     text_dataDict["Emotion"].append(text_emotion)
     text_dataDict["Confidence Score"].append(round(text_score, 2))
 
-    # Capture and process facial emotion
-    img_rgb = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
-
-    face_emotion, face_score = face_emotion_detector.top_emotion(img_rgb)
     face_timestamp = datetime.now().astimezone().strftime(localFormat)
-
-    # Store facial emotion data for plotting
-    face_dataDict["Time"].append(face_timestamp)
-    face_dataDict["Emotion"].append(face_emotion)
-    face_dataDict["Confidence Score"].append(face_score)
-
+
+    if face is None:
+        face_emotion = "Unreadable"
+
+        face_dataDict["Time"].append(face_timestamp)
+        face_dataDict["Emotion"].append(face_emotion)
+        face_dataDict["Confidence Score"].append(0.0)
+    else:
+        # Capture and process facial emotion
+        img_rgb = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
+
+        face_emotion, face_score = face_emotion_detector.top_emotion(img_rgb)
+
+        # Store facial emotion data for plotting
+        face_dataDict["Time"].append(face_timestamp)
+        face_dataDict["Emotion"].append(face_emotion)
+        face_dataDict["Confidence Score"].append(face_score)
+
     # Return both the text result and the updated plot
     return f"Text: {text_emotion} | Face: {face_emotion}", displayResults()
 
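The new branch above logs "Unreadable" with a 0.0 score when no frame is available, which keeps the face timeline aligned with the text timeline. One edge case worth noting: if face_emotion_detector is a fer.FER instance, as the top_emotion() call suggests, top_emotion itself returns (None, None) when it finds no face in an otherwise valid frame. A minimal sketch of a defensive wrapper under that assumption (safe_top_emotion is an illustrative helper, not part of the commit):

# Sketch only: assumes face_emotion_detector = fer.FER(), as app.py's
# top_emotion() call suggests; safe_top_emotion is a hypothetical helper.
import cv2
from fer import FER

face_emotion_detector = FER()

def safe_top_emotion(frame_bgr):
    """Return (emotion, score), treating missing frames and face-less frames alike."""
    if frame_bgr is None:
        return "Unreadable", 0.0
    img_rgb = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB)
    emotion, score = face_emotion_detector.top_emotion(img_rgb)
    if emotion is None:  # fer reports (None, None) when no face is detected
        return "Unreadable", 0.0
    return emotion, round(score, 2)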
@@ -96,27 +104,79 @@ def displayResults():
 
     return plt
 
-    … (20 removed lines; their content is not visible in this capture)
+
+def process_webcam(img):
+    """
+    Process a webcam frame (the emotion-detection steps below are currently commented out)
+    Args:
+        img: Input image from webcam
+    Returns:
+        numpy.ndarray: The input frame, returned unmodified for now
+    """
+    if img is None:
+        return None
+
+    try:
+        # Convert to RGB for emotion detection
+        #img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+
+        # Detect faces and emotions
+        #result = face_emotion_detector.detect_emotions(img_rgb)
+
+        return img
+    except Exception as e:
+        print(f"Error processing image: {str(e)}")
+        return img
+
+'''
+2 rows, 2 columns
+column 1: inputs
+    row 1, col 1 = user text input
+    row 2, col 1 = camera live feed
+column 2: outputs
+    row 1, col 2 = emotion results
+    row 2, col 2 = plt graph
+'''
+with gr.Blocks(title="Emotion Reader") as emotion_reader:
+    gr.Markdown(
+        """
+        # Emotion Analysis from Text and Face
+        ⚠️ This application will use your webcam to detect facial emotions. By using this app, you consent to webcam access. Type text and press Enter to analyze both text and facial emotions.
+        """
+    )
+    with gr.Row():
+        with gr.Column(): #user text input
+            text_input = gr.Textbox(
+                label="Enter your text",
+                placeholder="Type your message here. Type 'quit' to see final results."
+            )
+        with gr.Column(): #emotion results
+            emotion_result = gr.Textbox(label="Emotion Results")
+
+    with gr.Row():
+        with gr.Column(): #camera live feed
+            input_img = gr.Image(label="Webcam Feed", sources="webcam")
+        with gr.Column(): #plt graph
+            output_img = gr.Image(label="Emotion Detection", visible=False)
+            plot_output = gr.Plot(value=displayResults(), label="Emotion Timeline")
+
+    # Stream webcam with emotion detection
+    input_img.stream(
+        process_webcam,
+        inputs=input_img,
+        outputs=output_img,
+        time_limit=15,
+        stream_every=0.1,
+        concurrency_limit=30
+    )
+
+    # Process text input
+    text_input.submit(
+        emotionAnalysis,
+        inputs=[text_input, output_img],
+        outputs=[emotion_result, plot_output]
+    )
 
 # Launch the interface
 if __name__ == "__main__":
-    … (1 removed line; its content is not visible in this capture)
+    emotion_reader.launch()
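process_webcam currently passes frames through untouched, with the detection calls left commented out. If they are re-enabled, fer's detect_emotions() returns one dict per face containing a "box" and an "emotions" score map, which is enough to draw labelled boxes on the frame. A hedged sketch of that drawing step, assuming the same fer.FER detector (draw_emotions is an illustrative helper, not part of the commit):

# Sketch only: one way the commented-out detect_emotions() call could be
# turned into on-frame annotations; draw_emotions is a hypothetical helper.
import cv2
from fer import FER

face_emotion_detector = FER()

def draw_emotions(img_bgr):
    img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
    # Each result looks like {"box": (x, y, w, h), "emotions": {label: score, ...}}
    for result in face_emotion_detector.detect_emotions(img_rgb):
        x, y, w, h = result["box"]
        label, score = max(result["emotions"].items(), key=lambda kv: kv[1])
        cv2.rectangle(img_bgr, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(img_bgr, f"{label} {score:.2f}", (x, max(y - 8, 12)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
    return img_bgr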
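A note on the wiring: input_img.stream pushes each webcam frame through process_webcam into the hidden output_img, and text_input.submit then hands that same hidden image to emotionAnalysis as its face argument, so each text submission is paired with the most recently streamed frame. The same stream-into-hidden-buffer pattern in isolation, as a minimal runnable sketch (component and function names here are illustrative, not from app.py):

# Sketch only: demonstrates the stream -> hidden image -> submit pattern
# used above; echo_frame and analyze stand in for process_webcam/emotionAnalysis.
import gradio as gr

def echo_frame(frame):
    return frame                      # pass-through, like process_webcam today

def analyze(text, frame):
    seen = "no frame yet" if frame is None else f"frame {frame.shape}"
    return f"text={text!r}, {seen}"

with gr.Blocks() as demo:
    cam = gr.Image(sources="webcam", label="Webcam")
    latest = gr.Image(visible=False)  # hidden buffer holding the last frame
    box = gr.Textbox(label="Say something")
    out = gr.Textbox(label="Result")
    cam.stream(echo_frame, inputs=cam, outputs=latest, stream_every=0.5)
    box.submit(analyze, inputs=[box, latest], outputs=out)

if __name__ == "__main__":
    demo.launch()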