from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    pipeline,
)
from datetime import datetime
import matplotlib.pyplot as plt
import gradio as gr
from fer import FER
import cv2
import os
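
# Session-wide logs of each analysis result, one dict per modality;
# appended on every submit and read by displayResults()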
text_dataDict = {"Time": [], "Emotion": [], "Confidence Score": []}
face_dataDict = {"Time": [], "Emotion": [], "Confidence Score": []}
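
# Plot images written to disk during the session; deleted by cleanUp_Files()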
temp_files = []
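
# Text-emotion classifier: a BERT fine-tune with 43 emotion labels,
# wrapped in a text-classification pipeline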
emotionDetectModel = AutoModelForSequenceClassification.from_pretrained("borisn70/bert-43-multilabel-emotion-detection")
tokenizer = AutoTokenizer.from_pretrained("borisn70/bert-43-multilabel-emotion-detection")
pipe = pipeline(task="text-classification", model=emotionDetectModel, tokenizer=tokenizer)
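
# Each call returns a list with the top label and its score,
# e.g. pipe("I am happy") -> [{"label": "joy", "score": 0.98}] (label name illustrative)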

# Facial-expression detector; top_emotion() returns (label, score), or (None, None) when no face is found
face_emotion_detector = FER()

# Timestamp format shared by both logs
localFormat = "%Y-%m-%d %H:%M:%S"


def emotionAnalysis(message, face):
    """
    Main function that processes both text and facial emotions for one submission.

    Args:
        message (str): User input text; typing "finish" ends the session
            and reveals the download button.
        face: Image input from the Gradio interface, either:
            - numpy.ndarray: direct webcam capture (RGB)
            - str: file path to an uploaded image

    Returns:
        tuple: (emotion results text, updated timeline plot, gr.DownloadButton)
    """
    if message.lower() == "finish":
        graph = displayResults()
        filename = "Emotion_Timeline.png"
        graph.savefig(filename)
        temp_files.append(filename)
        download = gr.DownloadButton(label="Download Emotion Timeline", value=filename, visible=True)
        return "Quitting...", graph, download

    # Classify the text; the pipeline returns the top label and its confidence
    result = pipe(message)
    text_emotion = result[0]["label"]
    text_score = result[0]["score"]
    words_timestamp = datetime.now().astimezone().strftime(localFormat)

    text_dataDict["Time"].append(words_timestamp)
    text_dataDict["Emotion"].append(text_emotion)
    text_dataDict["Confidence Score"].append(round(text_score, 2))

    face_timestamp = datetime.now().astimezone().strftime(localFormat)

    # Defaults used when no frame is available
    face_emotion = "No image"
    face_score = 0.0

    if face is not None:
        try:
            if isinstance(face, str):
                # Uploaded file path: cv2.imread already returns BGR
                img_bgr = cv2.imread(face)
            else:
                # Webcam frames arrive as RGB arrays; FER expects OpenCV's BGR order
                img_bgr = cv2.cvtColor(face, cv2.COLOR_RGB2BGR)
            result = face_emotion_detector.top_emotion(img_bgr)
            if result[0] is not None:
                face_emotion, face_score = result
            else:
                face_emotion = "No face detected"
                face_score = 0.0
        except Exception as e:
            face_emotion = f"Error processing image: {str(e)}"
            face_score = 0.0

    face_dataDict["Time"].append(face_timestamp)
    face_dataDict["Emotion"].append(face_emotion)
    face_dataDict["Confidence Score"].append(face_score)

    # Refresh the timeline plot and save it so the download button always has
    # a current file (the button stays hidden until the user types "finish")
    data = displayResults()
    file_name = "unfinishedPLT.png"
    data.savefig(file_name)
    temp_files.append(file_name)
    dL = gr.DownloadButton(label="Download Emotion Timeline", value=file_name, visible=False)

    return f"Text: {text_emotion} | Face: {face_emotion}", data, dL


def displayResults():
    """
    Creates and returns a matplotlib plot showing emotion trends over time.

    Returns:
        matplotlib.pyplot: Plot object showing emotion analysis results
    """
    plt.figure(figsize=(10, 6))
    plt.title("Emotions Detected Through Facial Expressions and Text Over Time")
    plt.xlabel("Time")
    plt.ylabel("Emotions")

    plt.plot(face_dataDict["Time"], face_dataDict["Emotion"], marker='o', linestyle='-', label="Facial Emotions")
    plt.plot(text_dataDict["Time"], text_dataDict["Emotion"], marker='o', linestyle='-', color='red', label="Text Emotions")

    plt.legend()
    plt.xticks(rotation=45)
    plt.tight_layout()

    return plt


def process_webcam(img):
    """
    Pass each webcam frame through unchanged so emotionAnalysis can read the
    latest frame; kept as a hook for drawing detection results on frames later.
    """
    if img is None:
        return None
    return img


def cleanUp_Files():
    """
    Removes temporary plot files created during the application's runtime
    """
    for file in temp_files:
        try:
            if os.path.exists(file):
                os.remove(file)
                print(f"Cleaned up {file}")
        except Exception as e:
            print(f"Error cleaning up {file}: {str(e)}")


'''
Layout: 2 rows, 2 columns
    column 1: inputs
        row 1, col 1 = user text input
        row 2, col 1 = camera live feed
    column 2: outputs
        row 1, col 2 = emotion results
        row 2, col 2 = plt graph
'''
with gr.Blocks(title="Emotion Reader", theme=gr.themes.Ocean()) as emotion_reader:
    gr.Markdown(
        """
        # Emotion Analysis from Text and Face

        ⚠️ This application uses your webcam to detect facial emotions. By using this app, you consent to webcam access. Type text and press Enter to analyze both text and facial emotions.

        Steps to use the app:

        1. Turn on the camera by clicking where it says "Click to Access Webcam" and allow access
        2. Click "Record" (use the dropdown arrow if you want to change your camera)
        3. Type a sentence into the text input box
        4. Press "Enter" to see your results
        5. The "Emotion Results" box will show something like "Text: (emotion) | Face: (emotion)" and the timeline will update
        6. Press "Stop" to turn off the camera, or type "finish" as your message to download your results
        """
    )
    with gr.Row():
        with gr.Column():
            text_input = gr.Textbox(
                label="Type your thoughts here. Type 'finish' to see final results.",
                placeholder="Enter text"
            )
            examples = gr.Examples(
                examples=[
                    "I am feeling happy today!",
                    "I am feeling sad today.",
                    "I wish I could go on vacation."
                ],
                inputs=text_input
            )
        with gr.Column():
            emotion_result = gr.Textbox(label="Emotion Results")
            download_button = gr.DownloadButton(label="Download Emotion Timeline", visible=False)

    with gr.Row():
        with gr.Column():
            input_img = gr.Image(label="Webcam Feed", sources="webcam")
        with gr.Column():
            output_img = gr.Image(label="Emotion Detection", visible=False)
            plot_output = gr.Plot(value=displayResults(), label="Emotion Timeline")
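
    # Stream webcam frames into the hidden output image; emotionAnalysis then
    # reads that image as its face input when text is submitted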
    input_img.stream(
        process_webcam,
        inputs=input_img,
        outputs=output_img,
        time_limit=15,
        stream_every=0.1,
        concurrency_limit=30
    )
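
    # Pressing Enter analyzes the text together with the latest webcam frame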
    text_input.submit(
        emotionAnalysis,
        inputs=[text_input, output_img],
        outputs=[emotion_result, plot_output, download_button]
    )
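
    # Clicking the download button also clears the temporary plot files from disk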
    download_button.click(
        cleanUp_Files
    )


if __name__ == "__main__":
    emotion_reader.launch()