|
from transformers import ( |
|
AutoModelForSequenceClassification, |
|
AutoTokenizer, |
|
pipeline, |
|
|
|
) |
|
from colorama import Fore, Style |
|
import pandas as pd |
|
import time |
|
from datetime import datetime |
|
import matplotlib.pyplot as plt |
|
import gradio as gr |
|
from fer import FER |
|
import cv2 |
|
|
|
|
|
text_dataDict = {"Time": [], "Emotion": [], "Confidence Score": []} |
|
face_dataDict = {"Time": [], "Emotion": [], "Confidence Score": []} |
|
|
|
|
|
emotionDetectModel = AutoModelForSequenceClassification.from_pretrained("borisn70/bert-43-multilabel-emotion-detection") |
|
tokenizer = AutoTokenizer.from_pretrained("borisn70/bert-43-multilabel-emotion-detection") |
|
pipe = pipeline(task="text-classification", model=emotionDetectModel, tokenizer=tokenizer) |
|
|
|
face_emotion_detector = FER() |
|
|
|
localFormat = "%Y-%m-%d %H:%M:%S" |
|
|
|
|
|
|
|
|
|
def emotionAnalysis(message, face): |
|
""" |
|
Main function that processes both text and facial emotions |
|
Args: |
|
message (str): User input text |
|
Returns: |
|
tuple: (str, plt) Contains the emotion results text and the updated plot |
|
""" |
|
if (message.lower() == "quit"): |
|
return "Quitting...", displayResults() |
|
|
|
|
|
result = pipe(message) |
|
text_emotion = result[0]["label"] |
|
text_score = result[0]["score"] |
|
words_timestamp = datetime.now().astimezone().strftime(localFormat) |
|
|
|
|
|
text_dataDict["Time"].append(words_timestamp) |
|
text_dataDict["Emotion"].append(text_emotion) |
|
text_dataDict["Confidence Score"].append(round(text_score, 2)) |
|
|
|
|
|
|
|
img = cv2.imread(face) |
|
|
|
if img is None: |
|
print("Error: Could not load the image. Check the image path.") |
|
return |
|
|
|
img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) |
|
|
|
face_emotion, face_score = face_emotion_detector.top_emotion(img_rgb) |
|
face_timestamp = datetime.now().astimezone().strftime(localFormat) |
|
|
|
|
|
face_dataDict["Time"].append(face_timestamp) |
|
face_dataDict["Emotion"].append(face_emotion) |
|
face_dataDict["Confidence Score"].append(face_score) |
|
|
|
|
|
return f"Text: {text_emotion} | Face: {face_emotion}", displayResults() |
|
|
|
def displayResults(): |
|
""" |
|
Creates and returns a matplotlib plot showing emotion trends over time |
|
Returns: |
|
matplotlib.pyplot: Plot object showing emotion analysis results |
|
""" |
|
|
|
plt.figure(figsize=(10, 6)) |
|
|
|
|
|
plt.title("Emotions Detected Through Facial Expressions and Text Over Time") |
|
plt.xlabel("Time") |
|
plt.ylabel("Emotions") |
|
|
|
|
|
plt.plot(face_dataDict["Time"], face_dataDict["Emotion"], marker='o', linestyle='-', label="Facial Emotions") |
|
|
|
|
|
plt.plot(text_dataDict["Time"], text_dataDict["Emotion"], marker='o', linestyle='-', color='red', label="Text Emotions") |
|
|
|
|
|
plt.legend() |
|
plt.xticks(rotation=45) |
|
plt.tight_layout() |
|
|
|
return plt |
|
|
|
|
|
|
|
|
|
|
|
interface = gr.Interface( |
|
fn=emotionAnalysis, |
|
inputs=[ |
|
gr.Textbox( |
|
label="Enter your text", |
|
placeholder="Type your message here. Type 'quit' to see final results." |
|
), |
|
gr.Image(label="Webcam Facial Expression", source="webcam") |
|
], |
|
outputs=[ |
|
gr.Text(label="Emotion Results"), |
|
gr.Plot(label="Emotion Timeline") |
|
], |
|
title="Emotion Analysis from Text and Face", |
|
description="⚠️ This application will use your webcam to detect facial emotions. By using this app, you consent to webcam access. Type text and press Enter to analyze both text and facial emotions." |
|
) |
|
|
|
|
|
if __name__ == "__main__": |
|
interface.launch() |