from transformers import (
AutoModelForSequenceClassification, # For text emotion detection model
AutoTokenizer,
pipeline, # For creating inference pipeline
)
from datetime import datetime
import matplotlib.pyplot as plt
import gradio as gr
from fer import FER
import cv2
# Dictionaries to store emotion data over time
text_dataDict = {"Time": [], "Emotion": [], "Confidence Score": []}
face_dataDict = {"Time": [], "Emotion": [], "Confidence Score": []}
# Load model and tokenizer directly from HuggingFace
emotionDetectModel = AutoModelForSequenceClassification.from_pretrained("borisn70/bert-43-multilabel-emotion-detection")
tokenizer = AutoTokenizer.from_pretrained("borisn70/bert-43-multilabel-emotion-detection") # Load tokenizer directly from model
pipe = pipeline(task="text-classification", model=emotionDetectModel, tokenizer=tokenizer)
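# Note (a minimal sanity-check sketch; the exact labels depend on this model's
# 43-emotion taxonomy): the text-classification pipeline returns a list of dicts, e.g.
#   pipe("I love this!")  ->  [{"label": "joy", "score": 0.98}]
# which is why results are read as result[0]["label"] and result[0]["score"] below.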
face_emotion_detector = FER()
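# FER's top_emotion() returns a (label, score) pair for the most confident emotion,
# or (None, None) when no face is found, so the handler below guards against None.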
localFormat = "%Y-%m-%d %H:%M:%S"  # timestamp format: year-month-day hour:minutes:seconds (24-hour clock)
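# Example: datetime.now().astimezone().strftime(localFormat) -> "2025-01-15 14:30:05"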
def emotionAnalysis(message, face):
    """
    Main function that processes both text and facial emotions.
    Args:
        message (str): User input text
        face (str): Filepath to the webcam capture (gr.Image with type="filepath")
    Returns:
        tuple: (str, matplotlib.figure.Figure) The emotion results text and the updated plot
    """
    if message.lower() == "quit":
        return "Quitting...", displayResults()

    # Process text emotion
    result = pipe(message)
    text_emotion = result[0]["label"]
    text_score = result[0]["score"]
    words_timestamp = datetime.now().astimezone().strftime(localFormat)

    # Store text emotion data for plotting
    text_dataDict["Time"].append(words_timestamp)
    text_dataDict["Emotion"].append(text_emotion)
    text_dataDict["Confidence Score"].append(round(text_score, 2))

    # Load the webcam capture with OpenCV (BGR format)
    img = cv2.imread(face)
    if img is None:
        return "Error: could not load the image. Check the webcam capture.", displayResults()

    # Convert to RGB, since FER expects RGB while OpenCV loads BGR
    img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    face_emotion, face_score = face_emotion_detector.top_emotion(img_rgb)
    face_timestamp = datetime.now().astimezone().strftime(localFormat)

    # top_emotion() returns (None, None) when no face is detected
    if face_emotion is None:
        return f"Text: {text_emotion} | Face: none detected", displayResults()

    # Store facial emotion data for plotting
    face_dataDict["Time"].append(face_timestamp)
    face_dataDict["Emotion"].append(face_emotion)
    face_dataDict["Confidence Score"].append(face_score)

    # Return both the text result and the updated plot
    return f"Text: {text_emotion} | Face: {face_emotion}", displayResults()
def displayResults():
    """
    Creates and returns a matplotlib figure showing emotion trends over time.
    Returns:
        matplotlib.figure.Figure: Plot of the emotion analysis results
    """
    # Create a new figure with specified size
    fig = plt.figure(figsize=(10, 6))

    # Set up plot labels and title
    plt.title("Emotions Detected Through Facial Expressions and Text Over Time")
    plt.xlabel("Time")
    plt.ylabel("Emotions")

    # Plot facial emotions versus time (time on the x-axis)
    plt.plot(face_dataDict["Time"], face_dataDict["Emotion"], marker='o', linestyle='-', label="Facial Emotions")

    # Plot text emotions versus time (time on the x-axis)
    plt.plot(text_dataDict["Time"], text_dataDict["Emotion"], marker='o', linestyle='-', color='red', label="Text Emotions")

    # Show the legend and tidy the axes
    plt.legend()
    plt.xticks(rotation=45)  # Rotate timestamps for better readability
    plt.tight_layout()  # Adjust layout to prevent label cutoff
    return fig
# Create Gradio interface with consent notice in the description
interface = gr.Interface(
    fn=emotionAnalysis,
    inputs=[
        gr.Textbox(
            label="Enter your text",
            placeholder="Type your message here. Type 'quit' to see final results."
        ),
        # type="filepath" so the handler receives a path that cv2.imread can load
        gr.Image(label="Webcam Facial Expression", source="webcam", type="filepath")
    ],
    outputs=[
        gr.Text(label="Emotion Results"),
        gr.Plot(label="Emotion Timeline")
    ],
    title="Emotion Analysis from Text and Face",
    description="⚠️ This application will use your webcam to detect facial emotions. By using this app, you consent to webcam access. Type text and press Enter to analyze both text and facial emotions."
)
# Launch the interface
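# launch() serves the app locally (Gradio defaults to http://127.0.0.1:7860);
# pass share=True for a temporary public link if desired.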
if __name__ == "__main__":
    interface.launch()