from transformers import (
    AutoModelForSequenceClassification,  # Text emotion detection model
    AutoTokenizer,                       # Matching tokenizer
    pipeline,                            # For creating the inference pipeline
)
import time
from datetime import datetime
import matplotlib.pyplot as plt
import gradio as gr
from fer import FER
import cv2

# Dictionaries to store emotion data over time
text_dataDict = {"Time": [], "Emotion": [], "Confidence Score": []}
face_dataDict = {"Time": [], "Emotion": [], "Confidence Score": []}

# Load model and tokenizer directly from HuggingFace
emotionDetectModel = AutoModelForSequenceClassification.from_pretrained("borisn70/bert-43-multilabel-emotion-detection")
tokenizer = AutoTokenizer.from_pretrained("borisn70/bert-43-multilabel-emotion-detection")  # Load tokenizer directly from model
pipe = pipeline(task="text-classification", model=emotionDetectModel, tokenizer=tokenizer)
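
# Quick sanity check of the pipeline's output shape (the label/score values
# below are illustrative, not taken from this model's actual label set):
#   pipe("I am thrilled today!")  ->  [{"label": "joy", "score": 0.97}]
# emotionAnalysis() below relies on this list-of-dicts format.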

face_emotion_detector = FER()
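# FER.top_emotion(img) returns a (label, score) tuple such as ("happy", 0.92),
# or (None, None) when no face is detected; emotionAnalysis() guards for that case.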

localFormat = "%Y-%m-%d %H:%M:%S"  # timestamp format: year-month-day hour:minute:second (24-hour clock)
# If epoch seconds are ever needed: time.mktime(time.strptime(timestamp_str, localFormat))
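# Example: datetime(2024, 5, 1, 14, 30, 0).strftime(localFormat) -> "2024-05-01 14:30:00"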

def emotionAnalysis(message, face):
    """
    Main function that processes both text and facial emotions
    Args:
        message (str): User input text
        face (str): Filepath of the webcam capture (gr.Image with type="filepath")
    Returns:
        tuple: (str, matplotlib.figure.Figure) Emotion results text and the updated plot
    """
    if message.lower() == "quit":
        return "Quitting...", displayResults()
    
    # Process text emotion
    result = pipe(message)
    text_emotion = result[0]["label"]
    text_score = result[0]["score"]
    words_timestamp = datetime.now().astimezone().strftime(localFormat)
    
    # Store text emotion data for plotting
    text_dataDict["Time"].append(words_timestamp)
    text_dataDict["Emotion"].append(text_emotion)
    text_dataDict["Confidence Score"].append(round(text_score, 2))
    
    # Capture and process facial emotion
    # Load the image using OpenCV (BGR format)
    img = cv2.imread(face)
    if img is None:
        # Return a valid (text, plot) pair so both Gradio outputs are populated even on failure
        return "Error: could not load the webcam image. Check the image path.", displayResults()

    # Convert the image to RGB (FER expects RGB; OpenCV loads images in BGR)
    
    img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    face_emotion, face_score = face_emotion_detector.top_emotion(img_rgb)
    if face_emotion is None:
        # FER returns (None, None) when no face is detected in the frame
        face_emotion, face_score = "no face detected", 0.0
    face_timestamp = datetime.now().astimezone().strftime(localFormat)
    
    # Store facial emotion data for plotting
    face_dataDict["Time"].append(face_timestamp)
    face_dataDict["Emotion"].append(face_emotion)
    face_dataDict["Confidence Score"].append(face_score)
    
    # Return both the text result and the updated plot
    return f"Text: {text_emotion} | Face: {face_emotion}", displayResults()

def displayResults():
    """
    Creates and returns a matplotlib figure showing emotion trends over time
    Returns:
        matplotlib.figure.Figure: Figure showing emotion analysis results
    """
    # Create a new figure with a specified size and return it explicitly
    # (avoids relying on pyplot's implicit current-figure state)
    fig = plt.figure(figsize=(10, 6))

    # Set up plot labels and title
    plt.title("Emotions Detected Through Facial Expressions and Text Over Time")
    plt.xlabel("Time")
    plt.ylabel("Emotions")

    # Plot facial emotions versus time (time on the x-axis)
    plt.plot(face_dataDict["Time"], face_dataDict["Emotion"], marker='o', linestyle='-', label="Facial Emotions")

    # Plot text emotions versus time (time on the x-axis)
    plt.plot(text_dataDict["Time"], text_dataDict["Emotion"], marker='o', linestyle='-', color='red', label="Text Emotions")

    # Legend and readable timestamps
    plt.legend()
    plt.xticks(rotation=45)  # Rotate timestamps for better readability
    plt.tight_layout()       # Adjust layout to prevent label cutoff

    return fig


# Create Gradio interface with consent notice in the description
interface = gr.Interface(
    fn=emotionAnalysis,
    inputs=[
        gr.Textbox(
            label="Enter your text",
            placeholder="Type your message here. Type 'quit' to see final results."
        ),
        gr.Image(label="Webcam Facial Expression", source="webcam", type="filepath")
    ],
    outputs=[
        gr.Text(label="Emotion Results"),
        gr.Plot(label="Emotion Timeline")
    ],
    title="Emotion Analysis from Text and Face",
    description="⚠️ This application will use your webcam to detect facial emotions. By using this app, you consent to webcam access. Type text and press Enter to analyze both text and facial emotions."
)
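
# Note: gr.Image(source="webcam", type="filepath") follows the Gradio 3.x API;
# in Gradio 4+, the equivalent is gr.Image(sources=["webcam"], type="filepath").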

# Launch the interface
if __name__ == "__main__":
    interface.launch()