Create app.py
app.py
ADDED
from transformers import (
    AutoModelForSequenceClassification,  # For the text emotion detection model
    pipeline,  # For creating the inference pipeline
    PreTrainedTokenizerFast  # For processing text input
)
from WebCam import capture_webcam
from colorama import Fore, Style  # For colored console output
import pandas as pd  # For data handling (imported but not yet used)
import time  # Used only by the commented-out timestamp-conversion snippet below
from datetime import datetime
import matplotlib.pyplot as plt
import gradio as gr

# Dictionaries to store emotion data over time
text_dataDict = {"Time": [], "Emotion": [], "Confidence Score": []}
face_dataDict = {"Time": [], "Emotion": [], "Confidence Score": []}

emotionDetectModel = AutoModelForSequenceClassification.from_pretrained("borisn70/bert-43-multilabel-emotion-detection")  # to be replaced with my fine-tuned model once it is ready
tokenizer = PreTrainedTokenizerFast(tokenizer_file="tokenizer.json")  # expects a local tokenizer.json next to app.py
pipe = pipeline(task="text-classification", model=emotionDetectModel, tokenizer=tokenizer)
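
# Example (hypothetical values; the exact labels depend on the checkpoint's config):
# pipe("I am thrilled about the results!") -> [{'label': 'joy', 'score': 0.98}]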

localFormat = "%Y-%m-%d %H:%M:%S"  # timestamp format: year-month-day hour:minutes:seconds (24-hour clock)
# currTime = datetime.now().astimezone().strftime(localFormat)  # returns the current time in localFormat
# current_Time_Tuple = time.strptime(str(currTime), str(localFormat))  # creates a tuple holding each part of the format separately
# current_Time_In_Seconds = time.mktime(current_Time_Tuple)  # converts the tuple into the number of seconds
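# Example: datetime.now().astimezone().strftime(localFormat) yields a string
# like "2024-05-17 14:03:09" (illustrative value).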

def emotionAnalysis(message):
    """
    Main function that processes both text and facial emotions.

    Args:
        message (str): User input text

    Returns:
        tuple: (str, matplotlib.figure.Figure) The emotion results text and the updated plot
    """
    if message.lower() == "quit":
        return "Quitting...", displayResults()

    # Process text emotion
    result = pipe(message)
    text_emotion = result[0]["label"]
    text_score = result[0]["score"]
    words_timestamp = datetime.now().astimezone().strftime(localFormat)

    # Store text emotion data for plotting
    text_dataDict["Time"].append(words_timestamp)
    text_dataDict["Emotion"].append(text_emotion)
    text_dataDict["Confidence Score"].append(round(text_score, 2))

    # Capture and process facial emotion
    face_emotion, face_score = capture_webcam()
    face_timestamp = datetime.now().astimezone().strftime(localFormat)

    # Store facial emotion data for plotting
    face_dataDict["Time"].append(face_timestamp)
    face_dataDict["Emotion"].append(face_emotion)
    face_dataDict["Confidence Score"].append(face_score)

    # Return both the text result and the updated plot
    return f"Text: {text_emotion} | Face: {face_emotion}", displayResults()
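
# Example round trip (labels are illustrative; they depend on the two models):
# emotionAnalysis("I just aced my exam!") -> ("Text: joy | Face: happy", <Figure>)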

def displayResults():
    """
    Creates and returns a matplotlib figure showing emotion trends over time.

    Returns:
        matplotlib.figure.Figure: Figure showing emotion analysis results
    """
    # Create a new figure with specified size
    fig = plt.figure(figsize=(10, 6))

    # Set up plot labels and title
    plt.title("Emotions Detected Through Facial Expressions and Text Over Time")
    plt.xlabel("Time")
    plt.ylabel("Emotions")

    # Plot facial emotions versus time, with time on the x-axis
    plt.plot(face_dataDict["Time"], face_dataDict["Emotion"], marker='o', linestyle='-', label="Facial Emotions")

    # Plot text emotions versus time, with time on the x-axis
    plt.plot(text_dataDict["Time"], text_dataDict["Emotion"], marker='o', linestyle='-', color='red', label="Text Emotions")

    # Show the legend and tidy up the axes
    plt.legend()
    plt.xticks(rotation=45)  # Rotate timestamps for better readability
    plt.tight_layout()  # Adjust layout to prevent label cutoff

    # Return the figure object (rather than the pyplot module) so gr.Plot can render it
    return fig

# Get user consent for webcam access
print(Fore.GREEN + "This program will analyze your text for emotions and use your webcam to detect your emotions from your face. Do you give consent? (yes/no): " + Style.RESET_ALL)
consent = input()

if consent.lower() == 'yes':
    # Create Gradio interface with both text output and plot visualization
    interface = gr.Interface(
        fn=emotionAnalysis,
        inputs=["text"],
        outputs=[
            gr.Text(label="Emotion Results"),  # Shows current emotion analysis
            gr.Plot(label="Emotion Timeline")  # Shows emotion trends over time
        ],
        title="Emotion Analysis from Text and Face",
        description="Enter text into the textbox. Then, press 'Submit' or 'Enter' to activate the webcam. Wait and see the results."
    )

    interface.launch()
else:
    print(Fore.RED + "Consent not given; exiting without launching the app." + Style.RESET_ALL)
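
Note: app.py imports capture_webcam from a local WebCam module that is not included in this commit. The following is a minimal hypothetical sketch of that helper, assuming OpenCV (cv2) for grabbing a frame and the deepface package for facial-emotion classification; both dependencies are assumptions, and the author's actual WebCam.py may differ.

import cv2  # hypothetical dependency for webcam capture
from deepface import DeepFace  # hypothetical dependency for emotion inference

def capture_webcam():
    """Capture one frame from the default webcam and return (emotion, confidence)."""
    cam = cv2.VideoCapture(0)  # open the default camera
    ok, frame = cam.read()     # grab a single frame
    cam.release()
    if not ok:
        return "no face detected", 0.0
    # enforce_detection=False prevents an exception when no face is found
    result = DeepFace.analyze(frame, actions=["emotion"], enforce_detection=False)
    analysis = result[0] if isinstance(result, list) else result  # handle both return shapes
    emotion = analysis["dominant_emotion"]
    score = round(analysis["emotion"][emotion] / 100, 2)  # deepface reports percentages
    return emotion, score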