Update app.py
app.py CHANGED
@@ -1,35 +1,39 @@
 from transformers import (
     AutoModelForSequenceClassification,  # For text emotion detection model
     AutoTokenizer,
     pipeline,  # For creating inference pipeline
 )
-from WebCam import capture_webcam
 from colorama import Fore, Style  # For colored console output
 import pandas as pd  # For data handling
 import time
 from datetime import datetime
 import matplotlib.pyplot as plt
 import gradio as gr
+from fer import FER
+import cv2

 # Dictionaries to store emotion data over time
 text_dataDict = {"Time": [], "Emotion": [], "Confidence Score": []}
 face_dataDict = {"Time": [], "Emotion": [], "Confidence Score": []}

-
-
+# Load model and tokenizer directly from HuggingFace
+emotionDetectModel = AutoModelForSequenceClassification.from_pretrained("borisn70/bert-43-multilabel-emotion-detection")
+tokenizer = AutoTokenizer.from_pretrained("borisn70/bert-43-multilabel-emotion-detection")  # Load tokenizer directly from model
 pipe = pipeline(task="text-classification", model=emotionDetectModel, tokenizer=tokenizer)

+face_emotion_detector = FER()
+
 localFormat = "%Y-%m-%d %H:%M:%S"  # this is how we will print the timestamp: year-month-day hour:minutes:seconds (army time)
 #currTime = datetime.now().astimezone().strftime(localFormat)  # this returns the time in the localFormat
 #current_Time_Tuple = time.strptime(str(currTime), str(localFormat))  # creates a tuple that contains each part of the local format separately
 #current_Time_In_Seconds = time.mktime(current_Time_Tuple)  # converts the tuple into the number of seconds

-def emotionAnalysis(message):
+def emotionAnalysis(message, face):
     """
     Main function that processes both text and facial emotions
     Args:
-
+        message (str): User input text
     Returns:
         tuple: (str, plt) Contains the emotion results text and the updated plot
     """
@@ -48,7 +52,16 @@ def emotionAnalysis(message):
     text_dataDict["Confidence Score"].append(round(text_score, 2))

     # Capture and process facial emotion
-
+    # Load the image using OpenCV (BGR format)
+    img = cv2.imread(face)
+    # Convert the image to RGB (FER uses RGB, OpenCV loads images in BGR format)
+    if img is None:
+        print("Error: Could not load the image. Check the image path.")
+        return
+
+    img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+
+    face_emotion, face_score = face_emotion_detector.top_emotion(img_rgb)
     face_timestamp = datetime.now().astimezone().strftime(localFormat)

     # Store facial emotion data for plotting
@@ -86,6 +99,9 @@ def displayResults():

     return plt

+#with gr.Blocks as demo:
+    #conesnt_radio = gr.Radio(["yes", "no"], label="This app uses your webcam to detect emotions from your face and reads your text inputs to determine emotions from your writing. Do you give consent? ")
+
 # Create Gradio interface with consent notice in the description
 interface = gr.Interface(
     fn=emotionAnalysis,
@@ -93,7 +109,8 @@ interface = gr.Interface(
         gr.Textbox(
             label="Enter your text",
             placeholder="Type your message here. Type 'quit' to see final results."
-        )
+        ),
+        gr.Image(label="Webcam Facial Expression", source="webcam")
     ],
     outputs=[
         gr.Text(label="Emotion Results"),
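A note on the text branch for context: the lines that actually call `pipe` sit outside the hunks shown above, but a Transformers text-classification pipeline returns a list of `{'label', 'score'}` dicts, so the surrounding code presumably resembles the sketch below. Everything here except `text_score` and the `text_dataDict` bookkeeping (both visible in the diff) is an assumption:

    # Sketch of the unseen text branch; assumes the standard pipeline output
    # format [{'label': str, 'score': float}] for a text-classification task.
    result = pipe(message)[0]           # top prediction for the input text
    text_emotion = result["label"]      # predicted emotion label
    text_score = result["score"]        # confidence in [0, 1]

    text_timestamp = datetime.now().astimezone().strftime(localFormat)
    text_dataDict["Time"].append(text_timestamp)
    text_dataDict["Emotion"].append(text_emotion)
    text_dataDict["Confidence Score"].append(round(text_score, 2))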
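On the new FER call: `top_emotion` returns an `(emotion, score)` tuple, and when no face is detected it can come back as `(None, None)` depending on the installed fer version. Since the code appends the result straight into `face_dataDict` a few lines later, a guard is worth considering; a hedged sketch reusing the diff's own names:

    # Guard the FER result before logging it; the (None, None) no-face case
    # is an assumption worth verifying against the installed fer version.
    face_emotion, face_score = face_emotion_detector.top_emotion(img_rgb)
    if face_emotion is None or face_score is None:
        print(Fore.YELLOW + "No face detected in this frame." + Style.RESET_ALL)
    else:
        face_timestamp = datetime.now().astimezone().strftime(localFormat)
        face_dataDict["Time"].append(face_timestamp)
        face_dataDict["Emotion"].append(face_emotion)
        face_dataDict["Confidence Score"].append(round(face_score, 2))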
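The commented-out consent sketch has a latent bug worth flagging before it is revived: `gr.Blocks` must be instantiated, so the line should read `with gr.Blocks() as demo:`. A minimal version of the consent gate it hints at might look like this (the corrected `consent_radio` spelling and the layout are assumptions, not part of this commit):

    # Hypothetical consent gate using gr.Blocks; not part of this commit.
    with gr.Blocks() as demo:
        consent_radio = gr.Radio(
            ["yes", "no"],
            label="This app uses your webcam to detect emotions from your face "
                  "and reads your text inputs to determine emotions from your "
                  "writing. Do you give consent?",
        )
        # The main interface could then be revealed only when the radio is "yes".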
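Finally, a compatibility note on the new webcam input: `source="webcam"` is Gradio 3.x syntax (Gradio 4.x renames it to `sources=["webcam"]`), and by default `gr.Image` hands the callback a numpy RGB array rather than a file path, so the `cv2.imread(face)` call added to `emotionAnalysis` would fail with the default `type="numpy"`. Two possible reconciliations, sketched assuming Gradio 3.x:

    # Option 1: request a file path so the cv2.imread(face) call works unchanged.
    gr.Image(label="Webcam Facial Expression", source="webcam", type="filepath")

    # Option 2: keep the default type="numpy"; Gradio then delivers an RGB array,
    # so the imread call and the BGR-to-RGB conversion can be dropped entirely.
    def emotionAnalysis(message, face):
        if face is None:  # user submitted without capturing a frame
            return "No image captured.", plt
        face_emotion, face_score = face_emotion_detector.top_emotion(face)
        ...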