from transformers import (
AutoModelForSequenceClassification, # For text emotion detection model
AutoTokenizer,
pipeline, # For creating inference pipeline
)
from datetime import datetime
import matplotlib.pyplot as plt
import gradio as gr
from fer import FER
import cv2
import os
# Dictionaries to store emotion data over time
text_dataDict = {"Time": [], "Emotion": [], "Confidence Score": []}
face_dataDict = {"Time": [], "Emotion": [], "Confidence Score": []}
# List of temporary files to clean up
temp_files = []
# Load model and tokenizer directly from HuggingFace
emotionDetectModel = AutoModelForSequenceClassification.from_pretrained("borisn70/bert-43-multilabel-emotion-detection")
tokenizer = AutoTokenizer.from_pretrained("borisn70/bert-43-multilabel-emotion-detection") # Load tokenizer directly from model
pipe = pipeline(task="text-classification", model=emotionDetectModel, tokenizer=tokenizer)
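# The text-classification pipeline returns a list of dicts; this app reads the
# top entry, e.g. [{"label": "joy", "score": 0.98}] (illustrative values only).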
face_emotion_detector = FER()
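# FER() defaults to OpenCV's Haar cascade face detector; FER(mtcnn=True) is a
# slower but typically more accurate alternative, if detection quality is an issue.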
localFormat = "%Y-%m-%d %H:%M:%S"  # Timestamp format: year-month-day hour:minute:second (24-hour clock), e.g. "2025-01-15 14:32:07"
# If elapsed seconds are ever needed: time.mktime(time.strptime(timestamp, localFormat))
def emotionAnalysis(message, face):
"""
Main function that processes both text and facial emotions
Args:
message (str): User input text
face: Image input from Gradio interface, can be either:
- numpy.ndarray: Direct webcam capture (RGB or BGR format)
- str: File path to uploaded image
Returns:
        tuple: (str, matplotlib.pyplot, gr.DownloadButton) - the emotion results text,
        the updated timeline plot, and the download button (made visible only after typing "finish")
"""
    if message.lower() == "finish":
graph = displayResults()
filename = "Emotion_Timeline.png"
graph.savefig(filename)
temp_files.append(filename)
download = gr.DownloadButton(label="Download Emotion Timeline", value=filename, visible=True)
return "Quitting...", graph, download
# Process text emotion
result = pipe(message)
text_emotion = result[0]["label"]
text_score = result[0]["score"]
words_timestamp = datetime.now().astimezone().strftime(localFormat)
# Store text emotion data for plotting
text_dataDict["Time"].append(words_timestamp)
text_dataDict["Emotion"].append(text_emotion)
text_dataDict["Confidence Score"].append(round(text_score, 2))
face_timestamp = datetime.now().astimezone().strftime(localFormat)
# Initialize with default values
face_emotion = "No image" # Default value
face_score = 0.0
    if face is not None:
        try:
            if isinstance(face, str):
                # Uploaded image: cv2.imread already returns a BGR array
                img_bgr = cv2.imread(face)
            else:
                # Gradio webcam frames arrive as RGB; FER (built on OpenCV) expects BGR
                img_bgr = cv2.cvtColor(face, cv2.COLOR_RGB2BGR)
            result = face_emotion_detector.top_emotion(img_bgr)
            if result is not None and result[0] is not None:  # Only update if a face was found
                face_emotion, face_score = result
            else:
                face_emotion = "No face detected"
                face_score = 0.0
        except Exception as e:
            face_emotion = f"Error processing image: {str(e)}"
            face_score = 0.0
# Store facial emotion data for plotting
face_dataDict["Time"].append(face_timestamp)
face_dataDict["Emotion"].append(face_emotion) # Now face_emotion will always be a string
face_dataDict["Confidence Score"].append(face_score)
data = displayResults()
file_name = "unfinishedPLT.png"
data.savefig(file_name)
temp_files.append(file_name)
dL = gr.DownloadButton(label="Download Emotion Timeline", value=file_name, visible=False)
# Return both the text result and the updated plot
return f"Text: {text_emotion} | Face: {face_emotion}", data, dL
def displayResults():
"""
Creates and returns a matplotlib plot showing emotion trends over time
Returns:
matplotlib.pyplot: Plot object showing emotion analysis results
"""
# Create a new figure with specified size
plt.figure(figsize=(10, 6))
# Set up plot labels and title
plt.title("Emotions Detected Through Facial Expressions and Text Over Time")
plt.xlabel("Time")
plt.ylabel("Emotions")
    # Plot facial emotions versus time (time on the x-axis)
    plt.plot(face_dataDict["Time"], face_dataDict["Emotion"], marker='o', linestyle='-', label="Facial Emotions")
    # Plot text emotions on the same axes for comparison
    plt.plot(text_dataDict["Time"], text_dataDict["Emotion"], marker='o', linestyle='-', color='red', label="Text Emotions")
    # Add the legend and format the axis labels
plt.legend()
plt.xticks(rotation=45) # Rotate timestamps for better readability
plt.tight_layout() # Adjust layout to prevent label cutoff
return plt
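# Note: gr.Plot can render the current matplotlib figure when handed the pyplot
# module, as done above; returning a Figure object would work equally well.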
def process_webcam(img):
    """
    Pass each webcam frame through unchanged so the most recent frame is
    stored in the hidden output image, where emotionAnalysis can read it.
    """
    if img is None:
        return None
    return img
def cleanUp_Files():
"""
Removes temporary plot files created during the application's runtime
"""
for file in temp_files:
try:
if os.path.exists(file):
os.remove(file)
print(f"Cleaned up {file}")
except Exception as e:
print(f"Error cleaning up {file}: {str(e)}")
'''
2 rows, 2 columns
column 1: inputs
row 1, col 1 = user text input
row 2, col 1 = camera live feed
column 2: outputs
row 1, col 2 = emotion results
row 2, col 2 = plt graph
'''
with gr.Blocks(title="Emotion Reader", theme=gr.themes.Ocean()) as emotion_reader:
gr.Markdown(
"""
# Emotion Analysis from Text and Face
⚠️ This application will use your webcam to detect facial emotions. By using this app, you consent to webcam access. Type text and press Enter to analyze both text and facial emotions.
Steps to use the app:
1. Turn on the camera by clicking "Click to Access Webcam" and allowing access
2. Click "Record" (use the dropdown arrow if you want to switch cameras)
3. Type a sentence into the text input box
4. Press "Enter" to see your results
5. The "Emotion Results" box will show something like "Text: (emotion) | Face: (emotion)" and the timeline will update
6. Press "Stop" to turn off the camera, or type "finish" as your message to download your results
"""
)
with gr.Row():
with gr.Column(): #user text input
text_input = gr.Textbox(
label="Type your thoughts here. Type 'finish' to see final results.",
placeholder="Enter text"
)
examples = gr.Examples(
examples=[
"I am feeling happy today!",
"I am feeling sad today.",
"I wish I could go on vacation."
],
inputs=text_input
)
with gr.Column(): #emotion results
emotion_result = gr.Textbox(label="Emotion Results")
download_button = gr.DownloadButton(label="Download Emotion Timeline", visible=False)
with gr.Row():
with gr.Column(): #camera live feed
input_img = gr.Image(label="Webcam Feed", sources="webcam")
with gr.Column(): #plt graph
output_img = gr.Image(label="Emotion Detection", visible=False)
plot_output = gr.Plot(value=displayResults(), label="Emotion Timeline")
# Stream webcam with emotion detection
input_img.stream(
process_webcam,
inputs=input_img,
outputs=output_img,
time_limit=15,
stream_every=0.1,
concurrency_limit=30
)
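    # stream_every=0.1 forwards a frame roughly every 100 ms; time_limit=15
    # caps each streaming session at 15 seconds before Gradio pauses the stream.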
# Process text input
text_input.submit(
emotionAnalysis,
inputs=[text_input, output_img],
outputs=[emotion_result, plot_output, download_button]
)
    # Delete the temporary plot files once the download button is clicked
download_button.click(
cleanUp_Files
)
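    # Note: this assumes the browser has already fetched the file by the time the
    # click event fires; if downloads ever arrive empty, defer cleanup (e.g. to atexit).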
# Launch the interface
if __name__ == "__main__":
emotion_reader.launch() |