echung682 committed
Commit c2f3fbb · verified · 1 Parent(s): 47a3f65

Update app.py

Files changed (1)
  1. app.py +6 -9
app.py CHANGED
@@ -23,6 +23,7 @@ tokenizer = AutoTokenizer.from_pretrained("borisn70/bert-43-multilabel-emotion-d
 pipe = pipeline(task="text-classification", model=emotionDetectModel, tokenizer=tokenizer)
 
 face_emotion_detector = FER()
+cap = cv2.VideoCapture(0)
 
 localFormat = "%Y-%m-%d %H:%M:%S" #this is how will print the timestamp: year-month-day hour-minutes-seconds (army time)
 #currTime = datetime.now().astimezone().strftime(localFormat) this returns the time in the localFormat
@@ -34,6 +35,9 @@ def emotionAnalysis(message, face):
     Main function that processes both text and facial emotions
     Args:
         message (str): User input text
+        face: Image input from Gradio interface, can be either:
+            - numpy.ndarray: Direct webcam capture (RGB or BGR format)
+            - str: File path to uploaded image
     Returns:
         tuple: (str, plt) Contains the emotion results text and the updated plot
     """
@@ -52,14 +56,7 @@ def emotionAnalysis(message, face):
     text_dataDict["Confidence Score"].append(round(text_score, 2))
 
     # Capture and process facial emotion
-    # Load the image using OpenCV (BGR format)
-    img = cv2.imread(face)
-    # Convert the image to RGB (FER uses RGB, OpenCV loads images in BGR format)
-    if img is None:
-        print("Error: Could not load the image. Check the image path.")
-        return
-
-    img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+    img_rgb = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
 
     face_emotion, face_score = face_emotion_detector.top_emotion(img_rgb)
     face_timestamp = datetime.now().astimezone().strftime(localFormat)
@@ -110,7 +107,7 @@ interface = gr.Interface(
             label="Enter your text",
             placeholder="Type your message here. Type 'quit' to see final results."
         ),
-        gr.Image(label="Webcam Facial Expression", source="webcam")
+        gr.Image(label="Webcam Facial Expression", sources=['webcam'])
     ],
     outputs=[
         gr.Text(label="Emotion Results"),
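
For reference, the replaced block read the image from a file path with cv2.imread and converted BGR to RGB, while the new line converts the Gradio-supplied array directly. Below is a minimal sketch, not the committed code, of how both input types listed in the updated docstring (numpy array or file path) could be normalized into the RGB array that FER expects; the helper names to_rgb_array and top_face_emotion are hypothetical.

import cv2
import numpy as np
from fer import FER

face_emotion_detector = FER()

def to_rgb_array(face):
    # File-path case: OpenCV loads images in BGR order, so convert before handing them to FER.
    if isinstance(face, str):
        img = cv2.imread(face)
        if img is None:
            raise ValueError("Could not load the image; check the image path.")
        return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # Array case: Gradio's gr.Image delivers a numpy array (RGB with the default type="numpy").
    return np.asarray(face)

def top_face_emotion(face):
    img_rgb = to_rgb_array(face)
    # FER.top_emotion returns the strongest (emotion_label, score) pair for the detected face.
    return face_emotion_detector.top_emotion(img_rgb)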
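
The source="webcam" to sources=['webcam'] change matches the Gradio 4.x gr.Image API, where the single source string was replaced by a sources list. A minimal sketch of the updated input component under that assumption:

import gradio as gr

# Restricting sources to ["webcam"] hides the upload and clipboard options on the component.
webcam_input = gr.Image(label="Webcam Facial Expression", sources=["webcam"])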