Npps committed on
Commit 074d8ee · verified · 1 Parent(s): bb86ffa

Update app.py

Files changed (1)
  1. app.py +44 -110
app.py CHANGED
@@ -1,110 +1,44 @@
- # Import necessary libraries
- import os
- import cv2
- import numpy as np
- import gradio as gr
- from keras.models import load_model
-
- # Set TensorFlow environment variable
- os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'
-
- # Load the pre-trained emotion classification model
- classifier = load_model(r'Final_Resnet50_Best_model.keras')
-
- # Initialize the face classifier with the Haar Cascade model for face detection
- face_classifier = cv2.CascadeClassifier(r'haarcascade_frontalface_default.xml')
-
- # Define the list of emotion labels
- emotion_labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Neutral', 'Sad', 'Surprise']
-
- # Emotion detection function
- def detect_emotion(frame):
-     # Convert the frame to grayscale for the face detection
-     gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
-
-     # Detect faces in the grayscale frame
-     faces = face_classifier.detectMultiScale(gray)
-
-     # Process each face detected
-     for (x, y, w, h) in faces:
-         # Draw a rectangle around each detected face
-         cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 255), 2)
-
-         # Extract the region of interest (ROI) as the face area from the grayscale frame
-         roi_gray = gray[y:y+h, x:x+w]
-         roi_gray = cv2.resize(roi_gray, (48, 48), interpolation=cv2.INTER_AREA)
-
-         # Proceed if the ROI is not empty
-         if np.sum([roi_gray]) != 0:
-             roi = roi_gray.astype('float') / 255.0  # Normalize pixel values
-             roi = np.expand_dims(roi, axis=0)  # Add batch dimension
-
-             # Predict the emotion of the face using the pre-trained model
-             prediction = classifier.predict(roi)[0]
-             label = emotion_labels[prediction.argmax()]
-             label_position = (x, y)
-
-             # Display the predicted emotion label on the frame
-             cv2.putText(frame, label, label_position, cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
-
-     return frame
-
- # Define the Gradio interface
- def process_video(video):
-     # Convert the video frame to a format Gradio can handle
-     frame = cv2.cvtColor(video, cv2.COLOR_BGR2RGB)
-     result_frame = detect_emotion(frame)
-     return cv2.cvtColor(result_frame, cv2.COLOR_BGR2RGB)
-
- # Create Gradio Interface
- gr.Interface(fn=process_video, inputs=gr.Video(sources=["webcam"]), outputs="video", live=True).launch()
-
-
- # import numpy as np
- # from tensorflow.keras.preprocessing.image import img_to_array, load_img
- # import gradio as gr
- # import gradio as gr
- # import tensorflow as tf
- # import numpy as np
- # from PIL import Image
- # import cv2
- # from tensorflow.keras.preprocessing import image
-
- # model = tf.keras.models.load_model('Final_Resnet50_Best_model.keras')
-
- # # Emotion labels dictionary
- # emotion_labels = {'angry': 0, 'disgust': 1, 'fear': 2, 'happy': 3, 'neutral': 4, 'sad': 5, 'surprise': 6}
- # index_to_emotion = {v: k for k, v in emotion_labels.items()}
-
- # def prepare_image(img_pil):
- #     """Preprocess the PIL image to fit your model's input requirements."""
- #     # Convert the PIL image to a numpy array with the target size
- #     img = img_pil.resize((224, 224))
- #     img_array = img_to_array(img)
- #     img_array = np.expand_dims(img_array, axis=0)  # Convert single image to a batch.
- #     img_array /= 255.0  # Rescale pixel values to [0,1], as done during training
- #     return img_array
-
-
-
- # # Define the Gradio interface
- # def predict_emotion(image):
- #     # Preprocess the image
- #     processed_image = prepare_image(image)
- #     # Make prediction using the model
- #     prediction = model.predict(processed_image)
- #     # Get the emotion label with the highest probability
- #     predicted_class = np.argmax(prediction, axis=1)
- #     predicted_emotion = index_to_emotion.get(predicted_class[0], "Unknown Emotion")
- #     return predicted_emotion
-
- # interface = gr.Interface(
- #     fn=predict_emotion,  # Your prediction function
- #     inputs=gr.Image(type="pil"),  # Input for uploading an image, directly compatible with PIL images
- #     outputs="text",  # Output as text displaying the predicted emotion
- #     title="Emotion Detection",
- #     description="Upload an image and see the predicted emotion."
- # )
-
- # # Launch the Gradio interface
- # interface.launch()
 
+ # Import necessary libraries
+ import numpy as np
+ import tensorflow as tf
+ import gradio as gr
+ from tensorflow.keras.preprocessing.image import img_to_array
+
+ # Load the pre-trained emotion classification model
+ model = tf.keras.models.load_model('Final_Resnet50_Best_model.keras')
+
+ # Emotion labels dictionary and the inverse mapping from class index to name
+ emotion_labels = {'angry': 0, 'disgust': 1, 'fear': 2, 'happy': 3, 'neutral': 4, 'sad': 5, 'surprise': 6}
+ index_to_emotion = {v: k for k, v in emotion_labels.items()}
+
+ def prepare_image(img_pil):
+     """Preprocess the PIL image to fit the model's input requirements."""
+     # Ensure three RGB channels and resize to the model's expected input size
+     img = img_pil.convert('RGB').resize((224, 224))
+     img_array = img_to_array(img)
+     img_array = np.expand_dims(img_array, axis=0)  # Convert the single image to a batch
+     img_array /= 255.0  # Rescale pixel values to [0, 1], as done during training
+     return img_array
+
+ # Prediction function for the Gradio interface
+ def predict_emotion(image):
+     # Preprocess the image
+     processed_image = prepare_image(image)
+     # Make a prediction with the model
+     prediction = model.predict(processed_image)
+     # Get the emotion label with the highest probability
+     predicted_class = np.argmax(prediction, axis=1)
+     predicted_emotion = index_to_emotion.get(predicted_class[0], "Unknown Emotion")
+     return predicted_emotion
+
+ # Define the Gradio interface
+ interface = gr.Interface(
+     fn=predict_emotion,  # The prediction function
+     inputs=gr.Image(type="pil"),  # Image upload input, delivered as a PIL image
+     outputs="text",  # Text output showing the predicted emotion
+     title="Emotion Detection",
+     description="Upload an image and see the predicted emotion."
+ )
+
+ # Launch the Gradio interface
+ interface.launch()
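
For a quick sanity check of the new upload pipeline outside the Gradio UI, a sketch along the following lines should work once the functions above are in scope; the image path 'face.jpg' is a hypothetical example, not a file in this repo:

# Minimal smoke test of the upload pipeline ('face.jpg' is a hypothetical path)
from PIL import Image

img = Image.open('face.jpg')
print(predict_emotion(img))  # e.g. 'happy'

# Inspect the full probability distribution over the seven classes
probs = model.predict(prepare_image(img))[0]
for name, idx in emotion_labels.items():
    print(f"{name}: {probs[idx]:.3f}")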