ev6 committed on
Commit
279cc21
·
verified ·
1 Parent(s): b8890bb

Update app/apps/emotion_recognition/emotion_recognition.py

Browse files
app/apps/emotion_recognition/emotion_recognition.py CHANGED
@@ -1,203 +1,203 @@
1
-
2
- from tensorflow import keras
3
- from keras.models import Sequential
4
- from keras.models import load_model
5
- from keras.models import model_from_json
6
- from keras.utils import img_to_array
7
- import keras.utils as image
8
-
9
- import cv2
10
- import numpy as np
11
- import os
12
-
13
- from django_app.settings import BASE_DIR
14
-
15
-
16
- model = Sequential()
17
-
18
- model = model_from_json(open(
19
- os.path.join(BASE_DIR,'model/model_4layer_2_2_pool.json'), "r").read())
20
-
21
- model.load_weights(os.path.join(
22
- BASE_DIR,'model/model_4layer_2_2_pool.h5'))
23
-
24
- class_labels = {0: 'Angry', 1: 'Disgust', 2: 'Fear',
25
- 3: 'Happy', 4: 'Sad', 5: 'Surprise', 6: 'Neutral'}
26
- classes = list(class_labels.values())
27
-
28
- face_classifier = cv2.CascadeClassifier(os.path.join(
29
- BASE_DIR,'model/haarcascade_frontalface.xml'))
30
-
31
- camera = cv2.VideoCapture(0)
32
-
33
-
34
- def text_on_detected_boxes(text, text_x, text_y, image, font_scale=1,
35
- font=cv2.FONT_HERSHEY_SIMPLEX,
36
- FONT_COLOR=(0, 0, 0),
37
- FONT_THICKNESS=2,
38
- rectangle_bgr=(0, 255, 0)):
39
- (text_width, text_height) = cv2.getTextSize(
40
- text, font, fontScale=font_scale, thickness=2)[0]
41
- box_coords = ((text_x-10, text_y+4), (text_x +
42
- text_width+10, text_y - text_height-5))
43
- cv2.rectangle(image, box_coords[0],
44
- box_coords[1], rectangle_bgr, cv2.FILLED)
45
- cv2.putText(image, text, (text_x, text_y), font,
46
- fontScale=font_scale, color=FONT_COLOR, thickness=FONT_THICKNESS)
47
-
48
-
49
- def face_detector_image(img):
50
- """
51
- Обнаружение лиц на изображении.
52
-
53
- Args:
54
- img (numpy array): Исходное изображение.
55
-
56
- Returns:
57
- tuple: (rects, allfaces, img) - координаты лиц, обрезанные лица и изображение с рамками.
58
- """
59
- gray = cv2.cvtColor(img.copy(), cv2.COLOR_BGR2GRAY)
60
- faces = face_classifier.detectMultiScale(gray, 1.3, 5)
61
- if faces == ():
62
- return (0, 0, 0, 0), np.zeros((48, 48), np.uint8), img
63
- allfaces = []
64
- rects = []
65
- for (x, y, w, h) in faces:
66
- cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
67
- roi_gray = gray[y:y + h, x:x + w]
68
- roi_gray = cv2.resize(roi_gray, (48, 48), interpolation=cv2.INTER_AREA)
69
- allfaces.append(roi_gray)
70
- rects.append((x, w, y, h))
71
- return rects, allfaces, img
72
-
73
-
74
- def emotionImage(imgPath):
75
- img = cv2.imread(BASE_DIR + '\\media\\' + imgPath)
76
- rects, faces, image = face_detector_image(img)
77
- i = 0
78
- for face in faces:
79
- roi = face.astype("float") / 255.0
80
- roi = img_to_array(roi)
81
- roi = np.expand_dims(roi, axis=0)
82
- preds = model.predict(roi)[0]
83
- label = class_labels[preds.argmax()]
84
- label_position = (
85
- rects[i][0] + int((rects[i][1] / 2)), abs(rects[i][2] - 10))
86
- i += 1
87
-
88
- # Отрисовка текста и рамок
89
- text_on_detected_boxes(
90
- label, label_position[0], label_position[1], image)
91
-
92
- precentages = dict(zip(classes, preds*100))
93
-
94
- return image, precentages, label
95
-
96
-
97
- def emotionImageFromArray(img_array):
98
- """
99
- Обрабатывает изображение и возвращает результат обработки.
100
-
101
- Args:
102
- img_array (numpy array): Исходное изображение (numpy array).
103
-
104
- Returns:
105
- tuple: (image, precentages, label)
106
- - image: Изображение с рамками и текстом эмоций.
107
- - precentages: Вероятности каждой эмоции.
108
- - label: Определенная эмоция.
109
- """
110
- rects, faces, image = face_detector_image(img_array)
111
- i = 0
112
- for face in faces:
113
- roi = face.astype("float") / 255.0
114
- roi = img_to_array(roi)
115
- roi = np.expand_dims(roi, axis=0)
116
- preds = model.predict(roi)[0]
117
- label = class_labels[preds.argmax()]
118
- label_position = (
119
- rects[i][0] + int((rects[i][1] / 2)), abs(rects[i][2] - 10))
120
- i += 1
121
-
122
- # Отрисовка текста и рамок
123
- text_on_detected_boxes(
124
- label, label_position[0], label_position[1], image)
125
-
126
- precentages = dict(zip(classes, preds*100))
127
-
128
- return image, precentages, label
129
-
130
- def face_detector_video(img):
131
- gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
132
- faces = face_classifier.detectMultiScale(gray, 1.3, 5)
133
- if faces is ():
134
- return (0, 0, 0, 0), np.zeros((48, 48), np.uint8), img
135
- for (x, y, w, h) in faces:
136
- cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), thickness=2)
137
- roi_gray = gray[y:y + h, x:x + w]
138
- roi_gray = cv2.resize(roi_gray, (48, 48), interpolation=cv2.INTER_AREA)
139
- return (x, w, y, h), roi_gray, img
140
-
141
-
142
- def emotionVideo():
143
- while True:
144
- ret, frame = camera.read()
145
- rect, face, image = face_detector_video(frame)
146
- if np.sum([face]) != 0.0:
147
- roi = face.astype("float") / 255.0
148
- roi = img_to_array(roi)
149
- roi = np.expand_dims(roi, axis=0)
150
- preds = model.predict(roi)[0]
151
- label = class_labels[preds.argmax()]
152
- label_position = (rect[0] + rect[1]//50, rect[2] + rect[3]//50)
153
- text_on_detected_boxes(label, label_position[0], label_position[1], image)
154
- fps = camera.get(cv2.CAP_PROP_FPS)
155
- cv2.putText(image, str(fps),(5, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
156
- else:
157
- cv2.putText(image, "No Face Found", (5, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
158
-
159
- ret, buffer = cv2.imencode('.jpg', image)
160
-
161
- frame = buffer.tobytes()
162
- yield (b'--frame\r\n'
163
- b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
164
-
165
-
166
-
167
- def gen_frames():
168
- while True:
169
- success, frame = camera.read()
170
- if not success:
171
- cv2.putText(image, "No Face Found", (5, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
172
- break
173
- else:
174
- gray_img= cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
175
-
176
- faces_detected = face_classifier.detectMultiScale(gray_img, 1.32, 5)
177
-
178
-
179
- for (x,y,w,h) in faces_detected:
180
-
181
- cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),thickness=7)
182
- roi_gray=gray_img[y:y+w,x:x+h]
183
- roi_gray=cv2.resize(roi_gray,(48,48))
184
- img_pixels = image.img_to_array(roi_gray)
185
- img_pixels = np.expand_dims(img_pixels, axis = 0)
186
- img_pixels /= 255
187
-
188
- predictions = model.predict(img_pixels)
189
-
190
- max_index = np.argmax(predictions[0])
191
-
192
- emotions = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
193
- predicted_emotion = emotions[max_index]
194
-
195
- cv2.putText(frame, predicted_emotion, (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2)
196
-
197
- resized_img = cv2.resize(frame, (600, 400))
198
-
199
- ret, buffer = cv2.imencode('.jpg', frame)
200
-
201
- frame = buffer.tobytes()
202
- yield (b'--frame\r\n'
203
- b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
 
1
+
2
+ from tensorflow import keras
3
+ from keras.models import Sequential
4
+ from keras.models import load_model
5
+ from keras.models import model_from_json
6
+ from keras.utils import img_to_array
7
+ import keras.utils as image
8
+
9
+ import cv2
10
+ import numpy as np
11
+ import os
12
+
13
+ from django_app.settings import BASE_DIR
14
+
15
+
16
+ model = Sequential()
17
+
18
+ model = model_from_json(open(
19
+ os.path.join(BASE_DIR,'model/model_4layer_2_2_pool.json'), "r").read())
20
+
21
+ model.load_weights(os.path.join(
22
+ BASE_DIR,'model/model_4layer_2_2_pool.h5'))
23
+
24
+ class_labels = {0: 'Angry', 1: 'Disgust', 2: 'Fear',
25
+ 3: 'Happy', 4: 'Sad', 5: 'Surprise', 6: 'Neutral'}
26
+ classes = list(class_labels.values())
27
+
28
+ face_classifier = cv2.CascadeClassifier(os.path.join(
29
+ BASE_DIR,'model/haarcascade_frontalface.xml'))
30
+
31
+ camera = cv2.VideoCapture(0)
32
+
33
+
34
+ def text_on_detected_boxes(text, text_x, text_y, image, font_scale=1,
35
+ font=cv2.FONT_HERSHEY_SIMPLEX,
36
+ FONT_COLOR=(0, 0, 0),
37
+ FONT_THICKNESS=2,
38
+ rectangle_bgr=(0, 255, 0)):
39
+ (text_width, text_height) = cv2.getTextSize(
40
+ text, font, fontScale=font_scale, thickness=2)[0]
41
+ box_coords = ((text_x-10, text_y+4), (text_x +
42
+ text_width+10, text_y - text_height-5))
43
+ cv2.rectangle(image, box_coords[0],
44
+ box_coords[1], rectangle_bgr, cv2.FILLED)
45
+ cv2.putText(image, text, (text_x, text_y), font,
46
+ fontScale=font_scale, color=FONT_COLOR, thickness=FONT_THICKNESS)
47
+
48
+
49
+ def face_detector_image(img):
50
+ """
51
+ Обнаружение лиц на изображении.
52
+
53
+ Args:
54
+ img (numpy array): Исходное изображение.
55
+
56
+ Returns:
57
+ tuple: (rects, allfaces, img) - координаты лиц, обрезанные лица и изображение с рамками.
58
+ """
59
+ gray = cv2.cvtColor(img.copy(), cv2.COLOR_BGR2GRAY)
60
+ faces = face_classifier.detectMultiScale(gray, 1.3, 5)
61
+ if faces == ():
62
+ return (0, 0, 0, 0), np.zeros((48, 48), np.uint8), img
63
+ allfaces = []
64
+ rects = []
65
+ for (x, y, w, h) in faces:
66
+ cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
67
+ roi_gray = gray[y:y + h, x:x + w]
68
+ roi_gray = cv2.resize(roi_gray, (48, 48), interpolation=cv2.INTER_AREA)
69
+ allfaces.append(roi_gray)
70
+ rects.append((x, w, y, h))
71
+ return rects, allfaces, img
72
+
73
+
74
+ def emotionImage(imgPath):
75
+ img = cv2.imread(BASE_DIR + '\\media\\' + imgPath)
76
+ rects, faces, image = face_detector_image(img)
77
+ i = 0
78
+ for face in faces:
79
+ roi = face.astype("float") / 255.0
80
+ roi = img_to_array(roi)
81
+ roi = np.expand_dims(roi, axis=0)
82
+ preds = model.predict(roi)[0]
83
+ label = class_labels[preds.argmax()]
84
+ label_position = (
85
+ rects[i][0] + int((rects[i][1] / 2)), abs(rects[i][2] - 10))
86
+ i += 1
87
+
88
+ # Отрисовка текста и рамок
89
+ text_on_detected_boxes(
90
+ label, label_position[0], label_position[1], image)
91
+
92
+ precentages = dict(zip(classes, preds*100))
93
+
94
+ return image, precentages, label
95
+
96
+
97
+ def emotionImageFromArray(img_array):
98
+ """
99
+ Обрабатывает изображение и возвращает результат обработки.
100
+
101
+ Args:
102
+ img_array (numpy array): Исходное изображение (numpy array).
103
+
104
+ Returns:
105
+ tuple: (image, precentages, label)
106
+ - image: Изображение с рамками и текстом эмоций.
107
+ - precentages: Вероятности каждой эмоции.
108
+ - label: Определенная эмоция.
109
+ """
110
+ rects, faces, image = face_detector_image(img_array)
111
+ i = 0
112
+ for face in faces:
113
+ roi = face.astype("float") / 255.0
114
+ roi = img_to_array(roi)
115
+ roi = np.expand_dims(roi, axis=0)
116
+ preds = model.predict(roi)[0]
117
+ label = class_labels[preds.argmax()]
118
+ label_position = (
119
+ rects[i][0] + int((rects[i][1] / 2)), abs(rects[i][2] - 10))
120
+ i += 1
121
+
122
+ # Отрисовка текста и рамок
123
+ text_on_detected_boxes(
124
+ label, label_position[0], label_position[1], image)
125
+
126
+ precentages = dict(zip(classes, preds*100))
127
+
128
+ return image, precentages, label
129
+
130
+ def face_detector_video(img):
131
+ gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
132
+ faces = face_classifier.detectMultiScale(gray, 1.3, 5)
133
+ if faces is ():
134
+ return (0, 0, 0, 0), np.zeros((48, 48), np.uint8), img
135
+ for (x, y, w, h) in faces:
136
+ cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), thickness=2)
137
+ roi_gray = gray[y:y + h, x:x + w]
138
+ roi_gray = cv2.resize(roi_gray, (48, 48), interpolation=cv2.INTER_AREA)
139
+ return (x, w, y, h), roi_gray, img
140
+
141
+
142
+ # def emotionVideo():
143
+ # while True:
144
+ # ret, frame = camera.read()
145
+ # rect, face, image = face_detector_video(frame)
146
+ # if np.sum([face]) != 0.0:
147
+ # roi = face.astype("float") / 255.0
148
+ # roi = img_to_array(roi)
149
+ # roi = np.expand_dims(roi, axis=0)
150
+ # preds = model.predict(roi)[0]
151
+ # label = class_labels[preds.argmax()]
152
+ # label_position = (rect[0] + rect[1]//50, rect[2] + rect[3]//50)
153
+ # text_on_detected_boxes(label, label_position[0], label_position[1], image)
154
+ # fps = camera.get(cv2.CAP_PROP_FPS)
155
+ # cv2.putText(image, str(fps),(5, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
156
+ # else:
157
+ # cv2.putText(image, "No Face Found", (5, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
158
+
159
+ # ret, buffer = cv2.imencode('.jpg', image)
160
+
161
+ # frame = buffer.tobytes()
162
+ # yield (b'--frame\r\n'
163
+ # b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
164
+
165
+
166
+
167
+ # def gen_frames():
168
+ # while True:
169
+ # success, frame = camera.read()
170
+ # if not success:
171
+ # cv2.putText(image, "No Face Found", (5, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
172
+ # break
173
+ # else:
174
+ # gray_img= cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
175
+
176
+ # faces_detected = face_classifier.detectMultiScale(gray_img, 1.32, 5)
177
+
178
+
179
+ # for (x,y,w,h) in faces_detected:
180
+
181
+ # cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),thickness=7)
182
+ # roi_gray=gray_img[y:y+w,x:x+h]
183
+ # roi_gray=cv2.resize(roi_gray,(48,48))
184
+ # img_pixels = image.img_to_array(roi_gray)
185
+ # img_pixels = np.expand_dims(img_pixels, axis = 0)
186
+ # img_pixels /= 255
187
+
188
+ # predictions = model.predict(img_pixels)
189
+
190
+ # max_index = np.argmax(predictions[0])
191
+
192
+ # emotions = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
193
+ # predicted_emotion = emotions[max_index]
194
+
195
+ # cv2.putText(frame, predicted_emotion, (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2)
196
+
197
+ # resized_img = cv2.resize(frame, (600, 400))
198
+
199
+ # ret, buffer = cv2.imencode('.jpg', frame)
200
+
201
+ # frame = buffer.tobytes()
202
+ # yield (b'--frame\r\n'
203
+ # b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')