Spaces:
Sleeping
Sleeping
Upload 113 files
Browse files. This view is limited to 50 files because it contains too many changes.
See raw diff
- app/.gitignore +18 -0
- app/Dockerfile +24 -0
- app/apps/emotion_recognition/__init__.py +0 -0
- app/apps/emotion_recognition/__pycache__/__init__.cpython-310.pyc +0 -0
- app/apps/emotion_recognition/__pycache__/__init__.cpython-39.pyc +0 -0
- app/apps/emotion_recognition/__pycache__/admin.cpython-310.pyc +0 -0
- app/apps/emotion_recognition/__pycache__/admin.cpython-39.pyc +0 -0
- app/apps/emotion_recognition/__pycache__/apps.cpython-310.pyc +0 -0
- app/apps/emotion_recognition/__pycache__/apps.cpython-39.pyc +0 -0
- app/apps/emotion_recognition/__pycache__/emotion_recognition.cpython-310.pyc +0 -0
- app/apps/emotion_recognition/__pycache__/emotion_recognition.cpython-39.pyc +0 -0
- app/apps/emotion_recognition/__pycache__/forms.cpython-310.pyc +0 -0
- app/apps/emotion_recognition/__pycache__/forms.cpython-39.pyc +0 -0
- app/apps/emotion_recognition/__pycache__/models.cpython-310.pyc +0 -0
- app/apps/emotion_recognition/__pycache__/models.cpython-39.pyc +0 -0
- app/apps/emotion_recognition/__pycache__/serializers.cpython-310.pyc +0 -0
- app/apps/emotion_recognition/__pycache__/tasks.cpython-310.pyc +0 -0
- app/apps/emotion_recognition/__pycache__/tasks.cpython-39.pyc +0 -0
- app/apps/emotion_recognition/__pycache__/urls.cpython-310.pyc +0 -0
- app/apps/emotion_recognition/__pycache__/urls.cpython-39.pyc +0 -0
- app/apps/emotion_recognition/__pycache__/views.cpython-310.pyc +0 -0
- app/apps/emotion_recognition/__pycache__/views.cpython-39.pyc +0 -0
- app/apps/emotion_recognition/admin.py +6 -0
- app/apps/emotion_recognition/apps.py +6 -0
- app/apps/emotion_recognition/emotion_recognition.py +203 -0
- app/apps/emotion_recognition/forms.py +9 -0
- app/apps/emotion_recognition/migrations/0001_initial.py +29 -0
- app/apps/emotion_recognition/migrations/__init__.py +0 -0
- app/apps/emotion_recognition/migrations/__pycache__/0001_initial.cpython-310.pyc +0 -0
- app/apps/emotion_recognition/migrations/__pycache__/__init__.cpython-310.pyc +0 -0
- app/apps/emotion_recognition/models.py +34 -0
- app/apps/emotion_recognition/serializers.py +4 -0
- app/apps/emotion_recognition/tasks.py +62 -0
- app/apps/emotion_recognition/templates/recognition/index.html +74 -0
- app/apps/emotion_recognition/templates/recognition/real_time.html +33 -0
- app/apps/emotion_recognition/templates/recognition/recognition_delete.html +28 -0
- app/apps/emotion_recognition/templates/recognition/recognition_edit.html +32 -0
- app/apps/emotion_recognition/tests.py +3 -0
- app/apps/emotion_recognition/urls.py +13 -0
- app/apps/emotion_recognition/views.py +119 -0
- app/db.sqlite3 +0 -0
- app/django_app/__init__.py +0 -0
- app/django_app/__pycache__/__init__.cpython-310.pyc +0 -0
- app/django_app/__pycache__/__init__.cpython-39.pyc +0 -0
- app/django_app/__pycache__/settings.cpython-310.pyc +0 -0
- app/django_app/__pycache__/settings.cpython-39.pyc +0 -0
- app/django_app/__pycache__/urls.cpython-310.pyc +0 -0
- app/django_app/__pycache__/urls.cpython-39.pyc +0 -0
- app/django_app/__pycache__/wsgi.cpython-310.pyc +0 -0
- app/django_app/__pycache__/wsgi.cpython-39.pyc +0 -0
app/.gitignore
ADDED
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Ignore the media folder (user-uploaded and generated images)
media/

# Ignore the SQLite database file
db.sqlite3

# Other standard ignores for Django projects
*.pyc
__pycache__/
*.sqlite3
*.log
.env
.venv/
venv/
env/
.DS_Store
.idea/
.vscode/
|
app/Dockerfile
ADDED
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# syntax=docker/dockerfile:1.4

# Base stage: install dependencies and run the Django dev server on :8000.
FROM --platform=$BUILDPLATFORM python:3.10-alpine AS builder
EXPOSE 8000
WORKDIR /app
COPY requirements.txt /app
RUN pip3 install -r requirements.txt --no-cache-dir
COPY . /app
# The container runs `python3 manage.py runserver 0.0.0.0:8000` by default.
ENTRYPOINT ["python3"]
CMD ["manage.py", "runserver", "0.0.0.0:8000"]

# Dev-container stage: adds git, a non-root user, and the Docker CLI tools.
FROM builder as dev-envs
RUN <<EOF
apk update
apk add git
EOF

RUN <<EOF
addgroup -S docker
adduser -S --shell /bin/bash --ingroup docker vscode
EOF
# install Docker tools (cli, buildx, compose)
COPY --from=gloursdocker/docker / /
# CMD restated because COPY --from resets nothing, but keeps intent explicit.
CMD ["manage.py", "runserver", "0.0.0.0:8000"]
|
app/apps/emotion_recognition/__init__.py
ADDED
File without changes
|
app/apps/emotion_recognition/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (183 Bytes). View file
|
|
app/apps/emotion_recognition/__pycache__/__init__.cpython-39.pyc
ADDED
Binary file (161 Bytes). View file
|
|
app/apps/emotion_recognition/__pycache__/admin.cpython-310.pyc
ADDED
Binary file (299 Bytes). View file
|
|
app/apps/emotion_recognition/__pycache__/admin.cpython-39.pyc
ADDED
Binary file (335 Bytes). View file
|
|
app/apps/emotion_recognition/__pycache__/apps.cpython-310.pyc
ADDED
Binary file (492 Bytes). View file
|
|
app/apps/emotion_recognition/__pycache__/apps.cpython-39.pyc
ADDED
Binary file (468 Bytes). View file
|
|
app/apps/emotion_recognition/__pycache__/emotion_recognition.cpython-310.pyc
ADDED
Binary file (6.22 kB). View file
|
|
app/apps/emotion_recognition/__pycache__/emotion_recognition.cpython-39.pyc
ADDED
Binary file (4.96 kB). View file
|
|
app/apps/emotion_recognition/__pycache__/forms.cpython-310.pyc
ADDED
Binary file (645 Bytes). View file
|
|
app/apps/emotion_recognition/__pycache__/forms.cpython-39.pyc
ADDED
Binary file (619 Bytes). View file
|
|
app/apps/emotion_recognition/__pycache__/models.cpython-310.pyc
ADDED
Binary file (1.57 kB). View file
|
|
app/apps/emotion_recognition/__pycache__/models.cpython-39.pyc
ADDED
Binary file (2.35 kB). View file
|
|
app/apps/emotion_recognition/__pycache__/serializers.cpython-310.pyc
ADDED
Binary file (454 Bytes). View file
|
|
app/apps/emotion_recognition/__pycache__/tasks.cpython-310.pyc
ADDED
Binary file (2.38 kB). View file
|
|
app/apps/emotion_recognition/__pycache__/tasks.cpython-39.pyc
ADDED
Binary file (1.1 kB). View file
|
|
app/apps/emotion_recognition/__pycache__/urls.cpython-310.pyc
ADDED
Binary file (759 Bytes). View file
|
|
app/apps/emotion_recognition/__pycache__/urls.cpython-39.pyc
ADDED
Binary file (876 Bytes). View file
|
|
app/apps/emotion_recognition/__pycache__/views.cpython-310.pyc
ADDED
Binary file (4.8 kB). View file
|
|
app/apps/emotion_recognition/__pycache__/views.cpython-39.pyc
ADDED
Binary file (5.23 kB). View file
|
|
app/apps/emotion_recognition/admin.py
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from django.contrib import admin
|
2 |
+
from .models import UserImageRecognition
|
3 |
+
# Register your models here.
|
4 |
+
|
5 |
+
admin.site.register(UserImageRecognition)
|
6 |
+
|
app/apps/emotion_recognition/apps.py
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from django.apps import AppConfig
|
2 |
+
|
3 |
+
|
4 |
+
class EmotionRecognitionConfig(AppConfig):
|
5 |
+
default_auto_field = 'django.db.models.BigAutoField'
|
6 |
+
name = 'apps.emotion_recognition'
|
app/apps/emotion_recognition/emotion_recognition.py
ADDED
@@ -0,0 +1,203 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
|
2 |
+
from tensorflow import keras
|
3 |
+
from keras.models import Sequential
|
4 |
+
from keras.models import load_model
|
5 |
+
from keras.models import model_from_json
|
6 |
+
from keras.utils import img_to_array
|
7 |
+
import keras.utils as image
|
8 |
+
|
9 |
+
import cv2
|
10 |
+
import numpy as np
|
11 |
+
import os
|
12 |
+
|
13 |
+
from django_app.settings import BASE_DIR
|
14 |
+
|
15 |
+
|
16 |
+
# Rebuild the CNN architecture from its JSON description, then attach the
# trained weights. (The previous dead `model = Sequential()` assignment was
# immediately overwritten and has been removed; the JSON file is now opened
# with a context manager so the handle is not leaked.)
with open(os.path.join(BASE_DIR, 'model/model_4layer_2_2_pool.json'), "r") as model_json:
    model = model_from_json(model_json.read())

model.load_weights(os.path.join(
    BASE_DIR, 'model/model_4layer_2_2_pool.h5'))

# Output-index -> emotion-name mapping used to decode model predictions.
class_labels = {0: 'Angry', 1: 'Disgust', 2: 'Fear',
                3: 'Happy', 4: 'Sad', 5: 'Surprise', 6: 'Neutral'}
classes = list(class_labels.values())

# Haar cascade used for face detection.
face_classifier = cv2.CascadeClassifier(os.path.join(
    BASE_DIR, 'model/haarcascade_frontalface.xml'))

# NOTE(review): opening the webcam at import time is a heavy side effect —
# it runs in every process that imports this module, even when the live
# stream is never used. Consider lazy initialization.
camera = cv2.VideoCapture(0)
|
32 |
+
|
33 |
+
|
34 |
+
def text_on_detected_boxes(text, text_x, text_y, image, font_scale=1,
                           font=cv2.FONT_HERSHEY_SIMPLEX,
                           FONT_COLOR=(0, 0, 0),
                           FONT_THICKNESS=2,
                           rectangle_bgr=(0, 255, 0)):
    """Draw *text* at (text_x, text_y) on *image* inside a filled background box.

    Mutates *image* in place: first paints a filled rectangle sized to the
    rendered text, then draws the text on top of it.
    """
    text_width, text_height = cv2.getTextSize(
        text, font, fontScale=font_scale, thickness=2)[0]
    # Box padded around the text baseline point.
    top_left = (text_x - 10, text_y + 4)
    bottom_right = (text_x + text_width + 10, text_y - text_height - 5)
    cv2.rectangle(image, top_left, bottom_right, rectangle_bgr, cv2.FILLED)
    cv2.putText(image, text, (text_x, text_y), font,
                fontScale=font_scale, color=FONT_COLOR, thickness=FONT_THICKNESS)
|
47 |
+
|
48 |
+
|
49 |
+
def face_detector_image(img):
    """Detect faces on a still image.

    Args:
        img (numpy array): source BGR image.

    Returns:
        tuple: (rects, allfaces, img) — per-face (x, w, y, h) tuples, the
        48x48 grayscale face crops, and the image with boxes drawn on it.
        When no face is found, returns ([], [], img) so callers can iterate
        the (empty) lists uniformly.
    """
    gray = cv2.cvtColor(img.copy(), cv2.COLOR_BGR2GRAY)
    faces = face_classifier.detectMultiScale(gray, 1.3, 5)
    # BUG FIX: `faces == ()` only worked when detectMultiScale returned an
    # empty tuple; with an ndarray result the comparison is unreliable.
    # `len(faces) == 0` is correct for both. The old sentinel return
    # ((0,0,0,0), zeros, img) also had different types than the normal path;
    # empty lists keep the contract consistent.
    if len(faces) == 0:
        return [], [], img
    allfaces = []
    rects = []
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
        roi_gray = gray[y:y + h, x:x + w]
        roi_gray = cv2.resize(roi_gray, (48, 48), interpolation=cv2.INTER_AREA)
        allfaces.append(roi_gray)
        rects.append((x, w, y, h))
    return rects, allfaces, img
|
72 |
+
|
73 |
+
|
74 |
+
def emotionImage(imgPath):
    """Load an uploaded image from media/ and classify each detected face.

    Args:
        imgPath (str): file name relative to the project's media directory.

    Returns:
        tuple: (image, precentages, label)
            - image: annotated image (boxes + emotion text drawn on faces).
            - precentages: dict of class name -> probability*100 for the
              last processed face ({} if no face was found).
            - label: predicted emotion of the last face (None if no face).
    """
    # BUG FIX: the path was built with '\\media\\' string concatenation,
    # which only worked on Windows; os.path.join is portable.
    img = cv2.imread(os.path.join(BASE_DIR, 'media', imgPath))
    rects, faces, image = face_detector_image(img)

    # Initialize so the return is well-defined when no face is detected
    # (previously `label`/`preds` were unbound -> NameError).
    label = None
    precentages = {}
    for i, face in enumerate(faces):
        # Normalize the 48x48 crop to [0, 1] and add a batch dimension.
        roi = face.astype("float") / 255.0
        roi = img_to_array(roi)
        roi = np.expand_dims(roi, axis=0)
        preds = model.predict(roi)[0]
        label = class_labels[preds.argmax()]
        label_position = (
            rects[i][0] + int(rects[i][1] / 2), abs(rects[i][2] - 10))

        # Draw the label box on the annotated image.
        text_on_detected_boxes(
            label, label_position[0], label_position[1], image)

        precentages = dict(zip(classes, preds * 100))

    return image, precentages, label
|
95 |
+
|
96 |
+
|
97 |
+
def emotionImageFromArray(img_array):
    """Classify the emotion of each face in an in-memory image.

    Args:
        img_array (numpy array): source BGR image.

    Returns:
        tuple: (image, precentages, label)
            - image: image with boxes and emotion text drawn on it.
            - precentages: dict of class name -> probability*100 for the
              last processed face ({} if no face was found).
            - label: predicted emotion of the last face (None if no face).
    """
    rects, faces, image = face_detector_image(img_array)

    # Initialize so the return is well-defined when no face is detected
    # (previously `label`/`preds` were unbound -> NameError).
    label = None
    precentages = {}
    for i, face in enumerate(faces):
        # Normalize the 48x48 crop to [0, 1] and add a batch dimension.
        roi = face.astype("float") / 255.0
        roi = img_to_array(roi)
        roi = np.expand_dims(roi, axis=0)
        preds = model.predict(roi)[0]
        label = class_labels[preds.argmax()]
        label_position = (
            rects[i][0] + int(rects[i][1] / 2), abs(rects[i][2] - 10))

        # Draw the label box on the annotated image.
        text_on_detected_boxes(
            label, label_position[0], label_position[1], image)

        precentages = dict(zip(classes, preds * 100))

    return image, precentages, label
|
129 |
+
|
130 |
+
def face_detector_video(img):
    """Detect faces in a video frame.

    Args:
        img (numpy array): BGR frame.

    Returns:
        tuple: ((x, w, y, h), roi_gray, img) for the LAST detected face
        (earlier faces only get a rectangle drawn), or
        ((0, 0, 0, 0), zeros(48, 48), img) when no face is found.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_classifier.detectMultiScale(gray, 1.3, 5)
    # BUG FIX: `faces is ()` compared identity against a tuple literal
    # (a SyntaxWarning on Python 3.8+) and is wrong for ndarray results;
    # test for emptiness explicitly instead.
    if len(faces) == 0:
        return (0, 0, 0, 0), np.zeros((48, 48), np.uint8), img
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), thickness=2)
        roi_gray = gray[y:y + h, x:x + w]
        roi_gray = cv2.resize(roi_gray, (48, 48), interpolation=cv2.INTER_AREA)
    # NOTE: only the last face's rect/crop is returned, as in the original.
    return (x, w, y, h), roi_gray, img
|
140 |
+
|
141 |
+
|
142 |
+
def emotionVideo():
    """Generator yielding MJPEG frames with the detected emotion drawn in.

    Reads frames from the module-level `camera`, runs face detection plus
    emotion classification on each, and yields multipart/x-mixed-replace
    JPEG chunks suitable for a StreamingHttpResponse. Runs forever; the
    consumer is expected to stop iterating to end the stream.
    """
    while True:
        # NOTE(review): `ret` is not checked; a failed read would pass
        # None into face_detector_video — TODO confirm camera reliability.
        ret, frame = camera.read()
        rect, face, image = face_detector_video(frame)
        # A no-face result is an all-zero 48x48 array, so a zero sum
        # distinguishes "no face" from a real crop.
        if np.sum([face]) != 0.0:
            roi = face.astype("float") / 255.0
            roi = img_to_array(roi)
            roi = np.expand_dims(roi, axis=0)
            preds = model.predict(roi)[0]
            label = class_labels[preds.argmax()]
            label_position = (rect[0] + rect[1]//50, rect[2] + rect[3]//50)
            text_on_detected_boxes(label, label_position[0], label_position[1], image)
            # Overlay the camera's reported FPS in the top-left corner.
            fps = camera.get(cv2.CAP_PROP_FPS)
            cv2.putText(image, str(fps),(5, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        else:
            cv2.putText(image, "No Face Found", (5, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)

        # Encode the annotated frame as JPEG and emit one multipart chunk.
        ret, buffer = cv2.imencode('.jpg', image)

        frame = buffer.tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
|
164 |
+
|
165 |
+
|
166 |
+
|
167 |
+
def gen_frames():
    """Generator yielding MJPEG frames with per-face emotion labels.

    Reads frames from the module-level `camera`, detects faces, classifies
    each one, annotates the frame, and yields multipart/x-mixed-replace
    JPEG chunks. Stops when the camera read fails.
    """
    while True:
        success, frame = camera.read()
        if not success:
            # BUG FIX: the old failure branch called cv2.putText on an
            # undefined name `image` (NameError) before breaking; just stop.
            break
        gray_img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        faces_detected = face_classifier.detectMultiScale(gray_img, 1.32, 5)

        for (x, y, w, h) in faces_detected:
            cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), thickness=7)
            # BUG FIX: numpy slicing is [rows, cols] == [y:y+h, x:x+w];
            # the original swapped w and h, cropping the wrong region for
            # any non-square face box.
            roi_gray = gray_img[y:y+h, x:x+w]
            roi_gray = cv2.resize(roi_gray, (48, 48))
            # `image` here is the keras.utils module alias imported above.
            img_pixels = image.img_to_array(roi_gray)
            img_pixels = np.expand_dims(img_pixels, axis=0)
            img_pixels /= 255

            predictions = model.predict(img_pixels)

            max_index = np.argmax(predictions[0])

            emotions = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
            predicted_emotion = emotions[max_index]

            cv2.putText(frame, predicted_emotion, (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2)

        # (Removed dead `resized_img` — it was computed and never used.)
        ret, buffer = cv2.imencode('.jpg', frame)

        frame = buffer.tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
|
app/apps/emotion_recognition/forms.py
ADDED
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from django import forms
|
2 |
+
|
3 |
+
from .models import UserImageRecognition
|
4 |
+
|
5 |
+
|
6 |
+
class RecognitionEditForm(forms.ModelForm):
    """Model form exposing every UserImageRecognition field for editing."""

    class Meta:
        model = UserImageRecognition
        fields = '__all__'
|
app/apps/emotion_recognition/migrations/0001_initial.py
ADDED
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Generated by Django 4.1.3 on 2025-01-12 20:01
|
2 |
+
|
3 |
+
from django.db import migrations, models
|
4 |
+
|
5 |
+
|
6 |
+
class Migration(migrations.Migration):
    """Initial schema: creates the UserImageRecognition table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='UserImageRecognition',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('uploaded_image', models.ImageField(upload_to='')),
                ('final_image', models.ImageField(blank=True, null=True, upload_to='')),
                ('recognized_emotion', models.CharField(blank=True, max_length=20, null=True)),
                ('predicted_emotions', models.CharField(blank=True, max_length=155, null=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('status', models.CharField(choices=[('PEN', 'Pending'), ('COM', 'Complete'), ('ERR', 'Error')], default='PEN', max_length=3)),
            ],
            options={
                'ordering': ['-created_at'],
            },
        ),
    ]
|
app/apps/emotion_recognition/migrations/__init__.py
ADDED
File without changes
|
app/apps/emotion_recognition/migrations/__pycache__/0001_initial.cpython-310.pyc
ADDED
Binary file (1.1 kB). View file
|
|
app/apps/emotion_recognition/migrations/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (183 Bytes). View file
|
|
app/apps/emotion_recognition/models.py
ADDED
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from django.db import models
|
2 |
+
from PIL import Image
|
3 |
+
|
4 |
+
|
5 |
+
# Create your models here.
|
6 |
+
class UserImageRecognition(models.Model):
    """One uploaded image together with its emotion-recognition result.

    `uploaded_image` is the user's original file (downscaled on save),
    `final_image` the annotated output, and `status` tracks the pipeline
    state (pending / complete / error).
    """
    uploaded_image = models.ImageField(null=False, blank=False)
    final_image = models.ImageField(null=True, blank=True)
    recognized_emotion = models.CharField(max_length=20, null=True, blank=True)
    predicted_emotions = models.CharField(max_length=155, null=True, blank=True)
    created_at = models.DateTimeField(
        auto_now_add=True, null=False, blank=False)

    STATUS_CHOICES = (
        ('PEN', 'Pending'),
        ('COM', 'Complete'),
        ('ERR', 'Error'),
    )
    status = models.CharField(
        max_length=3, choices=STATUS_CHOICES, null=False, blank=False, default='PEN')

    class Meta:
        ordering = ['-created_at']

    def save(self, *args, **kwargs):
        """Persist the row, then downscale the uploaded image in place.

        super().save() runs first so the file exists on disk; images larger
        than 400px in either dimension are shrunk (aspect preserved).
        """
        super().save(*args, **kwargs)
        uploaded_img = Image.open(self.uploaded_image.path)
        if uploaded_img.height > 400 or uploaded_img.width > 400:
            output_size = (400, 400)
            uploaded_img.thumbnail(output_size)
            uploaded_img.save(self.uploaded_image.path)

    def __str__(self):
        # BUG FIX: the model declares no `user` field, so the original
        # `self.user` raised AttributeError whenever the object was rendered
        # (e.g. in the Django admin). Use the primary key instead.
        return f'{self.pk} - {self.uploaded_image}'
|
app/apps/emotion_recognition/serializers.py
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from rest_framework import serializers
|
2 |
+
|
3 |
+
class ImageSerializer(serializers.Serializer):
    """DRF serializer validating a single required image upload."""
    # the picture to run emotion recognition on
    image = serializers.ImageField(required=True)
|
app/apps/emotion_recognition/tasks.py
ADDED
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from django.shortcuts import get_object_or_404
|
2 |
+
from django.core.files.base import ContentFile
|
3 |
+
|
4 |
+
from .models import UserImageRecognition
|
5 |
+
from .emotion_recognition import emotionImage, emotionImageFromArray
|
6 |
+
|
7 |
+
from io import BytesIO
|
8 |
+
import cv2
|
9 |
+
import numpy as np
|
10 |
+
from PIL import Image
|
11 |
+
|
12 |
+
|
13 |
+
def proccess_uploaded_image(image_data_id):
    """Run emotion recognition for a stored UserImageRecognition row.

    Loads the row (404 if missing), classifies the uploaded image, attaches
    the annotated image and prediction data, and marks the row complete.

    Args:
        image_data_id: primary key of the UserImageRecognition to process.

    Note: the function name keeps its original (misspelled) form because
    callers elsewhere import it by this name.
    """
    # (Removed dead `image_data = None` — it was immediately reassigned.)
    image_data = get_object_or_404(UserImageRecognition, pk=image_data_id)

    final_image, predicted_emotions, recognized_emotion = emotionImage(
        image_data.uploaded_image.name)
    final_image = converter_to_django_file(final_image)

    image_data.final_image = final_image
    image_data.predicted_emotions = predicted_emotions
    image_data.recognized_emotion = recognized_emotion
    image_data.status = "COM"
    image_data.save()
|
26 |
+
|
27 |
+
def process_image_from_api(image_file):
    """
    Process an image received via the API and return the annotated image and emotion data.

    Args:
        image_file (InMemoryUploadedFile): source image received through the API.

    Returns:
        tuple: (final_image, predicted_emotions, recognized_emotion)
            - final_image: processed image as a numpy array.
            - predicted_emotions: dict of predicted emotions and their probabilities.
            - recognized_emotion: the most probable emotion.
    """
    # Convert the uploaded file into an OpenCV-compatible array.
    file_bytes = np.frombuffer(image_file.read(), np.uint8)
    image = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)

    # Run recognition on the decoded array.
    final_image, predicted_emotions, recognized_emotion = emotionImageFromArray(image)

    return final_image, predicted_emotions, recognized_emotion
|
48 |
+
|
49 |
+
def converter_to_django_file(image):
    """Convert an OpenCV (BGR) image array into a Django ContentFile JPEG.

    Args:
        image (numpy array): BGR image as produced by the cv2 pipeline here.

    Returns:
        ContentFile: JPEG bytes named 'final_image.jpg', ready to assign to
        an ImageField.
    """
    # BUG FIX: arrays from this module's cv2 pipeline are BGR, but
    # PIL.Image.fromarray interprets them as RGB — without this conversion
    # the saved JPEG had red and blue channels swapped.
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    img_io = BytesIO()
    Image.fromarray(rgb).save(img_io, format='JPEG', quality=100)
    img_content = ContentFile(img_io.getvalue(), 'final_image.jpg')

    return img_content
|
56 |
+
|
57 |
+
def convert_image_to_bytes(image):
    """Encode an image array as JPEG and return it wrapped in a BytesIO."""
    _, encoded = cv2.imencode('.jpg', image)
    return BytesIO(encoded)
|
app/apps/emotion_recognition/templates/recognition/index.html
ADDED
@@ -0,0 +1,74 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{% extends 'base.html' %}
{% load static %}
{% load crispy_forms_tags %}
{% load crispy_forms_tags humanize %}

{% block content %}
{# Upload form plus a table of all past recognitions with status badges. #}
<div class="container d-flex flex-wrap flex-column justify-center">

    {# FIX: added the missing space between class='col-4' and enctype. #}
    <form action="" method="POST" class='col-4' enctype="multipart/form-data">
        {% csrf_token %}
        <label for="formFile" class="form-label">Выберите изображение для определения эмоции</label>
        <input id='formFile' name="uploaded_file" class="form-control" type="file" accept="image/*" placeholder="Выберите изображение" required multiple>
        <button type="submit" class="btn btn-primary mt-3">Определить</button>
    </form>
    <div class="d-flex flex-column flex-wrap align-content-center">
    </div>
    <table class="table table-hover mt-5">
        <thead>
            <tr class="table-secondary">
                <th scope="col">Оригинальное Изображение</th>
                <th scope="col">Результат</th>
                <th scope="col">Эмоция</th>
                <th scope="col">Статуc</th>
                <th scope="col">Дата</th>
                <th scope="col"></th>
            </tr>
        </thead>
        <tbody>
            {% for sample in samples%}
            {# BUG FIX: the opening <tr> was missing, so every cell was emitted outside a row. #}
            <tr>
                <td>
                    <img src="{{sample.uploaded_image.url}}" alt="" style="width: 250px">
                </td>
                <td>
                    {% if sample.final_image %}
                    <img src="{{sample.final_image.url}}" alt="" style="width: 250px">
                    {% else %}
                    <p>Произошла ошибка</p>
                    {% endif %}
                </td>
                <td>
                    {{sample.recognized_emotion}}
                </td>
                <td class="text-center">
                    {% if sample.status == "COM" %}
                    <svg xmlns="http://www.w3.org/2000/svg" width="32" height="32" fill="green" class="bi bi-check-circle" viewBox="0 0 16 16">
                        <path d="M8 15A7 7 0 1 1 8 1a7 7 0 0 1 0 14zm0 1A8 8 0 1 0 8 0a8 8 0 0 0 0 16z"/>
                        <path d="M10.97 4.97a.235.235 0 0 0-.02.022L7.477 9.417 5.384 7.323a.75.75 0 0 0-1.06 1.06L6.97 11.03a.75.75 0 0 0 1.079-.02l3.992-4.99a.75.75 0 0 0-1.071-1.05z"/>
                    </svg>
                    {%elif sample.status == "PEN"%}
                    <svg xmlns="http://www.w3.org/2000/svg" width="32" height="32" fill="orange" class="bi bi-arrow-clockwise" viewBox="0 0 16 16">
                        <path fill-rule="evenodd" d="M8 3a5 5 0 1 0 4.546 2.914.5.5 0 0 1 .908-.417A6 6 0 1 1 8 2v1z"/>
                        <path d="M8 4.466V.534a.25.25 0 0 1 .41-.192l2.36 1.966c.12.1.12.284 0 .384L8.41 4.658A.25.25 0 0 1 8 4.466z"/>
                    </svg>
                    {%else%}
                    <svg xmlns="http://www.w3.org/2000/svg" width="32" height="32" fill="red" class="bi bi-x-circle" viewBox="0 0 16 16">
                        <path d="M8 15A7 7 0 1 1 8 1a7 7 0 0 1 0 14zm0 1A8 8 0 1 0 8 0a8 8 0 0 0 0 16z"/>
                        <path d="M4.646 4.646a.5.5 0 0 1 .708 0L8 7.293l2.646-2.647a.5.5 0 0 1 .708.708L8.707 8l2.647 2.646a.5.5 0 0 1-.708.708L8 8.707l-2.646 2.647a.5.5 0 0 1-.708-.708L7.293 8 4.646 5.354a.5.5 0 0 1 0-.708z"/>
                    </svg>
                    {%endif%}
                </td>
                <td>
                    {{sample.created_at|naturaltime}}
                </td>
                <td>
                    <a href="{% url "recognition:recognition_edit" sample.id %}" class="btn btn-outline-dark">Редактировать</a>
                </td>
            </tr>
            {%endfor%}
        </tbody>
    </table>
</div>

{%endblock%}
|
app/apps/emotion_recognition/templates/recognition/real_time.html
ADDED
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{% extends 'base.html' %}
{% load static %}
{% load crispy_forms_tags %}
{% load crispy_forms_tags humanize %}

{% block content %}
{# Live webcam page: a collapse toggle reveals an <img> whose src is the
   MJPEG stream served by recognition:real_time_video_stream. #}
<div class="container d-flex flex-wrap flex-column justify-content-center align-content-center">
    <!--
    <form action="" method="POST" class='col-4'enctype="multipart/form-data">
        {% csrf_token %}
        <button type="submit" class="btn btn-primary mt-3">Включить камеру</button>
    </form> -->

    <a class="btn btn-primary w-75 " data-bs-toggle="collapse" href="#collapseCamera" role="button" aria-expanded="false" aria-controls="collapseCamera">
        Включить/Выключить камеру
    </a>

    <div class="collapse mt-3" id="collapseCamera">
        <div class="card card-body">
            <div class="container">
                <div class="row">
                    <div class="col-lg-8 offset-lg-2">
                        <img src="{% url 'recognition:real_time_video_stream' %}" width="100%">
                    </div>
                </div>
            </div>
        </div>
    </div>

</div>

{%endblock%}
|
app/apps/emotion_recognition/templates/recognition/recognition_delete.html
ADDED
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{% extends 'base.html' %}
{% load static %}
{% load crispy_forms_tags %}
{% load crispy_forms_tags humanize %}

{# Delete-confirmation page for a recognition record. #}
{%block content %}

<div class="pt-3 ps-3 bg-body-tertiary scrollspy-example overflow-auto min-vh-100" data-bs-spy="scroll" data-bs-smooth-scroll="true">

    <div class="container">
        <div class="d-flex flex-column align-items-center justify-content-between mt">
            <h2 class="mb-3">Подтверждение Удаления</h2>
            {# FIX: added the missing space before <strong> so the words do not run together. #}
            <p class="lead">Вы уверены, что хотите удалить запись <strong>"{{ object}}"?</strong>
                <br>
                <u class="fw-bold">Восстановить будет невозможно.</u>
            </p>
            <form method="post">
                {% csrf_token %}
                {{form}}
                <button type="submit" class="btn btn-danger btn-lg">Подтверждаю</button>
                <a href="javascript:history.back()" class="btn btn-outline-secondary btn-lg" role="button">Вернуться Назад</a>
            </form>
        </div>
    </div>
</div>

{%endblock%}
|
app/apps/emotion_recognition/templates/recognition/recognition_edit.html
ADDED
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{% extends 'base.html' %}
{% load static %}
{% load crispy_forms_tags humanize %}

{# Edit page for one recognition record: crispy-rendered form plus
   delete / back navigation buttons. #}
{%block content %}

<div class="container">
    <div class="pt-3 ps-3 bg-body-tertiary scrollspy-example overflow-auto min-vh-100" data-bs-spy="scroll" data-bs-smooth-scroll="true">
        <form method="POST" enctype="multipart/form-data">
            {% csrf_token %}
            {{form.media}}
            <fieldset class="form-group">
                <legend class="border-bottom mb-4"></legend>
                {{ form|crispy }}
            </fieldset>
            <div class="form-group" style="display: flex; flex-direction: column; align-items: center;">
                <button class="w-75 btn btn-lg btn-dark" type="submit">Применить Изменения</button>
            </div>

            <div class="form-group" style="display: flex; flex-direction: column; align-items: center;">
                <a href="{% url 'recognition:recognition_delete' userimagerecognition.id%}" class="mt-3 w-75 btn btn-lg btn-danger " role="button">
                    Удалить Запись
                </a>
                <a href="{% url 'recognition:index'%}" class="mt-3 w-75 btn btn-lg btn-outline-secondary " role="button">
                    Вернуться Назад
                </a>
            </div>
        </form>
    </div>
</div>

{%endblock%}
|
app/apps/emotion_recognition/tests.py
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
from django.test import TestCase
|
2 |
+
|
3 |
+
# Create your tests here.
|
app/apps/emotion_recognition/urls.py
ADDED
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from django.urls import path
|
2 |
+
from . import views
|
3 |
+
|
4 |
+
|
5 |
+
# URL namespace for this app; templates reverse names as 'recognition:<name>'.
app_name = 'recognition'
urlpatterns = [
    # Upload form + results table (HTML).
    path('recognition/', views.index, name="index"),
    # API endpoint handled by ImageProcessingView (class-based DRF view).
    path('api/emotion-recognition/', views.ImageProcessingView.as_view(), name='emotion_recognition'),
    # Webcam page and the MJPEG stream it embeds.
    path('recognition/real-time', views.real_time_recognition, name="real_time"),
    path('real-time-stream', views.real_time_stream, name="real_time_video_stream"),
    # Edit / delete a stored recognition record by primary key.
    path('recognition/edit/<pk>', views.RecognitionUpdateView.as_view(), name="recognition_edit"),
    path('recognition/delete/<pk>', views.RecognitionDeleteView.as_view(), name="recognition_delete"),
]
|
app/apps/emotion_recognition/views.py
ADDED
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from django.shortcuts import render, redirect
|
2 |
+
from django.views.decorators.http import require_http_methods
|
3 |
+
from django.http import HttpResponse, StreamingHttpResponse
|
4 |
+
from django.contrib.auth.mixins import LoginRequiredMixin
|
5 |
+
from django.views.generic import UpdateView, DeleteView
|
6 |
+
from django.contrib import messages
|
7 |
+
from django.urls import reverse_lazy
|
8 |
+
|
9 |
+
from rest_framework.views import APIView
|
10 |
+
from rest_framework.response import Response
|
11 |
+
from rest_framework import status
|
12 |
+
from rest_framework.parsers import MultiPartParser, FormParser
|
13 |
+
from .serializers import ImageSerializer
|
14 |
+
from PIL import Image
|
15 |
+
|
16 |
+
from .models import UserImageRecognition
|
17 |
+
from .emotion_recognition import emotionVideo
|
18 |
+
from .tasks import proccess_uploaded_image, process_image_from_api, convert_image_to_bytes
|
19 |
+
from .forms import RecognitionEditForm
|
20 |
+
|
21 |
+
|
22 |
+
|
23 |
+
@require_http_methods(['GET', 'POST'])
def index(request):
    """Render the recognition gallery (GET) or accept image uploads (POST).

    GET  -> renders ``recognition/index.html`` with every stored
            UserImageRecognition row as ``samples``.
    POST -> creates one UserImageRecognition per file in the
            ``uploaded_file`` field, schedules processing for each, then
            redirects back to the index (POST/redirect/GET).
    On failure, the last created record (if any) is marked as errored and a
    plain-text error page is returned.
    """
    image_data = None  # last record created; needed by the error handler
    try:
        if request.method == 'GET':
            samples = UserImageRecognition.objects.all()
            return render(request, 'recognition/index.html', {'samples': samples, })

        # POST: persist each upload and kick off processing.
        if request.FILES:
            for f in request.FILES.getlist('uploaded_file'):
                image_data = UserImageRecognition.objects.create(uploaded_image=f)
                proccess_uploaded_image(image_data.id)

        # BUG FIX: a POST without files previously fell through and
        # returned None, which Django turns into a 500. Always redirect.
        return redirect('recognition:index')

    except Exception as e:
        # BUG FIX: `image_data` used to be referenced unconditionally here,
        # raising NameError (masking the real error) whenever the failure
        # happened before any record was created — e.g. during the GET path.
        if image_data is not None:
            image_data.status = 'ERR'
            image_data.error_occurred = True
            image_data.error_message = str(e)
            image_data.save()

        return HttpResponse(f'Error: {str(e)}')
|
49 |
+
|
50 |
+
|
51 |
+
|
52 |
+
@require_http_methods(['GET', 'POST'])
def real_time_recognition(request):
    """Serve the page that hosts the live webcam emotion-recognition feed."""
    template = 'recognition/real_time.html'
    return render(request, template)
|
55 |
+
|
56 |
+
def real_time_stream(request):
    """Stream MJPEG frames from the emotion recognizer as a multipart response."""
    frame_source = emotionVideo()
    return StreamingHttpResponse(
        frame_source,
        content_type="multipart/x-mixed-replace;boundary=frame",
    )
|
58 |
+
|
59 |
+
|
60 |
+
class RecognitionUpdateView(LoginRequiredMixin, UpdateView):
    """Edit a single UserImageRecognition record (login required)."""

    model = UserImageRecognition
    form_class = RecognitionEditForm
    template_name = "recognition/recognition_edit.html"

    def get(self, request, pk):
        # Load the record and render the edit form for it.
        self.object = self.get_object()
        context = self.get_context_data(object=self.object)
        return self.render_to_response(context)

    def get_success_url(self, **kwargs):
        # Flash a confirmation and stay on the same edit page.
        messages.success(self.request, 'Запись была успешно изменена!')
        return reverse_lazy('recognition:recognition_edit', args=(self.object.pk,))
|
75 |
+
|
76 |
+
|
77 |
+
class RecognitionDeleteView(LoginRequiredMixin, DeleteView):
    """Confirm and delete a UserImageRecognition record (login required)."""

    model = UserImageRecognition
    template_name = "recognition/recognition_delete.html"

    # BUG FIX: removed a pure pass-through `delete(self, request, pk)`
    # override. Its hard-coded positional signature diverged from the base
    # `delete(self, request, *args, **kwargs)` and added nothing; the
    # inherited implementation is used instead.

    def get_success_url(self, **kwargs):
        # Invoked before the row is removed; flash a confirmation and go
        # back to the index. (The previous version also called
        # self.get_object() into an unused variable — an extra DB query.)
        messages.success(self.request, 'Запись была успешно удалёна!')
        return reverse_lazy('recognition:index')
|
88 |
+
|
89 |
+
|
90 |
+
class ImageProcessingView(APIView):
    """API endpoint: accept an uploaded image and return it annotated.

    The response body is the processed JPEG; the detected emotion labels
    are exposed through the ``X-Predicted-Emotions`` and
    ``X-Recognized-Emotion`` response headers, since the body already
    carries the image bytes.
    """

    parser_classes = (MultiPartParser, FormParser)  # handle uploaded files
    serializer_class = ImageSerializer

    def post(self, request, format=None):
        """Process an image passed via the API and return the annotated image."""
        serializer = self.serializer_class(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

        image_file = serializer.validated_data['image']
        try:
            # Run recognition and re-encode the annotated frame.
            final_image, predicted_emotions, recognized_emotion = process_image_from_api(image_file)
            output_image = convert_image_to_bytes(final_image)

            # BUG FIX: the old code built a DRF Response carrying JSON data,
            # then assigned `response.content` directly. DRF responses are
            # template responses whose content cannot be set before
            # rendering, so the assignment raised and every request fell
            # into the 500 branch — and the JSON was overwritten anyway.
            # Return the JPEG bytes via a plain HttpResponse and carry the
            # labels in headers instead.
            response = HttpResponse(
                output_image.getvalue(),
                content_type='image/jpeg',
                status=status.HTTP_200_OK,
            )
            response['X-Predicted-Emotions'] = str(predicted_emotions)
            response['X-Recognized-Emotion'] = str(recognized_emotion)
            return response
        except Exception as e:
            return Response({"error": str(e)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
app/db.sqlite3
ADDED
Binary file (135 kB). View file
|
|
app/django_app/__init__.py
ADDED
File without changes
|
app/django_app/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (169 Bytes). View file
|
|
app/django_app/__pycache__/__init__.cpython-39.pyc
ADDED
Binary file (147 Bytes). View file
|
|
app/django_app/__pycache__/settings.cpython-310.pyc
ADDED
Binary file (2.71 kB). View file
|
|
app/django_app/__pycache__/settings.cpython-39.pyc
ADDED
Binary file (3.09 kB). View file
|
|
app/django_app/__pycache__/urls.cpython-310.pyc
ADDED
Binary file (1.19 kB). View file
|
|
app/django_app/__pycache__/urls.cpython-39.pyc
ADDED
Binary file (1.23 kB). View file
|
|
app/django_app/__pycache__/wsgi.cpython-310.pyc
ADDED
Binary file (578 Bytes). View file
|
|
app/django_app/__pycache__/wsgi.cpython-39.pyc
ADDED
Binary file (556 Bytes). View file
|
|