Upload 30 files
- app/Dockerfile +24 -0
- app/apps/emotion_recognition/__init__.py +0 -0
- app/apps/emotion_recognition/admin.py +6 -0
- app/apps/emotion_recognition/apps.py +6 -0
- app/apps/emotion_recognition/emotion_recognition.py +203 -0
- app/apps/emotion_recognition/forms.py +9 -0
- app/apps/emotion_recognition/models.py +34 -0
- app/apps/emotion_recognition/serializers.py +4 -0
- app/apps/emotion_recognition/tasks.py +62 -0
- app/apps/emotion_recognition/templates/recognition/index.html +74 -0
- app/apps/emotion_recognition/templates/recognition/real_time.html +33 -0
- app/apps/emotion_recognition/templates/recognition/recognition_delete.html +28 -0
- app/apps/emotion_recognition/templates/recognition/recognition_edit.html +32 -0
- app/apps/emotion_recognition/tests.py +3 -0
- app/apps/emotion_recognition/urls.py +13 -0
- app/apps/emotion_recognition/views.py +119 -0
- app/django_app/__init__.py +0 -0
- app/django_app/settings.py +147 -0
- app/django_app/urls.py +24 -0
- app/django_app/wsgi.py +16 -0
- app/manage.py +22 -0
- app/model/haarcascade_frontalface.xml +0 -0
- app/model/model_4layer_2_2_pool.h5 +3 -0
- app/model/model_4layer_2_2_pool.json +1 -0
- app/requirements.txt +61 -0
- app/static/css/main.css +6 -0
- app/static/images/logo.svg +3 -0
- app/static/images/undraw_remotely_2j6y.svg +1 -0
- app/templates/base.html +48 -0
- app/templates/header.html +23 -0
app/Dockerfile
ADDED
@@ -0,0 +1,24 @@
# syntax=docker/dockerfile:1.4

FROM --platform=$BUILDPLATFORM python:3.10-alpine AS builder
EXPOSE 8000
WORKDIR /app
COPY requirements.txt /app
RUN pip3 install -r requirements.txt --no-cache-dir
COPY . /app
ENTRYPOINT ["python3"]
CMD ["manage.py", "runserver", "0.0.0.0:8000"]

FROM builder AS dev-envs
RUN <<EOF
apk update
apk add git
EOF

RUN <<EOF
addgroup -S docker
adduser -S --shell /bin/bash --ingroup docker vscode
EOF
# install Docker tools (cli, buildx, compose)
COPY --from=gloursdocker/docker / /
CMD ["manage.py", "runserver", "0.0.0.0:8000"]
app/apps/emotion_recognition/__init__.py
ADDED
File without changes
app/apps/emotion_recognition/admin.py
ADDED
@@ -0,0 +1,6 @@
from django.contrib import admin
from .models import UserImageRecognition
# Register your models here.

admin.site.register(UserImageRecognition)
app/apps/emotion_recognition/apps.py
ADDED
@@ -0,0 +1,6 @@
from django.apps import AppConfig


class EmotionRecognitionConfig(AppConfig):
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'apps.emotion_recognition'
app/apps/emotion_recognition/emotion_recognition.py
ADDED
@@ -0,0 +1,203 @@
from tensorflow import keras
from keras.models import Sequential
from keras.models import load_model
from keras.models import model_from_json
from keras.utils import img_to_array
import keras.utils as image

import cv2
import numpy as np
import os

from django_app.settings import BASE_DIR


model = Sequential()

model = model_from_json(open(
    os.path.join(BASE_DIR, 'model/model_4layer_2_2_pool.json'), "r").read())

model.load_weights(os.path.join(
    BASE_DIR, 'model/model_4layer_2_2_pool.h5'))

class_labels = {0: 'Angry', 1: 'Disgust', 2: 'Fear',
                3: 'Happy', 4: 'Sad', 5: 'Surprise', 6: 'Neutral'}
classes = list(class_labels.values())

face_classifier = cv2.CascadeClassifier(os.path.join(
    BASE_DIR, 'model/haarcascade_frontalface.xml'))

camera = cv2.VideoCapture(0)


def text_on_detected_boxes(text, text_x, text_y, image, font_scale=1,
                           font=cv2.FONT_HERSHEY_SIMPLEX,
                           FONT_COLOR=(0, 0, 0),
                           FONT_THICKNESS=2,
                           rectangle_bgr=(0, 255, 0)):
    (text_width, text_height) = cv2.getTextSize(
        text, font, fontScale=font_scale, thickness=2)[0]
    box_coords = ((text_x - 10, text_y + 4), (text_x +
                  text_width + 10, text_y - text_height - 5))
    cv2.rectangle(image, box_coords[0],
                  box_coords[1], rectangle_bgr, cv2.FILLED)
    cv2.putText(image, text, (text_x, text_y), font,
                fontScale=font_scale, color=FONT_COLOR, thickness=FONT_THICKNESS)


def face_detector_image(img):
    """
    Detect faces in an image.

    Args:
        img (numpy array): Source image.

    Returns:
        tuple: (rects, allfaces, img) - face coordinates, cropped faces and the image with bounding boxes.
    """
    gray = cv2.cvtColor(img.copy(), cv2.COLOR_BGR2GRAY)
    faces = face_classifier.detectMultiScale(gray, 1.3, 5)
    if len(faces) == 0:
        return (0, 0, 0, 0), np.zeros((48, 48), np.uint8), img
    allfaces = []
    rects = []
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
        roi_gray = gray[y:y + h, x:x + w]
        roi_gray = cv2.resize(roi_gray, (48, 48), interpolation=cv2.INTER_AREA)
        allfaces.append(roi_gray)
        rects.append((x, w, y, h))
    return rects, allfaces, img


def emotionImage(imgPath):
    img = cv2.imread(os.path.join(BASE_DIR, 'media', imgPath))
    rects, faces, image = face_detector_image(img)
    i = 0
    for face in faces:
        roi = face.astype("float") / 255.0
        roi = img_to_array(roi)
        roi = np.expand_dims(roi, axis=0)
        preds = model.predict(roi)[0]
        label = class_labels[preds.argmax()]
        label_position = (
            rects[i][0] + int((rects[i][1] / 2)), abs(rects[i][2] - 10))
        i += 1

        # Draw the label text and bounding boxes
        text_on_detected_boxes(
            label, label_position[0], label_position[1], image)

    precentages = dict(zip(classes, preds * 100))

    return image, precentages, label


def emotionImageFromArray(img_array):
    """
    Processes an image and returns the recognition result.

    Args:
        img_array (numpy array): Source image (numpy array).

    Returns:
        tuple: (image, precentages, label)
            - image: the image with bounding boxes and emotion labels.
            - precentages: the probability of each emotion.
            - label: the recognized emotion.
    """
    rects, faces, image = face_detector_image(img_array)
    i = 0
    for face in faces:
        roi = face.astype("float") / 255.0
        roi = img_to_array(roi)
        roi = np.expand_dims(roi, axis=0)
        preds = model.predict(roi)[0]
        label = class_labels[preds.argmax()]
        label_position = (
            rects[i][0] + int((rects[i][1] / 2)), abs(rects[i][2] - 10))
        i += 1

        # Draw the label text and bounding boxes
        text_on_detected_boxes(
            label, label_position[0], label_position[1], image)

    precentages = dict(zip(classes, preds * 100))

    return image, precentages, label


def face_detector_video(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_classifier.detectMultiScale(gray, 1.3, 5)
    if len(faces) == 0:
        return (0, 0, 0, 0), np.zeros((48, 48), np.uint8), img
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), thickness=2)
        roi_gray = gray[y:y + h, x:x + w]
        roi_gray = cv2.resize(roi_gray, (48, 48), interpolation=cv2.INTER_AREA)
    return (x, w, y, h), roi_gray, img


def emotionVideo():
    while True:
        ret, frame = camera.read()
        rect, face, image = face_detector_video(frame)
        if np.sum([face]) != 0.0:
            roi = face.astype("float") / 255.0
            roi = img_to_array(roi)
            roi = np.expand_dims(roi, axis=0)
            preds = model.predict(roi)[0]
            label = class_labels[preds.argmax()]
            label_position = (rect[0] + rect[1] // 50, rect[2] + rect[3] // 50)
            text_on_detected_boxes(label, label_position[0], label_position[1], image)
            fps = camera.get(cv2.CAP_PROP_FPS)
            cv2.putText(image, str(fps), (5, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        else:
            cv2.putText(image, "No Face Found", (5, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)

        ret, buffer = cv2.imencode('.jpg', image)

        frame = buffer.tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')


def gen_frames():
    while True:
        success, frame = camera.read()
        if not success:
            break
        else:
            gray_img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            faces_detected = face_classifier.detectMultiScale(gray_img, 1.32, 5)

            for (x, y, w, h) in faces_detected:

                cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), thickness=7)
                roi_gray = gray_img[y:y + h, x:x + w]
                roi_gray = cv2.resize(roi_gray, (48, 48))
                img_pixels = image.img_to_array(roi_gray)
                img_pixels = np.expand_dims(img_pixels, axis=0)
                img_pixels /= 255

                predictions = model.predict(img_pixels)

                max_index = np.argmax(predictions[0])

                emotions = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
                predicted_emotion = emotions[max_index]

                cv2.putText(frame, predicted_emotion, (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)

            resized_img = cv2.resize(frame, (600, 400))

            ret, buffer = cv2.imencode('.jpg', frame)

            frame = buffer.tobytes()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
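A note on usage (illustrative; not part of the uploaded files): the module loads the model, the Haar cascade and the webcam at import time, so emotionImageFromArray can also be exercised outside a request cycle. A minimal sketch, assuming it is run from the app/ directory (so django_app and apps are importable) and that a local sample_face.jpg with a detectable face exists:

import cv2

from apps.emotion_recognition.emotion_recognition import emotionImageFromArray

# Read a BGR image with OpenCV, run detection + classification,
# then write the annotated copy back to disk.
img = cv2.imread("sample_face.jpg")  # hypothetical test image
annotated, percentages, label = emotionImageFromArray(img)
print(label, percentages)
cv2.imwrite("sample_face_annotated.jpg", annotated)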
app/apps/emotion_recognition/forms.py
ADDED
@@ -0,0 +1,9 @@
from django import forms

from .models import UserImageRecognition


class RecognitionEditForm(forms.ModelForm):
    class Meta:
        model = UserImageRecognition
        fields = ('__all__')
app/apps/emotion_recognition/models.py
ADDED
@@ -0,0 +1,34 @@
from django.db import models
from PIL import Image


# Create your models here.
class UserImageRecognition(models.Model):
    uploaded_image = models.ImageField(null=False, blank=False)
    final_image = models.ImageField(null=True, blank=True)
    recognized_emotion = models.CharField(max_length=20, null=True, blank=True)
    predicted_emotions = models.CharField(max_length=155, null=True, blank=True)
    created_at = models.DateTimeField(
        auto_now_add=True, null=False, blank=False)

    STATUS_CHOICES = (
        ('PEN', 'Pending'),
        ('COM', 'Complete'),
        ('ERR', 'Error'),
    )
    status = models.CharField(
        max_length=3, choices=STATUS_CHOICES, null=False, blank=False, default='PEN')

    class Meta:
        ordering = ['-created_at']

    def save(self, *args, **kwargs):
        super().save(*args, **kwargs)
        uploaded_img = Image.open(self.uploaded_image.path)
        if uploaded_img.height > 400 or uploaded_img.width > 400:
            output_size = (400, 400)
            uploaded_img.thumbnail(output_size)
            uploaded_img.save(self.uploaded_image.path)

    def __str__(self):
        return f'{self.uploaded_image}'
app/apps/emotion_recognition/serializers.py
ADDED
@@ -0,0 +1,4 @@
from rest_framework import serializers

class ImageSerializer(serializers.Serializer):
    image = serializers.ImageField(required=True)
app/apps/emotion_recognition/tasks.py
ADDED
@@ -0,0 +1,62 @@
from django.shortcuts import get_object_or_404
from django.core.files.base import ContentFile

from .models import UserImageRecognition
from .emotion_recognition import emotionImage, emotionImageFromArray

from io import BytesIO
import cv2
import numpy as np
from PIL import Image


def proccess_uploaded_image(image_data_id):
    image_data = None
    image_data = get_object_or_404(UserImageRecognition, pk=image_data_id)

    final_image, predicted_emotions, recognized_emotion = emotionImage(
        image_data.uploaded_image.name)
    final_image = converter_to_django_file(final_image)

    image_data.final_image = final_image
    image_data.predicted_emotions = predicted_emotions
    image_data.recognized_emotion = recognized_emotion
    image_data.status = "COM"
    image_data.save()


def process_image_from_api(image_file):
    """
    Processes an image received through the API and returns the final image and emotion data.

    Args:
        image_file (InMemoryUploadedFile): Source image received through the API.

    Returns:
        tuple: (final_image, predicted_emotions, recognized_emotion)
            - final_image: the processed image as a numpy array.
            - predicted_emotions: a dictionary of predicted emotions and their probabilities.
            - recognized_emotion: the most likely emotion.
    """
    # Convert the uploaded file into an OpenCV-compatible format
    file_bytes = np.frombuffer(image_file.read(), np.uint8)
    image = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)

    # Process the image with emotionImageFromArray
    final_image, predicted_emotions, recognized_emotion = emotionImageFromArray(image)

    return final_image, predicted_emotions, recognized_emotion


def converter_to_django_file(image):
    img_io = BytesIO()
    image = Image.fromarray(image)
    image.save(img_io, format='JPEG', quality=100)
    img_content = ContentFile(img_io.getvalue(), 'final_image.jpg')

    return img_content


def convert_image_to_bytes(image):
    # Convert the processed image to bytes
    _, buffer = cv2.imencode('.jpg', image)
    output_image = BytesIO(buffer)

    return output_image
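A note on usage (illustrative; not part of the uploaded files): the functions in tasks.py run synchronously inside the request cycle, and process_image_from_api can be driven directly from a Django shell or test. A minimal sketch, assuming a local sample_face.jpg with a detectable face:

from django.core.files.uploadedfile import SimpleUploadedFile

from apps.emotion_recognition.tasks import process_image_from_api, convert_image_to_bytes

# Wrap raw JPEG bytes the way a multipart upload arrives at the view.
with open("sample_face.jpg", "rb") as fh:  # hypothetical test image
    upload = SimpleUploadedFile("sample_face.jpg", fh.read(), content_type="image/jpeg")

final_image, predicted_emotions, recognized_emotion = process_image_from_api(upload)
print(recognized_emotion, predicted_emotions)

# convert_image_to_bytes re-encodes the annotated numpy image as JPEG bytes.
jpeg_bytes = convert_image_to_bytes(final_image).getvalue()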
app/apps/emotion_recognition/templates/recognition/index.html
ADDED
@@ -0,0 +1,74 @@
{% extends 'base.html' %}
{% load static %}
{% load crispy_forms_tags %}
{% load crispy_forms_tags humanize %}

{% block content %}

<div class="container d-flex flex-wrap flex-column justify-center">

    <form action="" method="POST" class='col-4' enctype="multipart/form-data">
        {% csrf_token %}
        <label for="formFile" class="form-label">Выберите изображение для определения эмоции</label>
        <input id='formFile' name="uploaded_file" class="form-control" type="file" accept="image/*" placeholder="Выберите изображение" required multiple>
        <button type="submit" class="btn btn-primary mt-3">Определить</button>
    </form>
    <div class="d-flex flex-column flex-wrap align-content-center">
    </div>
    <table class="table table-hover mt-5">
        <thead>
            <tr class="table-secondary">
                <th scope="col">Оригинальное Изображение</th>
                <th scope="col">Результат</th>
                <th scope="col">Эмоция</th>
                <th scope="col">Статус</th>
                <th scope="col">Дата</th>
                <th scope="col"></th>
            </tr>
        </thead>
        <tbody>
            {% for sample in samples %}
            <tr>
                <td>
                    <img src="{{sample.uploaded_image.url}}" alt="" style="width: 250px">
                </td>
                <td>
                    {% if sample.final_image %}
                    <img src="{{sample.final_image.url}}" alt="" style="width: 250px">
                    {% else %}
                    <p>Произошла ошибка</p>
                    {% endif %}
                </td>
                <td>
                    {{sample.recognized_emotion}}
                </td>
                <td class="text-center">
                    {% if sample.status == "COM" %}
                    <svg xmlns="http://www.w3.org/2000/svg" width="32" height="32" fill="green" class="bi bi-check-circle" viewBox="0 0 16 16">
                        <path d="M8 15A7 7 0 1 1 8 1a7 7 0 0 1 0 14zm0 1A8 8 0 1 0 8 0a8 8 0 0 0 0 16z"/>
                        <path d="M10.97 4.97a.235.235 0 0 0-.02.022L7.477 9.417 5.384 7.323a.75.75 0 0 0-1.06 1.06L6.97 11.03a.75.75 0 0 0 1.079-.02l3.992-4.99a.75.75 0 0 0-1.071-1.05z"/>
                    </svg>
                    {% elif sample.status == "PEN" %}
                    <svg xmlns="http://www.w3.org/2000/svg" width="32" height="32" fill="orange" class="bi bi-arrow-clockwise" viewBox="0 0 16 16">
                        <path fill-rule="evenodd" d="M8 3a5 5 0 1 0 4.546 2.914.5.5 0 0 1 .908-.417A6 6 0 1 1 8 2v1z"/>
                        <path d="M8 4.466V.534a.25.25 0 0 1 .41-.192l2.36 1.966c.12.1.12.284 0 .384L8.41 4.658A.25.25 0 0 1 8 4.466z"/>
                    </svg>
                    {% else %}
                    <svg xmlns="http://www.w3.org/2000/svg" width="32" height="32" fill="red" class="bi bi-x-circle" viewBox="0 0 16 16">
                        <path d="M8 15A7 7 0 1 1 8 1a7 7 0 0 1 0 14zm0 1A8 8 0 1 0 8 0a8 8 0 0 0 0 16z"/>
                        <path d="M4.646 4.646a.5.5 0 0 1 .708 0L8 7.293l2.646-2.647a.5.5 0 0 1 .708.708L8.707 8l2.647 2.646a.5.5 0 0 1-.708.708L8 8.707l-2.646 2.647a.5.5 0 0 1-.708-.708L7.293 8 4.646 5.354a.5.5 0 0 1 0-.708z"/>
                    </svg>
                    {% endif %}
                </td>
                <td>
                    {{sample.created_at|naturaltime}}
                </td>
                <td>
                    <a href="{% url "recognition:recognition_edit" sample.id %}" class="btn btn-outline-dark">Редактировать</a>
                </td>
            </tr>
            {% endfor %}
        </tbody>
    </table>
</div>

{% endblock %}
app/apps/emotion_recognition/templates/recognition/real_time.html
ADDED
@@ -0,0 +1,33 @@
{% extends 'base.html' %}
{% load static %}
{% load crispy_forms_tags %}
{% load crispy_forms_tags humanize %}

{% block content %}

<div class="container d-flex flex-wrap flex-column justify-content-center align-content-center">
    <!--
    <form action="" method="POST" class='col-4' enctype="multipart/form-data">
        {% csrf_token %}
        <button type="submit" class="btn btn-primary mt-3">Включить камеру</button>
    </form> -->

    <a class="btn btn-primary w-75" data-bs-toggle="collapse" href="#collapseCamera" role="button" aria-expanded="false" aria-controls="collapseCamera">
        Включить/Выключить камеру
    </a>

    <div class="collapse mt-3" id="collapseCamera">
        <div class="card card-body">
            <div class="container">
                <div class="row">
                    <div class="col-lg-8 offset-lg-2">
                        <img src="{% url 'recognition:real_time_video_stream' %}" width="100%">
                    </div>
                </div>
            </div>
        </div>
    </div>

</div>

{% endblock %}
app/apps/emotion_recognition/templates/recognition/recognition_delete.html
ADDED
@@ -0,0 +1,28 @@
{% extends 'base.html' %}
{% load static %}
{% load crispy_forms_tags %}
{% load crispy_forms_tags humanize %}


{% block content %}

<div class="pt-3 ps-3 bg-body-tertiary scrollspy-example overflow-auto min-vh-100" data-bs-spy="scroll" data-bs-smooth-scroll="true">

    <div class="container">
        <div class="d-flex flex-column align-items-center justify-content-between mt">
            <h2 class="mb-3">Подтверждение Удаления</h2>
            <p class="lead">Вы уверены, что хотите удалить запись <strong>"{{ object }}"?</strong>
                <br>
                <u class="fw-bold">Восстановить будет невозможно.</u>
            </p>
            <form method="post">
                {% csrf_token %}
                {{ form }}
                <button type="submit" class="btn btn-danger btn-lg">Подтверждаю</button>
                <a href="javascript:history.back()" class="btn btn-outline-secondary btn-lg" role="button">Вернуться Назад</a>
            </form>
        </div>
    </div>
</div>

{% endblock %}
app/apps/emotion_recognition/templates/recognition/recognition_edit.html
ADDED
@@ -0,0 +1,32 @@
{% extends 'base.html' %}
{% load static %}
{% load crispy_forms_tags humanize %}

{% block content %}

<div class="container">
    <div class="pt-3 ps-3 bg-body-tertiary scrollspy-example overflow-auto min-vh-100" data-bs-spy="scroll" data-bs-smooth-scroll="true">
        <form method="POST" enctype="multipart/form-data">
            {% csrf_token %}
            {{ form.media }}
            <fieldset class="form-group">
                <legend class="border-bottom mb-4"></legend>
                {{ form|crispy }}
            </fieldset>
            <div class="form-group" style="display: flex; flex-direction: column; align-items: center;">
                <button class="w-75 btn btn-lg btn-dark" type="submit">Применить Изменения</button>
            </div>

            <div class="form-group" style="display: flex; flex-direction: column; align-items: center;">
                <a href="{% url 'recognition:recognition_delete' userimagerecognition.id %}" class="mt-3 w-75 btn btn-lg btn-danger" role="button">
                    Удалить Запись
                </a>
                <a href="{% url 'recognition:index' %}" class="mt-3 w-75 btn btn-lg btn-outline-secondary" role="button">
                    Вернуться Назад
                </a>
            </div>
        </form>
    </div>
</div>

{% endblock %}
app/apps/emotion_recognition/tests.py
ADDED
@@ -0,0 +1,3 @@
from django.test import TestCase

# Create your tests here.
app/apps/emotion_recognition/urls.py
ADDED
@@ -0,0 +1,13 @@
from django.urls import path
from . import views


app_name = 'recognition'
urlpatterns = [
    path('recognition/', views.index, name="index"),
    path('api/emotion-recognition/', views.ImageProcessingView.as_view(), name='emotion_recognition'),
    path('recognition/real-time', views.real_time_recognition, name="real_time"),
    path('real-time-stream', views.real_time_stream, name="real_time_video_stream"),
    path('recognition/edit/<pk>', views.RecognitionUpdateView.as_view(), name="recognition_edit"),
    path('recognition/delete/<pk>', views.RecognitionDeleteView.as_view(), name="recognition_delete"),
]
app/apps/emotion_recognition/views.py
ADDED
@@ -0,0 +1,119 @@
from django.shortcuts import render, redirect
from django.views.decorators.http import require_http_methods
from django.http import HttpResponse, StreamingHttpResponse
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import UpdateView, DeleteView
from django.contrib import messages
from django.urls import reverse_lazy

from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework.parsers import MultiPartParser, FormParser
from .serializers import ImageSerializer
from PIL import Image

from .models import UserImageRecognition
from .emotion_recognition import emotionVideo
from .tasks import proccess_uploaded_image, process_image_from_api, convert_image_to_bytes
from .forms import RecognitionEditForm


@require_http_methods(['GET', 'POST'])
def index(request):
    try:

        # GET method, return HTML page
        if request.method == 'GET':
            samples = UserImageRecognition.objects.all()
            return render(request, 'recognition/index.html', {'samples': samples, })

        if request.FILES and request.method == 'POST':
            for f in request.FILES.getlist('uploaded_file'):
                uploaded_image = f
                image_data = UserImageRecognition.objects.create(uploaded_image=uploaded_image)

                proccess_uploaded_image(image_data.id)

            return redirect('recognition:index')

    except Exception as e:

        image_data.status = 'ERR'
        image_data.error_occurred = True
        image_data.error_message = str(e)
        image_data.save()

        return HttpResponse(f'Error: {str(e)}')


@require_http_methods(['GET', 'POST'])
def real_time_recognition(request):
    return render(request, 'recognition/real_time.html')


def real_time_stream(request):
    return StreamingHttpResponse(emotionVideo(), content_type="multipart/x-mixed-replace;boundary=frame")


class RecognitionUpdateView(LoginRequiredMixin, UpdateView):
    model = UserImageRecognition
    form_class = RecognitionEditForm
    template_name = "recognition/recognition_edit.html"

    def get(self, request, pk):
        self.object = self.get_object()

        context = self.get_context_data(object=self.object)
        return self.render_to_response(context)

    def get_success_url(self, **kwargs):
        pk = self.object.pk
        messages.success(self.request, 'Запись была успешно изменена!')
        return reverse_lazy('recognition:recognition_edit', args=(pk,))


class RecognitionDeleteView(LoginRequiredMixin, DeleteView):
    model = UserImageRecognition
    template_name = "recognition/recognition_delete.html"

    def delete(self, request, pk):
        return super().delete(request, pk)

    def get_success_url(self, **kwargs):
        obj = self.get_object()
        messages.success(self.request, 'Запись была успешно удалена!')
        return reverse_lazy('recognition:index')


class ImageProcessingView(APIView):
    parser_classes = (MultiPartParser, FormParser)  # For handling uploaded files
    serializer_class = ImageSerializer

    def post(self, request, format=None):
        """
        Processes an image received through the API and returns the final image with emotions.
        """
        serializer = self.serializer_class(data=request.data)
        if serializer.is_valid():
            image_file = serializer.validated_data['image']

            try:
                # Process the image
                final_image, predicted_emotions, recognized_emotion = process_image_from_api(image_file)
                output_image = convert_image_to_bytes(final_image)

                # Build the response
                response_data = {
                    "predicted_emotions": predicted_emotions,
                    "recognized_emotion": recognized_emotion,
                }
                response = Response(response_data, status=status.HTTP_200_OK)
                response['Content-Type'] = 'image/jpeg'
                response.content = output_image.getvalue()
                return response
            except Exception as e:
                return Response({"error": str(e)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)

        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
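A note on the API endpoint (illustrative; not part of the uploaded files): urls.py maps ImageProcessingView to api/emotion-recognition/, and as written the view replaces the JSON body with the annotated JPEG bytes before returning, so the client effectively receives an image. A minimal client sketch using the requests library (already pinned in requirements.txt), assuming the dev server runs on localhost:8000 and a local test.jpg exists:

import requests

# POST a multipart form; the field name "image" matches ImageSerializer.
with open("test.jpg", "rb") as fh:  # hypothetical test image
    resp = requests.post(
        "http://localhost:8000/api/emotion-recognition/",
        files={"image": ("test.jpg", fh, "image/jpeg")},
    )

# The view sets Content-Type: image/jpeg; the body holds the annotated image.
with open("result.jpg", "wb") as out:
    out.write(resp.content)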
app/django_app/__init__.py
ADDED
File without changes
app/django_app/settings.py
ADDED
@@ -0,0 +1,147 @@
"""
Django settings for django_app project.

Generated by 'django-admin startproject' using Django 4.0.4.

For more information on this file, see
https://docs.djangoproject.com/en/4.0/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.0/ref/settings/
"""
import os
from pathlib import Path

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/

SECRET_KEY = os.environ.get('SECRET_KEY', 'your-default-secret-key')

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django_extensions',
    'django.contrib.humanize',
    'rest_framework',

    'apps.emotion_recognition',

    'crispy_forms',
    'crispy_bootstrap5',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'django_app.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(BASE_DIR, 'templates')
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'django_app.wsgi.application'


# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': str(os.path.join(BASE_DIR, "db.sqlite3")),
    }
}


# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators

AUTH_USER_MODEL = 'auth.User'

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_TZ = True


CRISPY_ALLOWED_TEMPLATE_PACKS = "bootstrap5"
CRISPY_TEMPLATE_PACK = "bootstrap5"

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/


MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = "/media/"

STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static')
]

# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field

DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
app/django_app/urls.py
ADDED
@@ -0,0 +1,24 @@
"""django_app URL Configuration

The `urlpatterns` list routes URLs to views. For more information please see:
    https://docs.djangoproject.com/en/4.0/topics/http/urls/
Examples:
Function views
    1. Add an import:  from my_app import views
    2. Add a URL to urlpatterns:  path('', views.home, name='home')
Class-based views
    1. Add an import:  from other_app.views import Home
    2. Add a URL to urlpatterns:  path('', Home.as_view(), name='home')
Including another URLconf
    1. Import the include() function: from django.urls import include, path
    2. Add a URL to urlpatterns:  path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static

urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('apps.emotion_recognition.urls', namespace='recognition')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
app/django_app/wsgi.py
ADDED
@@ -0,0 +1,16 @@
"""
WSGI config for django_app project.

It exposes the WSGI callable as a module-level variable named ``application``.

For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/wsgi/
"""

import os

from django.core.wsgi import get_wsgi_application

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_app.settings')

application = get_wsgi_application()
app/manage.py
ADDED
@@ -0,0 +1,22 @@
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys


def main():
    """Run administrative tasks."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_app.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
app/model/haarcascade_frontalface.xml
ADDED
The diff for this file is too large to render.
app/model/model_4layer_2_2_pool.h5
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:731969c7f74aead9b001510a65c0f6febf6853af317266675e34a42eb08b1e81
size 17977016
app/model/model_4layer_2_2_pool.json
ADDED
@@ -0,0 +1 @@
{"class_name": "Sequential", "keras_version": "2.0.8", "config": [{"class_name": "Conv2D", "config": {"kernel_initializer": {"class_name": "VarianceScaling", "config": {"distribution": "uniform", "scale": 1.0, "seed": null, "mode": "fan_avg"}}, "name": "conv2d_1", "kernel_constraint": null, "bias_regularizer": null, "bias_constraint": null, "dtype": "float32", "activation": "linear", "trainable": true, "data_format": "channels_last", "filters": 64, "padding": "same", "strides": [1, 1], "dilation_rate": [1, 1], "kernel_regularizer": null, "bias_initializer": {"class_name": "Zeros", "config": {}}, "batch_input_shape": [null, 48, 48, 1], "use_bias": true, "activity_regularizer": null, "kernel_size": [3, 3]}}, {"class_name": "BatchNormalization", "config": {"beta_constraint": null, "gamma_initializer": {"class_name": "Ones", "config": {}}, "moving_mean_initializer": {"class_name": "Zeros", "config": {}}, "name": "batch_normalization_1", "epsilon": 0.001, "trainable": true, "moving_variance_initializer": {"class_name": "Ones", "config": {}}, "beta_initializer": {"class_name": "Zeros", "config": {}}, "scale": true, "axis": -1, "gamma_constraint": null, "gamma_regularizer": null, "beta_regularizer": null, "momentum": 0.99, "center": true}}, {"class_name": "Activation", "config": {"activation": "relu", "trainable": true, "name": "activation_1"}}, {"class_name": "MaxPooling2D", "config": {"name": "max_pooling2d_1", "trainable": true, "data_format": "channels_last", "pool_size": [2, 2], "padding": "valid", "strides": [2, 2]}}, {"class_name": "Dropout", "config": {"rate": 0.25, "trainable": true, "name": "dropout_1"}}, {"class_name": "Conv2D", "config": {"kernel_constraint": null, "kernel_initializer": {"class_name": "VarianceScaling", "config": {"distribution": "uniform", "scale": 1.0, "seed": null, "mode": "fan_avg"}}, "name": "conv2d_2", "bias_regularizer": null, "bias_constraint": null, "activation": "linear", "trainable": true, "data_format": "channels_last", "padding": "same", "strides": [1, 1], "dilation_rate": [1, 1], "kernel_regularizer": null, "filters": 128, "bias_initializer": {"class_name": "Zeros", "config": {}}, "use_bias": true, "activity_regularizer": null, "kernel_size": [5, 5]}}, {"class_name": "BatchNormalization", "config": {"beta_constraint": null, "gamma_initializer": {"class_name": "Ones", "config": {}}, "moving_mean_initializer": {"class_name": "Zeros", "config": {}}, "name": "batch_normalization_2", "epsilon": 0.001, "trainable": true, "moving_variance_initializer": {"class_name": "Ones", "config": {}}, "beta_initializer": {"class_name": "Zeros", "config": {}}, "scale": true, "axis": -1, "gamma_constraint": null, "gamma_regularizer": null, "beta_regularizer": null, "momentum": 0.99, "center": true}}, {"class_name": "Activation", "config": {"activation": "relu", "trainable": true, "name": "activation_2"}}, {"class_name": "MaxPooling2D", "config": {"name": "max_pooling2d_2", "trainable": true, "data_format": "channels_last", "pool_size": [2, 2], "padding": "valid", "strides": [2, 2]}}, {"class_name": "Dropout", "config": {"rate": 0.25, "trainable": true, "name": "dropout_2"}}, {"class_name": "Conv2D", "config": {"kernel_constraint": null, "kernel_initializer": {"class_name": "VarianceScaling", "config": {"distribution": "uniform", "scale": 1.0, "seed": null, "mode": "fan_avg"}}, "name": "conv2d_3", "bias_regularizer": null, "bias_constraint": null, "activation": "linear", "trainable": true, "data_format": "channels_last", "padding": "same", "strides": [1, 1], "dilation_rate": 
[1, 1], "kernel_regularizer": null, "filters": 512, "bias_initializer": {"class_name": "Zeros", "config": {}}, "use_bias": true, "activity_regularizer": null, "kernel_size": [3, 3]}}, {"class_name": "BatchNormalization", "config": {"beta_constraint": null, "gamma_initializer": {"class_name": "Ones", "config": {}}, "moving_mean_initializer": {"class_name": "Zeros", "config": {}}, "name": "batch_normalization_3", "epsilon": 0.001, "trainable": true, "moving_variance_initializer": {"class_name": "Ones", "config": {}}, "beta_initializer": {"class_name": "Zeros", "config": {}}, "scale": true, "axis": -1, "gamma_constraint": null, "gamma_regularizer": null, "beta_regularizer": null, "momentum": 0.99, "center": true}}, {"class_name": "Activation", "config": {"activation": "relu", "trainable": true, "name": "activation_3"}}, {"class_name": "MaxPooling2D", "config": {"name": "max_pooling2d_3", "trainable": true, "data_format": "channels_last", "pool_size": [2, 2], "padding": "valid", "strides": [2, 2]}}, {"class_name": "Dropout", "config": {"rate": 0.25, "trainable": true, "name": "dropout_3"}}, {"class_name": "Conv2D", "config": {"kernel_constraint": null, "kernel_initializer": {"class_name": "VarianceScaling", "config": {"distribution": "uniform", "scale": 1.0, "seed": null, "mode": "fan_avg"}}, "name": "conv2d_4", "bias_regularizer": null, "bias_constraint": null, "activation": "linear", "trainable": true, "data_format": "channels_last", "padding": "same", "strides": [1, 1], "dilation_rate": [1, 1], "kernel_regularizer": null, "filters": 512, "bias_initializer": {"class_name": "Zeros", "config": {}}, "use_bias": true, "activity_regularizer": null, "kernel_size": [3, 3]}}, {"class_name": "BatchNormalization", "config": {"beta_constraint": null, "gamma_initializer": {"class_name": "Ones", "config": {}}, "moving_mean_initializer": {"class_name": "Zeros", "config": {}}, "name": "batch_normalization_4", "epsilon": 0.001, "trainable": true, "moving_variance_initializer": {"class_name": "Ones", "config": {}}, "beta_initializer": {"class_name": "Zeros", "config": {}}, "scale": true, "axis": -1, "gamma_constraint": null, "gamma_regularizer": null, "beta_regularizer": null, "momentum": 0.99, "center": true}}, {"class_name": "Activation", "config": {"activation": "relu", "trainable": true, "name": "activation_4"}}, {"class_name": "MaxPooling2D", "config": {"name": "max_pooling2d_4", "trainable": true, "data_format": "channels_last", "pool_size": [2, 2], "padding": "valid", "strides": [2, 2]}}, {"class_name": "Dropout", "config": {"rate": 0.25, "trainable": true, "name": "dropout_4"}}, {"class_name": "Flatten", "config": {"trainable": true, "name": "flatten_1"}}, {"class_name": "Dense", "config": {"kernel_initializer": {"class_name": "VarianceScaling", "config": {"distribution": "uniform", "scale": 1.0, "seed": null, "mode": "fan_avg"}}, "name": "dense_1", "kernel_constraint": null, "bias_regularizer": null, "bias_constraint": null, "activation": "linear", "trainable": true, "kernel_regularizer": null, "bias_initializer": {"class_name": "Zeros", "config": {}}, "units": 256, "use_bias": true, "activity_regularizer": null}}, {"class_name": "BatchNormalization", "config": {"beta_constraint": null, "gamma_initializer": {"class_name": "Ones", "config": {}}, "moving_mean_initializer": {"class_name": "Zeros", "config": {}}, "name": "batch_normalization_5", "epsilon": 0.001, "trainable": true, "moving_variance_initializer": {"class_name": "Ones", "config": {}}, "beta_initializer": {"class_name": "Zeros", "config": 
{}}, "scale": true, "axis": -1, "gamma_constraint": null, "gamma_regularizer": null, "beta_regularizer": null, "momentum": 0.99, "center": true}}, {"class_name": "Activation", "config": {"activation": "relu", "trainable": true, "name": "activation_5"}}, {"class_name": "Dropout", "config": {"rate": 0.25, "trainable": true, "name": "dropout_5"}}, {"class_name": "Dense", "config": {"kernel_initializer": {"class_name": "VarianceScaling", "config": {"distribution": "uniform", "scale": 1.0, "seed": null, "mode": "fan_avg"}}, "name": "dense_2", "kernel_constraint": null, "bias_regularizer": null, "bias_constraint": null, "activation": "linear", "trainable": true, "kernel_regularizer": null, "bias_initializer": {"class_name": "Zeros", "config": {}}, "units": 512, "use_bias": true, "activity_regularizer": null}}, {"class_name": "BatchNormalization", "config": {"beta_constraint": null, "gamma_initializer": {"class_name": "Ones", "config": {}}, "moving_mean_initializer": {"class_name": "Zeros", "config": {}}, "name": "batch_normalization_6", "epsilon": 0.001, "trainable": true, "moving_variance_initializer": {"class_name": "Ones", "config": {}}, "beta_initializer": {"class_name": "Zeros", "config": {}}, "scale": true, "axis": -1, "gamma_constraint": null, "gamma_regularizer": null, "beta_regularizer": null, "momentum": 0.99, "center": true}}, {"class_name": "Activation", "config": {"activation": "relu", "trainable": true, "name": "activation_6"}}, {"class_name": "Dropout", "config": {"rate": 0.25, "trainable": true, "name": "dropout_6"}}, {"class_name": "Dense", "config": {"kernel_initializer": {"class_name": "VarianceScaling", "config": {"distribution": "uniform", "scale": 1.0, "seed": null, "mode": "fan_avg"}}, "name": "dense_3", "kernel_constraint": null, "bias_regularizer": null, "bias_constraint": null, "activation": "sigmoid", "trainable": true, "kernel_regularizer": null, "bias_initializer": {"class_name": "Zeros", "config": {}}, "units": 7, "use_bias": true, "activity_regularizer": null}}], "backend": "tensorflow"}
app/requirements.txt
ADDED
@@ -0,0 +1,61 @@
absl-py==1.3.0
asgiref==3.8.1
astunparse==1.6.3
autopep8==2.0.0
cachetools==5.2.0
certifi==2022.9.24
charset-normalizer==2.1.1
crispy-bootstrap5==0.7
Django==5.1.4
django-crispy-forms==1.14.0
django-extensions==3.2.1
djangorestframework==3.15.2
docopt==0.6.2
flatbuffers==22.10.26
gast==0.4.0
google-auth==2.14.1
google-auth-oauthlib==0.4.6
google-pasta==0.2.0
grpcio==1.50.0
h5py==3.7.0
idna==3.4
importlib-metadata==5.0.0
keras==2.11.0
libclang==14.0.6
Markdown==3.4.1
MarkupSafe==2.1.1
numpy==1.23.4
oauthlib==3.2.2
opencv-python==4.6.0.66
opt-einsum==3.3.0
packaging==21.3
Pillow==9.3.0
pipreqs==0.4.11
protobuf==3.19.6
pyasn1==0.4.8
pyasn1-modules==0.2.8
pycodestyle==2.9.1
pyparsing==3.0.9
requests==2.28.1
requests-oauthlib==1.3.1
rsa==4.9
six==1.16.0
sqlparse==0.4.3
tensorboard==2.11.0
tensorboard-data-server==0.6.1
tensorboard-plugin-wit==1.8.1
tensorflow==2.11.0
tensorflow-estimator==2.11.0
tensorflow-intel==2.11.0
tensorflow-io-gcs-filesystem==0.27.0
termcolor==2.1.0
tomli==2.0.1
typing_extensions==4.4.0
tzdata==2022.6
urllib3==1.26.12
Werkzeug==2.2.2
wrapt==1.14.1
yarg==0.1.9
zipp==3.10.0
gunicorn==20.1.0
whitenoise==6.4.0
app/static/css/main.css
ADDED
@@ -0,0 +1,6 @@
@import url('https://fonts.googleapis.com/css2?family=Inter:wght@100;400;500;700;900&display=swap');

* {
    font-family: 'Inter', sans-serif;
}
app/static/images/logo.svg
ADDED
app/static/images/undraw_remotely_2j6y.svg
ADDED
app/templates/base.html
ADDED
@@ -0,0 +1,48 @@
{% load static %}
{% load i18n %}
<!doctype html>
<html lang="en">
<head>
    <!-- Required meta tags -->
    <meta charset="utf-8">
    <meta name="viewport" content="width=device-width, initial-scale=1">
    <link rel="stylesheet" type="text/css" href="{% static 'css/main.css' %}" />
    <!-- Bootstrap CSS -->
    <link href="https://cdn.jsdelivr.net/npm/bootstrap@5.0.2/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-EVSTQN3/azprG1Anm3QDgpJLIm9Nao0Yz1ztcQTwFspd3yD65VohhpuuCOmLASjC" crossorigin="anonymous">

    <title>Welcome!</title>
</head>
<body>
    {% include 'header.html' %}
    {% for message in messages %}
    {% if forloop.last %}
    {% if 'success' in message.tags %}
    <div class="alert alert-primary alert-dissmisible fade show text-center" role="alert">
        <button type="button" class="btn-close" data-bs-dismiss="alert" aria-hidden="true"></button>
        {% blocktrans %} {{ message }} {% endblocktrans %}
    </div>
    {% elif 'error' in message.tags %}
    <div class="alert alert-danger alert-general text-center" role="alert">
        <button type="button" class="btn-close" data-bs-dismiss="alert" aria-hidden="true"></button>
        {% blocktrans %} {{ message }} {% endblocktrans %}
    </div>
    {% endif %}
    {% endif %}
    {% endfor %}

    <div class="container-fluid">
        <div class="row">

            <div class="col-sm p-3 min-vh-100">
                {% block content %}

                {% endblock %}
            </div>
        </div>
    </div>

    <script src="https://cdn.jsdelivr.net/npm/@popperjs/core@2.9.2/dist/umd/popper.min.js" integrity="sha384-IQsoLXl5PILFhosVNubq5LC7Qb9DXgDA9i+tQ8Zj3iwWAwPtgFTxbJ8NT4GN1R8p" crossorigin="anonymous"></script>
    <script src="https://cdn.jsdelivr.net/npm/bootstrap@5.0.2/dist/js/bootstrap.min.js" integrity="sha384-cVKIPhGWiC2Al4u+LWgxfKTRIcfu0JTxR+EQDz/bgldoEyl4H0zUF0QKbrJ0EcQF" crossorigin="anonymous"></script>

</body>
</html>
app/templates/header.html
ADDED
@@ -0,0 +1,23 @@
{% load static %}

<div class="container">
    <header class="d-flex flex-wrap align-items-center justify-content-center justify-content-md-between py-3 mb-4 border-bottom">
        <a href="{% url 'recognition:index' %}" class="d-flex align-items-center col-md-1 mb-2 mb-md-0 ms-3 text-dark text-decoration-none">
            <h1 class="h1">Распознавание эмоции</h1>
        </a>

        {% if request.user.is_authenticated %}
        <div class="d-flex justify-content-around flex-column">
            <a href="{% url 'recognition:index' %}" role='button' class="btn btn-outline-dark mb-md-0">
                На изображении
            </a>

            <a href="{% url 'recognition:real_time' %}" role='button' class="btn btn-outline-dark mt-2 mb-md-0">
                В режиме реального времени
            </a>
        </div>
        {% endif %}
    </header>
</div>