ElenaRyumina committed
Commit b404794 · Parent(s): c0f6432

Update

Files changed:
- app.css +4 -0
- app/app_utils.py +7 -7
- app/model.py +2 -2
- config.toml +1 -0
- result.mp4 +0 -0
app.css
CHANGED

@@ -36,6 +36,10 @@ div.stat {
     height: 286px;
 }
 
+div.settings-wrapper {
+    display: none;
+}
+
 .submit {
     display: inline-block;
     padding: 10px 20px;
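The added rule hides any element that carries the settings-wrapper class. A hedged sketch of how such a wrapper could be attached in a Gradio layout; the column and slider below are hypothetical, and only the class name and the display: none behavior come from this diff:

import gradio as gr

# Hypothetical layout: elem_classes attaches the CSS class, so the new
# `div.settings-wrapper { display: none; }` rule hides the whole column.
with gr.Blocks(css=open("app.css").read()) as demo:
    with gr.Column(elem_classes="settings-wrapper"):
        threshold = gr.Slider(0, 1, value=0.5, label="Threshold")  # hidden control

demo.launch()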
app/app_utils.py
CHANGED

@@ -14,7 +14,7 @@ import cv2
 # Importing necessary components for the Gradio app
 from app.model import pth_model_static, pth_model_dynamic, pth_processing
 from app.face_utils import get_box, display_info
-from app.config import DICT_EMO
+from app.config import DICT_EMO, config_data
 from app.plot import statistics_plot
 
 
@@ -43,9 +43,9 @@ def preprocess_image_and_predict(inp):
         for fl in results.multi_face_landmarks:
             startX, startY, endX, endY = get_box(fl, w, h)
             cur_face = inp[startY:endY, startX:endX]
-            cur_face_n = pth_processing(Image.fromarray(cur_face))
+            cur_face_n = pth_processing(Image.fromarray(cur_face)).to(config_data.DEVICE)
             prediction = (
-                torch.nn.functional.softmax(pth_model_static(cur_face_n), dim=1)
+                torch.nn.functional.softmax(pth_model_static(cur_face_n), dim=1).cpu()
                 .detach()
                 .numpy()[0]
             )
@@ -92,8 +92,8 @@ def preprocess_video_and_predict(video):
                 cur_face = frame_copy[startY:endY, startX: endX]
 
                 if (count_frame-1)%5 == 0:
-                    cur_face_copy = pth_processing(Image.fromarray(cur_face))
-                    features = torch.nn.functional.relu(pth_model_static.extract_features(cur_face_copy)).detach().numpy()
+                    cur_face_copy = pth_processing(Image.fromarray(cur_face)).to(config_data.DEVICE)
+                    features = torch.nn.functional.relu(pth_model_static.extract_features(cur_face_copy)).cpu().detach().numpy()
 
                     if len(lstm_features) == 0:
                         lstm_features = [features]*10
@@ -101,8 +101,8 @@ def preprocess_video_and_predict(video):
                     lstm_features = lstm_features[1:] + [features]
 
                     lstm_f = torch.from_numpy(np.vstack(lstm_features))
-                    lstm_f = torch.unsqueeze(lstm_f, 0)
-                    output = pth_model_dynamic(lstm_f).detach().numpy()
+                    lstm_f = torch.unsqueeze(lstm_f, 0).to(config_data.DEVICE)
+                    output = pth_model_dynamic(lstm_f).cpu().detach().numpy()
                     last_output = output
                 else:
                     if last_output is not None:
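All four hunks apply the same pattern: inputs are moved onto the configured device before inference, and outputs are moved back to host memory before NumPy conversion. A minimal self-contained sketch of that round trip, with a dummy linear layer standing in for pth_model_static and a local DEVICE standing in for config_data.DEVICE:

import torch

DEVICE = "cpu"  # stand-in for config_data.DEVICE; could be "cuda"

model = torch.nn.Linear(4, 2).to(DEVICE)  # dummy stand-in for pth_model_static
x = torch.randn(1, 4).to(DEVICE)          # input follows the model onto DEVICE

probs = (
    torch.nn.functional.softmax(model(x), dim=1)
    .cpu()       # back to host memory first...
    .detach()    # ...then detach from autograd...
    .numpy()[0]  # ...because .numpy() fails on CUDA tensors
)
print(probs)

The .cpu() call is a no-op when DEVICE is "cpu", so the change is harmless under the default config while making GPU inference possible.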
app/model.py
CHANGED

@@ -27,9 +27,9 @@ def load_model(model_url, model_path):
         return None
 
 
-pth_model_static = load_model(config_data.model_static_url, config_data.model_static_path)
+pth_model_static = load_model(config_data.model_static_url, config_data.model_static_path).to(config_data.DEVICE)
 
-pth_model_dynamic = load_model(config_data.model_dynamic_url, config_data.model_dynamic_path)
+pth_model_dynamic = load_model(config_data.model_dynamic_url, config_data.model_dynamic_path).to(config_data.DEVICE)
 
 
 
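Chaining .to(config_data.DEVICE) onto load_model works because Module.to returns the module itself; it does assume load_model never returns None, even though the hunk's context shows a return None failure path. A hedged sketch of the loading step, with an illustrative architecture and a hypothetical checkpoint path:

import torch

def load_model_sketch(model_path: str, device: str) -> torch.nn.Module:
    # Illustrative architecture; the repo actually loads ResNet50/LSTM checkpoints.
    model = torch.nn.Linear(4, 2)
    state = torch.load(model_path, map_location="cpu")  # hypothetical .pth file
    model.load_state_dict(state)
    return model.to(device)  # Module.to returns self, which enables the chaining above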
config.toml
CHANGED

@@ -1,4 +1,5 @@
 APP_VERSION = "0.2.0"
+DEVICE = "cpu"
 
 [model_static]
 url = "https://huggingface.co/ElenaRyumina/face_emotion_recognition/resolve/main/FER_static_ResNet50_AffectNet.pth"
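DEVICE sits at the top level of config.toml, so whatever loader builds config_data only needs to expose it as an attribute. The repo's app/config.py is not part of this commit; a hedged sketch of such a loader, with all names illustrative:

import tomllib  # Python 3.11+; older versions can use the third-party `toml` package
from types import SimpleNamespace

with open("config.toml", "rb") as f:
    raw = tomllib.load(f)

config_data = SimpleNamespace(
    APP_VERSION=raw["APP_VERSION"],
    DEVICE=raw["DEVICE"],  # "cpu" in this commit; "cuda" would target a GPU
)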
result.mp4
CHANGED
Binary files a/result.mp4 and b/result.mp4 differ