paresh95 committed on
Commit
3334bb8
1 Parent(s): e5ce3a7

PS|Added notebooks

notebooks/facial_age_gender.ipynb ADDED
@@ -0,0 +1,342 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import cv2\n",
+ "import dlib\n",
+ "import os\n",
+ "import numpy as np\n",
+ "import pandas as pd\n",
+ "import matplotlib.pyplot as plt"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "'/Users/pareshar/Personal/Github/Facial-feature-detector'"
+ ]
+ },
+ "execution_count": 2,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "current_dir = os.getcwd()\n",
+ "parent_dir = os.path.dirname(current_dir)\n",
+ "os.chdir(parent_dir)\n",
+ "os.getcwd()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# static variables\n",
+ "path_to_images = \"data/images_age_gender/\"\n",
+ "image_files = os.listdir(path_to_images)\n",
+ "face_detector_weights = \"models/face_detection/res10_300x300_ssd_iter_140000.caffemodel\"\n",
+ "face_detector_config = \"models/face_detection/deploy.prototxt.txt\"\n",
+ "age_weights = \"models/face_age/age_net.caffemodel\"\n",
+ "age_config = \"models/face_age/age_deploy.prototxt\"\n",
+ "gender_weights = \"models/face_gender/gender_net.caffemodel\"\n",
+ "gender_config = \"models/face_gender/gender_deploy.prototxt\"\n",
+ "age_list = ['(0-2)', '(4-6)', '(8-12)', '(15-20)', '(25-32)', '(38-43)', '(48-53)', '(60-100)']\n",
+ "gender_list = ['Male', 'Female']\n",
+ "model_mean = (78.4263377603, 87.7689143744, 114.895847746) # taken from paper"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df = pd.DataFrame(columns=[\"file_name\", \"model\", \"confidence_face_detected\", \"age_range\", \"age_confidence\", \"gender\", \"gender_confidence\"])\n",
+ "df_list = []\n",
+ "\n",
+ "\n",
+ "for image_file in image_files:\n",
+ "    image = cv2.imread(path_to_images + image_file)\n",
+ "    h, w = image.shape[:2]\n",
+ "    blob = cv2.dnn.blobFromImage(image=image, scalefactor=1.0, size=(300, 300))\n",
+ "\n",
+ "    face_detector_net = cv2.dnn.readNetFromCaffe(face_detector_config, face_detector_weights)\n",
+ "    face_detector_net.setInput(blob)\n",
+ "    face_detections = face_detector_net.forward()\n",
+ "    age_net = cv2.dnn.readNet(age_weights, age_config)\n",
+ "    gender_net = cv2.dnn.readNet(gender_weights, gender_config)\n",
+ "\n",
+ "    d = None\n",
+ "\n",
+ "    for i in range(0, face_detections.shape[2]):\n",
+ "        confidence = face_detections[0, 0, i, 2]\n",
+ "        if confidence > 0.97:\n",
+ "            box = face_detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n",
+ "            (startX, startY, endX, endY) = box.astype(\"int\")\n",
+ "            face = image[startY:endY, startX:endX]\n",
+ "\n",
+ "            blob = cv2.dnn.blobFromImage(face, 1.0, (227, 227), model_mean, swapRB=False)\n",
+ "\n",
+ "            age_net.setInput(blob)\n",
+ "            age_preds = age_net.forward()\n",
+ "            i = age_preds[0].argmax()\n",
+ "            age = age_list[i]\n",
+ "            age_confidence_score = age_preds[0][i]\n",
+ "\n",
+ "            gender_net.setInput(blob)\n",
+ "            gender_preds = gender_net.forward()\n",
+ "            i = gender_preds[0].argmax()\n",
+ "            gender = gender_list[i]\n",
+ "            gender_confidence_score = gender_preds[0][i]\n",
+ "\n",
+ "            # plt.imshow(face)\n",
+ "            # plt.show()\n",
+ "\n",
+ "            d = {\n",
+ "                \"file_name\": image_file,\n",
+ "                \"model\": \"ageNet\",\n",
+ "                \"confidence_face_detected\": confidence,\n",
+ "                \"age_range\": age,\n",
+ "                \"age_confidence\": age_confidence_score,\n",
+ "                \"gender\": gender,\n",
+ "                \"gender_confidence\": gender_confidence_score\n",
+ "            }\n",
+ "            df_list.append(d)\n",
+ "            break\n",
+ "\n",
+ "    if d is None or image_file != d[\"file_name\"]:\n",
+ "\n",
+ "        d = {\n",
+ "            \"file_name\": image_file,\n",
+ "            \"model\": \"ageNet\",\n",
+ "            \"confidence_face_detected\": confidence,\n",
+ "            \"age_range\": \"NA\",\n",
+ "            \"age_confidence\": \"NA\",\n",
+ "            \"gender\": \"NA\",\n",
+ "            \"gender_confidence\": \"NA\"\n",
+ "        }\n",
+ "\n",
+ "        df_list.append(d)\n",
+ "\n",
+ "df = pd.concat([df, pd.DataFrame(df_list)], ignore_index=True).round(2)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<div>\n",
+ "<style scoped>\n",
+ " .dataframe tbody tr th:only-of-type {\n",
+ " vertical-align: middle;\n",
+ " }\n",
+ "\n",
+ " .dataframe tbody tr th {\n",
+ " vertical-align: top;\n",
+ " }\n",
+ "\n",
+ " .dataframe thead th {\n",
+ " text-align: right;\n",
+ " }\n",
+ "</style>\n",
+ "<table border=\"1\" class=\"dataframe\">\n",
+ " <thead>\n",
+ " <tr style=\"text-align: right;\">\n",
+ " <th></th>\n",
+ " <th>file_name</th>\n",
+ " <th>model</th>\n",
+ " <th>confidence_face_detected</th>\n",
+ " <th>age_range</th>\n",
+ " <th>age_confidence</th>\n",
+ " <th>gender</th>\n",
+ " <th>gender_confidence</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th>1</th>\n",
+ " <td>22_me.jpg</td>\n",
+ " <td>ageNet</td>\n",
+ " <td>0.98</td>\n",
+ " <td>(25-32)</td>\n",
+ " <td>0.67165</td>\n",
+ " <td>Male</td>\n",
+ " <td>1.0</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>3</th>\n",
+ " <td>25_32_woman.jpg</td>\n",
+ " <td>ageNet</td>\n",
+ " <td>1.00</td>\n",
+ " <td>(25-32)</td>\n",
+ " <td>0.859894</td>\n",
+ " <td>Female</td>\n",
+ " <td>0.952863</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>2</th>\n",
+ " <td>38_43_man.jpg</td>\n",
+ " <td>ageNet</td>\n",
+ " <td>1.00</td>\n",
+ " <td>(25-32)</td>\n",
+ " <td>0.681306</td>\n",
+ " <td>Male</td>\n",
+ " <td>0.999431</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>8</th>\n",
+ " <td>38_43_woman.jpg</td>\n",
+ " <td>ageNet</td>\n",
+ " <td>0.99</td>\n",
+ " <td>(48-53)</td>\n",
+ " <td>0.886763</td>\n",
+ " <td>Female</td>\n",
+ " <td>0.998737</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>7</th>\n",
+ " <td>4_6_boy.jpg</td>\n",
+ " <td>ageNet</td>\n",
+ " <td>0.99</td>\n",
+ " <td>(4-6)</td>\n",
+ " <td>0.639939</td>\n",
+ " <td>Male</td>\n",
+ " <td>0.999049</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>4</th>\n",
+ " <td>4_6_girl.jpg</td>\n",
+ " <td>ageNet</td>\n",
+ " <td>0.99</td>\n",
+ " <td>(4-6)</td>\n",
+ " <td>0.319971</td>\n",
+ " <td>Female</td>\n",
+ " <td>0.998801</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>6</th>\n",
+ " <td>60_100_man.jpg</td>\n",
+ " <td>ageNet</td>\n",
+ " <td>0.99</td>\n",
+ " <td>(4-6)</td>\n",
+ " <td>0.548595</td>\n",
+ " <td>Male</td>\n",
+ " <td>0.999973</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>5</th>\n",
+ " <td>60_100_woman.jpg</td>\n",
+ " <td>ageNet</td>\n",
+ " <td>1.00</td>\n",
+ " <td>(60-100)</td>\n",
+ " <td>0.332936</td>\n",
+ " <td>Female</td>\n",
+ " <td>0.984078</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>9</th>\n",
+ " <td>60_100_woman_2.jpg</td>\n",
+ " <td>ageNet</td>\n",
+ " <td>1.00</td>\n",
+ " <td>(38-43)</td>\n",
+ " <td>0.414388</td>\n",
+ " <td>Male</td>\n",
+ " <td>0.518144</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>0</th>\n",
+ " <td>minion.jpg</td>\n",
+ " <td>ageNet</td>\n",
+ " <td>0.00</td>\n",
+ " <td>NA</td>\n",
+ " <td>NA</td>\n",
+ " <td>NA</td>\n",
+ " <td>NA</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n",
+ "</div>"
+ ],
+ "text/plain": [
+ " file_name model confidence_face_detected age_range \\\n",
+ "1 22_me.jpg ageNet 0.98 (25-32) \n",
+ "3 25_32_woman.jpg ageNet 1.00 (25-32) \n",
+ "2 38_43_man.jpg ageNet 1.00 (25-32) \n",
+ "8 38_43_woman.jpg ageNet 0.99 (48-53) \n",
+ "7 4_6_boy.jpg ageNet 0.99 (4-6) \n",
+ "4 4_6_girl.jpg ageNet 0.99 (4-6) \n",
+ "6 60_100_man.jpg ageNet 0.99 (4-6) \n",
+ "5 60_100_woman.jpg ageNet 1.00 (60-100) \n",
+ "9 60_100_woman_2.jpg ageNet 1.00 (38-43) \n",
+ "0 minion.jpg ageNet 0.00 NA \n",
+ "\n",
+ " age_confidence gender gender_confidence \n",
+ "1 0.67165 Male 1.0 \n",
+ "3 0.859894 Female 0.952863 \n",
+ "2 0.681306 Male 0.999431 \n",
+ "8 0.886763 Female 0.998737 \n",
+ "7 0.639939 Male 0.999049 \n",
+ "4 0.319971 Female 0.998801 \n",
+ "6 0.548595 Male 0.999973 \n",
+ "5 0.332936 Female 0.984078 \n",
+ "9 0.414388 Male 0.518144 \n",
+ "0 NA NA NA "
+ ]
+ },
+ "execution_count": 5,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df.sort_values(\"file_name\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Other\n",
+ "- Dataset used to train model: https://talhassner.github.io/home/projects/Adience/Adience-data.html#agegender"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.8.10"
+ },
+ "orig_nbformat": 4
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+ }
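
Note: the inference loop in the notebook above is easier to follow outside the JSON encoding. The sketch below condenses it into a single function, reusing the model paths, label lists, and mean values from the notebook's static-variables cell. The function name `detect_age_gender` and the early-return structure are my own framing, and it assumes the Caffe model files are present at the listed paths.

```python
import cv2
import numpy as np

# Constants copied from the notebook's "static variables" cell.
FACE_WEIGHTS = "models/face_detection/res10_300x300_ssd_iter_140000.caffemodel"
FACE_CONFIG = "models/face_detection/deploy.prototxt.txt"
AGE_WEIGHTS, AGE_CONFIG = "models/face_age/age_net.caffemodel", "models/face_age/age_deploy.prototxt"
GENDER_WEIGHTS, GENDER_CONFIG = "models/face_gender/gender_net.caffemodel", "models/face_gender/gender_deploy.prototxt"
AGE_LIST = ['(0-2)', '(4-6)', '(8-12)', '(15-20)', '(25-32)', '(38-43)', '(48-53)', '(60-100)']
GENDER_LIST = ['Male', 'Female']
MODEL_MEAN = (78.4263377603, 87.7689143744, 114.895847746)  # per-channel mean from the AgeNet paper


def detect_age_gender(image_path, threshold=0.97):
    """Classify age range and gender for the first face detected above `threshold`."""
    image = cv2.imread(image_path)
    h, w = image.shape[:2]

    # SSD face detector: 300x300 blob in, [1, 1, N, 7] detection tensor out.
    face_net = cv2.dnn.readNetFromCaffe(FACE_CONFIG, FACE_WEIGHTS)
    face_net.setInput(cv2.dnn.blobFromImage(image, scalefactor=1.0, size=(300, 300)))
    detections = face_net.forward()

    for i in range(detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        if confidence <= threshold:
            continue
        # Box coordinates are normalised to [0, 1]; scale to pixels and crop.
        x1, y1, x2, y2 = (detections[0, 0, i, 3:7] * np.array([w, h, w, h])).astype("int")
        face = image[y1:y2, x1:x2]

        # AgeNet and GenderNet share the same 227x227 mean-subtracted input blob.
        blob = cv2.dnn.blobFromImage(face, 1.0, (227, 227), MODEL_MEAN, swapRB=False)
        age_net = cv2.dnn.readNet(AGE_WEIGHTS, AGE_CONFIG)
        age_net.setInput(blob)
        age_preds = age_net.forward()[0]
        gender_net = cv2.dnn.readNet(GENDER_WEIGHTS, GENDER_CONFIG)
        gender_net.setInput(blob)
        gender_preds = gender_net.forward()[0]

        return {
            "confidence_face_detected": float(confidence),
            "age_range": AGE_LIST[age_preds.argmax()],
            "age_confidence": float(age_preds.max()),
            "gender": GENDER_LIST[gender_preds.argmax()],
            "gender_confidence": float(gender_preds.max()),
        }
    return None  # no face above the threshold
```

As the cell output above shows, the 0.97 detector-confidence gate decides whether age and gender are predicted at all, which is why minion.jpg comes back with NA in every column.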
notebooks/facial_proportions.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
notebooks/facial_symmetry.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
notebooks/facial_texture.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
notebooks/own-photos-symmetry.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
parameters.yml ADDED
File without changes
requirements.txt CHANGED
@@ -3,4 +3,5 @@ numpy==1.23.5
  scikit-image==0.21.0
  dlib==19.24.2
  imutils==0.5.4
- pillow==9.4.0
+ pillow==9.4.0
+ pyyaml==6.0
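
Note: pyyaml==6.0 is pinned to back the new (for now empty) parameters.yml, in line with the TODO below about pointing the utils at static parameters from a YAML file. A minimal loading sketch; the key name is hypothetical, since the committed file defines nothing yet:

```python
import yaml

# parameters.yml is committed empty; safe_load returns None in that case.
with open("parameters.yml") as f:
    params = yaml.safe_load(f) or {}

# Hypothetical key, with the notebook's hard-coded path as a fallback so this
# works before parameters.yml is actually populated.
face_detector_weights = params.get(
    "face_detector_weights",
    "models/face_detection/res10_300x300_ssd_iter_140000.caffemodel",
)
```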
utils/face_symmetry.py ADDED
@@ -0,0 +1,5 @@
+ #TODO: create YAML file to point towards static parameters
+ #TODO: Test main output and app
+ #TODO: Consider using other method for face detector - this one not as reliable
+ #TODO: Text output showing other examples - celeb, child, gender
+ #TODO: Move notebooks here
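
Note: one TODO above questions the reliability of the current face detector. Since dlib==19.24.2 is already pinned in requirements.txt, its HOG-based frontal face detector would be one candidate to evaluate; this is an illustration of that option, not the approach the repo has committed to:

```python
import cv2
import dlib

# dlib's HOG + linear-SVM frontal face detector; it needs no model file.
detector = dlib.get_frontal_face_detector()


def detect_faces_dlib(image):
    """Return face boxes as (x1, y1, x2, y2), for comparison with the SSD detector."""
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    rects = detector(gray, 1)  # second argument: upsample once to catch smaller faces
    return [(r.left(), r.top(), r.right(), r.bottom()) for r in rects]
```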
utils/face_texture.py CHANGED
@@ -10,19 +10,12 @@ from utils.cv_utils import get_image
  from typing import Tuple
 
 
- #TODO: face texture class - face detector and output face
- #TODO: create YAML file to point towards static parameters
- #TODO: Test main output and app
- #TODO: Consider using other method for face detector - this one not as reliable
- #TODO: Text output showing other examples - celeb, child, gender
-
-
  class GetFaceTexture:
      def __init__(self) -> None:
          pass
 
      def preprocess_image(self, image) -> np.array:
-         image = imutils.resize(image, width=800)
+         image = imutils.resize(image, width=400)
          gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
          return gray_image
 
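
Note: the only behavioural change in this file is the resize width in preprocess_image (800 to 400), halving the image fed to the grayscale conversion and any texture features downstream. A quick usage sketch, with a hypothetical image path:

```python
import cv2
from utils.face_texture import GetFaceTexture

image = cv2.imread("data/images_age_gender/22_me.jpg")  # hypothetical input
gray = GetFaceTexture().preprocess_image(image)
# imutils.resize preserves aspect ratio, so only the width is fixed at 400 now.
print(gray.shape)
```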