paresh95 committed
Commit 54e742e
1 Parent(s): 786ec92

PS|Add gender and age modularised

app.py CHANGED
@@ -1,12 +1,14 @@
 import gradio as gr
 from utils.face_texture import GetFaceTexture
 from utils.face_symmetry import GetFaceSymmetry
+from utils.face_demographics import GetFaceDemographics


 def combined_fn(input_image):
     texture_results = GetFaceTexture().main(input_image)
     symmetry_results = GetFaceSymmetry().main(input_image)
-    return (*texture_results, *symmetry_results)
+    demographics_results = GetFaceDemographics().main(input_image)
+    return (*texture_results, *symmetry_results, demographics_results)


 iface = gr.Interface(
@@ -15,11 +17,11 @@ iface = gr.Interface(
     outputs=[
         gr.outputs.Image(type="pil"),  # From GetFaceTexture
         gr.outputs.Image(type="pil"),  # From GetFaceTexture
-        "text",  # From GetFaceTexture
+        "text",  # From GetFaceTexture
         gr.outputs.Image(type="pil"),  # From GetFaceSymmetry
-        "text"  # From GetFaceSymmetry
-    ]
+        "text",  # From GetFaceSymmetry
+        "text",  # From GetFaceDemographics
+    ],
 )

 iface.launch()
-
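Note: the order of the outputs list must match the order of values returned by combined_fn. A minimal sketch of that mapping; the contents of each module's results are assumptions inferred from this diff, only the counts are implied by the outputs list above:

# Hypothetical illustration of how the six Gradio outputs line up with the
# return tuple of combined_fn; the example values are placeholders.
texture_results = ("texture_image", "face_image", "texture stats")   # image, image, text
symmetry_results = ("symmetry_image", "symmetry stats")              # image, text
demographics_results = {"age_range": "(25-32)", "gender": "Female"}  # rendered by the final "text" output

outputs = (*texture_results, *symmetry_results, demographics_results)
assert len(outputs) == 6  # one value per entry in outputs=[...]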
 
data/{images_symmetry/gigi_hadid.webp → gigi_hadid.webp} RENAMED
File without changes
data/jay_z.jpg ADDED
data/mike_tyson.jpg ADDED
data/rihanna.webp ADDED
parameters.yml CHANGED
@@ -1,3 +1,9 @@
 face_detection:
-  prototxt: "models/face_detection/deploy.prototxt.txt"
+  config: "models/face_detection/deploy.prototxt.txt"
   model: "models/face_detection/res10_300x300_ssd_iter_140000.caffemodel"
+face_age:
+  config: "models/face_age/age_deploy.prototxt"
+  model: "models/face_age/age_net.caffemodel"
+face_gender:
+  config: "models/face_gender/gender_deploy.prototxt"
+  model: "models/face_gender/gender_net.caffemodel"
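Each new block is a Caffe config/model pair. A minimal sketch of how they are consumed, mirroring the loading code in utils/face_demographics.py and assuming the script runs from the repository root with the model files present under models/:

import cv2
import yaml

# Load the parameter file and build the two new networks from the
# config/model pairs added in this commit.
with open("parameters.yml", "r") as stream:
    parameters = yaml.safe_load(stream)

age_net = cv2.dnn.readNet(parameters["face_age"]["config"],
                          parameters["face_age"]["model"])
gender_net = cv2.dnn.readNet(parameters["face_gender"]["config"],
                             parameters["face_gender"]["model"])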
utils/face_demographics.py ADDED
@@ -0,0 +1,63 @@
+import cv2
+import yaml
+import numpy as np
+import os
+from typing import Tuple
+from utils.cv_utils import get_image
+
+
+with open("parameters.yml", "r") as stream:
+    try:
+        parameters = yaml.safe_load(stream)
+    except yaml.YAMLError as exc:
+        print(exc)
+
+
+class GetFaceDemographics:
+    def __init__(self):
+        pass
+
+    @staticmethod
+    def get_age(blob) -> Tuple:
+        age_net = cv2.dnn.readNet(parameters["face_age"]["config"], parameters["face_age"]["model"])
+        age_list = ['(0-2)', '(4-6)', '(8-12)', '(15-20)', '(25-32)', '(38-43)', '(48-53)', '(60-100)']
+        age_net.setInput(blob)
+        age_preds = age_net.forward()
+        i = age_preds[0].argmax()
+        age = age_list[i]
+        age_confidence_score = age_preds[0][i]
+        return age, age_confidence_score
+
+    @staticmethod
+    def get_gender(blob) -> Tuple:
+        gender_net = cv2.dnn.readNet(parameters["face_gender"]["config"], parameters["face_gender"]["model"])
+        gender_list = ['Male', 'Female']
+        gender_net.setInput(blob)
+        gender_preds = gender_net.forward()
+        i = gender_preds[0].argmax()
+        gender = gender_list[i]
+        gender_confidence_score = gender_preds[0][i]
+        return gender, gender_confidence_score
+
+    def main(self, image_input) -> dict:
+        image = get_image(image_input)
+        model_mean = (78.4263377603, 87.7689143744, 114.895847746)  # taken from the model page on Caffe
+        blob = cv2.dnn.blobFromImage(image, 1.0, (227, 227), model_mean, swapRB=False)
+        age, age_confidence_score = self.get_age(blob)
+        gender, gender_confidence_score = self.get_gender(blob)
+        d = {
+            "age_range": age,
+            "age_confidence": age_confidence_score,
+            "gender": gender,
+            "gender_confidence": gender_confidence_score
+        }
+        return d
+
+
+if __name__ == "__main__":
+    path_to_images = "data/"
+    image_files = os.listdir(path_to_images)
+    for image in image_files:
+        print(image)
+        results = GetFaceDemographics().main(path_to_images + image)
+        print(results)
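A quick usage sketch for the new module; the image path is one of the sample files added in this commit, and the dict keys come from main above:

from utils.face_demographics import GetFaceDemographics

# Run the age/gender nets on a single sample image and print the values
# that app.py will display through its "text" output.
results = GetFaceDemographics().main("data/rihanna.webp")
print(results["age_range"], results["age_confidence"])
print(results["gender"], results["gender_confidence"])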
utils/face_symmetry.py CHANGED
@@ -22,13 +22,13 @@ class GetFaceSymmetry:
     def get_faces(self, image: np.array) -> np.array:
         self.h, self.w = image.shape[:2]
         blob = cv2.dnn.blobFromImage(image=image, scalefactor=1.0, size=(300, 300))
-        net = cv2.dnn.readNetFromCaffe(
-            parameters["face_detection"]["prototxt"],
+        face_detector_net = cv2.dnn.readNetFromCaffe(
+            parameters["face_detection"]["config"],
             parameters["face_detection"]["model"],
         )
-        net.setInput(blob)
-        detections = net.forward()
-        return detections
+        face_detector_net.setInput(blob)
+        face_detections = face_detector_net.forward()
+        return face_detections

     @staticmethod
     def postprocess_face(face: np.array) -> np.array:
@@ -105,15 +105,15 @@ class GetFaceSymmetry:
         }
         return d

-    def main(self, image_input):
+    def main(self, image_input) -> Tuple:
         image = get_image(image_input)
-        detections = self.get_faces(image)
+        face_detections = self.get_faces(image)
         lowest_mse = float("inf")
         best_face_data, best_left_half, best_right_half = None, None, None
-        for i in range(0, detections.shape[2]):
-            confidence = detections[0, 0, i, 2]
+        for i in range(0, face_detections.shape[2]):
+            confidence = face_detections[0, 0, i, 2]
             if confidence > 0.99:
-                box = detections[0, 0, i, 3:7] * np.array(
+                box = face_detections[0, 0, i, 3:7] * np.array(
                     [self.w, self.h, self.w, self.h]
                 )
                 (startX, startY, endX, endY) = box.astype("int")
@@ -137,6 +137,6 @@ class GetFaceSymmetry:


 if __name__ == "__main__":
-    image_path = "data/images_symmetry/gigi_hadid.webp"
+    image_path = "data/gigi_hadid.webp"
     results = GetFaceSymmetry().main(image_path)
     print(results)
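For context on the renamed face_detections array: the OpenCV SSD face detector returns a blob of shape (1, 1, N, 7), where index 2 of the last axis is the detection confidence and indices 3:7 are the box corners normalised to [0, 1]. A standalone sketch of the loop above; high_confidence_boxes is a hypothetical helper, not part of this commit, and w/h stand for self.w and self.h:

import numpy as np

def high_confidence_boxes(face_detections: np.ndarray, w: int, h: int, threshold: float = 0.99):
    """Yield pixel-space (startX, startY, endX, endY) boxes for confident detections."""
    for i in range(face_detections.shape[2]):
        confidence = face_detections[0, 0, i, 2]
        if confidence > threshold:
            box = face_detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            yield tuple(box.astype("int"))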
utils/face_texture.py CHANGED
@@ -61,5 +61,5 @@ class GetFaceTexture:


 if __name__ == "__main__":
-    image_path = "data/images_symmetry/gigi_hadid.webp"
+    image_path = "data/gigi_hadid.webp"
     print(GetFaceTexture().main(image_path))