Update app.py
app.py CHANGED
@@ -1,189 +1,274 @@
import gradio as gr
-from PIL import Image
-import
-import
-import

    max_score = 0

    suggestions = {
-        "Oval": {
-        },
-        "Round/Square": { # Simplified for demo, ideally separate
-            "hair": [
-                "Add height on top: pompadour, quiff, faux hawk.",
-                "Avoid blunt bobs ending at the chin or very short, round cuts.",
-                "Layers, textured cuts, and off-center parts can soften features.",
-                "For square faces: Softer styles, waves, or curls can balance a strong jaw."
-            ],
-            "beard": [
-                "For round faces: Beards that add length to the chin: goatee, soul patch, or a beard that's shorter on the sides and longer at the chin.",
-                "For square faces: Styles that soften the jawline. A circle beard or a well-trimmed full beard that rounds the chin."
-            ]
-        },
-        "Long": {
-            "hair": [
-                "Add width: Curls, waves, or layered styles with volume at the sides.",
-                "Avoid excessive height on top.",
-                "Bangs (blunt or side-swept) can shorten the face."
-            ],
-            "beard": [
-                "Styles that add width to the face: fuller on the cheeks, like a full beard or mutton chops.",
-                "Avoid long, pointy beards that further elongate the face."
-            ]
-        },
-        "Heart": {
-            "hair": [
-                "Add volume at the jawline: chin-length bobs, layered shoulder-length cuts.",
-                "Side-swept bangs or a textured fringe can balance a wider forehead.",
-                "Avoid too much height on top."
-            ],
-            "beard": [
-                "Fuller beards that add width to the jawline, like a full beard or a Garibaldi.",
-                "Avoid styles that are too narrow at the chin."
-            ]
-        },
-        # Add Diamond, etc.
    }
    if face_shape in suggestions:
        hair_sug = "\n".join([f"- {s}" for s in suggestions[face_shape]["hair"]])
        beard_sug = "\n".join([f"- {s}" for s in suggestions[face_shape]["beard"]])
        return f"**Haircut Suggestions for {face_shape} Face:**\n{hair_sug}\n\n**Beard Style Suggestions for {face_shape} Face:**\n{beard_sug}"
-    return "Could not determine suggestions for the estimated face shape."

-def analyze_face_and_suggest(
-    if
        return None, "Please upload a front-facing photo.", ""

-    img_pil = Image.fromarray(

-    # 1. Detect Face (
-    cropped_face_pil, error_msg =
    if error_msg:
        return None, error_msg, ""
-    if cropped_face_pil is None:
        return None, "Could not detect a face.", ""

    if side_image_optional is not None:

-    # 2. Estimate Face Shape (the hard part)
-    # For demo, using a mock function. Replace with actual logic.
-    estimated_shape = estimate_face_shape_mock(cropped_face_pil)

-    # 3. Get Suggestions
    suggestions_text = get_hairstyle_suggestions(estimated_shape)

    return cropped_face_pil, f"Estimated Face Shape: **{estimated_shape}**\n{side_info}", suggestions_text

# --- Gradio Interface ---
with gr.Blocks(theme=gr.themes.Soft()) as demo:
-    gr.Markdown("# ✂️ AI Hairstyle & Beard Suggester 🧔")
    gr.Markdown(
-        "Upload a clear, front-facing photo
-        "Optionally, upload a side profile for
-        "\n*Disclaimer: This
    )

    with gr.Row():
        with gr.Column(scale=1):
-            front_image_input = gr.Image(type="numpy", label="Front Face Photo (Required)")
-            side_image_input = gr.Image(type="numpy", label="Side Profile Photo (Optional)")
            submit_btn = gr.Button("Get Suggestions", variant="primary")
        with gr.Column(scale=2):
-            output_image = gr.Image(label="Detected Face")
            output_shape_info = gr.Markdown(label="Face Analysis")
            output_suggestions = gr.Markdown(label="Suggestions")

@@ -192,18 +277,11 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
        inputs=[front_image_input, side_image_input],
        outputs=[output_image, output_shape_info, output_suggestions]
    )

-    gr.Examples(
-        examples=[
-            # Add paths to example images if you have them in your Space
-            # ["path/to/example_front.jpg", "path/to/example_side.jpg"],
-            # ["another_front.png", None],
-        ],
-        inputs=[front_image_input, side_image_input],
-        outputs=[output_image, output_shape_info, output_suggestions],
-        fn=analyze_face_and_suggest,
-        cache_examples=False # or True if your function is deterministic and inputs are fixed
-    )

if __name__ == "__main__":
import gradio as gr
+from PIL import Image, ImageDraw
+import numpy as np
+import torch  # Required by transformers
+from transformers import YolosImageProcessor, YolosForObjectDetection
+import mediapipe as mp
+import math  # For distance calculations
+
+# --- Model Initialization (Load ONCE when the Space starts) ---
+# 1. Face Detection Model (YOLOS)
+print("Loading face detection model...")
+DETECTION_MODEL_NAME = "hustvl/yolos-tiny"  # Smaller model, better for CPU
+try:
+    face_image_processor = YolosImageProcessor.from_pretrained(DETECTION_MODEL_NAME)
+    face_detection_model = YolosForObjectDetection.from_pretrained(DETECTION_MODEL_NAME)
+    print("Face detection model loaded successfully.")
+except Exception as e:
+    print(f"Error loading face detection model: {e}")
+    face_image_processor = None
+    face_detection_model = None
+
+
+# 2. Facial Landmark Model (MediaPipe Face Mesh)
+print("Initializing MediaPipe Face Mesh...")
+try:
+    mp_face_mesh = mp.solutions.face_mesh
+    # static_image_mode=True for processing individual images
+    # max_num_faces=1 as we expect one primary face
+    # min_detection_confidence for robustness
+    face_mesh_detector = mp_face_mesh.FaceMesh(
+        static_image_mode=True,
+        max_num_faces=1,
+        refine_landmarks=True,  # Get more detailed landmarks (e.g., iris)
+        min_detection_confidence=0.5)
+    print("MediaPipe Face Mesh initialized successfully.")
+except Exception as e:
+    print(f"Error initializing MediaPipe Face Mesh: {e}")
+    face_mesh_detector = None
+
+# --- Helper Functions ---
+
+def detect_face_local(image_pil):
+    if not face_image_processor or not face_detection_model:
+        return None, "Face detection model not loaded."
+
+    inputs = face_image_processor(images=image_pil, return_tensors="pt")
+    outputs = face_detection_model(**inputs)
+
+    # Post-process to get bounding boxes
+    # target_sizes expects a tensor of [height, width]
+    target_sizes = torch.tensor([image_pil.size[::-1]])  # PIL size is (width, height)
+    results = face_image_processor.post_process_object_detection(outputs, threshold=0.7, target_sizes=target_sizes)[0]
+
+    best_box = None
    max_score = 0
+    person_label_id = None  # YOLOS typically detects 'person' (label 0 in COCO usually)
+
+    # Find the 'person' class ID if model config is available (or assume it if known)
+    # For general YOLOS, 'person' is often label 0 if trained on COCO.
+    # If your model has specific face labels, adjust this.
+    # For hustvl/yolos-tiny, it's trained on COCO, where "person" is label 0.
+    # Check model.config.id2label if needed
+    # person_label_id = face_detection_model.config.label2id.get("person", 0)  # More robust way
+
+    for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
+        # Assuming 'person' class is the one we want, or if it detects 'face' directly
+        # For YOLOS, it's more likely to detect 'person'. We take the highest score 'person'.
+        # You might need to adjust this if the model has a specific 'face' label
+        if label == 0:  # Assuming label 0 is 'person' for COCO-trained YOLOS
+            if score > max_score:
+                max_score = score
+                best_box = box.tolist()  # [xmin, ymin, xmax, ymax]
+
+    if best_box:
+        cropped_image = image_pil.crop(best_box)
+        return cropped_image, None  # No error message
+    else:
+        return None, "No face/person detected with sufficient confidence."
+
|
80 |
+
def get_landmarks_mediapipe(image_pil):
|
81 |
+
if not face_mesh_detector:
|
82 |
+
return None, "MediaPipe Face Mesh not initialized."
|
83 |
+
|
84 |
+
# MediaPipe expects BGR numpy array
|
85 |
+
image_np = np.array(image_pil.convert('RGB'))
|
86 |
+
image_rgb = image_np[:, :, ::-1].copy() # PIL RGB to CV2 BGR (not strictly needed here as MP handles RGB)
|
87 |
+
# but good practice if using OpenCV functions later
|
88 |
+
image_rgb_mp = np.array(image_pil.convert('RGB')) # MediaPipe prefers RGB
|
89 |
+
|
90 |
+
results = face_mesh_detector.process(image_rgb_mp)
|
91 |
+
|
92 |
+
if results.multi_face_landmarks:
|
93 |
+
return results.multi_face_landmarks[0], None # Return landmarks for the first face
|
94 |
+
else:
|
95 |
+
return None, "Could not detect facial landmarks."
|
96 |
+
|
97 |
+
def _distance(p1, p2):
|
98 |
+
return math.sqrt((p1.x - p2.x)**2 + (p1.y - p2.y)**2 + (p1.z - p2.z)**2)
|
99 |
+
|
100 |
+
def _distance_2d(p1, p2, img_width, img_height):
|
101 |
+
# Convert normalized coordinates to pixel coordinates for more intuitive ratios
|
102 |
+
x1, y1 = p1.x * img_width, p1.y * img_height
|
103 |
+
x2, y2 = p2.x * img_width, p2.y * img_height
|
104 |
+
return math.sqrt((x1 - x2)**2 + (y1 - y2)**2)
|
105 |
+
|
+
+def estimate_face_shape_from_landmarks(landmarks, img_width, img_height):
+    if not landmarks:
+        return "Unknown"
+
+    # Key landmark indices for MediaPipe Face Mesh (468 landmarks total)
+    # These are approximate and might need fine-tuning or using specific contour points
+    # Forehead: e.g., landmark 10 (top of forehead)
+    # Jaw: e.g., landmarks 172, 397 (jaw points), 152 (chin)
+    # Cheekbones: e.g., landmarks 234, 454 (outer cheekbones) or 116, 345 (zygomatic arch)
+    # Face Width: Widest points, often around cheekbones (e.g. 234 to 454)
+    # Face Height: Top of forehead (e.g. 10) to chin (e.g. 152)
+
+    # Example: Use specific points from standard MediaPipe landmark map
+    # (https://github.com/google/mediapipe/blob/master/mediapipe/modules/face_geometry/data/canonical_face_model_uv_visualization.png)
+
+    # Points for measurements (these are just examples, adjust as needed)
+    # These indices are 0-based from the 468 landmarks
+    # Check https://viz.mediapipe.dev/face_mesh_webgl_demo for interactive map

+    # Face Height: Top of Forehead (10) to Chin (152)
+    p_forehead_top = landmarks.landmark[10]
+    p_chin = landmarks.landmark[152]
+    face_height = _distance_2d(p_forehead_top, p_chin, img_width, img_height)
+
+    # Face Width (approx at cheekbones): Left (234) to Right (454)
+    p_cheek_left = landmarks.landmark[234]
+    p_cheek_right = landmarks.landmark[454]
+    face_width_cheek = _distance_2d(p_cheek_left, p_cheek_right, img_width, img_height)

+    # Forehead Width (approx temples): Left (70) to Right (300) - might be too wide, adjust
+    # Or use points like 54 and 284 for a narrower forehead measure
+    p_forehead_left = landmarks.landmark[54]  # More like outer brow
+    p_forehead_right = landmarks.landmark[284]  # More like outer brow
+    forehead_width = _distance_2d(p_forehead_left, p_forehead_right, img_width, img_height)
+
+    # Jawline Width (approx): Point near jaw angle left (172) to right (397)
+    # Or closer to chin base: 143 and 372
+    p_jaw_left = landmarks.landmark[132]  # Lower jaw points
+    p_jaw_right = landmarks.landmark[361]
+    jaw_width = _distance_2d(p_jaw_left, p_jaw_right, img_width, img_height)
+
+    # Simple Heuristics (these are very basic and need refinement/testing)
+    # Ratios are more reliable than absolute values due to image scale
+
+    if face_height == 0 or face_width_cheek == 0: return "Unknown (measurement error)"
+
+    ratio_h_w = face_height / face_width_cheek
+
+    # Print measurements for debugging
+    print(f"H: {face_height:.2f}, W_Cheek: {face_width_cheek:.2f}, W_Forehead: {forehead_width:.2f}, W_Jaw: {jaw_width:.2f}")
+    print(f"Ratio H/W: {ratio_h_w:.2f}")
+    print(f"Forehead/Cheek: {forehead_width/face_width_cheek if face_width_cheek else 0:.2f}")
+    print(f"Jaw/Cheek: {jaw_width/face_width_cheek if face_width_cheek else 0:.2f}")
+
+    # These rules are very basic and a starting point
+    if ratio_h_w > 1.25:  # Significantly longer than wide
+        if forehead_width > jaw_width and jaw_width < face_width_cheek * 0.85:
+            return "Heart/Inverted Triangle"  # Narrow chin
+        return "Long/Oblong"
+    elif ratio_h_w < 0.95:  # Wider than tall or close to it
+        return "Round/Square (Wide)"  # Need more to differentiate round vs square (jaw angle)
+    else:  # Height and width are somewhat proportional (0.95 to 1.25)
+        # Check relative widths of forehead, cheeks, jaw
+        f_w = forehead_width
+        c_w = face_width_cheek
+        j_w = jaw_width
+
+        if abs(f_w - c_w) < c_w * 0.1 and abs(c_w - j_w) < c_w * 0.1 and abs(f_w - j_w) < f_w * 0.1:
+            # All widths are roughly similar
+            return "Square"  # More angular jaw typically
+        elif c_w > f_w and c_w > j_w:
+            return "Diamond"  # Widest at cheeks
+        elif f_w > c_w * 0.9 and f_w > j_w and j_w < c_w * 0.9:  # Forehead prominent, jaw narrower
+            return "Heart"
+        elif f_w < c_w and j_w < c_w and abs(f_w - j_w) < f_w * 0.15:  # Forehead and jaw narrower than cheeks, but similar to each other
+            return "Oval"  # Often considered ideal, balanced
+        else:  # Default or fallback
+            return "Oval/Round"  # Difficult to distinguish without more rules
+
+    return "Oval (Default)"  # Fallback
+
+
|
189 |
+
def get_hairstyle_suggestions(face_shape, gender="neutral"):
|
190 |
+
# (Same suggestion dictionary as before - keep it for brevity)
|
191 |
suggestions = {
|
192 |
+
"Oval": {"hair": ["Most hairstyles work well.", "Layers or sleek bob."], "beard": ["Most beard styles.", "Classic full beard."]},
|
193 |
+
"Oval (Default)": {"hair": ["Try versatile styles like layers or a textured crop.", "Side parts can be flattering."], "beard": ["A well-groomed stubble or a short boxed beard often works."]},
|
194 |
+
"Long/Oblong": {"hair": ["Add width: Curls, waves, layers with side volume.", "Bangs can shorten face."], "beard": ["Fuller on cheeks: full beard, mutton chops."]},
|
195 |
+
"Heart": {"hair": ["Add jawline volume: chin-length bobs, layered shoulder cuts.", "Side-swept bangs."], "beard": ["Fuller beards to add jaw width: Garibaldi."]},
|
196 |
+
"Heart/Inverted Triangle": {"hair": ["Add jawline volume: chin-length bobs, layered shoulder cuts.", "Side-swept bangs for forehead."], "beard": ["Fuller beards to add jaw width: Garibaldi, full beard carefully shaped."]},
|
197 |
+
"Square": {"hair": ["Softer styles, waves, curls. Texture to soften angles.", "Avoid sharp, geometric cuts."], "beard": ["Circle beard, rounded full beard."]},
|
198 |
+
"Round/Square (Wide)": {"hair": ["Add height: pompadour, quiff. Layers, off-center parts.", "Avoid blunt bobs at chin."], "beard": ["For round: goatee, beard longer at chin. For square: soften jaw with rounded styles."]},
|
199 |
+
"Diamond": {"hair": ["Soften forehead & jaw: chin bobs, shoulder length with layers.", "Side-swept fringe."], "beard": ["Fuller at chin, possibly some width at jaw but not cheeks: Balbo, shorter full beard."]},
|
200 |
+
"Oval/Round": {"hair": ["Versatile. Add slight height or soft layers.", "Avoid overly round styles if aiming to balance roundness."], "beard": ["Many styles work. A neatly trimmed beard or a Van Dyke can be good."]},
|
201 |
+
"Unknown": {"hair": ["Upload a clearer image for analysis."], "beard": ["Upload a clearer image for analysis."]},
|
202 |
+
"Unknown (measurement error)": {"hair": ["Could not reliably measure face. Try a different pose or lighting."], "beard": ["Could not reliably measure face. Try a different pose or lighting."]},
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
203 |
}
|
204 |
if face_shape in suggestions:
|
205 |
hair_sug = "\n".join([f"- {s}" for s in suggestions[face_shape]["hair"]])
|
206 |
beard_sug = "\n".join([f"- {s}" for s in suggestions[face_shape]["beard"]])
|
207 |
return f"**Haircut Suggestions for {face_shape} Face:**\n{hair_sug}\n\n**Beard Style Suggestions for {face_shape} Face:**\n{beard_sug}"
|
208 |
+
return f"Could not determine suggestions for the estimated face shape: {face_shape}."
|
209 |
+
|

+def analyze_face_and_suggest(front_image_input, side_image_optional):
+    if front_image_input is None:
        return None, "Please upload a front-facing photo.", ""

+    # Gradio Image input is a NumPy array
+    img_pil = Image.fromarray(front_image_input).convert("RGB")  # Ensure RGB

+    # 1. Detect Face (Local YOLOS model)
+    cropped_face_pil, error_msg = detect_face_local(img_pil)
    if error_msg:
        return None, error_msg, ""
+    if cropped_face_pil is None:
        return None, "Could not detect a face.", ""

+    # 2. Get Facial Landmarks (MediaPipe)
+    landmarks, error_msg_lm = get_landmarks_mediapipe(cropped_face_pil)
+    if error_msg_lm:
+        # If landmarks fail, still show cropped face but indicate no shape analysis
+        return cropped_face_pil, f"Face detected. Error getting landmarks: {error_msg_lm}", "Cannot suggest hairstyles without landmark analysis."
+
+    # For drawing landmarks (optional visualization)
+    # cropped_face_with_landmarks_pil = cropped_face_pil.copy()
+    # draw = ImageDraw.Draw(cropped_face_with_landmarks_pil)
+    # for landmark in landmarks.landmark:
+    #     x = int(landmark.x * cropped_face_pil.width)
+    #     y = int(landmark.y * cropped_face_pil.height)
+    #     draw.ellipse((x-1, y-1, x+1, y+1), fill='red')
+
+
+    # 3. Estimate Face Shape from Landmarks
+    img_w, img_h = cropped_face_pil.size
+    estimated_shape = estimate_face_shape_from_landmarks(landmarks, img_w, img_h)
+
+    # --- Side profile (acknowledgement, no processing yet) ---
+    side_info = "Side profile not uploaded or not yet processed."
    if side_image_optional is not None:
+        side_info = "Side profile uploaded (analysis can be enhanced in future versions to refine shape)."
+        # Potentially run detection + landmarks on side_image_optional here too
+        # And combine information for a more robust `estimated_shape`
+
+    # 4. Get Suggestions
    suggestions_text = get_hairstyle_suggestions(estimated_shape)

    return cropped_face_pil, f"Estimated Face Shape: **{estimated_shape}**\n{side_info}", suggestions_text

# --- Gradio Interface ---
with gr.Blocks(theme=gr.themes.Soft()) as demo:
+    gr.Markdown("# ✂️ AI Hairstyle & Beard Suggester (Local Models) 🧔")
    gr.Markdown(
+        "Upload a clear, front-facing photo. "
+        "Optionally, upload a side profile (currently not used for analysis but can be added)."
+        "\n*Disclaimer: This app uses local AI models for face detection and landmark-based shape estimation. Suggestions are based on general heuristics and may not be perfect.*"
    )

    with gr.Row():
        with gr.Column(scale=1):
+            front_image_input = gr.Image(type="numpy", label="Front Face Photo (Required)", sources=["upload", "webcam"])
+            side_image_input = gr.Image(type="numpy", label="Side Profile Photo (Optional)", sources=["upload", "webcam"])
            submit_btn = gr.Button("Get Suggestions", variant="primary")
        with gr.Column(scale=2):
+            output_image = gr.Image(label="Detected Face (or Cropped with Landmarks)")
            output_shape_info = gr.Markdown(label="Face Analysis")
            output_suggestions = gr.Markdown(label="Suggestions")

        inputs=[front_image_input, side_image_input],
        outputs=[output_image, output_shape_info, output_suggestions]
    )
+    gr.Markdown("--- \n ### Note on Face Shape Estimation: \n The face shape estimation is based on ratios of distances between facial landmarks. The categories (Oval, Round, Square, etc.) and the rules to classify them are simplified. For more accurate results, a dedicated face shape classification model or more complex geometric analysis would be needed. The landmark points used are: \n - **Height:** Top of Forehead (MP Landmark 10) to Chin (MP 152) \n - **Cheek Width:** Left Cheek (MP 234) to Right Cheek (MP 454) \n - **Forehead Width:** Left Outer Brow (MP 54) to Right Outer Brow (MP 284) \n - **Jaw Width:** Left Lower Jaw (MP 132) to Right Lower Jaw (MP 361)")


if __name__ == "__main__":
+    if face_detection_model and face_mesh_detector:  # Only launch if models loaded
+        demo.launch()
+    else:
+        print("Gradio app not launched due to model loading errors.")
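
For reference, a minimal sketch of how the pipeline added in this commit (detection, then landmarks, then shape estimate, then suggestions) could be exercised outside the Gradio UI. It assumes the new file is saved as app.py with its dependencies installed; "test_front.jpg" is a hypothetical example image path, not part of the commit.

# Illustrative only (not part of the commit): run the new pipeline on a local image.
# "test_front.jpg" is a hypothetical example path.
from PIL import Image

from app import (
    detect_face_local,
    get_landmarks_mediapipe,
    estimate_face_shape_from_landmarks,
    get_hairstyle_suggestions,
)

img = Image.open("test_front.jpg").convert("RGB")

face, err = detect_face_local(img)  # 1. crop the highest-confidence person/face box
if err or face is None:
    raise SystemExit(err or "No face detected.")

landmarks, err = get_landmarks_mediapipe(face)  # 2. MediaPipe Face Mesh landmarks
if err:
    raise SystemExit(err)

# 3. ratio-based heuristic over landmark distances; face.size is (width, height)
shape = estimate_face_shape_from_landmarks(landmarks, *face.size)
print(f"Estimated shape: {shape}")
print(get_hairstyle_suggestions(shape))  # 4. Markdown-formatted suggestion text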