app.py create
app.py ADDED
@@ -0,0 +1,209 @@
import gradio as gr
from PIL import Image
import requests
import io
import os

# --- Configuration ---
# If using the Hugging Face Inference API for face detection
HF_API_TOKEN = os.environ.get("HF_API_TOKEN")  # Set this in your Space secrets
DETECTION_MODEL_URL = "https://api-inference.huggingface.co/models/facebook/detr-resnet-50"  # Example

# --- Placeholder for Face Shape Classification ---
# This is the most challenging part to do with a generic free model.
# Option 1: Find a face shape classification model on the HF Hub (ideal, but rare)
# Option 2: Implement landmark detection + rule-based shape classification (complex)
# Option 3: Use a very simple heuristic (e.g., the aspect ratio of the bounding box - very unreliable)
# Option 4 (simplest for a demo): Let the user *select* their face shape, or use a mock function

def detect_face_api(image_pil):
    if not HF_API_TOKEN:
        raise gr.Error("Hugging Face API Token not set in Space secrets!")

    # Convert the PIL Image to bytes
    img_byte_arr = io.BytesIO()
    image_pil.save(img_byte_arr, format='PNG')
    img_byte_arr = img_byte_arr.getvalue()

    headers = {"Authorization": f"Bearer {HF_API_TOKEN}"}
    try:
        response = requests.post(DETECTION_MODEL_URL, headers=headers, data=img_byte_arr)
        response.raise_for_status()
        outputs = response.json()
    except requests.exceptions.RequestException as e:
        print(f"API Error: {e}")
        print(f"Response content: {response.content if 'response' in locals() else 'No response'}")
        return None, "Error calling detection API."
    except Exception as e:
        print(f"Other error during detection: {e}")
        return None, f"Error processing image: {str(e)}"

    # Process the outputs to find a 'face' or 'person' bounding box.
    # This depends heavily on the model used.
    # detr-resnet-50 returns a list of objects with 'label', 'score', and 'box'.
    best_face_box = None
    max_score = 0

    # Prefer a "face" detection if the model provides one, otherwise fall back to "person"
    for obj_type in ["face", "person"]:  # Some models label "face" directly
        for obj in outputs:
            if obj['label'].lower() == obj_type and obj['score'] > max_score:
                max_score = obj['score']
                best_face_box = obj['box']
        if best_face_box:
            break  # Stop once the preferred label has a match

    if best_face_box:
        # Crop the image to the detected face; box is {xmin, ymin, xmax, ymax}
        cropped_image = image_pil.crop((best_face_box['xmin'], best_face_box['ymin'],
                                        best_face_box['xmax'], best_face_box['ymax']))
        return cropped_image, None
    else:
        return None, "No face detected with sufficient confidence."

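# --- Optional local alternative (sketch only, not wired into the app) ---
# If you prefer not to depend on the Inference API, a local detector could crop the
# face instead. The sketch below is a rough illustration assuming the opencv-python
# package is installed; the function detect_face_local is not part of the app above.
# import cv2
# import numpy as np
#
# def detect_face_local(image_pil):
#     cascade = cv2.CascadeClassifier(
#         cv2.data.haarcascades + "haarcascade_frontalface_default.xml"
#     )
#     gray = cv2.cvtColor(np.array(image_pil.convert("RGB")), cv2.COLOR_RGB2GRAY)
#     faces = cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
#     if len(faces) == 0:
#         return None, "No face detected."
#     x, y, w, h = max(faces, key=lambda f: f[2] * f[3])  # keep the largest box
#     return image_pil.crop((x, y, x + w, y + h)), None
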
def estimate_face_shape_mock(face_image_pil):
    """
    MOCK FUNCTION: In a real app, this would use a model or complex heuristics.
    For now, let's pretend it analyzes the face and returns a shape.
    You could also add a dropdown for the user to select their perceived face shape
    if AI classification is too hard to implement initially.
    """
    # Super simple heuristic (aspect ratio) - VERY UNRELIABLE, just for the demo
    width, height = face_image_pil.size
    aspect_ratio = height / width
    if aspect_ratio > 1.25:
        return "Long"
    elif 0.9 < aspect_ratio < 1.1:
        return "Round/Square"  # Needs more information to differentiate
    else:
        return "Oval"
    # In reality, this needs a dedicated model or landmark-based analysis.
    # return "Oval"  # Or just return a default for now

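# --- Sketch of a landmark-based estimator (illustrative, not used above) ---
# A more principled estimate measures facial proportions from landmarks. This rough
# sketch assumes the mediapipe package; the landmark indices (10 = forehead, 152 = chin,
# 234/454 = cheek extremes) and the ratio thresholds are assumptions, not tuned values.
# import mediapipe as mp
# import numpy as np
#
# def estimate_face_shape_landmarks(face_image_pil):
#     with mp.solutions.face_mesh.FaceMesh(static_image_mode=True, max_num_faces=1) as mesh:
#         results = mesh.process(np.array(face_image_pil.convert("RGB")))
#     if not results.multi_face_landmarks:
#         return None
#     lm = results.multi_face_landmarks[0].landmark
#     # .x / .y are normalized to image size, so this ratio is only a rough proxy
#     face_length = abs(lm[152].y - lm[10].y)   # forehead to chin
#     face_width = abs(lm[454].x - lm[234].x)   # cheek to cheek
#     ratio = face_length / face_width
#     return "Long" if ratio > 1.5 else ("Oval" if ratio > 1.3 else "Round/Square")
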
def get_hairstyle_suggestions(face_shape, gender="neutral"):  # Gender could be another input
    suggestions = {
        "Oval": {
            "hair": [
                "Most hairstyles work well. Lucky you!",
                "Consider layers for volume or a sleek bob.",
                "Side-swept bangs can be flattering."
            ],
            "beard": [
                "Most beard styles are suitable.",
                "A classic full beard, well-groomed, is excellent.",
                "Consider a short boxed beard or a Van Dyke."
            ]
        },
        "Round/Square": {  # Simplified for the demo; ideally these would be separate
            "hair": [
                "Add height on top: pompadour, quiff, faux hawk.",
                "Avoid blunt bobs ending at the chin or very short, round cuts.",
                "Layers, textured cuts, and off-center parts can soften features.",
                "For square faces: softer styles, waves, or curls can balance a strong jaw."
            ],
            "beard": [
                "For round faces: beards that add length to the chin: a goatee, soul patch, or a beard that's shorter on the sides and longer at the chin.",
                "For square faces: styles that soften the jawline, such as a circle beard or a well-trimmed full beard that rounds the chin."
            ]
        },
        "Long": {
            "hair": [
                "Add width: curls, waves, or layered styles with volume at the sides.",
                "Avoid excessive height on top.",
                "Bangs (blunt or side-swept) can shorten the face."
            ],
            "beard": [
                "Styles that add width to the face: fuller on the cheeks, like a full beard or mutton chops.",
                "Avoid long, pointy beards that further elongate the face."
            ]
        },
        "Heart": {
            "hair": [
                "Add volume at the jawline: chin-length bobs, layered shoulder-length cuts.",
                "Side-swept bangs or a textured fringe can balance a wider forehead.",
                "Avoid too much height on top."
            ],
            "beard": [
                "Fuller beards that add width to the jawline, like a full beard or a Garibaldi.",
                "Avoid styles that are too narrow at the chin."
            ]
        },
        # Add Diamond, etc.
    }
    if face_shape in suggestions:
        hair_sug = "\n".join([f"- {s}" for s in suggestions[face_shape]["hair"]])
        beard_sug = "\n".join([f"- {s}" for s in suggestions[face_shape]["beard"]])
        return (f"**Haircut Suggestions for {face_shape} Face:**\n{hair_sug}\n\n"
                f"**Beard Style Suggestions for {face_shape} Face:**\n{beard_sug}")
    return "Could not determine suggestions for the estimated face shape."

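# Example (illustrative): get_hairstyle_suggestions("Long") returns a single markdown
# string with "- " bullet lists for both the haircut and beard suggestions, which the
# UI below renders in a gr.Markdown component.
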
def analyze_face_and_suggest(front_image, side_image_optional):
    if front_image is None:
        return None, "Please upload a front-facing photo.", ""

    # Convert the Gradio image (numpy array) to a PIL Image
    img_pil = Image.fromarray(front_image)

    # 1. Detect the face (using the API or a local model)
    cropped_face_pil, error_msg = detect_face_api(img_pil)  # Using the API
    if error_msg:
        return None, error_msg, ""
    if cropped_face_pil is None:  # Should be caught by error_msg, but kept as a fallback
        return None, "Could not detect a face.", ""

    # --- Placeholder for processing side_image_optional ---
    # If side_image_optional is provided, you could:
    # - Run face detection on it too.
    # - If using landmarks, try to get landmarks from both images.
    # - Use it to refine the face shape estimation (e.g., confirm the jawline).
    # For now, we just acknowledge it.
    side_info = "Side profile not uploaded."
    if side_image_optional is not None:
        side_img_pil = Image.fromarray(side_image_optional)
        # You could try to detect the face/landmarks on side_img_pil here
        side_info = "Side profile uploaded (analysis can be enhanced in future versions)."
        # For a more advanced system, you'd combine info from the front and side views.

    # 2. Estimate the face shape (the hard part)
    # For the demo, this uses a mock function. Replace with actual logic.
    estimated_shape = estimate_face_shape_mock(cropped_face_pil)

    # 3. Get suggestions
    suggestions_text = get_hairstyle_suggestions(estimated_shape)

    return cropped_face_pil, f"Estimated Face Shape: **{estimated_shape}**\n\n{side_info}", suggestions_text

# --- Gradio Interface ---
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# ✂️ AI Hairstyle & Beard Suggester 🧔")
    gr.Markdown(
        "Upload a clear, front-facing photo of a person. "
        "Optionally, upload a side profile for potentially better results (future enhancement)."
        "\n\n*Disclaimer: This is a demo using simplified logic for face shape estimation. Suggestions are general.*"
    )

    with gr.Row():
        with gr.Column(scale=1):
            front_image_input = gr.Image(type="numpy", label="Front Face Photo (Required)")
            side_image_input = gr.Image(type="numpy", label="Side Profile Photo (Optional)")
            submit_btn = gr.Button("Get Suggestions", variant="primary")
        with gr.Column(scale=2):
            output_image = gr.Image(label="Detected Face")
            output_shape_info = gr.Markdown(label="Face Analysis")
            output_suggestions = gr.Markdown(label="Suggestions")

    submit_btn.click(
        analyze_face_and_suggest,
        inputs=[front_image_input, side_image_input],
        outputs=[output_image, output_shape_info, output_suggestions]
    )

    gr.Examples(
        examples=[
            # Add paths to example images if you have them in your Space, e.g.:
            # ["path/to/example_front.jpg", "path/to/example_side.jpg"],
            # ["another_front.png", None],
        ],
        inputs=[front_image_input, side_image_input],
        outputs=[output_image, output_shape_info, output_suggestions],
        fn=analyze_face_and_suggest,
        cache_examples=False  # or True if your function is deterministic and the inputs are fixed
    )

if __name__ == "__main__":
    demo.launch()
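
# Note (rough guide, not part of this file): a Space running this app would also need
# a requirements.txt listing its dependencies, for example:
#   gradio
#   Pillow
#   requests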