Commit 204c205
Parent(s): 20fe935
Update app.py

app.py CHANGED
@@ -1,8 +1,9 @@
 from deepface import DeepFace
 import gradio as gr
-from PIL import Image
+from PIL import Image, ImageColor
 import cv2
 import numpy as np
+
 import math

 FONT_SCALE = 8e-4 # Adjust for larger font size in all images
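Note: the ImageColor import added above backs the color handling in the new facial_makeup function further down. gr.ColorPicker hands the handler a hex string, and ImageColor.getcolor turns it into an RGB tuple. A minimal illustration (the hex value is an arbitrary example, not one from the Space):

from PIL import ImageColor

# "#ff0000" is an arbitrary sample; ColorPicker output has this "#rrggbb" form.
rgb = ImageColor.getcolor("#ff0000", "RGB")
print(rgb)  # (255, 0, 0)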
@@ -19,6 +20,7 @@ recognition_model = ["VGG-Face", "Facenet", "OpenFace", "DeepFace", "DeepID", "A
 facial_recognition_example=[['./images/blackpink.jpg', './images/jennie.jpg'], ['./images/blackpink.jpg', './images/lisa.jpg'],\
                             ['./images/blackpink.jpg', './images/jisoo.jpg'], ['./images/blackpink.jpg', './images/rose.jpg']]
 facial_analysis_example=[['./images/jennie.jpg'], ['./images/lisa.jpg'], ['./images/jisoo.jpg'], ['./images/rose.jpg'], ['./images/midu.jpg']]
+facial_makeup_example=[['./images/jennie.jpg'], ['./images/lisa.jpg'], ['./images/jisoo.jpg'], ['./images/rose.jpg'], ['./images/midu.jpg']]

 def facial_recognition(img1, img2, metric, detection, recognition):
     output = "One of the two photos does not have face."
@@ -77,6 +79,25 @@ def facial_analysis(img, detection):

     return Image.fromarray(img)

+table = {
+    'hair': 17,
+    'upper_lip': 12,
+    'lower_lip': 13
+}
+def facial_makeup(img_path,hair_color,lips_color):
+    hair_rgb = ImageColor.getcolor(hair_color, "RGB")
+    lips_rgb = ImageColor.getcolor(lips_color, "RGB")
+    image = cv2.imread(img_path)
+    parsing = evaluate(img_path)
+    parsing = cv2.resize(parsing, (image.shape[1], image.shape[0]), interpolation=cv2.INTER_NEAREST)
+    parts = [table['hair'], table['upper_lip'], table['lower_lip']]
+
+    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+    colors = [list(hair_rgb), list(lips_rgb), list(lips_rgb)]
+    for part, color in zip(parts, colors):
+        image = hair(image, parsing, part, color)
+    return Image.fromarray(image)
+
 def main():
     demo = gr.Blocks()
     with demo:
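Note: facial_makeup depends on two helpers that are not part of this diff: evaluate, which presumably runs a face-parsing model on img_path and returns a per-pixel label map (with hair = 17 and the lips = 12/13, per the table above), and hair, which recolors one labeled region. As a rough sketch of what such a hair helper could look like, assuming parsing is an integer label map resized to the image's shape (the real implementation lives elsewhere in this Space):

import cv2
import numpy as np

def hair(image, parsing, part, color):
    # Hypothetical sketch: shift the hue/saturation of the region labeled
    # `part` toward `color`, keep its brightness, leave other pixels alone.
    tar_color = np.zeros_like(image)
    tar_color[:, :] = color                            # solid image of the target color
    image_hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
    tar_hsv = cv2.cvtColor(tar_color, cv2.COLOR_RGB2HSV)
    image_hsv[:, :, 0:2] = tar_hsv[:, :, 0:2]          # adopt target hue and saturation
    changed = cv2.cvtColor(image_hsv, cv2.COLOR_HSV2RGB)
    changed[parsing != part] = image[parsing != part]  # restore the unmasked region
    return changed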
@@ -111,9 +132,22 @@ def main():
            with gr.Row():
                Analysis_example_images = gr.Examples(examples=facial_analysis_example,inputs=[Analysis_inputs_image])
            analysis_but = gr.Button("Analysis")
+
+        with gr.TabItem('Facial MakeUp'):
+            with gr.Row():
+                with gr.Column():
+                    MakeUp_inputs_image = gr.Image(label='Image',type='filepath',interactive=True)
+                    MakeUp_inputs_hair = gr.ColorPicker(label="hair_color")
+                    MakeUp_inputs_lips = gr.ColorPicker(label="lips_color")
+                with gr.Column():
+                    MakeUp_outputs_image = gr.Image(type="pil", label="Output Image")
+            with gr.Row():
+                MakeUp_example_images = gr.Examples(examples=facial_makeup_example,inputs=[MakeUp_inputs_image])
+            makeup_but = gr.Button("MakeUp")
        verify_but.click(facial_recognition,inputs=[Recognition_inputs_image1,Recognition_inputs_image2,inputs_metric,inputs_detection,inputs_recognition],\
                         outputs=[Recognition_outputs_image,Recognition_outputs_text],queue=True)
        analysis_but.click(facial_analysis,inputs=[Analysis_inputs_image,inputs_detection],outputs=[Analysis_outputs_image],queue=True)
+        makeup_but.click(facial_makeup,inputs=[MakeUp_inputs_image,MakeUp_inputs_hair,MakeUp_inputs_lips],outputs=[MakeUp_outputs_image],queue=True)

        demo.launch(debug=True,enable_queue=True,server_name="0.0.0.0")
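Note on the wiring: MakeUp_inputs_image is declared with type='filepath', so facial_makeup receives a path string rather than an array, matching its cv2.imread call; the two ColorPickers pass "#rrggbb" hex strings, which ImageColor.getcolor decodes. A quick way to exercise the handler outside the UI (the color values here are arbitrary samples, not ones from the Space):

# Run inside app.py after the definitions above; colors are arbitrary samples.
preview = facial_makeup('./images/jennie.jpg', '#8b4513', '#c21807')
preview.save('makeup_preview.png')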