import json
import os
import gradio as gr
from gradio_client import Client
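
# Assumed setup (not pinned in this file): the `gradio` and `gradio_client` packages,
# an `example/` folder containing the sample images referenced below, and a `token`
# environment variable holding a Hugging Face token that can access the backend Space.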
os.environ['CUDA_LAUNCH_BLOCKING'] = '0'
G = ['Female', 'Male']
token = os.environ['token']  # Hugging Face access token read from the environment
client = Client("https://vtechai-face-demo.hf.space/", hf_token=token)

def analyze(im, conf=0.65, area="Asia", f_age=False, f_gender=False, f_emotion=False):
    """Run face analysis on the remote Space (fn_index=1); return the annotated image path and parsed JSON."""
    im_path, jj = client.predict(im, conf, area, f_age, f_gender, f_emotion, fn_index=1)
    with open(jj) as f:  # client.predict returns the JSON result as a temp file path
        jss = json.load(f)
    return im_path, jss

def face_search(im1, im2, threshold):
    """Compare two faces on the remote Space (fn_index=3); return both annotated images and a match verdict."""
    im3, im4, te = client.predict(im1, im2, threshold, fn_index=3)
    return im3, im4, te
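
# A minimal local sanity check for the wrappers above (assumes the sample images exist
# and the remote Space is reachable with the configured token):
#   annotated, meta = analyze("example/ronaldo.jpg", conf=0.7, f_age=True, f_gender=True)
#   reg_img, query_img, verdict = face_search("example/ronaldo.jpg", "example/face2.jpg", 0.5)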

with gr.Blocks() as face_compare:
    # Face-comparison tab: a registered image and a search image, matched above a threshold.
    with gr.Row():
        im1 = gr.Image(value="example/ronaldo.jpg", label='Register', type='filepath', height=300)
        im2 = gr.Image(value="example/face2.jpg", label='Image for Search', type='filepath', height=300)
    with gr.Row():
        im3 = gr.Image(label='Output', height=300)
        im4 = gr.Image(label='Output', height=300)
    sl = gr.Slider(0.3, 1, step=0.05, value=0.5, label='Face Matching Threshold')
    text = gr.Text(label="Output", interactive=False)
    with gr.Row():
        btn = gr.Button(value="Run")
        btn_clean = gr.ClearButton([im1, im2, im3, im4])
    btn.click(fn=face_search, inputs=[im1, im2, sl], outputs=[im3, im4, text])
    # btn2 = gr.Button(value="Check", link="https://manhduy160396.wixsite.com/vtech")
    # Example image pair
    gr.Examples(
        examples=[[
            os.path.join(os.path.dirname(__file__), "example/ronaldo.jpg"),
            os.path.join(os.path.dirname(__file__), "example/face2.jpg"),
        ]],
        inputs=[im1, im2],
    )

with gr.Blocks() as face_analyze:
    # Face-analysis tab: upload an image, get an annotated image plus JSON attributes.
    with gr.Row():
        im1 = gr.Image(type='filepath', height=300, container=True)
        im2 = gr.Image(height=300, container=True)
    with gr.Row():
        with gr.Column():
            area = gr.Radio(["Asia", "Europe/America"], label="Area?", value="Asia")
            cb_age = gr.Checkbox(label="Age", value=True)
            cb_gender = gr.Checkbox(label="Gender", value=True)
            cb_emotion = gr.Checkbox(label="Emotion", value=True)
            sl = gr.Slider(0, 1, step=0.05, value=0.65, label='Confidence Threshold')
        with gr.Column():
            js = gr.JSON(label="json")
    with gr.Row():
        btn = gr.Button(value="Run")
        btn_clean = gr.ClearButton([im1, im2])
    btn.click(fn=analyze, inputs=[im1, sl, area, cb_age, cb_gender, cb_emotion], outputs=[im2, js])
    # btn2 = gr.Button(value="Check", link="https://manhduy160396.wixsite.com/vtech")

with gr.Blocks() as demo:
    gr.Markdown('<h1 style="text-align: center;">V-FaceSDK</h1>')
    gr.Markdown("*Select the feature you want to try")
    gr.TabbedInterface([face_analyze, face_compare], ["Face Analyze", "Face Compare"])

if __name__ == "__main__":
    # Note: this loads the remote Space's own UI, replacing the locally defined `demo` above.
    demo = gr.load("VTechAI/Face_demo", src="spaces", hf_token=token)
    demo.launch()