# Gradio demo: face detection and named-person recognition via DeepFace.
import gradio as gr
from deepface import DeepFace
import numpy as np
import PIL
from PIL import Image, ImageDraw, ImageFont
import time
import pandas as pd
from operator import itemgetter
import os
def get_named_people():
    """Return the list of named-person directories under 'db'.

    Each immediate subdirectory of 'db' holds reference images for one
    person.  Returns an empty list when the database directory does not
    exist yet, instead of letting ``next()`` raise StopIteration on an
    exhausted ``os.walk`` iterator.
    """
    try:
        return next(os.walk('db'))[1]
    except StopIteration:
        # 'db' missing or unreadable: no named people yet.
        return []
# Face-detector backends offered to the user, as [label, deepface key] pairs.
# Commented-out entries were found broken in this environment (see notes).
dbackends = [
    ['Haar Cascade (OpenCV)','opencv'],
    #['π Single Shot MultiBox Detector (OpenCV)','ssd'], # for whatever reason fails
    #['Histogram of Oriented Gradients (Dlib)','dlib'], # dlib seems broken on modern ubuntu
    ['RetinaFace','retinaface'],
    ['You Only Look Once v8','yolov8'],
    ['π YuNet','yunet'],
    #['Multi-task Cascade Convolutional Neural Network (TensorFlow) ','mtcnn'],
    ['Fast Multi-task Cascade Convolutional Neural Network (PyTorch)','fastmtcnn']
]
# A π marker in a label flags detectors that cannot handle grayscale input.
dbackendinfo = 'Detectors with π require a color image.'
# Top-level UI: a tabbed Gradio app.
with gr.Blocks() as demo:
    with gr.Tab("Add Named Person"):
        # Demo image preloaded so detection works out of the box; the
        # components below show detector output and details for whichever
        # face the user clicks.
        input_image = gr.Image(value="8428_26_SM.jpg")
        annotated_image = gr.AnnotatedImage()
        selected_face_info = gr.Textbox(label="Selected Face Info", value="Click on a face above")
        selected_face_pic = gr.Image(label="Selected Face", value="Click on a face above", height=148)
def findFaces(imgfile, dbackend):
    """Detect faces in *imgfile* using the selected deepface backend.

    Returns an AnnotatedImage with one bounding box per detection, the
    number of faces found (shown in the info textbox), and None for the
    selected-face image output, which is only populated later when the
    user clicks a face.
    """
    start_time = time.time()
    face_objs = DeepFace.extract_faces(img_path=imgfile, enforce_detection=False, detector_backend=dbackend)
    numberoffaces = len(face_objs)
    # Shared with select_section() so a click on the annotated image can
    # recover the matching bounding box.
    global faceannotations
    faceannotations = []
    for i, face_obj in enumerate(face_objs, 1):
        area = face_obj["facial_area"]
        # (x1, y1, x2, y2), the corner format gradio's AnnotatedImage expects.
        face_coordinates = (area["x"], area["y"], area["x"] + area["w"], area["y"] + area["h"])
        face_confidence = "Face " + str(i) + ": " + "{:.0%}".format(face_obj["confidence"])
        faceannotations.append([face_coordinates, face_confidence])
    run_time = str(round((time.time() - start_time), 2))
    results = gr.AnnotatedImage(
        label="Detected " + str(numberoffaces) + " faces via " + dbackend + ' in ' + run_time + ' seconds.',
        value=(imgfile, faceannotations)
    )
    # BUG FIX: the third output is wired to the selected-face gr.Image
    # component; the old code returned the run-time string, which gradio
    # would try to load as an image path and fail.  Return None so the
    # component simply stays blank until a face is clicked.
    return (results, numberoffaces, None)
# Backend selector plus the detection Interface; the three outputs map to
# the annotated image, the info textbox, and the selected-face image above.
dbackendchoice = gr.Radio(choices=dbackends,label='Detector Backend:',info=dbackendinfo,container=True,value='retinaface')
gr.Interface(
    allow_flagging = "never",
    fn=findFaces,
    inputs=[input_image, dbackendchoice],
    outputs=[annotated_image,selected_face_info,selected_face_pic],
)
def select_section(evt: gr.SelectData):
    # Look up the clicked face by annotation index and crop it out of the
    # current input image.  NOTE(review): reads input_image.value['path'],
    # which assumes gradio stores the image value as a dict with a 'path'
    # entry -- confirm against the installed gradio version.
    cropped_image = np.array(Image.open(input_image.value['path']))
    # Numpy rows are y (annotation indices 1 and 3), columns are x (0 and 2).
    cropped_image = cropped_image[faceannotations[evt.index][0][1]:faceannotations[evt.index][0][3], faceannotations[evt.index][0][0]:faceannotations[evt.index][0][2]]
    return faceannotations[evt.index], cropped_image
annotated_image.select(select_section, None, [selected_face_info,selected_face_pic])
with gr.Tab("Find Named Person in All Images"):
    with gr.Row():
        # Directory names use underscores; show them as spaces in the UI.
        named_people_dropdown = [person.replace("_", " ") for person in get_named_people()]
        # BUG FIX: the original assignment ended with a stray trailing
        # comma, which made find_list a 1-tuple wrapping the Dropdown
        # rather than the Dropdown component itself.
        find_list = gr.Dropdown(named_people_dropdown, label="Person", info="Select a Named Person.")
        find_button = gr.Button(value="Find this person")
with gr.Tab("Identify People in One Image"):
    def identify_in_one_image(imgfile):
        """Match every face in *imgfile* against the 'db' reference images.

        Returns an AnnotatedImage of the matches plus the raw annotation
        list (shown in the debug textbox).
        """
        # BUG FIX: annotations were accumulated in a list defined outside
        # this function, so every run re-displayed the matches from all
        # previous images.  Use a fresh local list per call instead.
        oneimageannotations = []
        # DeepFace.find returns one DataFrame per detected face; flatten.
        oneimageresults = pd.concat(DeepFace.find(img_path=imgfile, db_path="db"))
        for i, found_face in oneimageresults.iterrows():
            # (x1, y1, x2, y2) corner format for gradio's AnnotatedImage.
            face_coordinates = (found_face["source_x"], found_face["source_y"],
                                found_face["source_x"] + found_face["source_w"],
                                found_face["source_y"] + found_face["source_h"])
            # identity is a path like 'db/First_Last/img.jpg'; the middle
            # component is the person's directory name.
            person = found_face["identity"].split("/")[1].replace("_", " ")
            face_confidence = "Matched " + person + " {:.0%}".format(found_face["distance"])
            oneimageannotations.append([face_coordinates, face_confidence])
        results = gr.AnnotatedImage(
            value=(imgfile, oneimageannotations)
        )
        return results, oneimageannotations
    oneimage_input_image = gr.Image()
    found_faces = gr.AnnotatedImage()
    debug_output = gr.Textbox()
    gr.Interface(
        allow_flagging = "never",
        fn=identify_in_one_image,
        inputs=oneimage_input_image,
        outputs=[found_faces, debug_output]
    )
with gr.Tab("Modify Named Person") as ModifyNamedPersonTab:
    def get_named_people_dropdown():
        """Build the gallery's (image, caption) pairs, sorted by caption."""
        # Gallery thumbnails live at NamedPeople/<NameWithoutUnderscores>.jpg;
        # captions show the name with underscores turned into spaces.
        pairs = [
            ("NamedPeople/" + person.replace("_", "") + ".jpg",
             person.replace("_", " "))
            for person in get_named_people()
        ]
        return sorted(pairs, key=itemgetter(1))
    named_person_gallery = gr.Gallery(
        label="Named People", elem_id="gallery", object_fit="none", columns=9)
    # Rebuild the gallery every time this tab is selected.
    ModifyNamedPersonTab.select(get_named_people_dropdown, None, named_person_gallery)
#jsontext = gr.Text(label= "deepface extract_faces results")
# show_error surfaces server-side exceptions in the browser UI.
demo.launch(show_error=True)