import cv2
from huggingface_hub import hf_hub_download
from ultralytics import YOLO
from supervision import Detections
from PIL import Image
import torch
import numpy as np
import gradio as gr

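
# The code below calls draw_rect_with_conf(), which the original listing never
# defines. This helper is a minimal sketch of what it is assumed to do: draw each
# detected bounding box and its confidence score onto the BGR frame with OpenCV.
def draw_rect_with_conf(image_bgr, detections):
    # detections is a supervision Detections object: .xyxy holds (x1, y1, x2, y2)
    # boxes and .confidence holds the matching scores.
    for (x1, y1, x2, y2), conf in zip(detections.xyxy, detections.confidence):
        x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
        cv2.rectangle(image_bgr, (x1, y1), (x2, y2), (0, 255, 0), 2)
        cv2.putText(
            image_bgr,
            f"{conf:.2f}",
            (x1, max(y1 - 10, 0)),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.5,
            (0, 255, 0),
            2,
        )
    return image_bgr
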
def detect_faces(input_img):
    # Download the face-detection weights from the Hugging Face Hub;
    # hf_hub_download caches the file, so later calls reuse the local copy.
    model_path = hf_hub_download(
        repo_id="arnabdhar/YOLOv8-Face-Detection", filename="model.pt"
    )

    # Load the YOLOv8 face-detection model.
    model = YOLO(model_path)

    # Convert the incoming PIL image (RGB) to an OpenCV BGR array for drawing.
    image_cv = cv2.cvtColor(np.array(input_img), cv2.COLOR_RGB2BGR)

    # Run inference on the PIL image and wrap the first result
    # in a supervision Detections object.
    output = model(input_img)
    results = Detections.from_ultralytics(output[0])

    # Draw bounding boxes and confidence scores on the BGR copy.
    drawn_image = draw_rect_with_conf(image_cv, results)

    # Convert back to RGB and return a PIL image for Gradio to display.
    drawn_image_pil = Image.fromarray(cv2.cvtColor(drawn_image, cv2.COLOR_BGR2RGB))
    return drawn_image_pil


def gradio_interface(input_img):
    # Thin wrapper used as the Gradio callback.
    detected_img = detect_faces(input_img)
    return detected_img


# Build the Gradio UI: a PIL image in, an annotated image out.
demo = gr.Interface(fn=gradio_interface, inputs=gr.Image(type="pil"), outputs="image")

if __name__ == "__main__":
    # Launch the local Gradio server (by default at http://127.0.0.1:7860).
    demo.launch()