# app.py — TopoFR face-similarity Gradio demo (HF Space by developer0hye, commit 36c4050)
import gradio as gr
import numpy as np
import onnxruntime
import cv2
# Path to the pretrained TopoFR face-embedding ONNX model
# (ResNet-200 backbone trained on Glint360K, per the filename).
MODEL_PATH = "weights/Glint360K_R200_TopoFR_9784.onnx"
# Create the ONNX Runtime session once at import time as a module-level
# global, so every comparison reuses the loaded model instead of
# re-initializing it per request.
session = onnxruntime.InferenceSession(MODEL_PATH)
def pil_to_cv2(pil_image):
    """Convert a PIL image to an OpenCV-style BGR ndarray.

    Args:
        pil_image: a ``PIL.Image.Image`` in any mode (RGB, RGBA, L, ...).

    Returns:
        ``numpy.ndarray`` of shape (H, W, 3) in BGR channel order,
        as expected by OpenCV routines.
    """
    # Normalize to 3-channel RGB first so RGBA or grayscale uploads do
    # not break (or silently corrupt) the RGB->BGR swap below.
    if pil_image.mode != "RGB":
        pil_image = pil_image.convert("RGB")
    numpy_image = np.array(pil_image)
    # PIL uses RGB channel order; OpenCV uses BGR.
    cv2_image = cv2.cvtColor(numpy_image, cv2.COLOR_RGB2BGR)
    return cv2_image
def process_image(pil_img):
    """Turn an optional PIL image into a normalized NCHW float32 batch.

    Returns a (1, 3, 112, 112) array with pixel values mapped to
    [-1, 1]. When *pil_img* is None, a random RGB image is substituted
    so the downstream model still receives a valid input tensor.
    """
    if pil_img is None:
        # No upload: fall back to 112x112 random noise.
        face = np.random.randint(0, 255, size=(112, 112, 3), dtype=np.uint8)
    else:
        face = pil_to_cv2(pil_img)
    face = cv2.resize(face, (112, 112))
    # The model expects RGB input; pil_to_cv2 produced BGR.
    face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
    # HWC -> CHW, cast to float32, and prepend the batch axis.
    tensor = np.transpose(face, (2, 0, 1)).astype(np.float32)[np.newaxis, ...]
    # Scale [0, 255] -> [-1, 1] (mean 0.5, std 0.5 per channel).
    return (tensor / 255.0 - 0.5) / 0.5
def calculate_similarity(img1, img2):
    """Compute the cosine similarity between two face images.

    Args:
        img1: reference face (PIL image, or None for a random stand-in).
        img2: other face (PIL image, or None for a random stand-in).

    Returns:
        A formatted string containing the cosine similarity in [-1, 1].
    """
    # Hoist the model-input-name lookup: it is invariant across calls,
    # and the original queried it once per image.
    input_name = session.get_inputs()[0].name

    def embed(img):
        # Preprocess, then run the ONNX model; the first output is the
        # face embedding (shape (1, D)).
        tensor = process_image(img)
        return session.run(None, {input_name: tensor})[0]

    feat1 = embed(img1)
    feat2 = embed(img2)

    # L2-normalize each embedding so the dot product below equals
    # cosine similarity.
    feat1 = feat1 / np.linalg.norm(feat1, axis=1, keepdims=True)
    feat2 = feat2 / np.linalg.norm(feat2, axis=1, keepdims=True)
    cosine_similarity = np.sum(feat1 * feat2, axis=1).item()
    return f"Cosine Similarity: {cosine_similarity:.4f}"
# Assemble the Gradio UI: two image inputs, a results box, a compare
# button wired to calculate_similarity, and bundled example pairs.
with gr.Blocks() as demo:
    gr.Markdown("# Face Recognition with [TopoFR](https://github.com/DanJun6737/TopoFR)")
    gr.Markdown("Compare two faces to calculate their cosine similarity.")

    with gr.Row():
        reference_image = gr.Image(label="Reference Face", type="pil")
        other_image = gr.Image(label="Other Face", type="pil")

    with gr.Row():
        result_box = gr.Text(label="Results")

    compare_button = gr.Button("Compare Faces")
    compare_button.click(
        fn=calculate_similarity,
        inputs=[reference_image, other_image],
        outputs=result_box,
    )

    # Example image pairs shipped with the Space.
    gr.Examples(
        examples=[
            ["examples/yong1.png", "examples/yong2.png"],
            ["examples/yong1.png", "examples/yong3.png"],
            ["examples/yong2.png", "examples/yong3.png"],
            ["examples/yong1.png", "examples/barboon.jpeg"],
            ["examples/yong2.png", "examples/barboon.jpeg"],
            ["examples/yong3.png", "examples/barboon.jpeg"],
        ],
        inputs=[reference_image, other_image],
        label="Example Image Pairs",
    )

# Start the web server.
demo.launch()