# PersonMatch — app.py
# Author: szili2011 (Hugging Face Space)
# Last update: commit 0e47c11 (verified)
import os
import tempfile

import cv2
import gradio as gr
import matplotlib.pyplot as plt
import mediapipe as mp
import numpy as np
from deepface import DeepFace
from PIL import Image
# Initialize MediaPipe Pose for body bounding box detection.
# (Module-level handle; analyze_images opens a Pose instance from it per call.)
mp_pose = mp.solutions.pose
def analyze_images(img1, img2):
    """Compare two photos of people and report face/body similarity.

    Runs DeepFace face verification (VGG-Face) to decide whether both images
    show the same person, and MediaPipe Pose to measure each body's bounding
    box, then renders a grouped bar chart comparing the two images.

    Args:
        img1: First image (PIL.Image, as provided by Gradio).
        img2: Second image (PIL.Image).

    Returns:
        tuple: (is_same_person, chart_path) where is_same_person is the
        DeepFace 'verified' boolean and chart_path is the saved PNG chart.
    """
    # NumPy copies for the OpenCV/MediaPipe pipeline below.
    img1_array = np.array(img1)
    img2_array = np.array(img2)

    # DeepFace.verify accepts file paths, so persist temporary JPEGs.
    # delete=False lets DeepFace reopen the file (required on Windows);
    # the files are removed explicitly in the finally block below.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as temp_file1:
        img1.save(temp_file1, format='JPEG')
        img1_path = temp_file1.name
    with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as temp_file2:
        img2.save(temp_file2, format='JPEG')
        img2_path = temp_file2.name

    try:
        # enforce_detection=False: do not raise if no face is detected.
        face_result = DeepFace.verify(
            img1_path, img2_path,
            model_name='VGG-Face',
            enforce_detection=False,
        )
    finally:
        # BUG FIX: the temp files were never deleted, leaking two files
        # per call. Remove them regardless of whether verification raised.
        for path in (img1_path, img2_path):
            try:
                os.remove(path)
            except OSError:
                pass

    is_same_person = face_result['verified']
    # NOTE: 'distance' is a dissimilarity measure — lower means more similar.
    similarity_score = face_result['distance']

    # OpenCV expects BGR channel order.
    img1_cv = cv2.cvtColor(img1_array, cv2.COLOR_RGB2BGR)
    img2_cv = cv2.cvtColor(img2_array, cv2.COLOR_RGB2BGR)

    def get_body_info(image):
        """Return (aspect_ratio, width, height) of the pose-landmark bounding
        box in normalized [0, 1] image coordinates; zeros if no pose found."""
        with mp_pose.Pose(static_image_mode=True) as pose:
            results = pose.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
            if results.pose_landmarks:
                landmarks = results.pose_landmarks.landmark
                x_values = [landmark.x for landmark in landmarks]
                y_values = [landmark.y for landmark in landmarks]
                width = max(x_values) - min(x_values)
                height = max(y_values) - min(y_values)
                aspect_ratio = height / width if width > 0 else 0
                return aspect_ratio, width, height
            # BUG FIX: returning (None, None, None) crashed ax.bar below
            # with a TypeError; zeros plot cleanly as "no body detected".
            return 0, 0, 0

    aspect_ratio1, width1, height1 = get_body_info(img1_cv)
    aspect_ratio2, width2, height2 = get_body_info(img2_cv)

    # Grouped bar chart: one bar per metric per image. The face metric is
    # the shared DeepFace distance, so those two bars are always equal.
    labels = ['Face Similarity', 'Body Aspect Ratio', 'Body Width', 'Body Height']
    values1 = [similarity_score, aspect_ratio1, width1, height1]
    values2 = [similarity_score, aspect_ratio2, width2, height2]
    x = np.arange(len(labels))
    width = 0.35
    fig, ax = plt.subplots()
    ax.bar(x - width/2, values1, width, label='Image 1')
    ax.bar(x + width/2, values2, width, label='Image 2')
    ax.set_ylabel('Scores')
    ax.set_title('Comparison of Face and Body Features')
    ax.set_xticks(x)
    ax.set_xticklabels(labels)
    ax.legend()

    # Save the chart to a fixed path and hand it to Gradio for display.
    plt.tight_layout()
    plt_path = "comparison_chart.png"
    plt.savefig(plt_path)
    plt.close(fig)
    return is_same_person, plt_path
# Gradio UI: two image uploads in, a same-person verdict plus a chart out.
demo_inputs = [gr.Image(type="pil"), gr.Image(type="pil")]
demo_outputs = [
    gr.Textbox(label="Same Person?"),
    gr.Image(label="Comparison Chart"),
]
iface = gr.Interface(
    fn=analyze_images,
    inputs=demo_inputs,
    outputs=demo_outputs,
    title="Face and Body Similarity Analyzer",
    description="Upload two images to analyze if they contain the same person and compare body features.",
)
iface.launch()