import gradio as gr
from deepface import DeepFace
import cv2
import mediapipe as mp
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
import tempfile
import os

# Initialize MediaPipe Pose for body bounding box detection
mp_pose = mp.solutions.pose
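# Pose detects 33 body landmarks per image; get_body_info below derives a
# rough body bounding box from them.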

def analyze_images(img1, img2):
    # NumPy copies of the inputs for the OpenCV/MediaPipe pipeline below
    img1_array = np.array(img1)
    img2_array = np.array(img2)

    # DeepFace works with file paths, so save the images to temporary JPEGs
    # (convert to RGB first: JPEG has no alpha channel, and uploads may be RGBA)
    with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as temp_file1:
        img1.convert("RGB").save(temp_file1, format='JPEG')
        img1_path = temp_file1.name

    with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as temp_file2:
        img2.convert("RGB").save(temp_file2, format='JPEG')
        img2_path = temp_file2.name

    # Face comparison with DeepFace; enforce_detection=False lets the call
    # succeed even if no face is found (the result is then unreliable)
    face_result = DeepFace.verify(img1_path, img2_path, model_name='VGG-Face', enforce_detection=False)
    is_same_person = face_result['verified']
    face_distance = face_result['distance']
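    # 'verified' is DeepFace's decision from thresholding the distance with a
    # model-specific cutoff; a lower distance means more similar faces.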

    # Convert images to OpenCV format
    img1_cv = cv2.cvtColor(img1_array, cv2.COLOR_RGB2BGR)
    img2_cv = cv2.cvtColor(img2_array, cv2.COLOR_RGB2BGR)

    # Body analysis with MediaPipe
    def get_body_info(image):
        with mp_pose.Pose(static_image_mode=True) as pose:
            results = pose.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
            if results.pose_landmarks:
                landmarks = results.pose_landmarks.landmark
                # Bounding box of all landmarks; coordinates are normalized to
                # [0, 1], so width and height are fractions of the frame
                x_values = [landmark.x for landmark in landmarks]
                y_values = [landmark.y for landmark in landmarks]
                width = max(x_values) - min(x_values)
                height = max(y_values) - min(y_values)
                aspect_ratio = height / width if width > 0 else 0
                return aspect_ratio, width, height
            return None, None, None

    aspect_ratio1, width1, height1 = get_body_info(img1_cv)
    aspect_ratio2, width2, height2 = get_body_info(img2_cv)

    # Fall back to zeros when no pose is detected so the chart can still render
    aspect_ratio1, width1, height1 = aspect_ratio1 or 0, width1 or 0, height1 or 0
    aspect_ratio2, width2, height2 = aspect_ratio2 or 0, width2 or 0, height2 or 0

    # Chart the face distance alongside the body metrics; the face value is a
    # single pairwise distance, so it appears identically in both bar groups
    labels = ['Face Distance', 'Body Aspect Ratio', 'Body Width', 'Body Height']
    values1 = [face_distance, aspect_ratio1, width1, height1]
    values2 = [face_distance, aspect_ratio2, width2, height2]

    x = np.arange(len(labels))
    width = 0.35

    fig, ax = plt.subplots()
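    # Grouped bars: each label gets one bar per image, offset half a bar-width
    # to either side of its tick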
    ax.bar(x - width/2, values1, width, label='Image 1')
    ax.bar(x + width/2, values2, width, label='Image 2')

    ax.set_ylabel('Scores')
    ax.set_title('Comparison of Face and Body Features')
    ax.set_xticks(x)
    ax.set_xticklabels(labels)
    ax.legend()

    # Save the chart and clean up the temporary image files
    plt.tight_layout()
    plt_path = "comparison_chart.png"
    plt.savefig(plt_path)
    plt.close(fig)

    os.unlink(img1_path)
    os.unlink(img2_path)

    verdict = f"{'Yes' if is_same_person else 'No'} (VGG-Face distance: {face_distance:.4f})"
    return verdict, plt_path

# Set up Gradio interface
iface = gr.Interface(
    fn=analyze_images,
    inputs=[gr.Image(type="pil"), gr.Image(type="pil")],
    outputs=[gr.Textbox(label="Same Person?"), gr.Image(label="Comparison Chart")],
    title="Face and Body Similarity Analyzer",
    description="Upload two images to analyze if they contain the same person and compare body features."
)

iface.launch()
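
# A minimal sketch of how to run this app locally, assuming the file is saved
# as app.py (the PyPI package names below match the imports at the top):
#   pip install gradio deepface opencv-python mediapipe matplotlib pillow numpy
#   python app.py
# Gradio then serves the interface at http://127.0.0.1:7860 by default.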