Delete app.py
app.py
DELETED
@@ -1,267 +0,0 @@
import gradio as gr
import numpy as np
import cv2
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.applications.resnet50 import preprocess_input
from tensorflow.keras.preprocessing import image
from skimage.metrics import structural_similarity as ssim
import os
import tempfile
from PIL import Image

class ImageCharacterClassifier:
    def __init__(self, similarity_threshold=0.5):
        # Initialize ResNet50 model without top classification layer
        self.model = ResNet50(weights='imagenet', include_top=False, pooling='avg')
        self.similarity_threshold = similarity_threshold

    def load_and_preprocess_image(self, image_path, target_size=(224, 224)):
        # Load and preprocess image for ResNet50
        img = image.load_img(image_path, target_size=target_size)
        img_array = image.img_to_array(img)
        img_array = np.expand_dims(img_array, axis=0)
        img_array = preprocess_input(img_array)
        return img_array

    def extract_features(self, image_path):
        # Extract deep features using ResNet50
        preprocessed_img = self.load_and_preprocess_image(image_path)
        features = self.model.predict(preprocessed_img)
        return features

    def calculate_ssim(self, img1_path, img2_path):
        # Calculate SSIM between two images
        img1 = cv2.imread(img1_path)
        img2 = cv2.imread(img2_path)

        if img1 is None or img2 is None:
            return 0.0

        # Convert to grayscale if images are in color
        if len(img1.shape) == 3:
            img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
        if len(img2.shape) == 3:
            img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

        # Resize images to same dimensions
        img2 = cv2.resize(img2, (img1.shape[1], img1.shape[0]))

        score = ssim(img1, img2)
        return score

def process_images(reference_image, comparison_images, similarity_threshold):
    try:
        if reference_image is None:
            return "Please upload a reference image.", []

        if not comparison_images:
            return "Please upload comparison images.", []

        # Create temporary directory for saving uploaded files
        with tempfile.TemporaryDirectory() as temp_dir:
            # Initialize classifier with the threshold
            classifier = ImageCharacterClassifier(similarity_threshold=similarity_threshold)

            # Save reference image
            ref_path = os.path.join(temp_dir, "reference.jpg")
            cv2.imwrite(ref_path, cv2.cvtColor(reference_image, cv2.COLOR_RGB2BGR))

            results = []
            html_output = """
            <div style='text-align: center; margin-bottom: 20px;'>
                <h2 style='color: #2c3e50;'>Results</h2>
                <p style='color: #7f8c8d;'>Reference image compared with uploaded images</p>
            </div>
            """

            # Extract reference features once
            ref_features = classifier.extract_features(ref_path)

            # Process each comparison image
            for i, comp_image in enumerate(comparison_images):
                try:
                    # Save comparison image
                    comp_path = os.path.join(temp_dir, f"comparison_{i}.jpg")

                    try:
                        # First attempt: Try using PIL
                        with Image.open(comp_image.name) as img:
                            img = img.convert('RGB')
                            img_array = np.array(img)
                            cv2.imwrite(comp_path, cv2.cvtColor(img_array, cv2.COLOR_RGB2BGR))
                    except Exception as e1:
                        print(f"PIL failed: {str(e1)}")
                        # Second attempt: Try using OpenCV directly
                        img = cv2.imread(comp_image.name)
                        if img is not None:
                            cv2.imwrite(comp_path, img)
                        else:
                            raise ValueError(f"Could not read image: {comp_image.name}")

                    # Calculate SSIM for structural similarity
                    ssim_score = classifier.calculate_ssim(ref_path, comp_path)

                    # Extract features for physical feature comparison
                    comp_features = classifier.extract_features(comp_path)

                    # Calculate feature differences for physical features
                    feature_diff = np.abs(ref_features - comp_features)

                    # Calculate different aspects of similarity
                    avg_feature_diff = np.mean(feature_diff)
                    max_feature_diff = np.max(feature_diff)
                    feature_similarity = np.dot(ref_features.flatten(), comp_features.flatten()) / (
                        np.linalg.norm(ref_features) * np.linalg.norm(comp_features))

                    # Stricter similarity criteria
                    is_similar = True  # Start with assumption of similarity
                    reason = "Images are similar"

                    # First check for major physical feature differences (like misplaced eyes)
                    if max_feature_diff > 0.85 or avg_feature_diff > 0.5:
                        is_similar = False
                        reason = "Major physical differences detected (missing or misplaced features)"
                    # Then check for overall structural similarity
                    elif ssim_score < 0.4:  # Lowered SSIM threshold
                        is_similar = False
                        reason = "Overall structure is too different"
                    # Finally check for feature similarity
                    elif feature_similarity < 0.5:
                        is_similar = False
                        reason = "Features don't match well enough"

                    # Debug information
                    print(f"\nDebug for {os.path.basename(comp_image.name)}:")
                    print(f"SSIM Score: {ssim_score:.3f}")
                    print(f"Max Feature Difference: {max_feature_diff:.3f}")
                    print(f"Average Feature Difference: {avg_feature_diff:.3f}")
                    print(f"Feature Similarity: {feature_similarity:.3f}")

                    # Create HTML output with improved styling and reason
                    status_color = "#27ae60" if is_similar else "#c0392b"  # Green or Red
                    status_text = "SIMILAR" if is_similar else "NOT SIMILAR"
                    status_icon = "✓" if is_similar else "✗"

                    html_output += f"""
                    <div style='
                        margin: 15px 0;
                        padding: 15px;
                        border-radius: 8px;
                        background-color: {status_color}1a;
                        border: 2px solid {status_color};
                        display: flex;
                        align-items: center;
                        justify-content: space-between;
                    '>
                        <div style='display: flex; align-items: center;'>
                            <span style='
                                font-size: 24px;
                                margin-right: 10px;
                                color: {status_color};
                            '>{status_icon}</span>
                            <div>
                                <span style='color: #2c3e50; font-weight: bold; display: block;'>
                                    {os.path.basename(comp_image.name)}
                                </span>
                                <span style='color: {status_color}; font-size: 12px;'>
                                    {reason}
                                </span>
                            </div>
                        </div>
                        <div style='
                            color: {status_color};
                            font-weight: bold;
                            font-size: 16px;
                        '>{status_text}</div>
                    </div>
                    """

                    # Read the processed image back for display
                    display_img = cv2.imread(comp_path)
                    if display_img is not None:
                        display_img = cv2.cvtColor(display_img, cv2.COLOR_BGR2RGB)
                        results.append(display_img)

                except Exception as e:
                    print(f"Error processing {comp_image.name}: {str(e)}")
                    html_output += f"""
                    <div style='
                        margin: 15px 0;
                        padding: 15px;
                        border-radius: 8px;
                        background-color: #e74c3c1a;
                        border: 2px solid #e74c3c;
                    '>
                        <h3 style='color: #e74c3c; margin: 0;'>
                            Error processing: {os.path.basename(comp_image.name)}
                        </h3>
                        <p style='color: #e74c3c; margin: 5px 0 0 0;'>{str(e)}</p>
                    </div>
                    """

            return html_output, results

    except Exception as e:
        print(f"Main error: {str(e)}")
        return f"""
        <div style='
            padding: 15px;
            border-radius: 8px;
            background-color: #e74c3c1a;
            border: 2px solid #e74c3c;
        '>
            <h3 style='color: #e74c3c; margin: 0;'>Error</h3>
            <p style='color: #e74c3c; margin: 5px 0 0 0;'>{str(e)}</p>
        </div>
        """, []

# Update the interface creation
def create_interface():
    with gr.Blocks() as interface:
        gr.Markdown("# Image Similarity Classifier")
        gr.Markdown("Upload a reference image and up to 10 comparison images to check similarity.")

        with gr.Row():
            with gr.Column():
                reference_input = gr.Image(
                    label="Reference Image",
                    type="numpy",
                    image_mode="RGB"
                )
                comparison_input = gr.File(
                    label="Comparison Images (Upload up to 10)",
                    file_count="multiple",
                    file_types=["image"]
                    # gr.File has no max-file-count parameter; the ten-image
                    # limit is advisory, stated only in the label text
                )
                threshold_slider = gr.Slider(
                    minimum=0.0,
                    maximum=1.0,
                    value=0.5,
                    step=0.05,
                    label="Similarity Threshold"
                )
                submit_button = gr.Button("Compare Images", variant="primary")

            with gr.Column():
                output_html = gr.HTML(label="Results")
                output_gallery = gr.Gallery(
                    label="Processed Images",
                    columns=5,
                    show_label=True,
                    height="auto"
                )

        submit_button.click(
            fn=process_images,
            inputs=[reference_input, comparison_input, threshold_slider],
            outputs=[output_html, output_gallery]
        )

    return interface

# Launch the app
if __name__ == "__main__":
    interface = create_interface()
    interface.launch(share=True)
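For anyone reconstructing the deleted app, its two similarity signals can be exercised without Gradio. The sketch below is a minimal reconstruction, not the original author's test code: it assumes two local files reference.jpg and candidate.jpg (hypothetical names) and reuses the same ResNet50 average-pooled features and grayscale SSIM that app.py computed.

import cv2
import numpy as np
from skimage.metrics import structural_similarity as ssim
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.applications.resnet50 import preprocess_input
from tensorflow.keras.preprocessing import image

# Same backbone as the deleted app: ImageNet weights, no top, global average pooling
model = ResNet50(weights='imagenet', include_top=False, pooling='avg')

def extract_features(path):
    # Mirrors app.py's preprocessing: 224x224 resize + ResNet50 input scaling
    img = image.img_to_array(image.load_img(path, target_size=(224, 224)))
    return model.predict(preprocess_input(img[np.newaxis]), verbose=0).flatten()

def cosine_similarity(a, b):
    # The feature_similarity formula from the deleted code
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

ref = extract_features("reference.jpg")    # hypothetical path
cand = extract_features("candidate.jpg")   # hypothetical path
print("Feature similarity:", cosine_similarity(ref, cand))

gray_ref = cv2.cvtColor(cv2.imread("reference.jpg"), cv2.COLOR_BGR2GRAY)
gray_cand = cv2.cvtColor(cv2.imread("candidate.jpg"), cv2.COLOR_BGR2GRAY)
gray_cand = cv2.resize(gray_cand, (gray_ref.shape[1], gray_ref.shape[0]))
print("SSIM:", ssim(gray_ref, gray_cand))

Under the deleted decision logic, a feature similarity below 0.5 or an SSIM below 0.4 tipped the verdict to NOT SIMILAR.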
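A design note for anyone restoring this file: the Similarity Threshold slider was wired through to ImageCharacterClassifier and stored as self.similarity_threshold, but the verdict in process_images used hardcoded cutoffs (max_feature_diff > 0.85, avg_feature_diff > 0.5, ssim_score < 0.4, feature_similarity < 0.5), so moving the slider never changed a result. One possible repair, assuming the slider was meant to drive the final cosine-feature check (the original intent is undocumented), is a verdict helper like this sketch:

def verdict(ssim_score, feature_similarity, max_feature_diff,
            avg_feature_diff, similarity_threshold=0.5):
    # Same check order as the deleted process_images, except the last
    # cutoff honours the user-chosen slider value instead of a fixed 0.5
    if max_feature_diff > 0.85 or avg_feature_diff > 0.5:
        return False, "Major physical differences detected (missing or misplaced features)"
    if ssim_score < 0.4:
        return False, "Overall structure is too different"
    if feature_similarity < similarity_threshold:
        return False, "Features don't match well enough"
    return True, "Images are similar"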