Update app.py
app.py CHANGED
@@ -1,251 +1,114 @@
Old version (deleted lines are prefixed "-"; "…" marks text the diff view truncated, bracketed notes mark collapsed runs):

  import os
-
- os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
-
-
- import gradio as gr
  import numpy as np
  import cv2
  from tensorflow.keras.applications import ResNet50
  from tensorflow.keras.applications.resnet50 import preprocess_input
  from tensorflow.keras.preprocessing import image
  from skimage.metrics import structural_similarity as ssim
- import os
- import tempfile
  from PIL import Image

  class ImageCharacterClassifier:
      def __init__(self, similarity_threshold=0.5):
-         # Initialize ResNet50 model without top classification layer
          self.model = ResNet50(weights='imagenet', include_top=False, pooling='avg')
          self.similarity_threshold = similarity_threshold

-     def load_and_preprocess_image(self, image_path):
-         # …
-         img = …
-         img_array = …
          img_array = np.expand_dims(img_array, axis=0)
          img_array = preprocess_input(img_array)
          return img_array

-     def extract_features(self, image_path):
-
-         preprocessed_img = self.load_and_preprocess_image(image_path)
          features = self.model.predict(preprocessed_img)
          return features

-     def calculate_ssim(self, …):
-         [… 4 lines collapsed in the diff view]
-         if img1 is None or img2 is None:
-             return 0.0
-
-         # Convert to grayscale if images are in color
-         if len(img1.shape) == 3:
-             img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
-         if len(img2.shape) == 3:
-             img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
-
-         # Resize images to same dimensions
-         img2 = cv2.resize(img2, (img1.shape[1], img1.shape[0]))
-
-         score = ssim(img1, img2)
-         return score

  def process_images(reference_image, comparison_images, similarity_threshold):
      try:
          if reference_image is None:
              return "Please upload a reference image.", []
-
          if not comparison_images:
              return "Please upload comparison images.", []
-         [… 38 lines collapsed in the diff view]
-                     cv2.imwrite(comp_path, img)
-                 else:
-                     raise ValueError(f"Could not read image: {comp_image.name}")
-
-                 # Calculate SSIM for structural similarity
-                 ssim_score = classifier.calculate_ssim(ref_path, comp_path)
-
-                 # Extract features for physical feature comparison
-                 comp_features = classifier.extract_features(comp_path)
-
-                 # Calculate feature differences
-                 feature_diff = np.abs(ref_features - comp_features)
-                 max_feature_diff = np.max(feature_diff)
-
-                 # Determine similarity based on max feature difference
-                 is_similar = max_feature_diff > 6.0
-
-                 if is_similar:
-                     reason = "Physical features match the reference image"
-                 else:
-                     reason = "Physical features don't match the reference image"
-
-                 # Debug information
-                 print(f"\nDebug for {os.path.basename(comp_image.name)}:")
-                 print(f"SSIM Score: {ssim_score:.3f}")
-                 print(f"Max Feature Difference: {max_feature_diff:.3f}")
-
-                 # Create HTML output with improved styling and reason
-                 status_color = "#27ae60" if is_similar else "#c0392b"  # Green or Red
-                 status_text = "SIMILAR" if is_similar else "NOT SIMILAR"
-                 status_icon = "✓" if is_similar else "✗"
-
-                 html_output += f"""
-                 <div style='
-                     margin: 15px 0;
-                     padding: 15px;
-                     border-radius: 8px;
-                     background-color: {status_color}1a;
-                     border: 2px solid {status_color};
-                     display: flex;
-                     align-items: center;
-                     justify-content: space-between;
-                 '>
-                     <div style='display: flex; align-items: center;'>
-                         <span style='
-                             font-size: 24px;
-                             margin-right: 10px;
-                             color: {status_color};
-                         '>{status_icon}</span>
-                         <div>
-                             <span style='color: #2c3e50; font-weight: bold; display: block;'>
-                                 {os.path.basename(comp_image.name)}
-                             </span>
-                             <span style='color: {status_color}; font-size: 12px;'>
-                                 {reason}
-                             </span>
-                         </div>
-                     </div>
-                     <div style='
-                         color: {status_color};
-                         font-weight: bold;
-                         font-size: 16px;
-                     '>{status_text}</div>
-                 </div>
-                 """
-
-                 # Read the processed image back for display
-                 display_img = cv2.imread(comp_path)
-                 if display_img is not None:
-                     display_img = cv2.cvtColor(display_img, cv2.COLOR_BGR2RGB)
-                     results.append(display_img)
-
-             except Exception as e:
-                 print(f"Error processing {comp_image.name}: {str(e)}")
-                 html_output += f"""
-                 <div style='
-                     margin: 15px 0;
-                     padding: 15px;
-                     border-radius: 8px;
-                     background-color: #e74c3c1a;
-                     border: 2px solid #e74c3c;
-                 '>
-                     <h3 style='color: #e74c3c; margin: 0;'>
-                         Error processing: {os.path.basename(comp_image.name)}
-                     </h3>
-                     <p style='color: #e74c3c; margin: 5px 0 0 0;'>{str(e)}</p>
-                 </div>
-                 """
-
-         return html_output, results
-
      except Exception as e:
-
-         return f"""
-         <div style='
-             padding: 15px;
-             border-radius: 8px;
-             background-color: #e74c3c1a;
-             border: 2px solid #e74c3c;
-         '>
-             <h3 style='color: #e74c3c; margin: 0;'>Error</h3>
-             <p style='color: #e74c3c; margin: 5px 0 0 0;'>{str(e)}</p>
-         </div>
-         """, []
-
- # Update the interface creation
  def create_interface():
      with gr.Blocks() as interface:
          gr.Markdown("# Image Similarity Classifier")
-         gr.Markdown("Upload a reference image and …
-
          with gr.Row():
              with gr.Column():
-                 reference_input = gr.Image(
-                     [… 4 lines collapsed in the diff view]
-                 comparison_input = gr.File(type="filepath", interactive=True)
-
-                 threshold_slider = gr.Slider(
-                     minimum=0.0,
-                     maximum=1.0,
-                     value=0.5,
-                     step=0.05,
-                     label="Similarity Threshold"
-                 )
-                 submit_button = gr.Button("Compare Images", variant="primary")
-
              with gr.Column():
                  output_html = gr.HTML(label="Results")
-                 output_gallery = gr.Gallery(
-                     [… 1 line collapsed in the diff view]
-                     columns=5,
-                     show_label=True,
-                     height="auto"
-                 )
-
          submit_button.click(
              fn=process_images,
              inputs=[reference_input, comparison_input, threshold_slider],
              outputs=[output_html, output_gallery]
          )
-
          return interface

- # Launch the app
  if __name__ == "__main__":
      interface = create_interface()
-     interface.launch(share=True)
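The substantive fix in this commit is the similarity verdict. In the old version above, a pair is declared similar when the maximum ResNet50 feature difference is large (max_feature_diff > 6.0), which inverts the intended check, while the SSIM score is only printed as debug output. The new version below compares with "<". A minimal sketch of the corrected decision, keeping the 6.0 cutoff that appears in the diff:

    # Close feature vectors mean similar images, so a SMALL difference passes.
    feature_diff = np.abs(ref_features - comp_features)
    max_feature_diff = np.max(feature_diff)
    is_similar = max_feature_diff < 6.0  # the old code used ">" here by mistake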
New version (added lines are prefixed "+"):

  import os
+ import tempfile
  import numpy as np
  import cv2
+ import gradio as gr
  from tensorflow.keras.applications import ResNet50
  from tensorflow.keras.applications.resnet50 import preprocess_input
  from tensorflow.keras.preprocessing import image
  from skimage.metrics import structural_similarity as ssim
  from PIL import Image

+ # Disable GPU for TensorFlow
+ os.environ["TF_ENABLE_ONEDNN_OPTS"] = "0"
+ os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
+
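One caveat about the ordering above: these variables are set after the tensorflow.keras imports, and CUDA_VISIBLE_DEVICES is only guaranteed to hide the GPU if it is set before TensorFlow initializes CUDA (TF_ENABLE_ONEDNN_OPTS is likewise read when TensorFlow loads). The deleted version set the variable right after "import os", which is the safer ordering; a sketch:

    import os
    # Set before any TensorFlow import so both settings reliably take effect.
    os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
    os.environ["TF_ENABLE_ONEDNN_OPTS"] = "0"
    from tensorflow.keras.applications import ResNet50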
  class ImageCharacterClassifier:
      def __init__(self, similarity_threshold=0.5):
          self.model = ResNet50(weights='imagenet', include_top=False, pooling='avg')
          self.similarity_threshold = similarity_threshold

+     def load_and_preprocess_image(self, img):
+         # Convert image to array and preprocess it
+         img = img.convert('RGB')
+         img_array = np.array(img)
+         img_array = cv2.resize(img_array, (224, 224))  # Ensure correct size
          img_array = np.expand_dims(img_array, axis=0)
          img_array = preprocess_input(img_array)
          return img_array

+     def extract_features(self, img):
+         preprocessed_img = self.load_and_preprocess_image(img)
          features = self.model.predict(preprocessed_img)
          return features

+     def calculate_ssim(self, img1, img2):
+         img1_gray = cv2.cvtColor(img1, cv2.COLOR_RGB2GRAY)
+         img2_gray = cv2.cvtColor(img2, cv2.COLOR_RGB2GRAY)
+         img2_gray = cv2.resize(img2_gray, (img1_gray.shape[1], img1_gray.shape[0]))
+         return ssim(img1_gray, img2_gray)
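For a quick check, the refactored class can be exercised outside Gradio. A minimal sketch, assuming two hypothetical local files ref.png and comp.png:

    from PIL import Image
    import numpy as np

    clf = ImageCharacterClassifier()
    ref = Image.open("ref.png").convert("RGB")    # hypothetical test image
    comp = Image.open("comp.png").convert("RGB")  # hypothetical test image

    # Pooled ResNet50 embeddings of shape (1, 2048)
    ref_feat = clf.extract_features(ref)
    comp_feat = clf.extract_features(comp)
    print("max feature diff:", np.max(np.abs(ref_feat - comp_feat)))
    print("SSIM:", clf.calculate_ssim(np.array(ref), np.array(comp)))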

  def process_images(reference_image, comparison_images, similarity_threshold):
      try:
          if reference_image is None:
              return "Please upload a reference image.", []
          if not comparison_images:
              return "Please upload comparison images.", []
+
+         classifier = ImageCharacterClassifier(similarity_threshold)
+
+         # Wrap the reference NumPy array from gr.Image in a PIL Image
+         ref_image = Image.fromarray(reference_image)
+         ref_features = classifier.extract_features(ref_image)
+
+         results = []
+         html_output = "<h3>Comparison Results:</h3>"
+
+         for comp_image in comparison_images:
+             try:
+                 # Read image file as PIL Image
+                 comp_pil = Image.open(comp_image)
+                 comp_pil = comp_pil.convert("RGB")
+
+                 # Convert to NumPy format for SSIM
+                 comp_array = np.array(comp_pil)
+
+                 # Calculate SSIM score
+                 ssim_score = classifier.calculate_ssim(reference_image, comp_array)
+
+                 # Extract features
+                 comp_features = classifier.extract_features(comp_pil)
+                 max_feature_diff = np.max(np.abs(ref_features - comp_features))
+                 is_similar = max_feature_diff < 6.0
+
+                 status_text = "SIMILAR" if is_similar else "NOT SIMILAR"
+                 status_color = "green" if is_similar else "red"
+
+                 html_output += f"<p style='color:{status_color};'>{comp_image.name}: {status_text}</p>"
+                 results.append(comp_array)
+
+             except Exception as e:
+                 html_output += f"<p style='color:red;'>Error processing {comp_image.name}: {str(e)}</p>"
+
+         return html_output, results
+
      except Exception as e:
+         return f"<p style='color:red;'>Error: {str(e)}</p>", []
+
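Note that as committed, ssim_score is computed for every pair but never used in the verdict, and the slider's similarity_threshold is stored on the classifier without being read: the decision rests entirely on the fixed 6.0 feature cutoff. One way to make both inputs meaningful (a sketch, not what this commit does) would be:

    # Require close embeddings AND structural similarity above the slider value.
    is_similar = (max_feature_diff < 6.0) and (ssim_score >= similarity_threshold)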
  def create_interface():
      with gr.Blocks() as interface:
          gr.Markdown("# Image Similarity Classifier")
+         gr.Markdown("Upload a reference image and multiple comparison images.")
+
          with gr.Row():
              with gr.Column():
+                 reference_input = gr.Image(label="Reference Image", type="numpy")
+                 comparison_input = gr.File(label="Comparison Images", type="file", file_count="multiple")  # gr.File selects multi-upload via file_count="multiple"; multiple=True (as committed) is not a valid argument
+                 threshold_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.5, step=0.05, label="Similarity Threshold")
+                 submit_button = gr.Button("Compare Images")
+
              with gr.Column():
                  output_html = gr.HTML(label="Results")
+                 output_gallery = gr.Gallery(label="Processed Images", columns=3)
+
          submit_button.click(
              fn=process_images,
              inputs=[reference_input, comparison_input, threshold_slider],
              outputs=[output_html, output_gallery]
          )
+
          return interface

  if __name__ == "__main__":
      interface = create_interface()
+     interface.launch(share=True)
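Since this runs as a Hugging Face Space, the imports imply the Space's dependency file. An unpinned requirements.txt sketch (assumed; not part of this commit):

    gradio
    numpy
    opencv-python-headless
    tensorflow
    scikit-image
    Pillow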