Upload 4 files

- gradio_app.py +267 -0
- image_classifier.py +139 -0
- pyrightconfig.json +4 -0
- requirements.txt +8 -0
gradio_app.py
ADDED
@@ -0,0 +1,267 @@
import gradio as gr
import numpy as np
import cv2
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.applications.resnet50 import preprocess_input
from tensorflow.keras.preprocessing import image
from skimage.metrics import structural_similarity as ssim
import os
import tempfile
from PIL import Image

class ImageCharacterClassifier:
    def __init__(self, similarity_threshold=0.5):
        # Initialize ResNet50 model without top classification layer
        self.model = ResNet50(weights='imagenet', include_top=False, pooling='avg')
        self.similarity_threshold = similarity_threshold

    def load_and_preprocess_image(self, image_path, target_size=(224, 224)):
        # Load and preprocess image for ResNet50
        img = image.load_img(image_path, target_size=target_size)
        img_array = image.img_to_array(img)
        img_array = np.expand_dims(img_array, axis=0)
        img_array = preprocess_input(img_array)
        return img_array

    def extract_features(self, image_path):
        # Extract deep features using ResNet50
        preprocessed_img = self.load_and_preprocess_image(image_path)
        features = self.model.predict(preprocessed_img)
        return features

    def calculate_ssim(self, img1_path, img2_path):
        # Calculate SSIM between two images
        img1 = cv2.imread(img1_path)
        img2 = cv2.imread(img2_path)

        if img1 is None or img2 is None:
            return 0.0

        # Convert to grayscale if images are in color
        if len(img1.shape) == 3:
            img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
        if len(img2.shape) == 3:
            img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

        # Resize images to same dimensions
        img2 = cv2.resize(img2, (img1.shape[1], img1.shape[0]))

        score = ssim(img1, img2)
        return score

def process_images(reference_image, comparison_images, similarity_threshold):
    try:
        if reference_image is None:
            return "Please upload a reference image.", []

        if not comparison_images:
            return "Please upload comparison images.", []

        # Create temporary directory for saving uploaded files
        with tempfile.TemporaryDirectory() as temp_dir:
            # Initialize classifier with the threshold
            classifier = ImageCharacterClassifier(similarity_threshold=similarity_threshold)

            # Save reference image
            ref_path = os.path.join(temp_dir, "reference.jpg")
            cv2.imwrite(ref_path, cv2.cvtColor(reference_image, cv2.COLOR_RGB2BGR))

            results = []
            html_output = """
            <div style='text-align: center; margin-bottom: 20px;'>
                <h2 style='color: #2c3e50;'>Results</h2>
                <p style='color: #7f8c8d;'>Reference image compared with uploaded images</p>
            </div>
            """

            # Extract reference features once
            ref_features = classifier.extract_features(ref_path)

            # Process each comparison image
            for i, comp_image in enumerate(comparison_images):
                try:
                    # Save comparison image
                    comp_path = os.path.join(temp_dir, f"comparison_{i}.jpg")

                    try:
                        # First attempt: Try using PIL
                        with Image.open(comp_image.name) as img:
                            img = img.convert('RGB')
                            img_array = np.array(img)
                            cv2.imwrite(comp_path, cv2.cvtColor(img_array, cv2.COLOR_RGB2BGR))
                    except Exception as e1:
                        print(f"PIL failed: {str(e1)}")
                        # Second attempt: Try using OpenCV directly
                        img = cv2.imread(comp_image.name)
                        if img is not None:
                            cv2.imwrite(comp_path, img)
                        else:
                            raise ValueError(f"Could not read image: {comp_image.name}")

                    # Calculate SSIM for structural similarity
                    ssim_score = classifier.calculate_ssim(ref_path, comp_path)

                    # Extract features for physical feature comparison
                    comp_features = classifier.extract_features(comp_path)

                    # Calculate feature differences for physical features
                    feature_diff = np.abs(ref_features - comp_features)

                    # Calculate different aspects of similarity
                    avg_feature_diff = np.mean(feature_diff)
                    max_feature_diff = np.max(feature_diff)
                    feature_similarity = np.dot(ref_features.flatten(),
                                                comp_features.flatten()) / (
                        np.linalg.norm(ref_features) * np.linalg.norm(comp_features))

                    # Stricter similarity criteria
                    is_similar = True  # Start with assumption of similarity
                    reason = "Images are similar"

                    # First check for major physical feature differences (like misplaced eyes)
                    if max_feature_diff > 0.85 or avg_feature_diff > 0.5:
                        is_similar = False
                        reason = "Major physical differences detected (missing or misplaced features)"
                    # Then check for overall structural similarity
                    elif ssim_score < 0.4:  # Lowered SSIM threshold
                        is_similar = False
                        reason = "Overall structure is too different"
                    # Finally check for feature similarity
                    elif feature_similarity < 0.5:
                        is_similar = False
                        reason = "Features don't match well enough"

                    # Debug information
                    print(f"\nDebug for {os.path.basename(comp_image.name)}:")
                    print(f"SSIM Score: {ssim_score:.3f}")
                    print(f"Max Feature Difference: {max_feature_diff:.3f}")
                    print(f"Average Feature Difference: {avg_feature_diff:.3f}")
                    print(f"Feature Similarity: {feature_similarity:.3f}")

                    # Create HTML output with improved styling and reason
                    status_color = "#27ae60" if is_similar else "#c0392b"  # Green or Red
                    status_text = "SIMILAR" if is_similar else "NOT SIMILAR"
                    status_icon = "✓" if is_similar else "✗"

                    html_output += f"""
                    <div style='
                        margin: 15px 0;
                        padding: 15px;
                        border-radius: 8px;
                        background-color: {status_color}1a;
                        border: 2px solid {status_color};
                        display: flex;
                        align-items: center;
                        justify-content: space-between;
                    '>
                        <div style='display: flex; align-items: center;'>
                            <span style='
                                font-size: 24px;
                                margin-right: 10px;
                                color: {status_color};
                            '>{status_icon}</span>
                            <div>
                                <span style='color: #2c3e50; font-weight: bold; display: block;'>
                                    {os.path.basename(comp_image.name)}
                                </span>
                                <span style='color: {status_color}; font-size: 12px;'>
                                    {reason}
                                </span>
                            </div>
                        </div>
                        <div style='
                            color: {status_color};
                            font-weight: bold;
                            font-size: 16px;
                        '>{status_text}</div>
                    </div>
                    """

                    # Read the processed image back for display
                    display_img = cv2.imread(comp_path)
                    if display_img is not None:
                        display_img = cv2.cvtColor(display_img, cv2.COLOR_BGR2RGB)
                        results.append(display_img)

                except Exception as e:
                    print(f"Error processing {comp_image.name}: {str(e)}")
                    html_output += f"""
                    <div style='
                        margin: 15px 0;
                        padding: 15px;
                        border-radius: 8px;
                        background-color: #e74c3c1a;
                        border: 2px solid #e74c3c;
                    '>
                        <h3 style='color: #e74c3c; margin: 0;'>
                            Error processing: {os.path.basename(comp_image.name)}
                        </h3>
                        <p style='color: #e74c3c; margin: 5px 0 0 0;'>{str(e)}</p>
                    </div>
                    """

            return html_output, results

    except Exception as e:
        print(f"Main error: {str(e)}")
        return f"""
        <div style='
            padding: 15px;
            border-radius: 8px;
            background-color: #e74c3c1a;
            border: 2px solid #e74c3c;
        '>
            <h3 style='color: #e74c3c; margin: 0;'>Error</h3>
            <p style='color: #e74c3c; margin: 5px 0 0 0;'>{str(e)}</p>
        </div>
        """, []

# Update the interface creation
def create_interface():
    with gr.Blocks() as interface:
        gr.Markdown("# Image Similarity Classifier")
        gr.Markdown("Upload a reference image and up to 10 comparison images to check similarity.")

        with gr.Row():
            with gr.Column():
                reference_input = gr.Image(
                    label="Reference Image",
                    type="numpy",
                    image_mode="RGB"
                )
                comparison_input = gr.File(
                    label="Comparison Images (Upload up to 10)",
                    file_count="multiple",
                    file_types=["image"],
                    maximum=10
                )
                threshold_slider = gr.Slider(
                    minimum=0.0,
                    maximum=1.0,
                    value=0.5,
                    step=0.05,
                    label="Similarity Threshold"
                )
                submit_button = gr.Button("Compare Images", variant="primary")

            with gr.Column():
                output_html = gr.HTML(label="Results")
                output_gallery = gr.Gallery(
                    label="Processed Images",
                    columns=5,
                    show_label=True,
                    height="auto"
                )

        submit_button.click(
            fn=process_images,
            inputs=[reference_input, comparison_input, threshold_slider],
            outputs=[output_html, output_gallery]
        )

    return interface

# Launch the app
if __name__ == "__main__":
    interface = create_interface()
    interface.launch(share=True)
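The app wires the reference image, the uploaded files, and the threshold slider into process_images, which scores every upload with SSIM plus ResNet50 feature similarity and renders the verdicts as styled HTML next to a gallery of the processed images. For a quick smoke test without opening the browser UI, here is a minimal sketch of calling process_images directly; the image paths are placeholders, and SimpleNamespace stands in for the .name-bearing file objects that gr.File normally passes to the callback.

# smoke_test.py -- a sketch only; assumes reference.jpg and candidate_*.jpg exist on disk
from types import SimpleNamespace

import cv2

from gradio_app import process_images

# gr.Image(type="numpy") hands the callback an RGB array, so mimic that here
reference = cv2.cvtColor(cv2.imread("reference.jpg"), cv2.COLOR_BGR2RGB)
comparisons = [SimpleNamespace(name="candidate_1.jpg"),
               SimpleNamespace(name="candidate_2.jpg")]

html, gallery = process_images(reference, comparisons, similarity_threshold=0.5)
print(f"Processed {len(gallery)} images")

Note that interface.launch(share=True) starts a local server and additionally requests a temporary public Gradio link.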
image_classifier.py
ADDED
@@ -0,0 +1,139 @@
import numpy as np
import cv2
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.applications.resnet50 import preprocess_input
from tensorflow.keras.preprocessing import image
from skimage.metrics import structural_similarity as ssim
import os
import argparse

class ImageCharacterClassifier:
    def __init__(self, similarity_threshold=0.7):
        # Initialize ResNet50 model without top classification layer
        self.model = ResNet50(weights='imagenet', include_top=False, pooling='avg')
        self.similarity_threshold = similarity_threshold

    def load_and_preprocess_image(self, image_path, target_size=(224, 224)):
        # Load and preprocess image for ResNet50
        img = image.load_img(image_path, target_size=target_size)
        img_array = image.img_to_array(img)
        img_array = np.expand_dims(img_array, axis=0)
        img_array = preprocess_input(img_array)
        return img_array

    def extract_features(self, image_path):
        # Extract deep features using ResNet50
        preprocessed_img = self.load_and_preprocess_image(image_path)
        features = self.model.predict(preprocessed_img)
        return features

    def calculate_ssim(self, img1_path, img2_path):
        # Calculate SSIM between two images
        img1 = cv2.imread(img1_path)
        img2 = cv2.imread(img2_path)

        # Convert to grayscale if images are in color
        if len(img1.shape) == 3:
            img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
        if len(img2.shape) == 3:
            img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

        # Resize images to same dimensions
        img2 = cv2.resize(img2, (img1.shape[1], img1.shape[0]))

        score = ssim(img1, img2)
        return score

    def classify_images(self, reference_image_path, image_folder_path):
        # Extract features from reference image
        reference_features = self.extract_features(reference_image_path)

        results = []

        # Process each image in the folder
        for image_name in os.listdir(image_folder_path):
            if image_name.lower().endswith(('.png', '.jpg', '.jpeg')):
                image_path = os.path.join(image_folder_path, image_name)

                try:
                    # Calculate SSIM
                    ssim_score = self.calculate_ssim(reference_image_path, image_path)

                    # Extract features and calculate similarity
                    image_features = self.extract_features(image_path)

                    # Calculate cosine similarity
                    feature_similarity = np.dot(reference_features.flatten(),
                                                image_features.flatten()) / (
                        np.linalg.norm(reference_features) * np.linalg.norm(image_features))

                    # Give more weight to feature similarity
                    combined_similarity = (0.3 * ssim_score + 0.7 * feature_similarity)

                    # Classify based on similarity threshold
                    is_similar = combined_similarity >= self.similarity_threshold

                    results.append({
                        'image_name': image_name,
                        'ssim_score': ssim_score,
                        'feature_similarity': feature_similarity,
                        'combined_similarity': combined_similarity,
                        'is_similar': is_similar
                    })

                except Exception as e:
                    print(f"Error processing {image_name}: {str(e)}")
                    continue

        return results

def main():
    # Create argument parser
    parser = argparse.ArgumentParser(description='Image Character Classification')
    parser.add_argument('--reference', '-r',
                        type=str,
                        required=True,
                        help='Path to reference image')
    parser.add_argument('--folder', '-f',
                        type=str,
                        required=True,
                        help='Path to folder containing images to compare')
    parser.add_argument('--threshold', '-t',
                        type=float,
                        default=0.5,  # Lowered the default threshold
                        help='Similarity threshold (default: 0.5)')

    # Parse arguments
    args = parser.parse_args()

    # Initialize classifier
    classifier = ImageCharacterClassifier(similarity_threshold=args.threshold)

    # Check if paths exist
    if not os.path.exists(args.reference):
        print(f"Error: Reference image not found at {args.reference}")
        return

    if not os.path.exists(args.folder):
        print(f"Error: Image folder not found at {args.folder}")
        return

    # Perform classification
    results = classifier.classify_images(args.reference, args.folder)

    # Sort results by similarity score
    results.sort(key=lambda x: x['combined_similarity'], reverse=True)

    # Print results
    print("\nResults sorted by similarity (highest to lowest):")
    print("-" * 50)
    for result in results:
        print(f"\nImage: {result['image_name']}")
        print(f"SSIM Score: {result['ssim_score']:.3f}")
        print(f"Feature Similarity: {result['feature_similarity']:.3f}")
        print(f"Combined Similarity: {result['combined_similarity']:.3f}")
        print(f"Is Similar: {result['is_similar']}")
        print("-" * 30)

if __name__ == "__main__":
    main()
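image_classifier.py packages the same ResNet50-plus-SSIM scoring as a command-line batch tool (python image_classifier.py --reference ref.jpg --folder images --threshold 0.5, per the argparse flags above, with placeholder paths). It can also be used programmatically; a hedged sketch, again with placeholder paths:

# batch_check.py -- a sketch only; reference.jpg and comparison_images/ are placeholders
from image_classifier import ImageCharacterClassifier

classifier = ImageCharacterClassifier(similarity_threshold=0.5)
results = classifier.classify_images("reference.jpg", "comparison_images")

# Each result dict carries ssim_score, feature_similarity,
# combined_similarity = 0.3 * SSIM + 0.7 * cosine feature similarity, and is_similar.
for r in sorted(results, key=lambda x: x["combined_similarity"], reverse=True):
    print(f"{r['image_name']}: combined={r['combined_similarity']:.3f}, similar={r['is_similar']}")

With those weights, an image scoring SSIM 0.6 and feature similarity 0.8 gets 0.3 * 0.6 + 0.7 * 0.8 = 0.74, which clears the default 0.5 threshold.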
pyrightconfig.json
ADDED
@@ -0,0 +1,4 @@
{
    "reportMissingImports": false,
    "reportGeneralTypeIssues": false
}
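This config turns off Pyright's reportMissingImports and reportGeneralTypeIssues diagnostics, which the TensorFlow and OpenCV imports in the two scripts commonly trigger in Pyright-based editors.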
requirements.txt
ADDED
@@ -0,0 +1,8 @@
tensorflow==2.10.0
tensorflow-gpu==2.10.0
keras==2.10.0
numpy==1.23.5
opencv-python==4.7.0.72
scikit-image==0.19.3
Pillow==9.3.0
gradio==3.50.2
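The pins target the TensorFlow 2.10 line and the Gradio 3.x file-upload behavior that gradio_app.py relies on; installing them into a fresh virtual environment with pip install -r requirements.txt should be enough to run both scripts.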