Commit 25ab7f8
Parent: d90092b ("Initial Commit")

This commit adds a Local Binary Patterns (LBP) image encoder (src/LBP.py) and wires it into the app (src/app.py): LBP embeddings are computed for the dataset, indexed with FAISS, and exposed as a selectable descriptor in get_neighbors.

Files changed:
- src/LBP.py (+35, -0)
- src/app.py (+14, -1)
src/LBP.py
ADDED
@@ -0,0 +1,35 @@
+import cv2
+import numpy as np
+from skimage import feature
+
+class LBPImageEncoder:
+    def __init__(self, numPoints, radius):
+        self.numPoints = numPoints
+        self.radius = radius
+
+    def describe(self, image, eps=1e-7):
+        # Uniform LBP gives a fixed-length descriptor; np.asarray also accepts PIL input
+        lbp = feature.local_binary_pattern(np.asarray(image), self.numPoints, self.radius, method="uniform")
+        hist, _ = np.histogram(lbp.ravel(), bins=np.arange(0, self.numPoints + 3), range=(0, self.numPoints + 2))
+        # Normalize the histogram so it sums to 1 (eps avoids division by zero)
+        hist = hist.astype("float")
+        hist /= (hist.sum() + eps)
+        return lbp, hist
+
+    def face_detection(self, image):
+        cascadePath = "haarcascade_frontalface_default.xml"
+        detector = cv2.CascadeClassifier(cascadePath)
+        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+        rects = detector.detectMultiScale(gray, scaleFactor=1.05, minNeighbors=10, minSize=(30, 30), flags=cv2.CASCADE_SCALE_IMAGE)
+        return rects
+
+    def preprocess_img(self, imagePath):
+        img = cv2.imread(imagePath)
+        rects = self.face_detection(img)
+        # Fall back to the whole image when no face is detected
+        if len(rects) == 0:
+            return self.describe(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))
+        for (x, y, w, h) in rects:
+            face = img[y:y + h, x:x + w]
+            face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
+        return self.describe(face)
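
For context, a minimal sketch of how the new encoder could be exercised on its own; the image path is illustrative (not part of the commit), and the (8, 2) parameters are the ones src/app.py uses below:

# Hypothetical standalone usage of LBPImageEncoder (not part of the commit)
from LBP import LBPImageEncoder

encoder = LBPImageEncoder(8, 2)          # 8 sample points at radius 2
lbp_map, hist = encoder.preprocess_img("example_face.jpg")  # illustrative path
print(hist.shape)   # (10,): numPoints + 2 bins for uniform LBP
print(hist.sum())   # ~1.0 after normalization
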
src/app.py
CHANGED
@@ -1,5 +1,6 @@
 from colordescriptor import ColorDescriptor
 from CLIP import CLIPImageEncoder
+from LBP import LBPImageEncoder
 import gradio as gr
 import os
 import cv2
@@ -22,11 +23,15 @@ def index_dataset(dataset):
     clip_model = CLIPImageEncoder()
     dataset_with_embeddings = dataset_with_embeddings.map(clip_model.encode_images, batched=True, batch_size=16)
 
+    ## LBP Embeddings
+    lbp_model = LBPImageEncoder(8, 2)
+    dataset_with_embeddings = dataset_with_embeddings.map(lambda row: {'lbp_embeddings': lbp_model.describe(row["image"].convert("L"))[1]})
+
     # Add index
     dataset_with_embeddings.add_faiss_index(column='color_embeddings')
     dataset_with_embeddings.save_faiss_index('color_embeddings', 'color_index.faiss')
-
     dataset_with_embeddings.add_faiss_index(column='clip_embeddings')
+    dataset_with_embeddings.add_faiss_index(column='lbp_embeddings')
     dataset_with_embeddings.save_faiss_index('clip_embeddings', 'clip_index.faiss')
 
 
@@ -47,6 +52,7 @@ def check_index(ds):
 dataset_with_embeddings = check_index(candidate_subset)
 
 # Main function, to find similar images
+# TODO: allow different descriptor/embedding functions
 # TODO: implement different distance measures
 
 def get_neighbors(query_image, selected_descriptor, top_k=5):
@@ -74,6 +80,13 @@ def get_neighbors(query_image, selected_descriptor, top_k=5):
             'clip_embeddings', qi_embedding, k=top_k)
         images = retrieved_examples['image']
         return images
+    if "LBP" in selected_descriptor:
+        lbp_model = LBPImageEncoder(8, 2)
+        qi_embedding = lbp_model.preprocess_img(query_image)[1].astype('float32')
+        scores, retrieved_examples = dataset_with_embeddings.get_nearest_examples(
+            'lbp_embeddings', qi_embedding, k=top_k)
+        images = retrieved_examples['image']
+        return images
     else:
         print("This descriptor is not yet supported :(")
         return []
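
For context, a minimal sketch of the query path once index_dataset has run; the names are taken from the diff above, the query path is illustrative, and the default flat (L2) FAISS index created by add_faiss_index is assumed:

# Hypothetical end-to-end query against the new 'lbp_embeddings' index
import numpy as np
from LBP import LBPImageEncoder

lbp_model = LBPImageEncoder(8, 2)
_, query_hist = lbp_model.preprocess_img("query.jpg")   # illustrative path
query_hist = query_hist.astype(np.float32)              # FAISS expects float32 vectors
scores, retrieved = dataset_with_embeddings.get_nearest_examples(
    'lbp_embeddings', query_hist, k=5)
print(scores)                 # distances from the flat (L2) index; smaller is closer
print(retrieved['image'][0])  # the closest match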