Spaces commit fd0cac7 (parent: 1fbfc7c) · build status: Build error

app.py CHANGED
@@ -74,31 +74,31 @@ emb_filename = 'unsplash-25k-photos-embeddings.pkl'

Before (lines removed by this commit are prefixed with "-"; a trailing "…" marks text cut off in the rendered diff):

 with open(emb_filename, 'rb') as emb:
     img_names, img_emb = pickle.load(emb)

-def display_matches(similarity):
     best_matched_images = []
-    top_k_indices = torch.topk(similarity, …
     for matched_image in top_k_indices:
         img = Image.open(IMAGES_DIR / img_names[matched_image])
         best_matched_images.append(img)
     return best_matched_images

-def image_search(search_text, search_image, option):

     # Input Text Query
     #search_query = "The feeling when your program finally works"
-    if …
         # Extracting text features embeddings
         #text_features = encode_search_query(search_text, model, device)
         text_emb = model.encode([search_text], convert_to_tensor=True)
         similarity = util.cos_sim(img_emb, text_emb)
-        return display_matches(similarity)
         # Find the matched Images
         #matched_images = find_matches(text_features, photo_features, photo_ids, 4)
         #matched_results = util.semantic_search(text_emb, img_emb, top_k=4)[0]

         # top 4 highest ranked images
         #return display_matches(matched_results)
-    elif …
         # Input Image for Search
         #search_image = Image.fromarray(search_image.astype('uint8'), 'RGB')

@@ -119,13 +119,33 @@ def image_search(search_text, search_image, option):

         image_emb = model.encode([Image.fromarray(search_image)], convert_to_tensor=True)
         similarity = util.cos_sim(img_emb, image_emb)
-        return display_matches(similarity)

-gr.Interface(fn=image_search,
-             inputs=[gr.inputs.Textbox(lines=7, label="Input Text"),
-                     gr.inputs.Image(optional=True),
-                     gr.inputs.Dropdown(["Text-To-Image", "Image-To-Image"])
                      ],
-             outputs=gr.outputs.Carousel([gr.outputs.Image(type="pil"), gr.outputs.Image(type="pil"), gr.outputs.Image(type="pil"), gr.outputs.Image(type="pil")]),
-             enable_queue=True
 ).launch(debug=True,share=True)
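The updated version below parameterizes the number of matches: display_matches() now takes a topk argument, and torch.topk picks that many indices from the column of cosine scores that util.cos_sim(img_emb, text_emb) produces (one score per gallery image). A minimal, self-contained sketch of that retrieval step; the model name 'clip-ViT-B-32' is an assumption here, since the model is defined in a part of app.py not shown in this diff:

# Illustrative sketch of the cos_sim + topk retrieval behind display_matches(similarity, topk).
# Assumptions: the pickle holds (img_names, img_emb) as in app.py, and the encoder is a
# CLIP-style SentenceTransformer ('clip-ViT-B-32' is assumed, not shown in this hunk).
import pickle
import torch
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer('clip-ViT-B-32')

with open('unsplash-25k-photos-embeddings.pkl', 'rb') as emb:
    img_names, img_emb = pickle.load(emb)

text_emb = model.encode(["two dogs playing in the snow"], convert_to_tensor=True)
similarity = util.cos_sim(img_emb, text_emb)              # shape: (num_images, 1)

topk = 4
top_k_indices = torch.topk(similarity, topk, 0).indices   # shape: (topk, 1), row indices of best images
for matched_image in top_k_indices:
    print(img_names[int(matched_image)])                  # file name of each best-matching photo

Because the similarity tensor has one row per gallery image, topk is taken over dim 0 and the resulting row indices map directly into img_names, which is what display_matches uses to open the matched files.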
After (lines added by this commit are prefixed with "+"):

 with open(emb_filename, 'rb') as emb:
     img_names, img_emb = pickle.load(emb)

+def display_matches(similarity, topk):
     best_matched_images = []
+    top_k_indices = torch.topk(similarity, topk, 0).indices
     for matched_image in top_k_indices:
         img = Image.open(IMAGES_DIR / img_names[matched_image])
         best_matched_images.append(img)
     return best_matched_images

+def image_search(Option, topk, search_text, search_image):

     # Input Text Query
     #search_query = "The feeling when your program finally works"
+    if Option == "Text-To-Image" :
         # Extracting text features embeddings
         #text_features = encode_search_query(search_text, model, device)
         text_emb = model.encode([search_text], convert_to_tensor=True)
         similarity = util.cos_sim(img_emb, text_emb)
+        return display_matches(similarity, topk)
         # Find the matched Images
         #matched_images = find_matches(text_features, photo_features, photo_ids, 4)
         #matched_results = util.semantic_search(text_emb, img_emb, top_k=4)[0]

         # top 4 highest ranked images
         #return display_matches(matched_results)
+    elif Option == "Image-To-Image":
         # Input Image for Search
         #search_image = Image.fromarray(search_image.astype('uint8'), 'RGB')

 ... (lines 105-118 unchanged) ...

         image_emb = model.encode([Image.fromarray(search_image)], convert_to_tensor=True)
         similarity = util.cos_sim(img_emb, image_emb)
+        return display_matches(similarity, topk)

+#gr.Interface(fn=image_search,
+#             inputs=[gr.inputs.Textbox(lines=7, label="Input Text"),
+#                     gr.inputs.Image(optional=True),
+#                     gr.inputs.Dropdown(["Text-To-Image", "Image-To-Image"])
+#                     ],
+#             outputs=gr.outputs.Carousel([gr.outputs.Image(type="pil"), gr.outputs.Image(type="pil"), gr.outputs.Image(type="pil"), gr.outputs.Image(type="pil")]),
+#             enable_queue=True
+#             ).launch(debug=True,share=True)
+gr.Interface(fn=image_search, title="Search Image",
+             description="Enter the text or image to search the other most relevant images...",
+             article="""
+             Instructions:-
+             1. Select the option - `Text to Image` OR `Image To Image`.
+             2. Then accordingly enter the text or image.
+             3. Just on entering the text or image , you will get the output image on right side
+
+             Note: on entering the text, it may first show a different/unexpected image but then after a sec. it will show the correct image.
+             """,
+             theme="huggingface",
+             inputs=[gr.inputs.Dropdown(["Text-To-Image", "Image-To-Image"]),
+                     gr.inputs.Dropdown(["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"], type="index", default="1", label="Select Top K Images"),
+                     gr.inputs.Textbox(lines=3, label="Input Text", placeholder="Enter the text..."),
+                     gr.inputs.Image(type="pil", optional=True)
                      ],
+             outputs=gr.outputs.Carousel([gr.outputs.Image(type="pil")]),
+             enable_queue=True,
+             live=True
 ).launch(debug=True,share=True)
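The rebuilt Interface passes the four input values to image_search in the order they are declared: the option string, the top-k dropdown (declared with type="index", which should hand the function the 0-based position of the selected label rather than the label text), the query text, and the optional PIL image. A hypothetical smoke test of that wiring, assuming the functions and globals from app.py (model, img_emb, img_names, IMAGES_DIR) are already in scope:

# Hypothetical check of the callback wiring (not part of the commit); run it only where
# app.py's functions and globals are loaded, e.g. temporarily before the .launch() call.
from PIL import Image

# Option, topk, search_text, search_image arrive in the same order as the inputs list.
matches = image_search("Text-To-Image", 4, "a cup of coffee on a desk", None)

for img in matches:            # display_matches returns a list of PIL images for the Carousel
    assert isinstance(img, Image.Image)
print(f"{len(matches)} matches returned")

Setting live=True re-runs the function whenever an input changes, which is consistent with the note in the article text that the output image updates a moment after the text is entered.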