Commit 3c7ac16
Parent(s): 87c1220

app.py CHANGED
@@ -31,8 +31,10 @@ def show_output_image(matched_images) :
     image=[]
     for photo_id in matched_images:
         photo_image_url = f"https://unsplash.com/photos/{photo_id}/download?w=280"
-        response = requests.get(photo_image_url, stream=True)
-        img = Image.open(BytesIO(response.content))
+        #response = requests.get(photo_image_url, stream=True)
+        #img = Image.open(BytesIO(response.content))
+        response = requests.get(photo_image_url, stream=True).raw
+        img = Image.open(response).convert("RGB")
         image.append(img)
     return image

@@ -42,7 +44,7 @@ def encode_search_query(search_query, model, device):
     inputs = tokenizer([search_query], padding=True, return_tensors="pt")
     #inputs = processor(text=[search_query], images=None, return_tensors="pt", padding=True)
     text_features = model.get_text_features(**inputs).detach().numpy()
-    return
+    return text_features

 # Find all matched photos
 def find_matches(text_features, photo_features, photo_ids, results_count=4):

@@ -70,6 +72,8 @@ def image_search(search_text, search_image, option):
         return show_output_image(matched_images)
     elif option == "Image-To-Image":
         # Input Image for Search
+        search_image = PILIMAGE.fromarray(search_image.astype('uint8'), 'RGB')
+
         with torch.no_grad():
             processed_image = processor(text=None, images=search_image, return_tensors="pt", padding=True)["pixel_values"]
             image_feature = model.get_image_features(processed_image.to(device))
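For reference, show_output_image as it reads after this commit amounts to the short sketch below; the imports are assumed from the surrounding file and are not part of the diff. Response.raw on a streamed requests call is a file-like object, so PIL can decode it directly instead of buffering the bytes through BytesIO, and convert("RGB") normalises the decoded image.

import requests
from PIL import Image

def show_output_image(matched_images):
    image = []
    for photo_id in matched_images:
        photo_image_url = f"https://unsplash.com/photos/{photo_id}/download?w=280"
        # Hand the raw byte stream straight to PIL rather than wrapping it in BytesIO.
        response = requests.get(photo_image_url, stream=True).raw
        img = Image.open(response).convert("RGB")
        image.append(img)
    return image

The Image-To-Image branch now converts the incoming array to a PIL image before it reaches the CLIP processor. A minimal sketch of that step, assuming PILIMAGE aliases PIL.Image, that search_image arrives as a uint8-compatible numpy array (as a Gradio image input typically does), and with encode_uploaded_image as a hypothetical wrapper name not taken from the source:

import torch
from PIL import Image as PILIMAGE

def encode_uploaded_image(search_image, processor, model, device):
    # The CLIP processor expects PIL images, not raw numpy arrays.
    search_image = PILIMAGE.fromarray(search_image.astype('uint8'), 'RGB')
    with torch.no_grad():
        processed_image = processor(text=None, images=search_image,
                                    return_tensors="pt", padding=True)["pixel_values"]
        image_feature = model.get_image_features(processed_image.to(device))
    return image_feature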