Commit f140b23 · Parent(s): fb972ca
Update app.py
app.py CHANGED
@@ -3,6 +3,9 @@ import os
 from transformers import CLIPProcessor, CLIPTextModel, CLIPModel
 
 import gradio as gr
+import requests
+
+
 
 
 model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
@@ -16,6 +19,13 @@ def compute_text_embeddings(list_of_strings):
     inputs = processor(text=list_of_strings, return_tensors="pt", padding=True)
     return model.get_text_features(**inputs)
 
+def download_img(path):
+    img_data = requests.get(path).content
+    local_path = path.split("/")[-1] + ".jpg"
+    with open(local_path, 'wb') as handler:
+        handler.write(img_data)
+    return local_path
+
 def predict(query):
     corpus = 'Unsplash'
     n_results=3
@@ -23,7 +33,7 @@ def predict(query):
     text_embeddings = compute_text_embeddings([query]).detach().numpy()
     k = 0 if corpus == 'Unsplash' else 1
     results = np.argsort((embeddings[k]@text_embeddings.T)[:, 0])[-1:-n_results-1:-1]
-    paths = [df[k].iloc[i]['path'] for i in results]
+    paths = [download_img(df[k].iloc[i]['path']) for i in results]
     print(paths)
     return paths
 
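For context, the new download_img helper fetches each matched image over HTTP and writes it to the working directory, so predict now returns local file paths instead of remote URLs. A slightly more defensive variant is sketched below; it is not part of this commit, and the timeout and status check are assumptions added for illustration.

import requests

def download_img(path):
    # Fetch the image at `path` (a URL) and save a copy in the working directory.
    response = requests.get(path, timeout=10)  # timeout is an assumption, not in the committed code
    response.raise_for_status()                # surface HTTP errors instead of writing an error page to disk
    local_path = path.split("/")[-1] + ".jpg"
    with open(local_path, "wb") as handler:
        handler.write(response.content)
    return local_path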