SajjadAyoubi committed
Commit a7746f5 · 1 Parent(s): 99713ae

Update app.py
Files changed (1):
1. app.py +5 -5
app.py CHANGED
@@ -15,8 +15,8 @@ from transformers import RobertaModel, AutoTokenizer
 def load():
     text_encoder = RobertaModel.from_pretrained('SajjadAyoubi/clip-fa-text')
     tokenizer = AutoTokenizer.from_pretrained('SajjadAyoubi/clip-fa-text')
-    links = np.load('data.npy', allow_pickle=True)
     image_embeddings = torch.load('embedding.pt')
+    links = np.load('data.npy', allow_pickle=True)
     return text_encoder, tokenizer, links, image_embeddings
 
 
@@ -32,12 +32,12 @@ def get_html(url_list, height=224):
     return html
 
 def compute_embeddings(query):
-    with torch.no_grad():
-        return text_encoder(**tokenizer(query, return_tensors='pt')).pooler_output
+
+
 
-@st.cache(show_spinner=False)
 def image_search(query, top_k=8):
-    text_embedding = compute_embeddings(query)
+    with torch.no_grad():
+        text_embedding = text_encoder(**tokenizer(query, return_tensors='pt')).pooler_output
     values, indices = torch.cosine_similarity(text_embedding, image_embeddings).sort(descending=True)
     return [links[i] for i in indices[:top_k]]
 
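
For context, here is a minimal sketch of how the revised functions could sit in the full Streamlit app. Only load() and image_search() mirror the post-commit app.py shown in the hunks above (the now-empty compute_embeddings stub is omitted); the caching decorator arguments on load(), the UI calls at the bottom (st.text_input, st.image), and the prompt text are assumptions for illustration, since the surrounding app code is not part of this diff.

import numpy as np
import streamlit as st
import torch
from transformers import AutoTokenizer, RobertaModel


@st.cache(show_spinner=False, allow_output_mutation=True)  # assumed: cache the heavy model/embedding loading
def load():
    text_encoder = RobertaModel.from_pretrained('SajjadAyoubi/clip-fa-text').eval()
    tokenizer = AutoTokenizer.from_pretrained('SajjadAyoubi/clip-fa-text')
    image_embeddings = torch.load('embedding.pt')    # precomputed image vectors
    links = np.load('data.npy', allow_pickle=True)   # image URLs aligned with the embedding rows
    return text_encoder, tokenizer, links, image_embeddings


text_encoder, tokenizer, links, image_embeddings = load()


def image_search(query, top_k=8):
    # Embed the query with the CLIP-fa text encoder, then rank images by cosine similarity.
    with torch.no_grad():
        text_embedding = text_encoder(**tokenizer(query, return_tensors='pt')).pooler_output
    values, indices = torch.cosine_similarity(text_embedding, image_embeddings).sort(descending=True)
    return [links[i] for i in indices[:top_k]]


query = st.text_input('Describe an image')  # assumed UI; the real app's widgets are not shown in this diff
if query:
    for url in image_search(query):
        st.image(url, width=224)

The effect of the commit is visible here: the query embedding is computed under torch.no_grad() directly inside image_search instead of going through a separate cached helper, so no gradients are tracked while searching.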