RobotJelly committed
Commit 87c1220 · 1 parent: ea1e3b9
Files changed (1)
  1. app.py +3 -3
app.py CHANGED
@@ -37,12 +37,12 @@ def show_output_image(matched_images) :
     return image
 
 # Encode and normalize the search query using CLIP
-def encode_search_query(search_query, model):
+def encode_search_query(search_query, model, device):
     with torch.no_grad():
         inputs = tokenizer([search_query], padding=True, return_tensors="pt")
         #inputs = processor(text=[search_query], images=None, return_tensors="pt", padding=True)
         text_features = model.get_text_features(**inputs).detach().numpy()
-    return text_features
+    return np.array(text_features)
 
 # Find all matched photos
 def find_matches(text_features, photo_features, photo_ids, results_count=4):
@@ -62,7 +62,7 @@ def image_search(search_text, search_image, option):
 
     if option == "Text-To-Image" :
         # Extracting text features
-        text_features = encode_search_query(search_text, model)
+        text_features = encode_search_query(search_text, model, device)
 
         # Find the matched Images
         matched_images = find_matches(text_features, photo_features, photo_ids, 4)
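For context, here is how encode_search_query reads after this commit, as a self-contained sketch. The setup lines (checkpoint name, tokenizer/model construction, device choice) are illustrative assumptions; app.py defines its own module-level tokenizer, model, and device. Note that in the changed lines the new device argument is threaded through the signature and the call site but never applied to the tensors; a comment below marks where that would happen.

import numpy as np
import torch
from transformers import CLIPModel, CLIPTokenizer

# Hypothetical setup; app.py builds its own tokenizer, model, and device.
model_id = "openai/clip-vit-base-patch32"
tokenizer = CLIPTokenizer.from_pretrained(model_id)
model = CLIPModel.from_pretrained(model_id)
device = torch.device("cpu")

# Encode the search query using CLIP (shape of the function after this commit)
def encode_search_query(search_query, model, device):
    with torch.no_grad():
        inputs = tokenizer([search_query], padding=True, return_tensors="pt")
        # The device argument is accepted but unused in the diffed lines;
        # applying it would look like:
        #   inputs = {k: v.to(device) for k, v in inputs.items()}
        text_features = model.get_text_features(**inputs).detach().numpy()
    return np.array(text_features)

# Example call, mirroring the updated call site in image_search
features = encode_search_query("two dogs playing in the snow", model, device)
print(features.shape)  # (1, 512) for this checkpoint

Since .numpy() already returns an ndarray, wrapping it in np.array(...) mainly guarantees the return type (it copies the data); the substantive change is the extra parameter, which only takes effect once the tensors are actually moved onto the device.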