Cyanido committed
Commit b1fafc1 · verified · 1 Parent(s): 1ee0865

Update app.py

Files changed (1)
  1. app.py +8 -5
app.py CHANGED
@@ -1,10 +1,10 @@
  # Initialize a retriever using Qdrant and SentenceTransformer embeddings
  from langchain_community.vectorstores import Qdrant
- from langchain_community.retrievers.qdrant_sparse_vector_retriever import QdrantSparseVectorRetriever
  from langchain_community.embeddings import SentenceTransformerEmbeddings
+ from kaggle_secrets import UserSecretsClient
  from qdrant_client import QdrantClient
  import pandas as pd
- import gradio as gr
+ import gradio as gd


  embeddings = SentenceTransformerEmbeddings(model_name='sentence-transformers/clip-ViT-B-32')
@@ -13,20 +13,23 @@ def get_results(search_results):
      filtered_img_ids = [doc.metadata.get("image_id") for doc in search_results]
      return filtered_img_ids

+ user_secrets = UserSecretsClient()
+ vector_db_key = user_secrets.get_secret("vector_db_key")
+
  client = QdrantClient(
      url="https://763bc1da-0673-4535-91ac-b5538ec0287f.us-east4-0.gcp.cloud.qdrant.io:6333",
-     api_key='UOqiBgqhhu8BBWP98mwjGl7h4IhL2vMAqzO4EI9PEB66A50n9GoIiQ',
+     api_key=vector_db_key,
  ) # Persists changes to disk, fast prototyping

  COLLECTION_NAME="semantic_image_search"


  dense_vector_retriever = Qdrant(client, COLLECTION_NAME, embeddings)
- images_data = pd.read_csv("images.csv", on_bad_lines='skip')
+ images_data = pd.read_csv("/kaggle/input/fashion-product-images-dataset/fashion-dataset/images.csv", on_bad_lines='skip')

  def get_link(query):
      Search_Query = query
-     neutral_retiever = QdrantSparseVectorRetriever(retrievers=[dense_vector_retriever.as_retriever()])
+     neutral_retiever = dense_vector_retriever.as_retriever()
      result = neutral_retiever.get_relevant_documents(Search_Query)
      filtered_images = get_results(result)
      filtered_img_ids = [doc.metadata.get("image_id") for doc in result]
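
For quick reference, the retrieval path after this commit can be exercised on its own with the minimal sketch below. It mirrors the new app.py rather than replacing it, and it assumes a Kaggle runtime (for kaggle_secrets), a "vector_db_key" secret holding the Qdrant API key, and a semantic_image_search collection that already stores CLIP embeddings with an "image_id" field in each document's metadata. The image_ids_for helper and the test query are illustrative and not part of the commit.

# Minimal sketch of the post-commit retrieval flow (assumptions noted above).
from kaggle_secrets import UserSecretsClient
from langchain_community.vectorstores import Qdrant
from langchain_community.embeddings import SentenceTransformerEmbeddings
from qdrant_client import QdrantClient

# CLIP text/image embeddings, same model as app.py.
embeddings = SentenceTransformerEmbeddings(model_name='sentence-transformers/clip-ViT-B-32')

# Read the Qdrant API key from the Kaggle secret store instead of hard-coding it.
vector_db_key = UserSecretsClient().get_secret("vector_db_key")

client = QdrantClient(
    url="https://763bc1da-0673-4535-91ac-b5538ec0287f.us-east4-0.gcp.cloud.qdrant.io:6333",
    api_key=vector_db_key,
)

# Dense retrieval only; the sparse-vector retriever import was dropped in this commit.
retriever = Qdrant(client, "semantic_image_search", embeddings).as_retriever()

def image_ids_for(query):
    # Return the image_id stored in each matching document's metadata.
    docs = retriever.get_relevant_documents(query)
    return [doc.metadata.get("image_id") for doc in docs]

print(image_ids_for("red floral dress"))  # illustrative query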