HarryLee committed
Commit c1d264e · 1 Parent(s): 0358f95

Update app.py

Files changed (1): app.py (+4 −4)
app.py CHANGED

@@ -41,8 +41,8 @@ option2 = st.sidebar.selectbox(
 
 st.sidebar.success("Load Successfully!")
 
-if not torch.cuda.is_available():
-    print("Warning: No GPU found. Please add GPU to your notebook")
+#if not torch.cuda.is_available():
+#    print("Warning: No GPU found. Please add GPU to your notebook")
 
 #We use the Bi-Encoder to encode all passages, so that we can use it with sematic search
 bi_encoder = SentenceTransformer(option1)
@@ -53,7 +53,7 @@ top_k = 32 #Number of passages we want to retrieve with
 cross_encoder = CrossEncoder(option2)
 
 # load pre-train embeedings files
-embedding_cache_path = 'etsy-embeddings.pkl'
+embedding_cache_path = 'etsy-embeddings-cpu.pkl'
 print("Load pre-computed embeddings from disc")
 with open(embedding_cache_path, "rb") as fIn:
     cache_data = pickle.load(fIn)
@@ -67,7 +67,7 @@ def search(query):
     ##### Sematic Search #####
     # Encode the query using the bi-encoder and find potentially relevant passages
     query_embedding = bi_encoder.encode(query, convert_to_tensor=True)
-    query_embedding = query_embedding.cuda()
+    #query_embedding = query_embedding.cuda()
     hits = util.semantic_search(query_embedding, corpus_embeddings, top_k=top_k)
     hits = hits[0] # Get the hits for the first query
 
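The net effect of this commit is to make app.py runnable on CPU-only hosts: the hard GPU check and the query_embedding.cuda() call are commented out, and the embeddings cache is switched to etsy-embeddings-cpu.pkl, presumably re-saved with CPU tensors (a pickle containing CUDA tensors fails to deserialize on a machine without a GPU). As a rough, hypothetical sketch, such a cache could be rebuilt along these lines; the model name, passages, and pickle keys below are assumptions, not part of the commit:

import pickle

from sentence_transformers import SentenceTransformer

# Assumed bi-encoder; app.py takes the real one from the sidebar (option1).
bi_encoder = SentenceTransformer("multi-qa-MiniLM-L6-cos-v1")

# Placeholder corpus; the actual Etsy passages are not shown in this commit.
passages = ["handmade ceramic coffee mug", "vintage brass desk lamp"]

# device="cpu" keeps the resulting tensors on the CPU, so the pickle can be
# loaded later on a host without CUDA.
corpus_embeddings = bi_encoder.encode(passages, convert_to_tensor=True, device="cpu")

# The cache layout is an assumption; the diff only shows pickle.load(fIn).
with open("etsy-embeddings-cpu.pkl", "wb") as fOut:
    pickle.dump({"sentences": passages, "embeddings": corpus_embeddings}, fOut)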
 
 
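For context, the code around these hunks follows the standard sentence-transformers retrieve-and-rerank pattern: the bi-encoder feeds util.semantic_search over the cached corpus embeddings, and the cross-encoder (option2) rescores the returned candidates. A minimal CPU-only sketch of that flow, with assumed model names and cache keys (neither appears in the diff):

import pickle

from sentence_transformers import CrossEncoder, SentenceTransformer, util

bi_encoder = SentenceTransformer("multi-qa-MiniLM-L6-cos-v1")         # assumed option1
cross_encoder = CrossEncoder("cross-encoder/ms-marco-MiniLM-L-6-v2")  # assumed option2
top_k = 32  # number of candidates the bi-encoder retrieves, as in app.py

with open("etsy-embeddings-cpu.pkl", "rb") as fIn:
    cache_data = pickle.load(fIn)
passages = cache_data["sentences"]            # assumed cache key
corpus_embeddings = cache_data["embeddings"]  # assumed cache key

def search(query):
    # Bi-encoder: embed the query (CPU tensor, no .cuda()) and retrieve candidates.
    query_embedding = bi_encoder.encode(query, convert_to_tensor=True)
    hits = util.semantic_search(query_embedding, corpus_embeddings, top_k=top_k)[0]

    # Cross-encoder: rescore every (query, passage) pair for a sharper ranking.
    pairs = [(query, passages[hit["corpus_id"]]) for hit in hits]
    scores = cross_encoder.predict(pairs)
    for hit, score in zip(hits, scores):
        hit["cross-score"] = float(score)

    return sorted(hits, key=lambda h: h["cross-score"], reverse=True)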