HarryLee committed
Commit 07fc416 · Parent: c7496eb

Update app.py

Files changed (1): app.py (+1, -11)
app.py CHANGED
@@ -43,6 +43,7 @@ st.sidebar.success("Load Successfully!")
 
 if not torch.cuda.is_available():
     print("Warning: No GPU found. Please add GPU to your notebook")
+    torch.load(path, map_location='cpu')
 
 # We use the Bi-Encoder to encode all passages, so that we can use it with semantic search
 bi_encoder = SentenceTransformer(option1)
@@ -64,17 +65,6 @@ with open(embedding_cache_path, "rb") as fIn:
 # answer the query
 def search(query):
     print("Input question:", query)
-
-    ##### BM25 search (lexical search) #####
-    #bm25_scores = bm25.get_scores(bm25_tokenizer(query))
-    #top_n = np.argpartition(bm25_scores, -5)[-5:]
-    #bm25_hits = [{'corpus_id': idx, 'score': bm25_scores[idx]} for idx in top_n]
-    #bm25_hits = sorted(bm25_hits, key=lambda x: x['score'], reverse=True)
-
-    #print("Top-10 lexical search (BM25) hits")
-    #for hit in bm25_hits[0:10]:
-    #    print("\t{:.3f}\t{}".format(hit['score'], passages[hit['corpus_id']].replace("\n", " ")))
-
     ##### Semantic Search #####
     # Encode the query using the bi-encoder and find potentially relevant passages
     query_embedding = bi_encoder.encode(query, convert_to_tensor=True)
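A note on the one added line: as written, torch.load(path, map_location='cpu') discards its return value, and path is not defined anywhere in the hunks shown, so it reads as a placeholder for the usual CPU-fallback loading pattern. A minimal sketch of that pattern, with a hypothetical checkpoint filename and an assignment that are illustrative rather than part of this commit:

import torch

# Hypothetical checkpoint path; the diff's `path` variable is not visible here.
checkpoint_path = "model_checkpoint.pt"

# map_location remaps GPU-saved tensors so the checkpoint still loads on a
# CPU-only machine; keep the returned object instead of discarding it.
device = "cuda" if torch.cuda.is_available() else "cpu"
state = torch.load(checkpoint_path, map_location=device)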
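Both hunks end at the bi-encoder encode call, so the retrieval step itself is out of frame. A hedged sketch of how such a search typically continues with sentence-transformers' util.semantic_search (the model name, toy passages, and top_k are assumptions; app.py instead builds its encoder from the sidebar's option1 and reads precomputed embeddings from embedding_cache_path):

from sentence_transformers import SentenceTransformer, util

# Assumed model; app.py uses the user's sidebar selection (option1).
bi_encoder = SentenceTransformer('multi-qa-MiniLM-L6-cos-v1')

# Toy passages; app.py loads cached corpus embeddings from embedding_cache_path.
passages = ["London is the capital of England.",
            "Semantic search embeds queries and passages into one vector space."]
corpus_embeddings = bi_encoder.encode(passages, convert_to_tensor=True)

# Encode the query and retrieve the closest passages by cosine similarity.
query_embedding = bi_encoder.encode("What is semantic search?", convert_to_tensor=True)
hits = util.semantic_search(query_embedding, corpus_embeddings, top_k=2)[0]
for hit in hits:
    print("{:.3f}\t{}".format(hit['score'], passages[hit['corpus_id']]))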