Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -45,12 +45,12 @@ st.sidebar.success("Load Successfully!")
|
|
45 |
# print("Warning: No GPU found. Please add GPU to your notebook")
|
46 |
|
47 |
#We use the Bi-Encoder to encode all passages, so that we can use it with semantic search
|
48 |
-
bi_encoder = SentenceTransformer(option1)
|
49 |
bi_encoder.max_seq_length = 256 #Truncate long passages to 256 tokens
|
50 |
top_k = 32 #Number of passages we want to retrieve with the bi-encoder
|
51 |
|
52 |
#The bi-encoder will retrieve 100 documents. We use a cross-encoder, to re-rank the results list to improve the quality
|
53 |
-
cross_encoder = CrossEncoder(option2)
|
54 |
|
55 |
passages = []
|
56 |
|
@@ -69,7 +69,6 @@ def search(query):
|
|
69 |
##### Semantic Search #####
|
70 |
# Encode the query using the bi-encoder and find potentially relevant passages
|
71 |
query_embedding = bi_encoder.encode(query, convert_to_tensor=True)
|
72 |
-
#query_embedding = query_embedding.cuda()
|
73 |
hits = util.semantic_search(query_embedding, corpus_embeddings, top_k=top_k)
|
74 |
hits = hits[0] # Get the hits for the first query
|
75 |
|
|
|
45 |
# print("Warning: No GPU found. Please add GPU to your notebook")
|
46 |
|
47 |
#We use the Bi-Encoder to encode all passages, so that we can use it with semantic search
|
48 |
+
bi_encoder = SentenceTransformer(option1, device='cpu')
|
49 |
bi_encoder.max_seq_length = 256 #Truncate long passages to 256 tokens
|
50 |
top_k = 32 #Number of passages we want to retrieve with the bi-encoder
|
51 |
|
52 |
#The bi-encoder will retrieve 100 documents. We use a cross-encoder, to re-rank the results list to improve the quality
|
53 |
+
cross_encoder = CrossEncoder(option2, device='cpu')
|
54 |
|
55 |
passages = []
|
56 |
|
|
|
69 |
##### Semantic Search #####
|
70 |
# Encode the query using the bi-encoder and find potentially relevant passages
|
71 |
query_embedding = bi_encoder.encode(query, convert_to_tensor=True)
|
|
|
72 |
hits = util.semantic_search(query_embedding, corpus_embeddings, top_k=top_k)
|
73 |
hits = hits[0] # Get the hits for the first query
|
74 |
|