thisis-it nickmuchi committed

Commit 0dd62db · 0 Parent(s)

Duplicate from nickmuchi/semantic-search-with-retrieve-and-rerank

Co-authored-by: Nicholas Muchinguri <[email protected]>

Files changed (5)
  1. .gitattributes +27 -0
  2. README.md +13 -0
  3. app.py +367 -0
  4. encoder.png +0 -0
  5. requirements.txt +16 -0
.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,13 @@
+ ---
+ title: Semantic Search With Retrieve And Rerank
+ emoji: 🏃
+ colorFrom: pink
+ colorTo: green
+ sdk: streamlit
+ sdk_version: 1.2.0
+ app_file: app.py
+ pinned: false
+ duplicated_from: nickmuchi/semantic-search-with-retrieve-and-rerank
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
app.py ADDED
@@ -0,0 +1,367 @@
+ import requests
+ from sentence_transformers import SentenceTransformer, CrossEncoder, util
+ import os, re
+ import torch
+ from rank_bm25 import BM25Okapi
+ from sklearn.feature_extraction import _stop_words
+ import string
+ import numpy as np
+ import pandas as pd
+ from newspaper import Article
+ import base64
+ import docx2txt
+ from io import StringIO
+ from PyPDF2 import PdfFileReader
+ import validators
+ import nltk
+ import warnings
+ import streamlit as st
+ from PIL import Image
+
+
+ nltk.download('punkt')
+
+ from nltk import sent_tokenize
+
+ warnings.filterwarnings("ignore")
+
+ auth_token = os.environ.get("auth_token")
+
+ def extract_text_from_url(url: str):
+
+     '''Extract text from url'''
+
+     article = Article(url)
+     article.download()
+     article.parse()
+
+     # get text
+     text = article.text
+
+     # get article title
+     title = article.title
+
+     return title, text
+
+ def extract_text_from_file(file):
+
+     '''Extract text from uploaded file'''
+
+     # read text file
+     if file.type == "text/plain":
+         # To convert to a string based IO:
+         stringio = StringIO(file.getvalue().decode("cp1252"))
+
+         # To read file as string:
+         file_text = stringio.read()
+
+         return file_text, None
+
+     # read pdf file
+     elif file.type == "application/pdf":
+         pdfReader = PdfFileReader(file)
+         count = pdfReader.numPages
+         all_text = ""
+         pdf_title = pdfReader.getDocumentInfo().title
+
+         for i in range(count):
+
+             try:
+                 page = pdfReader.getPage(i)
+                 all_text += page.extractText()
+
+             except:
+                 continue
+
+         file_text = all_text
+
+         return file_text, pdf_title
+
+     # read docx file
+     elif (
+         file.type
+         == "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
+     ):
+         file_text = docx2txt.process(file)
+
+         return file_text, None
+
+ def preprocess_plain_text(text,window_size=3):
+
+     text = text.encode("ascii", "ignore").decode()  # unicode
+     text = re.sub(r"https*\S+", " ", text)  # urls
+     text = re.sub(r"@\S+", " ", text)  # mentions
+     text = re.sub(r"#\S+", " ", text)  # hashtags
+     text = re.sub(r"\s{2,}", " ", text)  # extra spaces
+     #text = re.sub("[^.,!?%$A-Za-z0-9]+", " ", text) # special characters except .,!?
+
+     #break into lines and remove leading and trailing space on each
+     lines = [line.strip() for line in text.splitlines()]
+
+     # #break multi-headlines into a line each
+     chunks = [phrase.strip() for line in lines for phrase in line.split(" ")]
+
+     # # drop blank lines
+     text = '\n'.join(chunk for chunk in chunks if chunk)
+
+     ## We split this article into paragraphs and then every paragraph into sentences
+     paragraphs = []
+     for paragraph in text.replace('\n',' ').split("\n\n"):
+         if len(paragraph.strip()) > 0:
+             paragraphs.append(sent_tokenize(paragraph.strip()))
+
+     #We combine up to 3 sentences into a passage. You can choose smaller or larger values for window_size
+     #Smaller values: context from other sentences might get lost
+     #Larger values: more context from the paragraph remains, but results are longer
+     window_size = window_size
+     passages = []
+     for paragraph in paragraphs:
+         for start_idx in range(0, len(paragraph), window_size):
+             end_idx = min(start_idx+window_size, len(paragraph))
+             passages.append(" ".join(paragraph[start_idx:end_idx]))
+
+     st.write(f"Sentences: {sum([len(p) for p in paragraphs])}")
+     st.write(f"Passages: {len(passages)}")
+
+     return passages
+
+ @st.experimental_memo(suppress_st_warning=True)
+ def bi_encode(bi_enc,passages):
+
+     global bi_encoder
+     #We use the Bi-Encoder to encode all passages, so that we can use it with semantic search
+     bi_encoder = SentenceTransformer(bi_enc,use_auth_token=auth_token)
+
+     #quantize the model
+     #bi_encoder = quantize_dynamic(model, {Linear, Embedding})
+
+     #Compute the passage embeddings
+     with st.spinner('Encoding passages into a vector space...'):
+
+         if bi_enc == 'intfloat/e5-base-v2':
+
+             corpus_embeddings = bi_encoder.encode(['passage: ' + sentence for sentence in passages], convert_to_tensor=True)
+
+         else:
+
+             corpus_embeddings = bi_encoder.encode(passages, convert_to_tensor=True)
+
+
+     st.success(f"Embeddings computed. Shape: {corpus_embeddings.shape}")
+
+     return bi_encoder, corpus_embeddings
+
+ @st.experimental_singleton(suppress_st_warning=True)
+ def cross_encode():
+
+     global cross_encoder
+     #We use a cross-encoder to re-rank the bi-encoder results and improve the quality of the hits
+     cross_encoder = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-12-v2')
+     return cross_encoder
+
+ @st.experimental_memo(suppress_st_warning=True)
+ def bm25_tokenizer(text):
+
+     # We also compare the results to lexical search (keyword search). Here, we use
+     # the BM25 algorithm which is implemented in the rank_bm25 package.
+     # We lower case our text and remove stop-words from indexing
+     tokenized_doc = []
+     for token in text.lower().split():
+         token = token.strip(string.punctuation)
+
+         if len(token) > 0 and token not in _stop_words.ENGLISH_STOP_WORDS:
+             tokenized_doc.append(token)
+     return tokenized_doc
+
+ @st.experimental_singleton(suppress_st_warning=True)
+ def bm25_api(passages):
+
+     tokenized_corpus = []
+
+     for passage in passages:
+         tokenized_corpus.append(bm25_tokenizer(passage))
+
+     bm25 = BM25Okapi(tokenized_corpus)
+
+     return bm25
+
+ bi_enc_options = ["multi-qa-mpnet-base-dot-v1","all-mpnet-base-v2","multi-qa-MiniLM-L6-cos-v1",'intfloat/e5-base-v2',"neeva/query2query"]
+
+ def display_df_as_table(model,top_k,score='score'):
+     # Display the df with text and scores as a table
+     df = pd.DataFrame([(hit[score],passages[hit['corpus_id']]) for hit in model[0:top_k]],columns=['Score','Text'])
+     df['Score'] = round(df['Score'],2)
+
+     return df
+
+ #Streamlit App
+
+ st.title("Semantic Search with Retrieve & Rerank 📝")
+
+ """
+ [![](https://img.shields.io/twitter/follow/nickmuchi?label=@nickmuchi&style=social)](https://twitter.com/nickmuchi)
+ """
+
+ window_size = st.sidebar.slider("Paragraph Window Size",min_value=1,max_value=10,value=3,key=
+                                 'slider')
+
+ bi_encoder_type = st.sidebar.selectbox("Bi-Encoder", options=bi_enc_options, key='sbox')
+
+ top_k = st.sidebar.slider("Number of Top Hits Generated",min_value=1,max_value=5,value=2)
+
+ # This function searches the indexed passages for those that
+ # best answer the query
+ def search_func(query, bi_encoder_type, top_k=top_k):
+
+     global bi_encoder, cross_encoder
+
+     st.subheader(f"Search Query: {query}")
+
+     if url_text:
+
+         st.write(f"Document Header: {title}")
+
+     elif pdf_title:
+
+         st.write(f"Document Header: {pdf_title}")
+
+     ##### BM25 search (lexical search) #####
+     bm25_scores = bm25.get_scores(bm25_tokenizer(query))
+     top_n = np.argpartition(bm25_scores, -5)[-5:]
+     bm25_hits = [{'corpus_id': idx, 'score': bm25_scores[idx]} for idx in top_n]
+     bm25_hits = sorted(bm25_hits, key=lambda x: x['score'], reverse=True)
+
+     st.subheader(f"Top-{top_k} lexical search (BM25) hits")
+
+     bm25_df = display_df_as_table(bm25_hits,top_k)
+     st.write(bm25_df.to_html(index=False), unsafe_allow_html=True)
+
+     if bi_encoder_type == 'intfloat/e5-base-v2':
+         query = 'query: ' + query
+     ##### Semantic Search #####
+     # Encode the query using the bi-encoder and find potentially relevant passages
+     question_embedding = bi_encoder.encode(query, convert_to_tensor=True)
+     question_embedding = question_embedding.cpu()
+     hits = util.semantic_search(question_embedding, corpus_embeddings, top_k=top_k,score_function=util.dot_score)
+     hits = hits[0]  # Get the hits for the first query
+
+     ##### Re-Ranking #####
+     # Now, score all retrieved passages with the cross_encoder
+     cross_inp = [[query, passages[hit['corpus_id']]] for hit in hits]
+     cross_scores = cross_encoder.predict(cross_inp)
+
+     # Sort results by the cross-encoder scores
+     for idx in range(len(cross_scores)):
+         hits[idx]['cross-score'] = cross_scores[idx]
+
+     # Output of top-k hits from bi-encoder
+     st.markdown("\n-------------------------\n")
+     st.subheader(f"Top-{top_k} Bi-Encoder Retrieval hits")
+     hits = sorted(hits, key=lambda x: x['score'], reverse=True)
+
+     cross_df = display_df_as_table(hits,top_k)
+     st.write(cross_df.to_html(index=False), unsafe_allow_html=True)
+
+     # Output of top-k hits from re-ranker
+     st.markdown("\n-------------------------\n")
+     st.subheader(f"Top-{top_k} Cross-Encoder Re-ranker hits")
+     hits = sorted(hits, key=lambda x: x['cross-score'], reverse=True)
+
+     rerank_df = display_df_as_table(hits,top_k,'cross-score')
+     st.write(rerank_df.to_html(index=False), unsafe_allow_html=True)
+
+ st.markdown(
+     """
+ - The app supports asymmetric semantic search, which seeks to improve search accuracy over documents/URLs by understanding the content of the search query, in contrast to traditional search engines which only find documents based on lexical matches.
+ - The idea behind semantic search is to embed all entries in your corpus, whether they be sentences, paragraphs, or documents, into a vector space. At search time, the query is embedded into the same vector space and the closest embeddings from your corpus are found. These entries should have a high semantic overlap with the query.
+ - The all-* models were trained on all available training data (more than 1 billion training pairs) and are designed as general purpose models. The all-mpnet-base-v2 model provides the best quality, while all-MiniLM-L6-v2 is 5 times faster and still offers good quality. The models used have been trained on broad datasets; however, if your document/corpus is specialised, such as science or economics, the results returned might be unsatisfactory.""")
+
+ st.markdown("""These are the models available to choose from:""")
+
+ st.markdown(
+     """
+ Model Source:
+ - Bi-Encoders - [multi-qa-mpnet-base-dot-v1](https://huggingface.co/sentence-transformers/multi-qa-mpnet-base-dot-v1), [all-mpnet-base-v2](https://huggingface.co/sentence-transformers/all-mpnet-base-v2), [intfloat/e5-base-v2](https://huggingface.co/intfloat/e5-base-v2) and [all-MiniLM-L6-v2](https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2)
+ - Cross-Encoder - [cross-encoder/ms-marco-MiniLM-L-12-v2](https://huggingface.co/cross-encoder/ms-marco-MiniLM-L-12-v2)""")
+
+ st.markdown(
+     """
+ Code and App Inspiration Source: [Sentence Transformers](https://www.sbert.net/examples/applications/retrieve_rerank/README.html)""")
+
+ st.markdown(
+     """
+ Quick summary of the purposes of a Bi- and Cross-encoder below; the image and info were adapted from [www.sbert.net](https://www.sbert.net/examples/applications/semantic-search/README.html):""")
+
+ st.markdown(
+     """
+ - Bi-Encoder (Retrieval): The Bi-encoder is responsible for independently embedding the sentences and search queries into a vector space. The result is then passed to the cross-encoder for checking the relevance/similarity between the query and sentences.
+ - Cross-Encoder (Re-Ranker): A re-ranker based on a Cross-Encoder can substantially improve the final results for the user. The query and a candidate document are passed simultaneously to the transformer network, which then outputs a single score between 0 and 1 indicating how relevant the document is to the given query. The cross-encoder further boosts performance, especially when you search over a corpus on which the bi-encoder was not trained.""")
+
+ st.image(Image.open('encoder.png'), caption='Retrieval and Re-Rank')
+
+ st.markdown("""
+ In order to use the app:
+ - Select the preferred Sentence Transformer model (Bi-Encoder).
+ - Select the number of sentences per paragraph to partition your corpus (Window-Size); if you choose a small value, context from the other sentences might get lost, and for larger values the results might take longer to generate.
+ - Select the number of top hits to be generated.
+ - Paste the URL with your corpus or upload your preferred document in txt, pdf or Word format.
+ - Semantic Search away!! """
+ )
+
+ st.markdown("---")
+
+ def clear_text():
+     st.session_state["text_url"] = ""
+     st.session_state["text_input"]= ""
+
+ def clear_search_text():
+     st.session_state["text_input"]= ""
+
+ url_text = st.text_input("Please Enter a url here",value="https://www.rba.gov.au/monetary-policy/rba-board-minutes/2023/2023-05-02.html",key='text_url',on_change=clear_search_text)
+
+ st.markdown(
+     "<h3 style='text-align: center; color: red;'>OR</h3>",
+     unsafe_allow_html=True,
+ )
+
+ upload_doc = st.file_uploader("Upload a .txt, .pdf, .docx file",key="upload")
+
+ search_query = st.text_input("Please Enter your search query here",value="What are the expectations for inflation for Australia?",key="text_input")
+
+ if validators.url(url_text):
+     #if input is URL
+     title, text = extract_text_from_url(url_text)
+     passages = preprocess_plain_text(text,window_size=window_size)
+
+ elif upload_doc:
+
+     text, pdf_title = extract_text_from_file(upload_doc)
+     passages = preprocess_plain_text(text,window_size=window_size)
+
+ col1, col2 = st.columns(2)
+
+ with col1:
+     search = st.button("Search",key='search_but', help='Click to Search!!')
+
+ with col2:
+     clear = st.button("Clear Text Input", on_click=clear_text,key='clear',help='Click to clear the URL input and search query')
+
+ if search:
+     if bi_encoder_type:
+
+         with st.spinner(
+             text=f"Loading {bi_encoder_type} bi-encoder and embedding document into vector space. This might take a few seconds depending on the length of your document..."
+         ):
+             bi_encoder, corpus_embeddings = bi_encode(bi_encoder_type,passages)
+             cross_encoder = cross_encode()
+             bm25 = bm25_api(passages)
+
+         with st.spinner(
+             text="Embedding completed, searching for relevant text for given query and hits..."):
+
+             search_func(search_query,bi_encoder_type,top_k)
+
+ st.markdown("""
+ """)
+
+ st.markdown("![visitor badge](https://visitor-badge.glitch.me/badge?page_id=nickmuchi-semantic-search-with-retrieve-and-rerank)")
encoder.png ADDED
requirements.txt ADDED
@@ -0,0 +1,16 @@
+ beautifulsoup4==4.9.3
+ bs4==0.0.1
+ docx2txt==0.8
+ newspaper3k==0.2.8
+ PyPDF2==1.26.0
+ regex==2021.8.28
+ requests
+ requests-file==1.5.1
+ requests-oauthlib
+ torch==1.10.1
+ transformers==4.22.0
+ validators==0.18.2
+ nltk==3.7
+ sentence-transformers==2.2.2
+ rank-bm25==0.2.2
+ spacy_streamlit==1.0.3