Commit 36a404e · Parent: 2bcd818

Added highlight search term functionality to keyword search output
Files changed:
- .gitignore +1 -0
- app.py +12 -4
- example_highlight.txt +10 -0
- requirements.txt +2 -1
- search_funcs/bm25_functions.py +5 -2
- search_funcs/helper_functions.py +115 -3
.gitignore
CHANGED
@@ -15,6 +15,7 @@
 *.npz
 *.pkl
 *.pkl.gz
+*.pem
 build/*
 dist/*
 __pycache__/*
app.py
CHANGED
@@ -9,7 +9,7 @@ PandasDataFrame = Type[pd.DataFrame]
 from search_funcs.bm25_functions import prepare_bm25_input_data, prepare_bm25, bm25_search
 from search_funcs.semantic_ingest_functions import csv_excel_text_to_docs
 from search_funcs.semantic_functions import docs_to_bge_embed_np_array, bge_simple_retrieval
-from search_funcs.helper_functions import display_info, initial_data_load, put_columns_in_join_df, get_temp_folder_path, empty_folder
+from search_funcs.helper_functions import display_info, initial_data_load, put_columns_in_join_df, get_temp_folder_path, empty_folder
 from search_funcs.spacy_search_funcs import spacy_fuzzy_search
 
 # Attempt to delete temporary files generated by previous use of the app (as the files can be very big!)
@@ -157,7 +157,7 @@ depends on factors such as the type of documents or queries. Information taken f
 
 ### BM25 SEARCH ###
 # Update dropdowns upon initial file load
-in_bm25_file.upload(initial_data_load, inputs=[in_bm25_file
+in_bm25_file.upload(initial_data_load, inputs=[in_bm25_file], outputs=[in_bm25_column, search_df_join_column, keyword_data_state, search_index_state, embeddings_state, tokenised_state, load_finished_message, current_source])
 in_join_file.upload(put_columns_in_join_df, inputs=[in_join_file], outputs=[in_join_column, join_data_state, in_join_message])
 
 # Load in BM25 data
@@ -174,7 +174,7 @@ depends on factors such as the type of documents or queries. Information taken f
 ### SEMANTIC SEARCH ###
 
 # Load in a csv/excel file for semantic search
-in_semantic_file.upload(initial_data_load, inputs=[in_semantic_file
+in_semantic_file.upload(initial_data_load, inputs=[in_semantic_file], outputs=[in_semantic_column, search_df_join_column, semantic_data_state, search_index_state, embeddings_state, tokenised_state, semantic_load_progress, current_source_semantic])
 load_semantic_data_button.click(
     csv_excel_text_to_docs, inputs=[semantic_data_state, in_semantic_file, in_semantic_column, in_clean_data, return_intermediate_files], outputs=[ingest_docs, semantic_load_progress]).\
     then(docs_to_bge_embed_np_array, inputs=[ingest_docs, in_semantic_file, embeddings_state, return_intermediate_files, embedding_super_compress], outputs=[semantic_load_progress, vectorstore_state, semantic_output_file])
@@ -183,5 +183,13 @@ depends on factors such as the type of documents or queries. Information taken f
 semantic_submit.click(bge_simple_retrieval, inputs=[semantic_query, vectorstore_state, ingest_docs, in_semantic_column, k_val, out_passages, semantic_min_distance, vec_weight, join_data_state, in_join_column, search_df_join_column], outputs=[semantic_output_single_text, semantic_output_file], api_name="semantic")
 semantic_query.submit(bge_simple_retrieval, inputs=[semantic_query, vectorstore_state, ingest_docs, in_semantic_column, k_val, out_passages, semantic_min_distance, vec_weight, join_data_state, in_join_column, search_df_join_column], outputs=[semantic_output_single_text, semantic_output_file])
 
-
+# Simple run for HF spaces or local on your computer
+block.queue().launch(debug=True)
+
+# Running on local server without https
+#block.queue().launch(server_name="0.0.0.0", server_port=7861, ssl_verify=False)
+
+# Running on local server with https: https://discuss.huggingface.co/t/how-to-run-gradio-with-0-0-0-0-and-https/38003 or https://dev.to/rajshirolkar/fastapi-over-https-for-development-on-windows-2p7d # Need to download OpenSSL and create own keys
+# block.queue().launch(ssl_verify=False, share=False, debug=False, server_name="0.0.0.0",server_port=443,
+#                      ssl_certfile="cert.pem", ssl_keyfile="key.pem") # port 443 for https. Certificates currently not valid
 
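Note: both upload handlers above use the same Gradio pattern, where a single file-upload event fans out to several dropdown and state components at once. A minimal sketch of that pattern (component names here are hypothetical, not the app's):

import gradio as gr

def on_upload(file):
    # The app's initial_data_load reads the file and returns fresh dropdown
    # choices plus cached state; this stub just returns a fixed choice list.
    return gr.Dropdown(choices=["col_a", "col_b"]), "loaded"

with gr.Blocks() as block:
    in_file = gr.File(label="Upload data file")
    in_column = gr.Dropdown(label="Search column")
    status = gr.Textbox(label="Status")
    # One upload event updates multiple outputs, as in app.py above.
    in_file.upload(on_upload, inputs=[in_file], outputs=[in_column, status])

block.queue().launch(debug=True)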
example_highlight.txt
ADDED
@@ -0,0 +1,10 @@
+# Sample DataFrame
+data = {
+    'Column1': ['This is a specific substring example', 'Another example', 'One more'],
+    'Column2': ['Some data', 'Another data', 'More data']
+}
+df = pd.DataFrame(data)
+
+# Define the column to highlight and the substrings to highlight
+column_to_highlight = 'Column1'
+substrings_to_highlight = ['specific', 'example']
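The file above is scratch material and not self-contained: it assumes pandas is imported, and the new helpers take a single query string rather than the substrings_to_highlight list. A runnable sketch wiring the same sample data into the new create_highlighted_excel_wb helper (output path hypothetical):

import pandas as pd
from search_funcs.helper_functions import create_highlighted_excel_wb

data = {
    'Column1': ['This is a specific substring example', 'Another example', 'One more'],
    'Column2': ['Some data', 'Another data', 'More data']
}
df = pd.DataFrame(data)

# The helper splits the query on spaces and marks each matched span in red
# within the chosen column before the workbook is saved.
wb = create_highlighted_excel_wb(df, "specific example", "Column1")
wb.save("example_highlight.xlsx")  # hypothetical output path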
requirements.txt
CHANGED
@@ -8,4 +8,5 @@ torch==2.1.2
 spacy==3.7.2
 en_core_web_sm @ https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.7.1/en_core_web_sm-3.7.1.tar.gz
 gradio==4.16.0
-sentence_transformers==2.3.1
+sentence_transformers==2.3.1
+lxml==5.1.0
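Presumably lxml==5.1.0 comes in because openpyxl's rich-text support, used below in helper_functions.py, depends on lxml (my inference; the commit message doesn't say). A quick check:

# Raises ImportError if lxml is missing, which would explain the new pin.
from openpyxl.cell.rich_text import CellRichText

rt = CellRichText()
rt.append("plain text")
print(rt)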
search_funcs/bm25_functions.py
CHANGED
@@ -14,7 +14,7 @@ from datetime import datetime
 today_rev = datetime.now().strftime("%Y%m%d")
 
 from search_funcs.clean_funcs import initial_clean # get_lemma_tokens, stem_sentence
-from search_funcs.helper_functions import get_file_path_end_with_ext, get_file_path_end
+from search_funcs.helper_functions import get_file_path_end_with_ext, get_file_path_end, create_highlighted_excel_wb
 
 # Load the SpaCy model
 from spacy.cli.download import download
@@ -517,7 +517,10 @@ def bm25_search(free_text_query, in_no_search_results, original_data, text_colum
     print("Saving search file output")
     progress(0.7, desc = "Saving search output to file")
 
-    results_df_out.to_excel(results_df_name, index= None)
+    # Highlight found text and save to file
+    results_df_out_wb = create_highlighted_excel_wb(results_df_out, free_text_query, "search_text")
+    results_df_out_wb.save(results_df_name)
+    #results_df_out.to_excel(results_df_name, index= None)
     results_first_text = results_df_out[text_column].iloc[0]
 
     print("Returning results")
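Side note (my inference, not stated in the commit): create_highlighted_excel_wb returns an openpyxl Workbook, so Workbook.save expects results_df_name to remain an .xlsx path, and the previous DataFrame.to_excel call is apparently kept commented out as a fallback.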
search_funcs/helper_functions.py
CHANGED
@@ -9,6 +9,12 @@ import gzip
 import pickle
 import numpy as np
 
+# Openpyxl functions for output
+from openpyxl import Workbook
+from openpyxl.cell.text import InlineFont
+from openpyxl.cell.rich_text import TextBlock, CellRichText
+from openpyxl.styles import Font
+
 # Attempt to delete content of gradio temp folder
 def get_temp_folder_path():
     username = getpass.getuser()
@@ -86,7 +92,7 @@ def read_file(filename):
 
     return file
 
-def initial_data_load(in_file, in_bm25_column):
+def initial_data_load(in_file):
     '''
     When file is loaded, update the column dropdown choices
     '''
@@ -107,7 +113,7 @@ def initial_data_load(in_file, in_bm25_column):
     if not data_file_names:
         out_message = "Please load in at least one csv/Excel/parquet data file."
         print(out_message)
-        return gr.Dropdown(choices=concat_choices), gr.Dropdown(choices=concat_choices), pd.DataFrame(),
+        return gr.Dropdown(choices=concat_choices), gr.Dropdown(choices=concat_choices), pd.DataFrame(), index_load, out_message
 
     data_file_name = data_file_names[0]
 
@@ -179,7 +185,7 @@ def put_columns_in_join_df(in_file):
 
     return gr.Dropdown(choices=concat_choices), new_df, out_message
 
-
+
 """
 A dummy function that exists just so that dropdown updates work correctly.
 """
@@ -188,3 +194,109 @@ def dummy_function(gradio_component):
 def display_info(info_component):
     gr.Info(info_component)
 
+def highlight_found_text(search_text: str, full_text: str) -> str:
+    """
+    Highlights occurrences of search_text within full_text.
+
+    Parameters:
+    - search_text (str): The text to be searched for within full_text.
+    - full_text (str): The text within which search_text occurrences will be highlighted.
+
+    Returns:
+    - str: A string with occurrences of search_text highlighted.
+
+    Example:
+    >>> highlight_found_text("world", "Hello, world! This is a test. Another world awaits.")
+    'Hello, <mark style="color:black;">world</mark>! This is a test. Another <mark style="color:black;">world</mark> awaits.'
+    """
+
+    def extract_text_from_input(text, i=0):
+        if isinstance(text, str):
+            return text
+        elif isinstance(text, list):
+            return text[i][0]
+        else:
+            return ""
+
+    def extract_search_text_from_input(text):
+        if isinstance(text, str):
+            return text
+        elif isinstance(text, list):
+            return text[-1][1]
+        else:
+            return ""
+
+    full_text = extract_text_from_input(full_text)
+    search_text = extract_search_text_from_input(search_text)
+
+    sections = search_text.split(sep = " ")
+
+    found_positions = {}
+    for x in sections:
+        text_start_pos = 0
+        while text_start_pos != -1:
+            text_start_pos = full_text.find(x, text_start_pos)
+            if text_start_pos != -1:
+                found_positions[text_start_pos] = text_start_pos + len(x)
+                text_start_pos += 1
+
+    # Combine overlapping or adjacent positions
+    sorted_starts = sorted(found_positions.keys())
+    combined_positions = []
+    if sorted_starts:
+        current_start, current_end = sorted_starts[0], found_positions[sorted_starts[0]]
+        for start in sorted_starts[1:]:
+            if start <= (current_end + 10):
+                current_end = max(current_end, found_positions[start])
+            else:
+                combined_positions.append((current_start, current_end))
+                current_start, current_end = start, found_positions[start]
+        combined_positions.append((current_start, current_end))
+
+    # Construct pos_tokens
+    pos_tokens = []
+    prev_end = 0
+    for start, end in combined_positions:
+        if end-start > 1: # Only combine if there is a significant amount of matched text. Avoids picking up single words like 'and' etc.
+            pos_tokens.append(full_text[prev_end:start])
+            pos_tokens.append('<mark style="color:black;">' + full_text[start:end] + '</mark>')
+            prev_end = end
+    pos_tokens.append(full_text[prev_end:])
+
+    return "".join(pos_tokens), combined_positions
+
+def create_rich_text_cell_from_positions(full_text, combined_positions):
+    # Construct pos_tokens
+    red = InlineFont(color='00FF0000')
+    rich_text_cell = CellRichText()
+
+    prev_end = 0
+    for start, end in combined_positions:
+        if end-start > 1: # Only combine if there is a significant amount of matched text. Avoids picking up single words like 'and' etc.
+            rich_text_cell.append(full_text[prev_end:start])
+            rich_text_cell.append(TextBlock(red, full_text[start:end]))
+            prev_end = end
+    rich_text_cell.append(full_text[prev_end:])
+
+    return rich_text_cell
+
+def create_highlighted_excel_wb(df, search_text, column_to_highlight):
+
+    # Create a new Excel workbook
+    wb = Workbook()
+    sheet = wb.active
+
+    # Insert headers into the worksheet, make bold
+    sheet.append(df.columns.tolist())
+    for cell in sheet[1]:
+        cell.font = Font(bold=True)
+
+    # Find substrings in cells and highlight
+    for r_idx, row in enumerate(df.itertuples(), start=2):
+        for c_idx, cell_value in enumerate(row[1:], start=1):
+            sheet.cell(row=r_idx, column=c_idx, value=cell_value)
+            if df.columns[c_idx - 1] == column_to_highlight:
+                html_text, combined_positions = highlight_found_text(search_text, cell_value)
+                sheet.cell(row=r_idx, column=c_idx).value = create_rich_text_cell_from_positions(cell_value, combined_positions)
+
+    return wb
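A quick demonstration sketch of the new helpers, built from the docstring's own example (file path hypothetical):

import pandas as pd
from search_funcs.helper_functions import highlight_found_text, create_highlighted_excel_wb

text = "Hello, world! This is a test. Another world awaits."
html, positions = highlight_found_text("world", text)
print(html)       # both matches wrapped in <mark style="color:black;">...</mark>
print(positions)  # [(7, 12), (38, 43)] - character spans of the combined matches

wb = create_highlighted_excel_wb(pd.DataFrame({"search_text": [text]}), "world", "search_text")
wb.save("highlighted.xlsx")  # hypothetical path; matched spans rendered in red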