import os
import re
import shutil
import getpass
import gzip
import pickle
import numpy as np
import pandas as pd
import gradio as gr
# Openpyxl functions for output
from openpyxl import Workbook
from openpyxl.cell.text import InlineFont
from openpyxl.cell.rich_text import TextBlock, CellRichText
from openpyxl.styles import Font, Alignment
megabyte = 1024 * 1024 # Bytes in a megabyte
file_size_mb = 500 # Size in megabytes
file_size_bytes_500mb = megabyte * file_size_mb
def get_or_create_env_var(var_name, default_value):
    # Get the environment variable if it exists
    value = os.environ.get(var_name)

    # If it doesn't exist, set it to the default value
    if value is None:
        os.environ[var_name] = default_value
        value = default_value

    return value
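# A minimal usage sketch for get_or_create_env_var (the values below are illustrative):
#   os.environ['GRADIO_OUTPUT_FOLDER'] = 'my_results/'
#   get_or_create_env_var('GRADIO_OUTPUT_FOLDER', 'output/')  # -> 'my_results/'
#   get_or_create_env_var('SOME_UNSET_VAR', 'default/')       # -> 'default/' (and sets the env var)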
# Retrieving or setting output folder
env_var_name = 'GRADIO_OUTPUT_FOLDER'
default_value = 'output/'
output_folder = get_or_create_env_var(env_var_name, default_value)
print(f'The value of {env_var_name} is {output_folder}')
def ensure_output_folder_exists(output_folder):
    """Checks if the output folder exists, and creates it if not."""
    folder_name = output_folder

    if not os.path.exists(folder_name):
        # Create the folder if it doesn't exist
        os.makedirs(folder_name)
        print(f"Created the output folder: {folder_name}")
    else:
        print(f"The output folder already exists: {folder_name}")
# Attempt to delete the contents of the Gradio temp folder
def get_temp_folder_path():
    # Note: this path is Windows-specific
    username = getpass.getuser()
    return os.path.join('C:\\Users', username, 'AppData\\Local\\Temp\\gradio')
def empty_folder(directory_path):
    if not os.path.exists(directory_path):
        # No temporary files from previous app use found to delete
        return

    for filename in os.listdir(directory_path):
        file_path = os.path.join(directory_path, filename)
        try:
            if os.path.isfile(file_path) or os.path.islink(file_path):
                os.unlink(file_path)
            elif os.path.isdir(file_path):
                shutil.rmtree(file_path)
        except Exception as e:
            print(f'Failed to delete {file_path}. Reason: {e}')
def get_file_path_end(file_path):
    # First, get the basename of the file (e.g. "example.txt" from "/path/to/example.txt")
    basename = os.path.basename(file_path)

    # Then, split off the extension and return only the filename stem
    filename_without_extension, _ = os.path.splitext(basename)

    return filename_without_extension
def get_file_path_end_with_ext(file_path):
    # Return the filename including its extension (everything after the last path separator)
    match = re.search(r'(.*[\/\\])?(.+)$', file_path)
    filename_end = match.group(2) if match else ''
    return filename_end
def detect_file_type(filename):
    """Detect the file type based on its extension."""
    if filename.endswith('.csv') or filename.endswith('.csv.gz') or filename.endswith('.zip'):
        return 'csv'
    elif filename.endswith('.xlsx'):
        return 'xlsx'
    elif filename.endswith('.parquet'):
        return 'parquet'
    elif filename.endswith('.pkl.gz'):
        return 'pkl.gz'
    else:
        raise ValueError("Unsupported file type.")
def read_file(filename):
    """Read the file based on its detected type."""
    file_type = detect_file_type(filename)
    print("Loading in file")

    if file_type == 'csv':
        file = pd.read_csv(filename, low_memory=False).reset_index()
    elif file_type == 'xlsx':
        file = pd.read_excel(filename).reset_index()
    elif file_type == 'parquet':
        file = pd.read_parquet(filename).reset_index()
    elif file_type == 'pkl.gz':
        # Gzipped pickle, e.g. a previously saved documents object or search index
        with gzip.open(filename, 'rb') as f:
            file = pickle.load(f)

    print("File load complete")

    return file
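# A minimal usage sketch for read_file (the filenames below are illustrative):
#   df = read_file("reviews.parquet")      # tabular data -> pandas DataFrame
#   docs = read_file("documents.pkl.gz")   # gzipped pickle -> the unpickled object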
def initial_data_load(in_file):
    '''
    When a file is loaded, update the column dropdown choices and load in any
    accompanying search index, embeddings, or tokenised data files.
    '''
    new_choices = []
    concat_choices = []
    index_load = None
    embed_load = np.array([])
    tokenised_load = []
    out_message = ""
    current_source = ""
    df = pd.DataFrame()

    file_list = [string.name for string in in_file]

    data_file_names = [string for string in file_list if "tokenised" not in string.lower() and "npz" not in string.lower() and "search_index" not in string.lower()]
    print(data_file_names)

    if not data_file_names:
        out_message = "Please load in at least one csv/Excel/parquet data file."
        print(out_message)
        return gr.Dropdown(choices=concat_choices), gr.Dropdown(choices=concat_choices), pd.DataFrame(), pd.DataFrame(), index_load, embed_load, tokenised_load, out_message, None

    # This branch handles a pre-prepared documents object for the semantic search
    if "pkl" in data_file_names[0]:
        df = read_file(data_file_names[0])
        new_choices = list(df[0].metadata.keys())
        current_source = get_file_path_end_with_ext(data_file_names[0])

    # This branch handles csv/parquet/xlsx files
    else:
        for file in data_file_names:
            current_source = current_source + get_file_path_end_with_ext(file) + " "

            # Check the size of the file
            print("Checking file size")
            file_size = os.path.getsize(file)
            if file_size > file_size_bytes_500mb:
                out_message = "Data file is greater than 500 MB in size. Please use a smaller file."
                print(out_message)
                return gr.Dropdown(choices=concat_choices), gr.Dropdown(choices=concat_choices), pd.DataFrame(), pd.DataFrame(), index_load, embed_load, tokenised_load, out_message, None

            df_new = read_file(file)
            df = pd.concat([df, df_new], ignore_index=True)

        new_choices = list(df.columns)

    concat_choices.extend(new_choices)

    # Check if there is a search index file already
    index_file_names = [string for string in file_list if "gz" in string.lower()]

    if index_file_names:
        index_file_name = index_file_names[0]
        index_load = read_file(index_file_name)

    embeddings_file_names = [string for string in file_list if "embedding" in string.lower()]

    if embeddings_file_names:
        print("Loading embeddings from file.")
        embed_load = np.load(embeddings_file_names[0])['arr_0']

        # If embedding files have 'super_compress' in the title, they were multiplied by 100 before saving
        if "compress" in embeddings_file_names[0]:
            embed_load /= 100
    else:
        embed_load = np.array([])

    tokenised_file_names = [string for string in file_list if "tokenised" in string.lower()]
    if tokenised_file_names:
        tokenised_load = read_file(tokenised_file_names[0])

    out_message = "Initial data check successful. Next, choose a data column to search in the dropdown above, then click 'Load data'."
    print(out_message)

    return gr.Dropdown(choices=concat_choices), gr.Dropdown(choices=concat_choices), df, df, index_load, embed_load, tokenised_load, out_message, current_source
def put_columns_in_join_df(in_file):
    '''
    When a file is loaded, update the column dropdown choices.
    '''
    new_df = pd.DataFrame()
    new_choices = []
    concat_choices = []

    new_df = read_file(in_file.name)
    new_choices = list(new_df.columns)
    concat_choices.extend(new_choices)

    out_message = "File load successful. Now select a column to join below."

    return gr.Dropdown(choices=concat_choices), new_df, out_message
def display_info(info_component):
    gr.Info(info_component)
def highlight_found_text(search_text: str, full_text: str) -> tuple[str, list]:
    """
    Highlights occurrences of search_text within full_text.

    Parameters:
    - search_text (str): The text to be searched for within full_text.
    - full_text (str): The text within which search_text occurrences will be highlighted.

    Returns:
    - tuple[str, list]: The full text with occurrences of search_text wrapped in
      <mark> tags, plus the list of (start, end) positions that were highlighted.

    Example:
    >>> highlight_found_text("world", "Hello, world! This is a test. Another world awaits.")
    ('Hello, <mark style="color:black;">world</mark>! This is a test. Another <mark style="color:black;">world</mark> awaits.', [(7, 12), (38, 43)])
    """
    def extract_text_from_input(text, i=0):
        if isinstance(text, str):
            return text
        elif isinstance(text, list):
            return text[i][0]
        else:
            return ""

    def extract_search_text_from_input(text):
        if isinstance(text, str):
            return text
        elif isinstance(text, list):
            return text[-1][1]
        else:
            return ""

    full_text = extract_text_from_input(full_text)
    search_text = extract_search_text_from_input(search_text)

    sections = search_text.split(sep=" ")

    found_positions = {}
    for x in sections:
        text_start_pos = 0
        while text_start_pos != -1:
            text_start_pos = full_text.find(x, text_start_pos)
            if text_start_pos != -1:
                found_positions[text_start_pos] = text_start_pos + len(x)
                text_start_pos += 1

    # Combine overlapping or adjacent positions
    sorted_starts = sorted(found_positions.keys())
    combined_positions = []
    if sorted_starts:
        current_start, current_end = sorted_starts[0], found_positions[sorted_starts[0]]
        for start in sorted_starts[1:]:
            if start <= (current_end + 10):
                current_end = max(current_end, found_positions[start])
            else:
                combined_positions.append((current_start, current_end))
                current_start, current_end = start, found_positions[start]
        combined_positions.append((current_start, current_end))

    # Construct the output string with <mark> tags around the matched spans
    pos_tokens = []
    prev_end = 0
    for start, end in combined_positions:
        if end - start > 1:  # Only highlight if there is a significant amount of matched text. Avoids picking up single characters.
            pos_tokens.append(full_text[prev_end:start])
            pos_tokens.append('<mark style="color:black;">' + full_text[start:end] + '</mark>')
            prev_end = end
    pos_tokens.append(full_text[prev_end:])

    return "".join(pos_tokens), combined_positions
def create_rich_text_cell_from_positions(full_text, combined_positions):
    # Construct a rich text cell with the matched spans coloured red
    red = InlineFont(color='00FF0000')
    rich_text_cell = CellRichText()

    prev_end = 0
    for start, end in combined_positions:
        if end - start > 1:  # Only highlight if there is a significant amount of matched text. Avoids picking up single characters.
            rich_text_cell.append(full_text[prev_end:start])
            rich_text_cell.append(TextBlock(red, full_text[start:end]))
            prev_end = end
    rich_text_cell.append(full_text[prev_end:])

    return rich_text_cell
def create_highlighted_excel_wb(df, search_text, column_to_highlight):
    # Create a new Excel workbook
    wb = Workbook()
    sheet = wb.active

    # Insert headers into the worksheet and make them bold
    sheet.append(df.columns.tolist())
    for cell in sheet[1]:
        cell.font = Font(bold=True)

    # Widen the column that will hold the highlighted text
    column_width = 150  # Adjust as needed
    relevant_column_no = (df.columns == column_to_highlight).argmax() + 1
    sheet.column_dimensions[sheet.cell(row=1, column=relevant_column_no).column_letter].width = column_width

    # Find substrings in cells and highlight them
    for r_idx, row in enumerate(df.itertuples(), start=2):
        for c_idx, cell_value in enumerate(row[1:], start=1):
            sheet.cell(row=r_idx, column=c_idx, value=cell_value)
            if df.columns[c_idx - 1] == column_to_highlight:
                _, combined_positions = highlight_found_text(search_text, cell_value)
                sheet.cell(row=r_idx, column=c_idx).value = create_rich_text_cell_from_positions(cell_value, combined_positions)
                sheet.cell(row=r_idx, column=c_idx).alignment = Alignment(wrap_text=True)

    return wb
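# A minimal end-to-end usage sketch, run only when this module is executed directly.
# The example DataFrame contents and output filename below are illustrative
# assumptions, not part of the app itself.
if __name__ == "__main__":
    ensure_output_folder_exists(output_folder)
    example_df = pd.DataFrame({"id": [1, 2], "text": ["Hello, world!", "Another world awaits."]})
    example_wb = create_highlighted_excel_wb(example_df, "world", "text")
    example_wb.save(os.path.join(output_folder, "highlighted_example.xlsx"))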