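"""Gradio app for a lightweight PDF / web page / CSV question-answering chatbot.

Loads a default embeddings model and FAISS vectorstore on start-up, lets users
swap in their own data sources (PDF, web page, CSV/Excel), and routes questions
to a small local Hugging Face model, a larger local GGUF model via llama.cpp,
or an external API model depending on the option selected in the UI.
"""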
import os
from typing import Type
from langchain_huggingface.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
import gradio as gr
import pandas as pd
from torch import float16, float32
from llama_cpp import Llama
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForCausalLM
import zipfile
from chatfuncs.ingest import embed_faiss_save_to_zip
from chatfuncs.helper_functions import get_connection_params, reveal_feedback_buttons, wipe_logs
from chatfuncs.aws_functions import upload_file_to_s3
from chatfuncs.auth import authenticate_user
from chatfuncs.config import (
    FEEDBACK_LOGS_FOLDER, ACCESS_LOGS_FOLDER, USAGE_LOGS_FOLDER, HOST_NAME, COGNITO_AUTH,
    INPUT_FOLDER, OUTPUT_FOLDER, MAX_QUEUE_SIZE, DEFAULT_CONCURRENCY_LIMIT, MAX_FILE_SIZE,
    GRADIO_SERVER_PORT, ROOT_PATH, DEFAULT_EMBEDDINGS_LOCATION, EMBEDDINGS_MODEL_NAME,
    DEFAULT_DATA_SOURCE, HF_TOKEN, LARGE_MODEL_REPO_ID, LARGE_MODEL_GGUF_FILE, LARGE_MODEL_NAME,
    SMALL_MODEL_NAME, SMALL_MODEL_REPO_ID, DEFAULT_DATA_SOURCE_NAME, DEFAULT_EXAMPLES,
    DEFAULT_MODEL_CHOICES, RUN_GEMINI_MODELS, LOAD_LARGE_MODEL,
)
from chatfuncs.model_load import torch_device, gpu_config, cpu_config, context_length
import chatfuncs.chatfuncs as chatf
import chatfuncs.ingest as ing
PandasDataFrame = Type[pd.DataFrame]
from datetime import datetime
today_rev = datetime.now().strftime("%Y%m%d")
host_name = HOST_NAME
access_logs_data_folder = ACCESS_LOGS_FOLDER
feedback_data_folder = FEEDBACK_LOGS_FOLDER
usage_data_folder = USAGE_LOGS_FOLDER
# Config values may arrive as strings (e.g. from environment variables); convert them to Python lists if so
if isinstance(DEFAULT_EXAMPLES, str):
    default_examples_set = eval(DEFAULT_EXAMPLES)
else:
    default_examples_set = DEFAULT_EXAMPLES

if isinstance(DEFAULT_MODEL_CHOICES, str):
    default_model_choices = eval(DEFAULT_MODEL_CHOICES)
else:
    default_model_choices = DEFAULT_MODEL_CHOICES
# Disable cuda devices if necessary
#os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
###
# Load preset embeddings, vectorstore, and model
###
def load_embeddings_model(embeddings_model=EMBEDDINGS_MODEL_NAME):
    """Load the sentence embeddings model used to build and query FAISS vectorstores."""
    embeddings_func = HuggingFaceEmbeddings(model_name=embeddings_model)

    return embeddings_func
def get_faiss_store(faiss_vstore_folder: str, embeddings_model: object):
    """Unzip a saved FAISS store, load it with the given embeddings model, then remove the extracted index files."""
    with zipfile.ZipFile(faiss_vstore_folder + '/' + faiss_vstore_folder + '.zip', 'r') as zip_ref:
        zip_ref.extractall(faiss_vstore_folder)

    faiss_vstore = FAISS.load_local(folder_path=faiss_vstore_folder, embeddings=embeddings_model, allow_dangerous_deserialization=True)

    os.remove(faiss_vstore_folder + "/index.faiss")
    os.remove(faiss_vstore_folder + "/index.pkl")

    return faiss_vstore
# Load in default embeddings and embeddings model name
embeddings_model = load_embeddings_model(EMBEDDINGS_MODEL_NAME)
vectorstore = get_faiss_store(faiss_vstore_folder=DEFAULT_EMBEDDINGS_LOCATION, embeddings_model=embeddings_model)

chatf.embeddings = embeddings_model
chatf.vectorstore = vectorstore
def docs_to_faiss_save(docs_out: PandasDataFrame, embeddings_model=embeddings_model):
    """Build an in-memory FAISS vectorstore from pre-split documents and register it with the chat functions."""
    print(f"> Total split documents: {len(docs_out)}")
    print(docs_out)

    vectorstore_func = FAISS.from_documents(documents=docs_out, embedding=embeddings_model)
    chatf.vectorstore = vectorstore_func

    out_message = "Document processing complete"

    return out_message, vectorstore_func
def create_hf_model(model_name: str, hf_token=HF_TOKEN):
    """Load a Hugging Face model and tokenizer, choosing the model class and device placement by model type."""
    if torch_device == "cuda":
        if "flan" in model_name:
            model = AutoModelForSeq2SeqLM.from_pretrained(model_name, device_map="auto")
        else:
            if hf_token:
                model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", token=hf_token, torch_dtype=float32) # float16 is not compatible with CPU and Gemma 3
            else:
                model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", torch_dtype=float32)
    else:
        if "flan" in model_name:
            model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
        else:
            if hf_token:
                model = AutoModelForCausalLM.from_pretrained(model_name, token=hf_token, torch_dtype=float32)
            else:
                model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=float32)

    if hf_token:
        tokenizer = AutoTokenizer.from_pretrained(model_name, model_max_length=context_length, token=hf_token)
    else:
        tokenizer = AutoTokenizer.from_pretrained(model_name, model_max_length=context_length)

    return model, tokenizer
def load_model(model_type: str, gpu_layers: int, gpu_config=gpu_config, cpu_config=cpu_config, torch_device: str = torch_device):
    """Load the selected chat model: a local GGUF model via llama.cpp, a small Hugging Face model, or an external API model name."""
    print("Loading model")

    if model_type == LARGE_MODEL_NAME:
        if torch_device == "cuda":
            gpu_config.update_gpu(gpu_layers)
            print("Loading with", gpu_config.n_gpu_layers, "model layers sent to GPU.")
        else:
            gpu_config.update_gpu(gpu_layers)
            cpu_config.update_gpu(gpu_layers)
            print("Loading with", cpu_config.n_gpu_layers, "model layers sent to GPU.")

        try:
            model = Llama(
                model_path=hf_hub_download(
                    repo_id=LARGE_MODEL_REPO_ID,
                    filename=LARGE_MODEL_GGUF_FILE
                ),
                **vars(gpu_config) # change n_gpu_layers if you have more or less VRAM
            )
        except Exception as e:
            print("GPU load failed", e, "loading CPU version instead")
            model = Llama(
                model_path=hf_hub_download(
                    repo_id=LARGE_MODEL_REPO_ID,
                    filename=LARGE_MODEL_GGUF_FILE
                ),
                **vars(cpu_config)
            )

        tokenizer = []

    elif model_type == SMALL_MODEL_NAME:
        # Hugging Face chat model
        hf_checkpoint = SMALL_MODEL_REPO_ID
        model, tokenizer = create_hf_model(model_name=hf_checkpoint)

    else:
        # API-based models (e.g. Gemini or AWS Bedrock) are called by name at generation time
        model = model_type
        tokenizer = ""

    chatf.model_object = model
    chatf.tokenizer = tokenizer
    chatf.model_type = model_type

    load_confirmation = "Finished loading model: " + model_type
    print(load_confirmation)

    return model_type, load_confirmation, model_type
###
# RUN UI
###
app = gr.Blocks(theme=gr.themes.Base(), fill_width=True)
with app:
    model_type = SMALL_MODEL_NAME
    load_model(model_type, 0, gpu_config, cpu_config, torch_device)

    # The default model is loaded on app initialisation so that users don't have to wait for it to be downloaded
    #model_type = "Phi 3.5 Mini (larger, slow)"
    #load_model(model_type, gpu_layers, gpu_config, cpu_config, torch_device)

    ingest_text = gr.State()
    ingest_metadata = gr.State()
    ingest_docs = gr.State()

    model_type_state = gr.State(model_type)
    gpu_config_state = gr.State(gpu_config)
    cpu_config_state = gr.State(cpu_config)
    torch_device_state = gr.State(torch_device)

    # Embeddings related vars
    embeddings_model_object_state = gr.State(embeddings_model)
    vectorstore_state = gr.State(vectorstore)
    default_embeddings_store_text = gr.Textbox(value=DEFAULT_EMBEDDINGS_LOCATION, visible=False)

    # Is the query relevant to the sources provided?
    relevant_query_state = gr.Checkbox(value=True, visible=False)

    # Storing model objects in state doesn't seem to work, so models are loaded in more roundabout ways instead
    model_state = gr.State() # chatf.model_object (gives error)
    tokenizer_state = gr.State() # chatf.tokenizer (gives error)

    chat_history_state = gr.State()
    instruction_prompt_out = gr.State()

    session_hash_state = gr.State()
    output_folder_textbox = gr.Textbox(value=OUTPUT_FOLDER, visible=False)
    input_folder_textbox = gr.Textbox(value=INPUT_FOLDER, visible=False)
    session_hash_textbox = gr.Textbox(value="", visible=False)
    s3_logs_output_textbox = gr.Textbox(label="S3 logs", visible=False)
    latest_user_rating_data_path = gr.Textbox(label="output_ratings_textbox", visible=False)

    access_logs_state = gr.State(access_logs_data_folder + 'dataset1.csv')
    access_s3_logs_loc_state = gr.State(access_logs_data_folder)
    usage_logs_state = gr.State(usage_data_folder + 'dataset1.csv')
    usage_s3_logs_loc_state = gr.State(usage_data_folder)
    feedback_logs_state = gr.State(feedback_data_folder + 'dataset1.csv')
    feedback_s3_logs_loc_state = gr.State(feedback_data_folder)
gr.Markdown("<h1><center>Lightweight PDF / web page QA bot</center></h1>")
gr.Markdown(f"""Chat with PDFs, web pages or data files (.csv / .xlsx). The default is a small model ({SMALL_MODEL_NAME}), that can only answer specific questions that are answered in the text. It cannot give overall impressions of, or summarise the document. Go to Advanced settings to change model to e.g. a choice of Gemini models that are available on [their very generous free tier](https://ai.google.dev/gemini-api/docs/pricing) (needs an API key), or AWS Bedrock/larger local models if activated.\n\nBy default '[{DEFAULT_DATA_SOURCE_NAME}]({DEFAULT_DATA_SOURCE})' is loaded as a data source. If you want to query another data source, please upload it on the 'Change data source' tab. If switching topic, please click the 'Clear chat' button. 'Stop generating' will halt the language model during its response.\n\n**Caution: On Hugging Face, this is a public app. Please ensure that the document you upload is not sensitive is any way as other users may see it!** Also, please note that AI chatbots may give incomplete or incorrect information, so please use with care and ensure that you verify any outputs before further use.""")
    with gr.Row():
        current_source = gr.Textbox(label="Current data source(s)", value=DEFAULT_DATA_SOURCE, scale=10)
        current_model = gr.Textbox(label="Current model", value=model_type, scale=3)

    with gr.Tab("Chatbot"):
        with gr.Row():
            chatbot = gr.Chatbot(value=None, avatar_images=('user.jfif', 'bot.jpg'), scale=1, resizable=True, show_copy_all_button=True, show_copy_button=True, show_share_button=None, type='messages', max_height=500)

        with gr.Accordion("Source paragraphs with the most relevant text will appear here", open=True):
            sources = gr.HTML(value="No relevant source paragraphs currently loaded", max_height=500)

        gr.Markdown("Make sure that your questions are as specific as possible to allow the search engine to find the most relevant text for your query.")

        with gr.Row():
            message = gr.Textbox(
                label="Enter your question here",
                lines=1,
            )

        with gr.Row():
            submit = gr.Button(value="Send message", variant="primary", scale=4)
            clear = gr.Button(value="Clear chat", variant="secondary", scale=1)
            stop = gr.Button(value="Stop generating", variant="stop", scale=1)

        examples_set = gr.Radio(label="Example questions", choices=default_examples_set)

        current_topic = gr.Textbox(label="Feature currently disabled - keywords related to the current conversation topic.", placeholder="Keywords related to the conversation topic will appear here", visible=False)
with gr.Tab("Change data source"):
with gr.Accordion("PDF file", open = False):
in_pdf = gr.File(label="Upload pdf", file_count="multiple", file_types=['.pdf'])
load_pdf = gr.Button(value="Load in file", variant="secondary", scale=0)
with gr.Accordion("Web page", open = False):
with gr.Row():
in_web = gr.Textbox(label="Enter web page url")
in_div = gr.Textbox(label="(Advanced) Web page div for text extraction", value="p", placeholder="p")
load_web = gr.Button(value="Load in webpage", variant="secondary", scale=0)
with gr.Accordion("CSV/Excel file", open = False):
in_csv = gr.File(label="Upload CSV/Excel file", file_count="multiple", file_types=['.csv', '.xlsx'])
in_text_column = gr.Textbox(label="Enter column name where text is stored")
load_csv = gr.Button(value="Load in CSV/Excel file", variant="secondary", scale=0)
with gr.Row():
ingest_embed_out = gr.Textbox(label="File/web page preparation progress")
file_out_box = gr.File(file_count='single', file_types=['.zip'])
with gr.Tab("Advanced settings - change model/model options"):
out_passages = gr.Slider(minimum=1, value = 2, maximum=10, step=1, label="Choose number of passages to retrieve from the document. Numbers greater than 2 may lead to increased hallucinations or input text being truncated.")
temp_slide = gr.Slider(minimum=0.1, value = 0.5, maximum=1, step=0.1, label="Choose temperature setting for response generation.")
with gr.Row():
with gr.Column(scale=3):
model_choice = gr.Radio(label="Choose a chat model", value=SMALL_MODEL_NAME, choices = default_model_choices)
if RUN_GEMINI_MODELS == "1":
in_api_key = gr.Textbox(value = "", label="Enter Gemini API key (only if using Google API models)", lines=1, type="password",interactive=True, visible=True)
else:
in_api_key = gr.Textbox(value = "", label="Enter Gemini API key (only if using Google API models)", lines=1, type="password",interactive=True, visible=False)
with gr.Column(scale=1):
change_model_button = gr.Button(value="Load model")
if LOAD_LARGE_MODEL == "1":
with gr.Accordion("Choose number of model layers to send to GPU (WARNING: please don't modify unless you are sure you have a GPU).", open = False, visible=True):
gpu_layer_choice = gr.Slider(label="Choose number of model layers to send to GPU.", value=0, minimum=0, maximum=100, step = 1, visible=True)
else:
with gr.Accordion("Choose number of model layers to send to GPU (WARNING: please don't modify unless you are sure you have a GPU).", open = False, visible=False):
gpu_layer_choice = gr.Slider(label="Choose number of model layers to send to GPU.", value=0, minimum=0, maximum=100, step = 1, visible=False)
load_text = gr.Text(label="Load status")
gr.HTML(
"<center>This app is powered by Gradio, Transformers, and Llama.cpp.</center>"
)
    examples_set.change(fn=chatf.update_message, inputs=[examples_set], outputs=[message])

    ###
    # CHAT PAGE
    ###

    # Click to send message
    response_click = submit.click(chatf.create_full_prompt, inputs=[message, chat_history_state, current_topic, vectorstore_state, embeddings_model_object_state, model_type_state, out_passages, in_api_key], outputs=[chat_history_state, sources, instruction_prompt_out, relevant_query_state], queue=False, api_name="retrieval").\
        success(chatf.turn_off_interactivity, inputs=None, outputs=[message, submit], queue=False).\
        success(chatf.produce_streaming_answer_chatbot, inputs=[chatbot, instruction_prompt_out, model_type_state, temp_slide, relevant_query_state, chat_history_state, in_api_key], outputs=chatbot)
    response_click.success(chatf.highlight_found_text, [chatbot, sources], [sources]).\
        success(chatf.add_inputs_answer_to_history, [message, chatbot, current_topic], [chat_history_state, current_topic]).\
        success(lambda: chatf.restore_interactivity(), None, [message, submit], queue=False)

    # Press enter to send message
    response_enter = message.submit(chatf.create_full_prompt, inputs=[message, chat_history_state, current_topic, vectorstore_state, embeddings_model_object_state, model_type_state, out_passages, in_api_key], outputs=[chat_history_state, sources, instruction_prompt_out, relevant_query_state], queue=False).\
        success(chatf.turn_off_interactivity, inputs=None, outputs=[message, submit], queue=False).\
        success(chatf.produce_streaming_answer_chatbot, [chatbot, instruction_prompt_out, model_type_state, temp_slide, relevant_query_state, chat_history_state, in_api_key], chatbot)
    response_enter.success(chatf.highlight_found_text, [chatbot, sources], [sources]).\
        success(chatf.add_inputs_answer_to_history, [message, chatbot, current_topic], [chat_history_state, current_topic]).\
        success(lambda: chatf.restore_interactivity(), None, [message, submit], queue=False)

    # Stop box
    stop.click(fn=None, inputs=None, outputs=None, cancels=[response_click, response_enter])

    # Clear box
    clear.click(chatf.clear_chat, inputs=[chat_history_state, sources, message, current_topic], outputs=[chat_history_state, sources, message, current_topic])
    clear.click(lambda: None, None, chatbot, queue=False)

    # Thumbs up or thumbs down voting function
    chatbot.like(chatf.vote, [chat_history_state, instruction_prompt_out, model_type_state], [latest_user_rating_data_path]).\
        success(fn=upload_file_to_s3, inputs=[latest_user_rating_data_path, latest_user_rating_data_path], outputs=[s3_logs_output_textbox])
    ###
    # LOAD NEW DATA PAGE
    ###

    # Load in a pdf
    load_pdf_click = load_pdf.click(ing.parse_file, inputs=[in_pdf], outputs=[ingest_text, current_source]).\
        success(ing.text_to_docs, inputs=[ingest_text], outputs=[ingest_docs]).\
        success(embed_faiss_save_to_zip, inputs=[ingest_docs, output_folder_textbox, embeddings_model_object_state], outputs=[ingest_embed_out, vectorstore_state, file_out_box]).\
        success(chatf.hide_block, outputs=[examples_set])

    # Load in a webpage
    load_web_click = load_web.click(ing.parse_html, inputs=[in_web, in_div], outputs=[ingest_text, ingest_metadata, current_source]).\
        success(ing.html_text_to_docs, inputs=[ingest_text, ingest_metadata], outputs=[ingest_docs]).\
        success(embed_faiss_save_to_zip, inputs=[ingest_docs, output_folder_textbox, embeddings_model_object_state], outputs=[ingest_embed_out, vectorstore_state, file_out_box]).\
        success(chatf.hide_block, outputs=[examples_set])

    # Load in a csv/excel file
    load_csv_click = load_csv.click(ing.parse_csv_or_excel, inputs=[in_csv, in_text_column], outputs=[ingest_text, current_source]).\
        success(ing.csv_excel_text_to_docs, inputs=[ingest_text, in_text_column], outputs=[ingest_docs]).\
        success(embed_faiss_save_to_zip, inputs=[ingest_docs, output_folder_textbox, embeddings_model_object_state], outputs=[ingest_embed_out, vectorstore_state, file_out_box]).\
        success(chatf.hide_block, outputs=[examples_set])

    ###
    # LOAD MODEL PAGE
    ###

    change_model_button.click(fn=chatf.turn_off_interactivity, inputs=None, outputs=[message, submit], queue=False).\
        success(fn=load_model, inputs=[model_choice, gpu_layer_choice], outputs=[model_type_state, load_text, current_model]).\
        success(lambda: chatf.restore_interactivity(), None, [message, submit], queue=False).\
        success(chatf.clear_chat, inputs=[chat_history_state, sources, message, current_topic], outputs=[chat_history_state, sources, message, current_topic]).\
        success(lambda: None, None, chatbot, queue=False)
    ###
    # LOGGING AND ON APP LOAD FUNCTIONS
    ###

    # Load in default model and embeddings for each user
    app.load(get_connection_params, inputs=None, outputs=[session_hash_state, output_folder_textbox, session_hash_textbox, input_folder_textbox]).\
        success(load_model, inputs=[model_type_state, gpu_layer_choice, gpu_config_state, cpu_config_state, torch_device_state], outputs=[model_type_state, load_text, current_model]).\
        success(get_faiss_store, inputs=[default_embeddings_store_text, embeddings_model_object_state], outputs=[vectorstore_state])

    # Log usernames and times of access to file (to know who is using the app when running on AWS)
    access_callback = gr.CSVLogger()
    access_callback.setup([session_hash_textbox], access_logs_data_folder)

    session_hash_textbox.change(lambda *args: access_callback.flag(list(args)), [session_hash_textbox], None, preprocess=False).\
        success(fn=upload_file_to_s3, inputs=[access_logs_state, access_s3_logs_loc_state], outputs=[s3_logs_output_textbox])
if __name__ == "__main__":
    if COGNITO_AUTH == "1":
        app.queue(max_size=int(MAX_QUEUE_SIZE), default_concurrency_limit=int(DEFAULT_CONCURRENCY_LIMIT)).launch(show_error=True, inbrowser=True, auth=authenticate_user, max_file_size=MAX_FILE_SIZE, server_port=GRADIO_SERVER_PORT, root_path=ROOT_PATH)
    else:
        app.queue(max_size=int(MAX_QUEUE_SIZE), default_concurrency_limit=int(DEFAULT_CONCURRENCY_LIMIT)).launch(show_error=True, inbrowser=True, max_file_size=MAX_FILE_SIZE, server_port=GRADIO_SERVER_PORT, root_path=ROOT_PATH)