import gradio as gr
import time
import random
import os
import shutil

# How to run ==> gradio gradio_llm_example.py
# Define text and title information
title1 = "## </br> </br> </br> 🤗💬 QA App"
title2 = "## </br> </br> </br> Gradio QA Bot"

intro = """Welcome! This is not just any bot: it is equipped with state-of-the-art natural language processing capabilities, and it is ready to answer your queries.
Ready to explore? Let's get started!

* Step 1: Upload a PDF document.
* Step 2: Type in a question related to your document's content.
* Step 3: Get your answer!

Press the Clear button before uploading a new document!
"""

about = """
## </br> About
This app is an LLM-powered chatbot built using:
- [Streamlit](https://streamlit.io/)
- [HugChat](https://github.com/Soulter/hugging-chat-api)
- Chat model = llama2-chat-hf 7B
- Retriever model = all-MiniLM-L6-v2
</br>
💡 Note: No API key required!
</br>
Made with ❤️ by us
"""
# Define theme ==> see gr.themes.builder()
theme = gr.themes.Soft(
    primary_hue="emerald",
    secondary_hue="emerald",
    neutral_hue="slate",
).set(
    body_background_fill_dark='*primary_50',
    shadow_drop='*shadow_spread',
    button_border_width='*block_border_width',
    button_border_width_dark='*block_label_border_width'
)

def upload_file(files_obj):
    """Upload several files from drag and drop and save them in a local temp folder.
    files_obj (type: list): list of tempfile._TemporaryFileWrapper
    Returns a visible checkbox group listing the uploaded documents."""
    # Create a local copy of each uploaded file
    temp_file_path = "./temp"
    if not os.path.exists(temp_file_path):
        os.makedirs(temp_file_path)
    # Save each file from the given list of files
    file_name_list = list()
    for file_obj in files_obj:
        file_name = os.path.basename(file_obj.name)
        file_name_list.append(file_name)
        shutil.copyfile(file_obj.name, os.path.join(temp_file_path, file_name))
    # Return visible components for the next selection step
    return {uploaded_check: gr.CheckboxGroup(choices=file_name_list, visible=True),
            choose_btn: gr.Button(value="Choose", visible=True)}

def read_content(content, files_name):
    """Read the selected files_name from the temp folder and update the content state variable.
    Returns the updated content_var (type: list of str)
    and a visible error_box to display status or error logs."""
    content_list = list()
    text_list = list()
    # Parse one or several documents among the selected ones
    for file_name in files_name:
        print(file_name, type(file_name))
        temp_file_path = "./temp"
        file_path = os.path.join(temp_file_path, file_name)
        # Read the document
        with open(file_path, "rb") as file:
            try:
                data = file.read()
                #### YOUR FUNCTION FOR CONTENT ==> must return a str
                my_content = str(data[:10])
                content_list.append(my_content)
                text_list.append(f"File {file_name} ready to be used. \n")
                print(data)
            except Exception as e:
                print(f"Error occurred while reading the file: {e}")
                text_list.append(f"Error occurred while reading the file {file_name}: {e}")
    return {content_var: content_list,
            error_box: gr.Textbox(value=f"""{" and ".join(text_list)} \n You can ask a question about the uploaded PDF document.""", visible=True)}
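
# Hedged sketch (not in the original app): the loop above only keeps the first
# 10 raw bytes as a placeholder for "YOUR FUNCTION FOR CONTENT". One possible
# replacement is to extract the full PDF text with pypdf. The pypdf dependency
# and the `extract_pdf_text` name are assumptions for illustration only.
def extract_pdf_text(file_path):
    """Return the concatenated text of every page of a PDF file (requires pypdf)."""
    from pypdf import PdfReader  # local import so the app still starts without pypdf
    reader = PdfReader(file_path)
    return "\n".join(page.extract_text() or "" for page in reader.pages)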

### YOUR model, using the same inputs and returning the same outputs
def my_model(message, chat_history, content_var,
             language_choice, model_choice, max_length, temperature,
             num_return_sequences, top_p, no_repeat_ngram_size):
    # No LLM here, just respond with a random pre-made message
    if content_var == []:
        bot_message = f"No context: {content_var} " + random.choice(["Tell me more about it",
                                                                     "Cool, but I'm not interested",
                                                                     "Hmmmm, ok then"])
    else:
        bot_message = f"Here is the context: {content_var}"
    chat_history.append((message, bot_message))
    return "", chat_history

def queue_bot(history):
    """Typewriter effect: progressively display the last bot message during chat."""
    bot_message = history[-1][1]
    history[-1][1] = ""
    for character in bot_message:
        history[-1][1] += character
        time.sleep(0.05)
        yield history

# App
with gr.Blocks(theme=theme) as gradioApp:
    # Initialize the document context variable as empty (no drag and drop yet)
    content_var = gr.State([])

    # Layout
    with gr.Row():
        # Column 1: About
        with gr.Column(scale=1, min_width=100):
            # gr.Image("./logo_neovision.png")
            logo_gr = gr.Markdown(""" <img src="file/logo_neovision.png" alt="logo" style="width:400px;"/>""")
            about_gr = gr.Markdown(about)

        # Column 2: Parameters
        with gr.Column(scale=2, min_width=500):
            title1_gr = gr.Markdown(title1)
            intro_gr = gr.Markdown(intro)
            # Upload several documents
            upload_button = gr.UploadButton("Browse files",
                                            size="lg", scale=0, min_width=100,
                                            file_types=[".pdf"], file_count="multiple")
            # Components kept invisible while no document is uploaded
            uploaded_check = gr.CheckboxGroup(label="Uploaded documents", visible=False,
                                              info="Do you want to use a supporting document?")
            choose_btn = gr.Button(value="Choose", visible=False)
            # Upload one or several documents and reveal the other components
            upload_button.upload(upload_file, upload_button, [uploaded_check, choose_btn])

            # Read the selected documents
            error_box = gr.Textbox(label="Reading files... ", visible=False)  # displayed only when ready or on error
            choose_btn.click(read_content, inputs=[content_var, uploaded_check], outputs=[content_var, error_box])
            # Select advanced options, passed as inputs to your model
            gr.Markdown(""" ## Toolbox """)
            with gr.Accordion(label="Select advanced options", open=False):
                model_choice = gr.Dropdown(["LLM", "Other"], label="Model", info="Choose your AI model")
                language_choice = gr.Dropdown(["English", "French"], label="Language", info="Choose your language")
                max_length = gr.Slider(label="Token length", minimum=1, maximum=100, value=50, step=1)
                temperature = gr.Slider(label="Temperature", minimum=0.1, maximum=1, value=0.8, step=0.1)
                num_return_sequences = gr.Slider(label="Return sequences", minimum=1, maximum=50, value=1, step=1)
                top_p = gr.Slider(label="Top p", minimum=0.1, maximum=1, value=0.8, step=0.1)
                no_repeat_ngram_size = gr.Slider(label="No-repeat n-gram size", minimum=1, maximum=10, value=3, step=1)

        # Column 3: Chat
        with gr.Column(scale=2, min_width=600):
            title2_gr = gr.Markdown(title2)
            chatbot = gr.Chatbot(label="Bot", height=500)
            msg = gr.Textbox(label="User", placeholder="Ask any question.")

            ### YOUR MODEL TO ADAPT
            msg.submit(my_model,
                       inputs=[msg, chatbot, content_var,
                               language_choice, model_choice, max_length, temperature,
                               num_return_sequences, top_p, no_repeat_ngram_size],
                       outputs=[msg, chatbot]).then(queue_bot, chatbot, chatbot)

            clear = gr.ClearButton(components=[msg, chatbot], value="Clear console")

gr.close_all()
gradioApp.queue()
gradioApp.launch(share=True, auth=("neovision", "gradio2023"))
# auth=("neovision", "gradio2023") is passed inside the launch parameters