dupuyta committed on
Commit
edfbd07
·
1 Parent(s): 254c56e
Files changed (1) hide show
  1. gradio_llm_example.py +74 -34
gradio_llm_example.py CHANGED
@@ -1,13 +1,22 @@
1
  import gradio as gr
2
- import numpy as np
3
- import random
4
  import time
 
5
  import os
6
  import shutil
7
- import codecs
8
  # How to RUN code ==> gradio gradio_llm_example.py
9
 
10
 
 
 
 
 
 
 
 
 
 
 
 
11
  # Define text and title information
12
  title1 = "## </br> </br> </br> 🤗💬 QA App"
13
 
@@ -72,68 +81,99 @@ def upload_file(files_obj):
72
  file_name = os.path.basename(file_obj.name)
73
  file_name_list.append(file_name)
74
  shutil.copyfile(file_obj.name, os.path.join(temp_file_path, file_name))
75
- return {uploaded_check : gr.Radio(choices=file_name_list, visible=True),
 
76
  choose_btn : gr.Button(value="Choose", visible=True)}
77
 
78
 
79
- def read_content(content, file_name):
80
- print(file_name, type(file_name))
81
- temp_file_path = "./temp"
82
- file_path = os.path.join(temp_file_path, file_name)
83
- with open(file_path, "rb") as file:
84
- try:
85
- content = file.read()
86
- print(content)
87
- return { content_var : str(content[:10]),
88
- error_box : gr.Textbox(value=f"File ready to be used. \n You can ask a question about the uploaded PDF document.", visible=True)}
89
- except Exception as e:
90
- print(f"Error occurred while writing the file: {e}")
91
- return { content_var : str(content[:10]),
92
- error_box : gr.Textbox(value=f"Error occurred while writing the file: {e}", visible=True)}
93
-
94
-
 
 
 
 
 
 
 
 
 
 
 
 
95
  def my_model(message, chat_history, content_var,
96
  language_choice, model_choice, max_length, temperature,
97
  num_return_sequences, top_p, no_repeat_ngram_size):
98
  #No LLM here, just respond with a random pre-made message
99
- if content_var == "":
100
  bot_message = f"Pas de context : {content_var}" + random.choice(["Tell me more about it",
101
  "Cool, but I'm not interested",
102
  "Hmmmm, ok then"])
103
  else:
104
  bot_message = f" Voici le context {content_var}"
105
  chat_history.append((message, bot_message))
 
106
  return "", chat_history
107
 
108
 
 
 
 
 
 
 
 
 
109
 
110
- # Layout
 
111
  with gr.Blocks(theme=gr.themes.Soft()) as gradioApp:
 
 
 
 
 
112
  with gr.Row():
 
113
  with gr.Column(scale=1, min_width=100):
114
- logo_gr = gr.Markdown(""" <img src="file/logo_neovision.png" alt="logo" style="width:400px;"/>""")
115
  # gr.Image("./logo_neovision.png")
 
116
  about_gr = gr.Markdown(about)
117
-
 
118
  with gr.Column(scale=2, min_width=500):
119
  title1_gr= gr.Markdown(title1)
120
  intro_gr = gr.Markdown(intro)
121
 
122
  # Upload several documents
123
- content_var = gr.State("")
124
  upload_button = gr.UploadButton("Browse files", label="Drag and drop your documents here",
125
  size="lg", scale=0, min_width=100,
126
  file_types=["pdf"], file_count="multiple")
127
- uploaded_check = gr.Radio(label="Uploaded documents", visible=False,
 
128
  info="Do you want to use a supporting document?")
129
  choose_btn = gr.Button(value="Choose", visible=False)
 
130
  upload_button.upload(upload_file, upload_button, [uploaded_check, choose_btn])
131
 
132
  # Read only one document
133
- error_box = gr.Textbox(label="Reading files... ", visible=False)
134
  choose_btn.click(read_content, inputs=[content_var, uploaded_check], outputs=[content_var, error_box])
135
 
136
- # Select advanced options
137
  gr.Markdown(""" ## Toolbox """)
138
  with gr.Accordion(label="Select advanced options",open=False):
139
  model_choice = gr.Dropdown(["LLM", "Other"], label="Model", info="Choose your AI model")
@@ -145,21 +185,21 @@ with gr.Blocks(theme=gr.themes.Soft()) as gradioApp:
145
  no_repeat_ngram_size= gr.Slider(label="repeat", minimum=0.1, maximum=1, value=3, step=0.1)
146
 
147
 
148
- # Chat
149
  with gr.Column(scale=2, min_width=600):
150
  title2_gr = gr.Markdown(title2)
151
- chatbot = gr.Chatbot(label="Bot", height=500, queue = True)
152
  msg = gr.Textbox(label="User", placeholder="Ask any question.")
153
- # chatbot_btn = gr.Button("Submit")
154
- msg.submit(my_model, queue = True,
155
  inputs=[msg, chatbot, content_var,
156
  language_choice, model_choice, max_length, temperature,
157
  num_return_sequences, top_p, no_repeat_ngram_size],
158
- outputs=[msg, chatbot])
159
  clear = gr.ClearButton(components=[msg, chatbot], value="Clear console")
160
 
161
 
162
  gr.close_all()
163
  gradioApp.queue()
164
  gradioApp.launch(share=True, auth=("neovision", "gradio2023"))
165
- #auth=("neovision", "gradio2023")
 
1
  import gradio as gr
 
 
2
  import time
3
+ import random
4
  import os
5
  import shutil
 
6
  # How to RUN code ==> gradio gradio_llm_example.py
7
 
8
 
9
from langchain import HuggingFacePipeline


def load_llm_model(model: str = "google/flan-t5-large") -> HuggingFacePipeline:
    """Load a HuggingFace text2text-generation pipeline wrapped for LangChain.

    NOTE: the import above must precede this definition — the return
    annotation ``HuggingFacePipeline`` is evaluated when the ``def``
    statement executes, so importing it afterwards raises NameError at
    module load time.

    Args:
        model: HuggingFace Hub model id (default: "google/flan-t5-large").

    Returns:
        A ``HuggingFacePipeline`` ready for text2text generation.
    """
    llm = HuggingFacePipeline.from_model_id(
        model_id=model,
        task="text2text-generation",
        # load_in_8bit requires bitsandbytes — presumably intended for GPU
        # memory savings; TODO confirm the runtime supports it.
        model_kwargs={"max_length": 1500, "load_in_8bit": True},
    )
    return llm
18
+
19
+
20
  # Define text and title information
21
  title1 = "## </br> </br> </br> 🤗💬 QA App"
22
 
 
81
  file_name = os.path.basename(file_obj.name)
82
  file_name_list.append(file_name)
83
  shutil.copyfile(file_obj.name, os.path.join(temp_file_path, file_name))
84
+ # return visible button for next selection
85
+ return {uploaded_check : gr.CheckboxGroup(choices=file_name_list, visible=True),
86
  choose_btn : gr.Button(value="Choose", visible=True)}
87
 
88
 
89
def read_content(content, files_name):
    """Read the selected documents from the ./temp folder and update the context state.

    Args:
        content: current value of the ``content_var`` state (replaced, not reused).
        files_name: iterable of file names selected in the checkbox group.

    Returns:
        dict of gradio component updates:
        ``content_var`` -> list of extracted content strings (one per file),
        ``error_box``  -> visible textbox joining the per-file status logs.
    """
    content_list = list()
    text_list = list()
    temp_file_path = "./temp"  # invariant: hoisted out of the loop
    # Parse one or several docs among the selected ones
    for file_name in files_name:
        print(file_name, type(file_name))
        file_path = os.path.join(temp_file_path, file_name)
        # open() lives inside the try so a missing/unreadable file is logged
        # for that file only instead of crashing the whole batch.
        try:
            with open(file_path, "rb") as file:
                file_bytes = file.read()
                #### YOUR FUNCTION FOR CONTENT ==> must be str
                my_content = str(file_bytes[:10])
                content_list.append(my_content)
                text_list.append(f"File {file_name} ready to be used. \n")
                print(file_bytes)
        except Exception as e:
            # The file is being read here, not written — message fixed accordingly.
            print(f"Error occurred while reading the file: {e}")
            text_list.append(f"Error occurred while reading the file {file_name}: {e}")
    return {content_var : content_list,
            error_box : gr.Textbox(value=f"""{" and ".join(text_list)} \n You can ask a question about the uploaded PDF document.""", visible=True)}
114
+
115
+
116
### YOUR model using the same input and returning output
def my_model(message, chat_history, content_var,
             language_choice, model_choice, max_length, temperature,
             num_return_sequences, top_p, no_repeat_ngram_size):
    """Dummy chat handler (no real LLM wired in yet).

    Builds a reply from the document context state, appends the
    (message, reply) pair to the history, and returns "" (clears the
    input textbox) together with the updated history.
    """
    # No LLM here, just respond with a random pre-made message
    if content_var != []:
        bot_message = f" Voici le context {content_var}"
    else:
        canned_replies = ["Tell me more about it",
                          "Cool, but I'm not interested",
                          "Hmmmm, ok then"]
        bot_message = f"Pas de context : {content_var}" + random.choice(canned_replies)
    chat_history.append((message, bot_message))

    return "", chat_history
130
 
131
 
132
def queue_bot(history):
    """Typewriter effect: re-emit the last bot reply one character at a time.

    Mutates ``history`` in place, yielding it after each appended
    character so the chat UI animates the in-progress reply.
    """
    full_reply = history[-1][1]
    history[-1][1] = ""
    for character in full_reply:
        history[-1][1] = history[-1][1] + character
        time.sleep(0.05)  # small delay per character for the animation
        yield history
140
 
141
+
142
+ # App
143
  with gr.Blocks(theme=gr.themes.Soft()) as gradioApp:
144
+
145
+ # Initialize the document context variable as empty without any drag and drop
146
+ content_var = gr.State([])
147
+
148
+ # Layout
149
  with gr.Row():
150
+ # Row 1 : About
151
  with gr.Column(scale=1, min_width=100):
 
152
  # gr.Image("./logo_neovision.png")
153
+ logo_gr = gr.Markdown(""" <img src="file/logo_neovision.png" alt="logo" style="width:400px;"/>""")
154
  about_gr = gr.Markdown(about)
155
+
156
+ # Row 2 : Param
157
  with gr.Column(scale=2, min_width=500):
158
  title1_gr= gr.Markdown(title1)
159
  intro_gr = gr.Markdown(intro)
160
 
161
  # Upload several documents
 
162
  upload_button = gr.UploadButton("Browse files", label="Drag and drop your documents here",
163
  size="lg", scale=0, min_width=100,
164
  file_types=["pdf"], file_count="multiple")
165
+ # invisible button while no documents uploaded
166
+ uploaded_check = gr.CheckboxGroup(label="Uploaded documents", visible=False,
167
  info="Do you want to use a supporting document?")
168
  choose_btn = gr.Button(value="Choose", visible=False)
169
+ # uploading one or several docs and display other buttons
170
  upload_button.upload(upload_file, upload_button, [uploaded_check, choose_btn])
171
 
172
  # Read only one document
173
+ error_box = gr.Textbox(label="Reading files... ", visible=False) # display only when ready or error
174
  choose_btn.click(read_content, inputs=[content_var, uploaded_check], outputs=[content_var, error_box])
175
 
176
+ # Select advanced options, to be given as input for your model
177
  gr.Markdown(""" ## Toolbox """)
178
  with gr.Accordion(label="Select advanced options",open=False):
179
  model_choice = gr.Dropdown(["LLM", "Other"], label="Model", info="Choose your AI model")
 
185
  no_repeat_ngram_size= gr.Slider(label="repeat", minimum=0.1, maximum=1, value=3, step=0.1)
186
 
187
 
188
+ # Row 3 : Chat
189
  with gr.Column(scale=2, min_width=600):
190
  title2_gr = gr.Markdown(title2)
191
+ chatbot = gr.Chatbot(label="Bot", height=500)
192
  msg = gr.Textbox(label="User", placeholder="Ask any question.")
193
+ ### YOUR MODEL TO ADAPT
194
+ msg.submit(my_model,
195
  inputs=[msg, chatbot, content_var,
196
  language_choice, model_choice, max_length, temperature,
197
  num_return_sequences, top_p, no_repeat_ngram_size],
198
+ outputs=[msg, chatbot]).then(queue_bot, chatbot, chatbot)
199
  clear = gr.ClearButton(components=[msg, chatbot], value="Clear console")
200
 
201
 
202
  gr.close_all()
203
  gradioApp.queue()
204
  gradioApp.launch(share=True, auth=("neovision", "gradio2023"))
205
+ #auth=("neovision", "gradio2023") to be placed inside the launch parameters