Update app.py

app.py (CHANGED)
@@ -156,6 +156,30 @@ def conversation(qa_chain, message, history):
     new_history = history + [(message, response_answer)]
     return qa_chain, gr.update(value=""), new_history, response_source1, response_source1_page, response_source2, response_source2_page, response_source3, response_source3_page
 
+def initialize_llm_no_doc(llm_model, temperature, max_tokens, top_k, initial_prompt, progress=gr.Progress()):
+    progress(0.1, desc="Initializing HF tokenizer...")
+
+    progress(0.5, desc="Initializing HF Hub...")
+
+    llm = HuggingFaceEndpoint(
+        repo_id=llm_model,
+        huggingfacehub_api_token=api_token,
+        temperature=temperature,
+        max_new_tokens=max_tokens,
+        top_k=top_k,
+    )
+
+    progress(0.75, desc="Defining buffer memory...")
+    memory = ConversationBufferMemory(
+        memory_key="chat_history",
+        output_key='answer',
+        return_messages=True
+    )
+    conversation_chain = ConversationChain(llm=llm, memory=memory, verbose=False)
+    conversation_chain({"question": initial_prompt})
+    progress(0.9, desc="Done!")
+    return conversation_chain
+
 def conversation_no_doc(llm, message, history):
     formatted_chat_history = format_chat_history(message, history)
     response = llm({"question": message, "chat_history": formatted_chat_history})
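For context, a minimal standalone sketch of the chain that initialize_llm_no_doc builds, assuming the langchain and langchain-community packages and an HF token in the environment (the model id below is illustrative, not necessarily one app.py offers). One caveat: ConversationChain's built-in prompt expects a single "input" key and a memory keyed "history", so the "chat_history" memory key and the {"question": ...} call in the hunk above would need a custom prompt to pass LangChain's validation.

import os

from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
from langchain_community.llms import HuggingFaceEndpoint

# Hypothetical model id; app.py passes the user's pick from its model list.
llm = HuggingFaceEndpoint(
    repo_id="mistralai/Mistral-7B-Instruct-v0.2",
    huggingfacehub_api_token=os.environ["HF_TOKEN"],  # assumed env var
    temperature=0.7,
    max_new_tokens=1024,
    top_k=3,
)

# Defaults (memory_key="history", string buffer) match ConversationChain's
# built-in prompt, so no custom prompt is needed here.
memory = ConversationBufferMemory()
chain = ConversationChain(llm=llm, memory=memory, verbose=False)

# ConversationChain takes a single "input" key; past turns are injected
# from memory rather than passed explicitly.
print(chain.predict(input="Hello! What can you do?"))
print(chain.predict(input="Now say that in one sentence."))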
@@ -288,6 +312,27 @@ def demo():
             outputs=[chatbot_no_doc, msg_no_doc],
             queue=False)
 
+        # Initialize LLM without document for conversation
+        with gr.Tab("Initialize LLM for Chatbot without document"):
+            with gr.Row():
+                llm_no_doc_btn = gr.Radio(list_llm_simple,
+                    label="LLM models", value=list_llm_simple[0], type="index", info="Choose your LLM model for chatbot without document")
+            with gr.Accordion("Advanced options - LLM model", open=False):
+                with gr.Row():
+                    slider_temperature_no_doc = gr.Slider(minimum=0.01, maximum=1.0, value=0.7, step=0.1, label="Temperature", info="Model temperature", interactive=True)
+                with gr.Row():
+                    slider_maxtokens_no_doc = gr.Slider(minimum=224, maximum=4096, value=1024, step=32, label="Max Tokens", info="Model max tokens", interactive=True)
+                with gr.Row():
+                    slider_topk_no_doc = gr.Slider(minimum=1, maximum=10, value=3, step=1, label="top-k samples", info="Model top-k samples", interactive=True)
+            with gr.Row():
+                llm_no_doc_progress = gr.Textbox(value="None", label="LLM initialization for chatbot without document")
+            with gr.Row():
+                llm_no_doc_init_btn = gr.Button("Initialize LLM for Chatbot without document")
+
+            llm_no_doc_init_btn.click(initialize_llm_no_doc,
+                inputs=[llm_no_doc_btn, slider_temperature_no_doc, slider_maxtokens_no_doc, slider_topk_no_doc, initial_prompt],
+                outputs=[llm_no_doc, llm_no_doc_progress])
+
     demo.queue().launch(debug=True, share=True)
 
 if __name__ == "__main__":
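The second hunk follows Gradio's usual Blocks wiring: components declared inside layout context managers, then a Button.click that maps component values onto handler arguments. Below is a trimmed, self-contained sketch of that pattern, with illustrative names rather than the ones in app.py; note that a handler wired to two outputs, as llm_no_doc_init_btn.click is above, must return two values (e.g. the chain and a status string).

import gradio as gr

list_models = ["model-a", "model-b"]  # stand-in for list_llm_simple

def init_llm(model_idx, temperature, max_tokens, top_k):
    # Stand-in for initialize_llm_no_doc. With type="index", the Radio
    # delivers an int index into list_models, not the label string.
    chain = f"<chain for {list_models[model_idx]}>"  # placeholder object
    status = f"Initialized (T={temperature}, max_tokens={max_tokens}, top_k={top_k})"
    return chain, status  # one value per declared output

with gr.Blocks() as demo:
    llm_state = gr.State()  # carries the chain between events, like llm_no_doc
    with gr.Tab("Initialize LLM"):
        model = gr.Radio(list_models, value=list_models[0], type="index", label="LLM models")
        with gr.Accordion("Advanced options", open=False):
            temperature = gr.Slider(0.01, 1.0, value=0.7, step=0.1, label="Temperature")
            max_tokens = gr.Slider(224, 4096, value=1024, step=32, label="Max Tokens")
            top_k = gr.Slider(1, 10, value=3, step=1, label="top-k samples")
        status = gr.Textbox(value="None", label="Initialization status")
        init_btn = gr.Button("Initialize LLM")
        init_btn.click(init_llm,
                       inputs=[model, temperature, max_tokens, top_k],
                       outputs=[llm_state, status])

if __name__ == "__main__":
    demo.queue().launch()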