Update app.py
Browse files
app.py
CHANGED
@@ -292,7 +292,7 @@ def chatbot_interface(message, history, use_web_search, model, temperature, num_
|
|
292 |
history[-1] = (message, "Generating response... (This may take a moment)")
|
293 |
yield history
|
294 |
|
295 |
-
for response in respond(message, history, model, temperature, num_calls, use_web_search):
|
296 |
history[-1] = (message, response)
|
297 |
yield history
|
298 |
except gr.CancelledError:
|
@@ -327,8 +327,7 @@ def respond(message, history, model, temperature, num_calls, use_web_search, sel
|
|
327 |
use_web_search = False # Ensure we use PDF search for summaries
|
328 |
|
329 |
if use_web_search:
|
330 |
-
for main_content, sources in get_response_with_search(message, model, num_calls=num_calls, temperature=temperature):
|
331 |
-
response = f"{main_content}\n\n{sources}"
|
332 |
yield response
|
333 |
else:
|
334 |
embed = get_embeddings()
|
@@ -520,13 +519,16 @@ Critique:
|
|
520 |
Revised Response:"""
|
521 |
|
522 |
# Generate final response
|
|
|
523 |
for chunk in client.text_generation(final_prompt, max_new_tokens=1500, temperature=temperature, stream=True):
|
524 |
-
|
|
|
525 |
|
526 |
# Add a disclaimer
|
527 |
disclaimer = ("\nNote: This response was generated by an AI model based on web search results. "
|
528 |
"While efforts have been made to ensure accuracy, please verify important information from authoritative sources.")
|
529 |
-
|
|
|
530 |
|
531 |
except Exception as e:
|
532 |
logging.error(f"Error in multi-step generation process: {str(e)}")
|
|
|
292 |
history[-1] = (message, "Generating response... (This may take a moment)")
|
293 |
yield history
|
294 |
|
295 |
+
for response in respond(message, history, model, temperature, num_calls, use_web_search, selected_docs, instruction_key):
|
296 |
history[-1] = (message, response)
|
297 |
yield history
|
298 |
except gr.CancelledError:
|
|
|
327 |
use_web_search = False # Ensure we use PDF search for summaries
|
328 |
|
329 |
if use_web_search:
|
330 |
+
for response, _ in get_response_with_search(message, model, num_calls=num_calls, temperature=temperature):
|
|
|
331 |
yield response
|
332 |
else:
|
333 |
embed = get_embeddings()
|
|
|
519 |
Revised Response:"""
|
520 |
|
521 |
# Generate final response
|
522 |
+
full_response = ""
|
523 |
for chunk in client.text_generation(final_prompt, max_new_tokens=1500, temperature=temperature, stream=True):
|
524 |
+
full_response += chunk
|
525 |
+
yield full_response, ""
|
526 |
|
527 |
# Add a disclaimer
|
528 |
disclaimer = ("\nNote: This response was generated by an AI model based on web search results. "
|
529 |
"While efforts have been made to ensure accuracy, please verify important information from authoritative sources.")
|
530 |
+
full_response += disclaimer
|
531 |
+
yield full_response, ""
|
532 |
|
533 |
except Exception as e:
|
534 |
logging.error(f"Error in multi-step generation process: {str(e)}")
|