# chatbot/app.py — HundAI chatbot: Gradio UI over a LlamaIndex RAG pipeline
# built from the PDF documents in ./data (originally hosted as a HF Space).
import os
import time
from datetime import datetime

import gradio as gr
import httpx
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
from llama_parse import LlamaParse
# ---------------------------------------------------------------------------
# LLM, parser, and retrieval setup (runs once at import time).
# ---------------------------------------------------------------------------

# Falcon-7B-Instruct served via the Hugging Face Inference API.
llm = HuggingFaceInferenceAPI(model_name="tiiuae/falcon-7b-instruct")

# SECURITY NOTE: the LlamaParse API key was previously hard-coded in source.
# Prefer the LLAMA_CLOUD_API_KEY environment variable; the inline fallback is
# kept only for backward compatibility — the exposed key should be revoked.
parser = LlamaParse(
    api_key=os.environ.get(
        "LLAMA_CLOUD_API_KEY",
        "llx-zKtsC5UBLs8DOApOsLluXMBdQhC75ea0Vs80SmPSjsmDzuhh",
    ),
    result_type="markdown",
)

# Parse every PDF under ./data with LlamaParse and load it as documents.
file_extractor = {".pdf": parser}
documents = SimpleDirectoryReader("data/", file_extractor=file_extractor).load_data()

# Embed the documents and build an in-memory vector index + query engine.
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
vector_index = VectorStoreIndex.from_documents(documents, embed_model=embed_model)
query_engine = vector_index.as_query_engine(llm=llm)

# System prompt describing the assistant's scope.
# NOTE(review): this prompt is defined but never passed to the LLM or the
# query engine anywhere in this file — wire it up (e.g. via as_query_engine's
# prompt-template arguments) or remove it.
system_prompt = """
You are an AI assistant designed to answer questions about the Hund Ecosystem based on the uploaded PDF document.
Your primary responsibility is to provide detailed, accurate, and clear answers to user queries related to the content of the document.
For any question that is not related to the content of the document, kindly ask the user to refer to the Hund Ecosystem.
Please ensure to be polite and professional in your responses. If the question cannot be answered based on the document, kindly guide the user accordingly.
"""
def query_with_retry(query, max_retries=3, wait_time=5, engine=None):
    """Run *query* against the RAG query engine, retrying on read timeouts.

    Parameters
    ----------
    query : str
        The question to send to the engine.
    max_retries : int
        Maximum number of attempts before giving up (default 3).
    wait_time : int | float
        Seconds to sleep between retries (default 5).
    engine : optional
        Query engine to use; defaults to the module-level ``query_engine``.
        (Injectable mainly for testing — backward compatible.)

    Returns
    -------
    The engine's response object, or ``None`` if a non-timeout error occurred.

    Raises
    ------
    httpx.ReadTimeout
        If every attempt times out.
    """
    if engine is None:
        engine = query_engine  # module-level engine built at import time
    for attempt in range(max_retries):
        try:
            start_time = datetime.now()
            # NOTE: no system prompt is injected here, despite what an older
            # comment claimed — the engine is queried with the raw question.
            response = engine.query(query)
            duration = (datetime.now() - start_time).total_seconds()
            print(f"Query completed in {duration:.2f} seconds.\n {response}")
            return response
        except httpx.ReadTimeout:
            if attempt < max_retries - 1:
                print(f"Timeout occurred. Retrying in {wait_time} seconds...")
                time.sleep(wait_time)
            else:
                raise  # exhausted retries: surface the timeout to the caller
        except Exception as e:
            # Non-timeout failures are not retried; report and give up.
            print(f"An error occurred: {e}")
            # BUG FIX: the original `break` fell off the loop and returned
            # None implicitly — make that outcome explicit.
            return None
    return None
def respond(message, history, engine=None):
    """Handle one chat turn: query the index and append to the chat history.

    Parameters
    ----------
    message : str
        The user's question from the textbox.
    history : list[tuple[str, str]]
        Gradio chat history as (user, bot) pairs; mutated in place.
    engine : optional
        Query engine to use; defaults to the module-level ``query_engine``.
        (Injectable mainly for testing — backward compatible.)

    Returns
    -------
    tuple
        ``(updated_history, "")`` — the empty string clears the input box,
        matching the ``outputs=[chatbot, user_message]`` wiring.
    """
    if engine is None:
        engine = query_engine  # module-level engine built at import time
    try:
        bot_message = str(engine.query(message))
        # Log line no longer reads the unrelated module global `llm`.
        print(f"\n{datetime.now()}:: {message} --> {bot_message}\n")
        history.append((message, bot_message))
        return history, ""  # clear the textbox for the next question
    except Exception as e:
        # BUG FIX: the original returned (error_string, history), which fed
        # the string into the Chatbot output and the history into the
        # Textbox. Surface the error inside the chat history instead, keeping
        # the (history, textbox) output order consistent with success.
        if str(e) == "'NoneType' object has no attribute 'as_query_engine'":
            error_text = "Please upload a file."
        else:
            error_text = f"An error occurred: {e}"
        history.append((message, error_text))
        return history, ""
# ---------------------------------------------------------------------------
# Gradio UI: chat window, input box, send/clear buttons.
# ---------------------------------------------------------------------------
with gr.Blocks(theme=gr.themes.Soft(font=[gr.themes.GoogleFont("Roboto Mono")])) as demo:
    gr.Markdown("# HundAI Chatbot🤖")
    with gr.Row():
        with gr.Column(scale=3):
            chatbot = gr.Chatbot(height=500)  # conversation history display
            user_message = gr.Textbox(
                placeholder="Ask me questions about the Hund Ecosystem!",
                container=False,
            )
            submit_btn = gr.Button("Send")
            clear_btn = gr.Button("Clear Chat")

    # Send on button click and also on pressing Enter in the textbox.
    submit_btn.click(fn=respond, inputs=[user_message, chatbot], outputs=[chatbot, user_message])
    user_message.submit(fn=respond, inputs=[user_message, chatbot], outputs=[chatbot, user_message])

    # BUG FIX: the original listed `chatbot` twice in outputs and returned
    # [None, []]; clear the chat history and the input box instead.
    clear_btn.click(lambda: ([], ""), outputs=[chatbot, user_message])

# Launch the app when run as a script.
if __name__ == "__main__":
    demo.launch()