# llm-rag / app.py
import gradio as gr
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
# Load documents from the local "data" directory and build an in-memory vector index.
# Note: with default settings, LlamaIndex uses OpenAI for both the LLM and the
# embeddings, so the OPENAI_API_KEY environment variable must be set.
documents = SimpleDirectoryReader("data").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()
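
# A possible refinement (a sketch, not part of the original app): persist the index
# to disk so documents aren't re-embedded on every restart. The "storage" directory
# name is an assumption; StorageContext and load_index_from_storage are LlamaIndex APIs.
#
# import os
# from llama_index.core import StorageContext, load_index_from_storage
# if os.path.isdir("storage"):
#     index = load_index_from_storage(StorageContext.from_defaults(persist_dir="storage"))
# else:
#     index = VectorStoreIndex.from_documents(documents)
#     index.storage_context.persist(persist_dir="storage")
# query_engine = index.as_query_engine()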
# Answer a user query against the indexed documents
def query_document(query):
    response = query_engine.query(query)
    return str(response)
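
# A defensive variant (sketch, not in the original): return a readable message on
# empty input or backend errors (e.g. a missing API key) instead of a stack trace.
#
# def query_document(query):
#     if not query.strip():
#         return "Please enter a question."
#     try:
#         return str(query_engine.query(query))
#     except Exception as exc:
#         return f"Query failed: {exc}"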
# Build the Gradio app using Blocks for better layout and UX
with gr.Blocks(css=".gradio-container {font-family: 'Arial'; background-color: #fafafa;}") as demo:
    gr.Markdown("<h1 style='text-align: center;'>📄 RAG Application with LlamaIndex</h1>")
    gr.Markdown(
        "Ask questions about the documents stored in the local directory. "
        "This app uses Retrieval-Augmented Generation (RAG) powered by LlamaIndex."
    )
    # gr.Box was removed in Gradio 4; gr.Group is its drop-in replacement.
    with gr.Group():
        query_input = gr.Textbox(
            label="Enter your query",
            placeholder="e.g., What is the refund policy mentioned in the document?",
            lines=3
        )
        submit_btn = gr.Button("Submit", variant="primary")
    response_output = gr.Textbox(label="Response", lines=8)
    submit_btn.click(fn=query_document, inputs=query_input, outputs=response_output)
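    # Optional (sketch, not in the original app): also run the query when the user
    # submits the textbox, via Gradio's Textbox.submit event.
    # query_input.submit(fn=query_document, inputs=query_input, outputs=response_output)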
# Run the app
if __name__ == "__main__":
    demo.launch()