# RAG_Demo / app.py
import gradio as gr
from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.chains import RetrievalQA
from langchain.llms import OpenAI
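# Note: these import paths are from the pre-0.1 langchain layout; on newer
# releases the same classes live under langchain_community (for example,
# langchain_community.vectorstores.FAISS).

# OpenAIEmbeddings and OpenAI read the API key from the environment. A
# minimal guard, assuming the key is supplied via the OPENAI_API_KEY env
# var (e.g. a Hugging Face Space secret); this check is an addition, not
# part of the original app.
import os

if not os.environ.get("OPENAI_API_KEY"):
    print("Warning: OPENAI_API_KEY is not set; indexing and queries will fail.")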
# Initialize the FAISS vector store
vector_store = None
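# Note: a module-level global is shared by every user connected to the
# running app; gr.State is Gradio's usual mechanism when per-session
# storage is needed instead.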
# Function to handle PDF upload and indexing
def index_pdf(pdf):
    global vector_store

    # Load the PDF
    loader = PyPDFLoader(pdf.name)
    documents = loader.load()

    # Split the documents into chunks
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
    texts = text_splitter.split_documents(documents)

    # Embed the chunks and store them in the vector store
    embeddings = OpenAIEmbeddings()
    vector_store = FAISS.from_documents(texts, embeddings)

    return "PDF indexed successfully!"
# Function to handle chatbot queries
def chatbot_query(query):
    if vector_store is None:
        return "Please upload and index a PDF first."

    # Create a retrieval-based QA chain. RetrievalQA is built via
    # from_chain_type; its constructor does not accept llm/retriever
    # keyword arguments directly.
    retriever = vector_store.as_retriever()
    qa_chain = RetrievalQA.from_chain_type(llm=OpenAI(), retriever=retriever)

    # Get the response from the QA chain
    response = qa_chain.run(query)
    return response
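
# Retrieval tuning sketch (illustrative value, not from the original):
# as_retriever accepts search parameters, e.g. to cap how many chunks are
# stuffed into the prompt.
#
#     retriever = vector_store.as_retriever(search_kwargs={"k": 3})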
# Create the Gradio interface
with gr.Blocks() as demo:
    with gr.Tab("Indexing"):
        pdf_input = gr.File(label="Upload PDF", file_types=[".pdf"])
        index_button = gr.Button("Index PDF")
        index_output = gr.Textbox(label="Indexing Status")
        index_button.click(index_pdf, inputs=pdf_input, outputs=index_output)

    with gr.Tab("Chatbot"):
        query_input = gr.Textbox(label="Enter your question")
        query_button = gr.Button("Submit")
        query_output = gr.Textbox(label="Response")
        query_button.click(chatbot_query, inputs=query_input, outputs=query_output)
# Launch the Gradio app
demo.launch()
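
# Launch options sketch (illustrative, not from the original): launch()
# also accepts share=True for a temporary public link, or
# server_name="0.0.0.0" / server_port=7860 to pin the bind address.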