|
import streamlit as st |
|
from langchain.prompts import PromptTemplate |
|
from langchain.chains.question_answering import load_qa_chain |
|
from langchain.text_splitter import RecursiveCharacterTextSplitter |
|
from langchain.vectorstores import Chroma |
|
from langchain_google_genai import GoogleGenerativeAIEmbeddings, ChatGoogleGenerativeAI |
|
from dotenv import load_dotenv |
|
import PyPDF2 |
|
import os |
|
import io |
|
|
|
st.title("Chat Your PDFs")

# Load variables from a local .env file (if present) so the Google API key
# can be supplied without exporting it in the shell.
load_dotenv()

google_api_key = os.getenv("GOOGLE_API_KEY")

if google_api_key is None:
    # Bail out early: both the embedding model and the chat model below
    # require this key. The message names the actual env var read above
    # (the old text said "google_api_key", which is not the variable name).
    st.warning("API key not found. Please set the GOOGLE_API_KEY environment variable.")
    st.stop()
|
|
|
|
|
uploaded_file = st.file_uploader("Upload a PDF file", type=["pdf"])

if uploaded_file is not None:
    st.text("PDF File Uploaded Successfully!")

    # Read the uploaded PDF fully into memory and parse it.
    pdf_data = uploaded_file.read()
    pdf_reader = PyPDF2.PdfReader(io.BytesIO(pdf_data))

    # extract_text() can return None (or "") for image-only / malformed
    # pages; coalesce to "" so the join never raises TypeError.
    context = "\n\n".join(page.extract_text() or "" for page in pdf_reader.pages)

    if not context.strip():
        # A scanned/image-only PDF yields no text; embedding an empty
        # corpus would fail downstream, so stop with a clear message.
        st.warning("No extractable text found in this PDF.")
        st.stop()

    # Chunk the document so each piece fits in the embedding model's input.
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000, chunk_overlap=200)
    texts = text_splitter.split_text(context)

    persist_directory = "data"

    # Build a Chroma vector index over the chunks and expose it as a retriever.
    # NOTE(review): Streamlit reruns this whole script on every interaction,
    # so the PDF is re-embedded each time -- consider caching the retriever
    # in st.session_state for large documents.
    embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
    vector_index = Chroma.from_texts(
        texts=texts,
        embedding=embeddings,
        persist_directory=persist_directory,
    ).as_retriever()

    user_question = st.text_input("Ask a Question:")

    if st.button("Get Answer"):
        if user_question:
            # Fetch the chunks most relevant to the question.
            docs = vector_index.get_relevant_documents(user_question)

            # Grounded-answer prompt: instructs the model to refuse rather
            # than hallucinate when the context lacks the answer.
            # (Removed a stray "?" that previously followed {context}.)
            prompt_template = """
            Answer the question as detailed as possible from the provided context,
            make sure to provide all the details, if the answer is not in
            provided context just say, "answer is not available in the context",
            don't provide the wrong answer\n\n
            Context:\n {context}\n
            Question: \n{question}\n
            Answer:
            """

            prompt = PromptTemplate(template=prompt_template, input_variables=['context', 'question'])

            # Low temperature keeps answers close to the retrieved text.
            model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3, api_key=google_api_key)

            # "stuff" chain: all retrieved docs are stuffed into one prompt.
            chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)

            response = chain({"input_documents": docs, "question": user_question}, return_only_outputs=True)

            st.subheader("Answer:")
            st.write(response['output_text'])
        else:
            st.warning("Please enter a question.")