import os

import streamlit as st
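
# NOTE: the imports below assume an older (pre-0.1) LangChain release where
# these modules still live under the top-level `langchain` package; newer
# versions expose the same classes from `langchain_community` / `langchain_openai`.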
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain.document_loaders import PyPDFLoader

# Initialise per-session state so it survives Streamlit reruns.
if "messages" not in st.session_state:
    st.session_state.messages = []
if "chain" not in st.session_state:
    st.session_state.chain = None
if "processed_pdfs" not in st.session_state:
    st.session_state.processed_pdfs = False


def create_sidebar():
    """Render the sidebar and return the OpenAI API key entered by the user."""
    with st.sidebar:
        st.title("PDF Chat")
        st.markdown("### Quick Demo of RAG")
        api_key = st.text_input("OpenAI API Key:", type="password")
        # Keep the markdown flush-left so indented lines aren't rendered
        # as a code block.
        st.markdown("""
### Tools Used
- OpenAI
- LangChain
- ChromaDB

### Steps
1. Add API key
2. Upload PDF
3. Chat!
""")
    return api_key


def process_pdfs(papers, api_key):
    """Chunk uploaded PDFs, embed them into Chroma, and build the chat chain."""
    if papers and not st.session_state.processed_pdfs:
        with st.spinner("Processing PDFs..."):
            texts = []
            for paper in papers:
                try:
                    # Write the upload to disk so PyPDFLoader can open it.
                    os.makedirs('./uploads', exist_ok=True)
                    file_path = os.path.join('./uploads', paper.name)
                    with open(file_path, "wb") as f:
                        f.write(paper.getbuffer())

                    loader = PyPDFLoader(file_path)
                    documents = loader.load()
                    text_splitter = RecursiveCharacterTextSplitter(
                        chunk_size=1000,
                        chunk_overlap=200,
                        length_function=len,
                        is_separator_regex=False,
                    )
                    texts.extend(text_splitter.split_documents(documents))
                    os.remove(file_path)  # remove the temporary copy
                except Exception as e:
                    st.error(f"Error processing {paper.name}: {str(e)}")

            if texts:
                embedding = OpenAIEmbeddings(openai_api_key=api_key)
                # persist_directory="db" keeps the index on disk between runs.
                vectorstore = Chroma(embedding_function=embedding, persist_directory="db")
                vectorstore.add_documents(texts)

                st.session_state.chain = ConversationalRetrievalChain.from_llm(
                    ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo", openai_api_key=api_key),
                    vectorstore.as_retriever(),
                    memory=ConversationBufferMemory(
                        memory_key="chat_history",
                        return_messages=True,
                    ),
                )
                st.session_state.processed_pdfs = True
                st.success("PDFs processed successfully!")
        return texts
    return []
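
# Note: `processed_pdfs` is a one-shot flag, so PDFs uploaded later in the same
# session are ignored; reset the flag (and the vector store) if re-uploading
# should rebuild the index.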


def main():
    st.set_page_config(page_title="PDF Chat")

    api_key = create_sidebar()
    if not api_key:
        st.warning("Please enter your OpenAI API key")
        return

    st.title("Chat with PDF")

    papers = st.file_uploader("Upload PDFs", type=["pdf"], accept_multiple_files=True)
    process_pdfs(papers, api_key)

    # Replay the conversation so far (Streamlit reruns the script on each turn).
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    if prompt := st.chat_input("Ask about your PDFs"):
        st.session_state.messages.append({"role": "user", "content": prompt})
        with st.chat_message("user"):
            st.markdown(prompt)

        with st.chat_message("assistant"):
            if not st.session_state.processed_pdfs:
                response = "Please upload a PDF first."
            else:
                with st.spinner("Thinking..."):
                    # Calling the chain directly works on older LangChain
                    # versions; newer ones prefer `chain.invoke(...)`.
                    result = st.session_state.chain({"question": prompt})
                    response = result["answer"]
            st.markdown(response)
        st.session_state.messages.append({"role": "assistant", "content": response})


if __name__ == "__main__":
    main()
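
# To try this locally (package set is an assumption for the older LangChain API):
#   pip install streamlit langchain openai chromadb pypdf tiktoken
#   streamlit run app.py   # assuming the file is saved as app.py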