import os

import streamlit as st
from dotenv import load_dotenv
from PyPDF2 import PdfReader
from langchain import hub
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.llms import llamacpp
from langchain_community.vectorstores import Chroma
from langchain_core.callbacks import CallbackManager, StreamingStdOutCallbackHandler
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough

from htmlTemplates import css, bot_template, user_template
from state_manager import StateManager

load_dotenv()

# Enable LangSmith tracing only when an API key is available in the environment.
lang_api_key = os.getenv("lang_api_key")
if lang_api_key:
    os.environ["LANGCHAIN_TRACING_V2"] = "true"
    os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"
    os.environ["LANGCHAIN_API_KEY"] = lang_api_key
    os.environ["LANGCHAIN_PROJECT"] = "Chat with multiple PDFs"

def get_pdf_text(pdf_docs):
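    """Concatenate the text extracted from every page of the uploaded PDFs."""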
text = ""
for pdf in pdf_docs:
pdf_reader = PdfReader(pdf)
for page in pdf_reader.pages:
            text += page.extract_text() or ""  # extract_text() can return None on image-only pages
    return text

def get_text_chunks(text):
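    """Split raw text into small, overlapping chunks sized by tiktoken tokens."""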
    text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
        chunk_size=250,
        chunk_overlap=50,
        separators=["\n \n \n", "\n \n", "\n1", r"(?<=\. )", " ", ""],
        is_separator_regex=True,  # required for the sentence-boundary lookbehind to act as a regex
    )
chunks = text_splitter.split_text(text)
    return chunks

def get_vectorstore(text_chunks):
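    """Embed the text chunks and persist them in a local Chroma vector store."""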
model_name = "Alibaba-NLP/gte-base-en-v1.5"
    model_kwargs = {"device": "cpu",
                    "trust_remote_code": True}  # a real boolean, not the string 'True'
encode_kwargs = {'normalize_embeddings': True}
embeddings = HuggingFaceEmbeddings(
model_name=model_name,
model_kwargs=model_kwargs,
encode_kwargs=encode_kwargs
)
vectorstore = Chroma.from_texts(
texts=text_chunks, embedding=embeddings, persist_directory="docs/chroma/")
    return vectorstore

def get_conversation_chain(vectorstore):
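    """Build a RAG chain: an MMR retriever over the vector store piped into a local llama.cpp model."""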
callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
    llm = llamacpp.LlamaCpp(
        model_path="qwen2-0_5b-instruct-q8_0.gguf",
        n_gpu_layers=0,  # CPU-only inference
        temperature=0.1,
        top_p=0.9,
        n_ctx=20000,
        n_batch=2000,
        max_tokens=300,
        repeat_penalty=1.9,
        last_n_tokens_size=300,
        # callback_manager=callback_manager,  # uncomment to stream tokens to stdout
        verbose=False,
    )
    retriever = vectorstore.as_retriever(search_type="mmr", search_kwargs={"k": 7})
    prompt = hub.pull("rlm/rag-prompt")  # this prompt expects "context" and "question" inputs

    def format_docs(docs):
        # Join the retrieved documents into a single context string for the prompt.
        return "\n\n".join(doc.page_content for doc in docs)

    rag_chain = (
        {"context": retriever | format_docs, "question": RunnablePassthrough()}
        | prompt
        | llm
        | StrOutputParser()
    )
    return rag_chain

def main():
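    """Streamlit entry point: upload PDFs in the sidebar, then chat over their contents."""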
st.set_page_config(page_title="Chat with multiple PDFs", page_icon=":books:")
st.write(css, unsafe_allow_html=True)
st.header("Chat with multiple PDFs :books:")
state_manager = StateManager()
with st.sidebar:
st.subheader("Your documents")
pdf_docs = st.file_uploader(
"Upload your PDFs here and click on 'Process'", accept_multiple_files=True)
        if st.button("Process"):
            if not pdf_docs:
                st.warning("Please upload at least one PDF first.")
            else:
                with st.spinner("Processing"):
                    # get pdf text
                    raw_text = get_pdf_text(pdf_docs)
                    # get the text chunks
                    text_chunks = get_text_chunks(raw_text)
                    # create vector store
                    state_manager.create_vectorstore(text_chunks)
                    # create conversation chain
                    state_manager.create_conversation_chain()
if user_question := st.text_input("Ask a question about your documents:"):
        handle_userinput(user_question, state_manager)

def handle_userinput(user_question, state_manager):
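    """Append the question to the chat history, run the RAG chain, and render the transcript."""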
if "chat_history" not in st.session_state:
st.session_state["chat_history"] = [
{"role": "assistant", "content": "Hi, I'm a Q&A chatbot who is based on your imported pdf documents. How can I help you?"}
]
    st.session_state.chat_history.append({"role": "user", "content": user_question})

    if state_manager.vectorstore is not None and state_manager.conversation_chain is not None:
        # Assumes StateManager builds the LCEL chain above, which takes the raw question string.
        response = state_manager.conversation_chain.invoke(user_question)
        st.session_state.chat_history.append({"role": "assistant", "content": response})
    else:
        st.warning("Please process your documents before asking questions.")

    # Render by role rather than by list position: the history starts with an
    # assistant greeting, so alternating on the index would mislabel messages.
    for message in st.session_state.chat_history:
        if message["role"] == "user":
            st.write(user_template.replace(
                "{{MSG}}", message["content"]), unsafe_allow_html=True)
        else:
            st.write(bot_template.replace(
                "{{MSG}}", message["content"]), unsafe_allow_html=True)

    st.subheader("Your documents")
    if state_manager.vectorstore is not None:
        # Show the chunks most relevant to the current question for transparency.
        docs = state_manager.vectorstore.as_retriever().invoke(user_question)
        for doc in docs:
            st.write(f"Document: {doc}")

if __name__ == '__main__':
main()