arnold-anand committed
Commit 9a521a4 · Parent: 8cb6413

Create app.py

Files changed (1)
  1. app.py +77 -0
app.py ADDED
@@ -0,0 +1,77 @@
+ import streamlit as st
+ from dotenv import load_dotenv
+ from PyPDF2 import PdfReader
+ from langchain.text_splitter import CharacterTextSplitter
+ from langchain.vectorstores import FAISS
+ from langchain.chat_models import ChatOpenAI
+ from langchain.embeddings import OpenAIEmbeddings, HuggingFaceInstructEmbeddings
+ from langchain.memory import ConversationBufferMemory
+ from langchain.chains import ConversationalRetrievalChain
+ # from langchain.llms import HuggingFaceHub
+ from streamlit_chat import message
+
+
+ def get_pdf_text(pdfs):
+     # Concatenate the extracted text of every page of every uploaded PDF.
+     text = ""
+     for pdf in pdfs:
+         pdf_reader = PdfReader(pdf)
+         for page in pdf_reader.pages:
+             text += page.extract_text()
+     return text
+
+
+ def get_text_chunks(text):
+     # Split the raw text into overlapping chunks sized for embedding.
+     text_splitter = CharacterTextSplitter(
+         separator="\n", chunk_size=1000, chunk_overlap=200, length_function=len)
+     chunks = text_splitter.split_text(text)
+     return chunks
+
+
+ def get_vectorstore(text_chunks):
+     # Embed the chunks and index them in an in-memory FAISS store.
+     embeddings = OpenAIEmbeddings()
+     # embeddings = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-xl")
+     vectorstore = FAISS.from_texts(texts=text_chunks, embedding=embeddings)
+     return vectorstore
+
+
+ def get_conversation_chain(vectorstore):
+     # Wire the LLM, the retriever, and a chat-history buffer into one chain.
+     # llm = HuggingFaceHub(repo_id="google/flan-t5-xxl")
+     llm = ChatOpenAI()
+     memory = ConversationBufferMemory(
+         memory_key='chat_history', return_messages=True)
+     conversation_chain = ConversationalRetrievalChain.from_llm(
+         llm=llm,
+         retriever=vectorstore.as_retriever(),
+         memory=memory
+     )
+     return conversation_chain
+
+
+ def user_input(user_question):
+     # Run the question through the chain and render the updated chat history.
+     response = st.session_state.conversation({'question': user_question})
+     st.session_state.chat_history = response['chat_history']
+     for i, messages in enumerate(st.session_state.chat_history):
+         if i % 2 == 0:
+             message(messages.content, is_user=True)
+         else:
+             message(messages.content)
+
+
+ def main():
+     load_dotenv()
+     st.set_page_config(page_title="Chat with PDF")
+     if "conversation" not in st.session_state:
+         st.session_state.conversation = None
+     if "chat_history" not in st.session_state:
+         st.session_state.chat_history = None
+
+     st.header("Chat with PDF")
+     user_question = st.text_input("Ask a question about your documents...")
+     if user_question:
+         user_input(user_question)
+     with st.sidebar:
+         st.subheader("Your Documents")
+         pdfs = st.file_uploader("Upload here", accept_multiple_files=True)
+         if st.button("Process"):
+             with st.spinner("Processing"):
+                 raw_text = get_pdf_text(pdfs)
+                 # print(raw_text)
+                 chunks = get_text_chunks(raw_text)
+                 vectorstore = get_vectorstore(chunks)
+                 st.session_state.conversation = get_conversation_chain(vectorstore)
+             st.success("Processing Complete!")
+
+
+ if __name__ == '__main__':
+     main()
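
A note for anyone trying this commit locally: the call to load_dotenv() suggests the OpenAI credentials come from a local .env file that is not part of this commit. Below is a minimal pre-flight sketch, not part of the commit, assuming the standard OPENAI_API_KEY variable read by OpenAIEmbeddings and ChatOpenAI; the file name pre_flight.py is hypothetical.

    # pre_flight.py -- hypothetical helper, not in this commit.
    # Assumes the key lives in a local .env file picked up by load_dotenv(),
    # the same mechanism app.py relies on before building OpenAIEmbeddings / ChatOpenAI.
    import os
    from dotenv import load_dotenv

    load_dotenv()
    if not os.getenv("OPENAI_API_KEY"):
        raise SystemExit("Set OPENAI_API_KEY in .env, then launch with: streamlit run app.py")

With the key in place, streamlit run app.py starts the UI: the sidebar uploader feeds get_pdf_text, and the Process button builds the FAISS index that backs the conversation chain.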