rajesh1729 committed on
Commit
32c2394
·
verified ·
1 Parent(s): efb1b7a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +64 -57
app.py CHANGED
@@ -8,6 +8,14 @@ from langchain.chains import ConversationalRetrievalChain, ConversationChain
8
  from langchain.memory import ConversationBufferMemory
9
  from langchain.document_loaders import PyPDFLoader
10
 
 
 
 
 
 
 
 
 
11
  def create_sidebar():
12
  with st.sidebar:
13
  st.title("PDF Chat")
@@ -36,7 +44,6 @@ def save_uploaded_file(uploaded_file, path='./uploads/'):
36
  f.write(uploaded_file.getbuffer())
37
  return file_path
38
 
39
- @st.cache_data
40
  def load_texts_from_papers(papers):
41
  all_texts = []
42
  for paper in papers:
@@ -57,77 +64,77 @@ def load_texts_from_papers(papers):
57
  st.error(f"Error processing {paper.name}: {str(e)}")
58
  return all_texts
59
 
60
- @st.cache_resource
61
- def initialize_vectorstore(api_key): # Added api_key parameter
62
  embedding = OpenAIEmbeddings(openai_api_key=api_key)
63
  vectorstore = Chroma(embedding_function=embedding, persist_directory="db")
64
  return vectorstore
65
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
66
  def main():
67
  st.set_page_config(page_title="PDF Chat", layout="wide")
68
-
69
- # Get API key from sidebar
70
  api_key = create_sidebar()
71
-
72
- st.title("Chat with PDF")
73
- papers = st.file_uploader("Upload PDFs", type=["pdf"], accept_multiple_files=True)
74
-
75
- if "messages" not in st.session_state:
76
- st.session_state.messages = []
77
-
78
  if not api_key:
79
  st.warning("Please enter your OpenAI API key")
80
  return
81
 
82
- try:
83
- vectorstore = initialize_vectorstore(api_key)
84
- texts = load_texts_from_papers(papers) if papers else []
85
-
86
- if texts:
87
- vectorstore.add_documents(texts)
88
- qa_chain = ConversationalRetrievalChain.from_llm(
89
- ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo", openai_api_key=api_key), # Added api_key here
90
- vectorstore.as_retriever(),
91
- memory=ConversationBufferMemory(
92
- memory_key="chat_history",
93
- return_messages=True
94
- )
95
- )
96
- st.success("PDF processed successfully!")
97
- else:
98
- memory = ConversationBufferMemory(memory_key="chat_history")
99
- qa_chain = ConversationChain(
100
- llm=ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo", openai_api_key=api_key), # Added api_key here
101
- memory=memory
102
- )
103
-
104
- # Chat interface
105
  for message in st.session_state.messages:
106
  with st.chat_message(message["role"]):
107
  st.markdown(message["content"])
108
-
 
109
  if prompt := st.chat_input("Ask about your PDFs"):
110
- st.session_state.messages.append({"role": "user", "content": prompt})
111
- with st.chat_message("user"):
112
- st.markdown(prompt)
113
-
114
- with st.chat_message("assistant"):
115
- try:
116
- if texts:
117
- result = qa_chain({"question": prompt})
118
- response = result["answer"]
119
- else:
120
- result = qa_chain.predict(input=prompt)
121
- response = result
122
-
123
- st.session_state.messages.append({"role": "assistant", "content": response})
124
- st.markdown(response)
125
-
126
- except Exception as e:
127
- st.error(f"Error: {str(e)}")
128
-
129
- except Exception as e:
130
- st.error(f"Error: {str(e)}")
131
 
132
  if __name__ == "__main__":
133
  main()
 
8
  from langchain.memory import ConversationBufferMemory
9
  from langchain.document_loaders import PyPDFLoader
10
 
11
# Seed the chat-related session-state keys once per session so later code
# can read them without existence checks.
_STATE_DEFAULTS = {"messages": [], "chain": None, "processed_pdfs": False}
for _name, _value in _STATE_DEFAULTS.items():
    if _name not in st.session_state:
        st.session_state[_name] = _value
19
  def create_sidebar():
20
  with st.sidebar:
21
  st.title("PDF Chat")
 
44
  f.write(uploaded_file.getbuffer())
45
  return file_path
46
 
 
47
  def load_texts_from_papers(papers):
48
  all_texts = []
49
  for paper in papers:
 
64
  st.error(f"Error processing {paper.name}: {str(e)}")
65
  return all_texts
66
 
67
def initialize_vectorstore(api_key):
    """Build a Chroma vector store backed by OpenAI embeddings.

    Args:
        api_key: OpenAI API key used by the embedding model.

    Returns:
        A Chroma instance persisting its index under the local "db" directory.
    """
    embedder = OpenAIEmbeddings(openai_api_key=api_key)
    return Chroma(embedding_function=embedder, persist_directory="db")
71
 
72
def process_pdfs(papers, api_key):
    """Extract text from uploaded PDFs and build the retrieval chain.

    The heavy work runs at most once per session: after a successful build,
    ``st.session_state.processed_pdfs`` short-circuits subsequent calls.

    Args:
        papers: Uploaded file objects from ``st.file_uploader`` (None or an
            empty list when nothing was uploaded).
        api_key: OpenAI API key forwarded to the embedding and chat models.

    Returns:
        The list of document chunks extracted in this call, or ``[]`` when
        nothing was processed.
    """
    # NOTE(review): once processed_pdfs is True, PDFs uploaded later in the
    # same session are never processed — confirm this is the intended UX.
    if papers and not st.session_state.processed_pdfs:
        with st.spinner("Processing PDFs..."):
            texts = load_texts_from_papers(papers)
            if texts:
                vectorstore = initialize_vectorstore(api_key)
                vectorstore.add_documents(texts)
                st.session_state.chain = ConversationalRetrievalChain.from_llm(
                    ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo", openai_api_key=api_key),
                    vectorstore.as_retriever(),
                    memory=ConversationBufferMemory(
                        memory_key="chat_history",
                        return_messages=True
                    )
                )
                st.session_state.processed_pdfs = True
                st.success("PDFs processed successfully!")
            else:
                # Fix: this failure path was silent, leaving the user with no
                # clue why chat answers "Please upload a PDF first".
                st.warning("No text could be extracted from the uploaded PDFs.")
            return texts
    return []
91
+
92
def handle_chat(prompt, texts):
    """Record the user's message and append the assistant's reply.

    Args:
        prompt: Text the user typed into the chat input.
        texts: Document chunks processed during the current rerun; empty
            when no new PDFs were handled this run.
    """
    st.session_state.messages.append({"role": "user", "content": prompt})

    try:
        # A retrieval chain exists either when texts were just processed or
        # when an earlier rerun already flagged the PDFs as processed.
        pdf_ready = bool(texts) or st.session_state.processed_pdfs
        if pdf_ready:
            answer = st.session_state.chain({"question": prompt})["answer"]
        else:
            answer = "Please upload a PDF first."
        st.session_state.messages.append({"role": "assistant", "content": answer})
    except Exception as e:
        # Top-level UI boundary: surface the failure instead of crashing.
        st.error(f"Error: {str(e)}")
106
+
107
def main():
    """Entry point: wire together page config, sidebar, uploader and chat."""
    st.set_page_config(page_title="PDF Chat", layout="wide")

    api_key = create_sidebar()

    # Nothing below works without credentials, so bail out early.
    if not api_key:
        st.warning("Please enter your OpenAI API key")
        return

    st.title("Chat with PDF")

    papers = st.file_uploader("Upload PDFs", type=["pdf"], accept_multiple_files=True)

    # Build (or reuse) the retrieval chain from any uploaded PDFs.
    texts = process_pdfs(papers, api_key)

    chat_container = st.container()
    with chat_container:
        # Replay the running transcript stored in session state.
        for entry in st.session_state.messages:
            with st.chat_message(entry["role"]):
                st.markdown(entry["content"])

        if prompt := st.chat_input("Ask about your PDFs"):
            handle_chat(prompt, texts)
            # Rerun so the freshly appended messages render in the loop above.
            st.rerun()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
138
 
139
# Run the Streamlit app only when this file is executed directly,
# not when it is imported as a module.
if __name__ == "__main__":
    main()