Update app.py
app.py CHANGED
@@ -7,6 +7,7 @@ from langchain.chat_models import ChatOpenAI
 from langchain.chains import ConversationalRetrievalChain, ConversationChain
 from langchain.memory import ConversationBufferMemory
 from langchain.document_loaders import PyPDFLoader
+import time

 # Initialize session state variables
 if "messages" not in st.session_state:
@@ -15,6 +16,8 @@ if "chain" not in st.session_state:
     st.session_state.chain = None
 if "processed_pdfs" not in st.session_state:
     st.session_state.processed_pdfs = False
+if "waiting_for_answer" not in st.session_state:
+    st.session_state.waiting_for_answer = False

 def create_sidebar():
     with st.sidebar:
@@ -89,20 +92,15 @@ def process_pdfs(papers, api_key):
         return texts
     return []

-def
-    st.session_state.messages.append({"role": "user", "content": prompt})
-
+def get_assistant_response(prompt, texts):
     try:
         if texts or st.session_state.processed_pdfs:
             result = st.session_state.chain({"question": prompt})
-
+            return result["answer"]
         else:
-
-
-            st.session_state.messages.append({"role": "assistant", "content": response})
-
+            return "Please upload a PDF first."
     except Exception as e:
-
+        return f"Error: {str(e)}"

 def main():
     st.set_page_config(page_title="PDF Chat", layout="wide")
@@ -121,20 +119,29 @@ def main():
     # Process PDFs
     texts = process_pdfs(papers, api_key)

-    # Chat interface
+    # Chat interface
     chat_container = st.container()

     with chat_container:
-        # Display chat messages
+        # Display existing chat messages
         for message in st.session_state.messages:
             with st.chat_message(message["role"]):
                 st.markdown(message["content"])
-
-        #
+
+        # Get user input
         if prompt := st.chat_input("Ask about your PDFs"):
-
-
-            st.
+            # Add user message immediately
+            st.session_state.messages.append({"role": "user", "content": prompt})
+            st.chat_message("user").markdown(prompt)
+
+            # Get assistant response with a loading indicator
+            with st.chat_message("assistant"):
+                with st.spinner("Thinking..."):
+                    response = get_assistant_response(prompt, texts)
+                st.markdown(response)
+
+            # Add assistant response to messages
+            st.session_state.messages.append({"role": "assistant", "content": response})

 if __name__ == "__main__":
     main()
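
For reference, the changed pieces reassemble roughly as follows once the diff is applied. This is a sketch, not the full app.py: it assumes streamlit is imported as st, and that st.session_state.chain and texts come from the chain setup and process_pdfs() code earlier in the file, which this diff does not show.

import streamlit as st

# New helper introduced by this commit: returns only the answer text,
# leaving chat-history bookkeeping to the caller in main().
def get_assistant_response(prompt, texts):
    try:
        if texts or st.session_state.processed_pdfs:
            result = st.session_state.chain({"question": prompt})
            return result["answer"]
        else:
            return "Please upload a PDF first."
    except Exception as e:
        return f"Error: {str(e)}"

# Inside main(), after texts = process_pdfs(papers, api_key):
if prompt := st.chat_input("Ask about your PDFs"):
    # Show the user message immediately, then render the assistant reply
    # under a spinner while the chain runs.
    st.session_state.messages.append({"role": "user", "content": prompt})
    st.chat_message("user").markdown(prompt)

    with st.chat_message("assistant"):
        with st.spinner("Thinking..."):
            response = get_assistant_response(prompt, texts)
        st.markdown(response)

    st.session_state.messages.append({"role": "assistant", "content": response})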