Chandranshu Jain committed
Commit 42c7411
Parent(s): 6f8c5b6

Update app.py

app.py CHANGED
@@ -12,21 +12,21 @@ from langchain_chroma import Chroma
 import tempfile
 from langchain_cohere import CohereEmbeddings
 
-st.set_page_config(page_title="Document Genie", layout="wide")
+#st.set_page_config(page_title="Document Genie", layout="wide")
 
-st.markdown("""
-## PDFChat: Get instant insights from your PDF
+#st.markdown("""
+### PDFChat: Get instant insights from your PDF
 
-This chatbot is built using the Retrieval-Augmented Generation (RAG) framework, leveraging Google's Generative AI model Gemini-PRO. It processes uploaded PDF documents by breaking them down into manageable chunks, creates a searchable vector store, and generates accurate answers to user queries. This advanced approach ensures high-quality, contextually relevant responses for an efficient and effective user experience.
+#This chatbot is built using the Retrieval-Augmented Generation (RAG) framework, leveraging Google's Generative AI model Gemini-PRO. It processes uploaded PDF documents by breaking them down into manageable chunks, creates a searchable vector store, and generates accurate answers to user queries. This advanced approach ensures high-quality, contextually relevant responses for an efficient and effective user experience.
 
-### How It Works
+#### How It Works
 
-Follow these simple steps to interact with the chatbot:
+#Follow these simple steps to interact with the chatbot:
 
-1. **Upload Your Document**: The system accepts a PDF file at one time, analyzing the content to provide comprehensive insights.
+#1. **Upload Your Document**: The system accepts a PDF file at one time, analyzing the content to provide comprehensive insights.
 
-2. **Ask a Question**: After processing the document, ask any question related to the content of your uploaded document for a precise answer.
-""")
+#2. **Ask a Question**: After processing the document, ask any question related to the content of your uploaded document for a precise answer.
+#""")
 
 #def get_pdf(pdf_docs):
 #    loader = PyPDFLoader(pdf_docs)
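For context on the blurb this hunk comments out: the app's pipeline is to split the uploaded PDF into chunks, embed the chunks into a Chroma vector store, and retrieve similar chunks per query. The rest of app.py is not shown in this diff, so what follows is only a sketch of that chunk-and-retrieve flow; the helper name, splitter settings, and Cohere model string are assumptions, not the file's actual code.

# Sketch of the chunk -> embed -> retrieve flow described above.
# Assumes COHERE_API_KEY is set; the model name is an assumption.
import tempfile

from langchain_chroma import Chroma
from langchain_cohere import CohereEmbeddings
from langchain_community.document_loaders import PyPDFLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter

def build_vector_store(uploaded_pdf_bytes: bytes) -> Chroma:
    # Streamlit uploads arrive in memory; PyPDFLoader wants a file path,
    # which is why app.py imports tempfile at the top.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as tmp:
        tmp.write(uploaded_pdf_bytes)
        path = tmp.name

    pages = PyPDFLoader(path).load()
    chunks = RecursiveCharacterTextSplitter(
        chunk_size=1000, chunk_overlap=200
    ).split_documents(pages)

    embeddings = CohereEmbeddings(model="embed-english-v3.0")
    return Chroma.from_documents(chunks, embeddings)

# Retrieval: the k most similar chunks become the context for the QA chain.
# docs = build_vector_store(data).similarity_search(query, k=4)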
@@ -86,8 +86,11 @@ def embedding(chunk,query):
     chain = get_conversational_chain()
     response = chain({"input_documents": doc, "question": query}, return_only_outputs=True)
     print(response)
-    st.write("Reply: ", response["output_text"])
+    return response["output_text"]
+    #st.write("Reply: ", response["output_text"])
 
+if 'messages' not in st.session_state:
+    st.session_state.messages = [{'role': 'assistant', "content": 'Hello! Upload a PDF and ask me anything about its content.'}]
 
 
 st.header("Chat with your pdf💁")
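The first change here makes embedding() return the model's answer instead of writing it directly, so the caller can append it to the chat history initialized just below. get_conversational_chain() itself is outside this diff; given that the chain is invoked with input_documents/question and read through response["output_text"], it is most likely a load_qa_chain-style "stuff" chain. A hedged sketch, where the prompt wording and Gemini model name are guesses rather than the file's code:

# Plausible shape of get_conversational_chain(), inferred from the call site
# chain({"input_documents": doc, "question": query}, return_only_outputs=True)
# and the response["output_text"] key. Prompt and model are assumptions.
from langchain.chains.question_answering import load_qa_chain
from langchain.prompts import PromptTemplate
from langchain_google_genai import ChatGoogleGenerativeAI

def get_conversational_chain():
    prompt = PromptTemplate(
        template=(
            "Answer the question using only the context below.\n\n"
            "Context:\n{context}\n\nQuestion:\n{question}\n\nAnswer:"
        ),
        input_variables=["context", "question"],
    )
    llm = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3)
    # A "stuff" chain concatenates the retrieved chunks into {context};
    # its single output key is "output_text".
    return load_qa_chain(llm, chain_type="stuff", prompt=prompt)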
@@ -103,3 +106,11 @@ if st.button("Submit & Process", key="process_button"):
 if query:
     embedding(text_chunks,query)
     st.success("Done")
+if query:
+    st.session_state.messages.append({'role': 'user', "content": query})
+    response = embedding(text_chunks,query)
+    st.session_state.messages.append({'role': 'assistant', "content": response})
+
+for message in st.session_state.messages:
+    with st.chat_message(message['role']):
+        st.write(message['content'])
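The last hunk is the standard Streamlit chat-history pattern: every rerun replays st.session_state.messages through st.chat_message, and each new query appends a user/assistant pair. Note that as committed, a query runs embedding(text_chunks, query) twice, once in the pre-existing if query: block and again in the new one; reusing the first result would avoid the duplicate chain call. A minimal self-contained version of the pattern, with a trivial echo standing in for the RAG call:

# Runnable sketch of the chat-history pattern in the final hunk.
# Save as any file and run: streamlit run <file>.py
import streamlit as st

if 'messages' not in st.session_state:
    st.session_state.messages = [
        {'role': 'assistant', 'content': 'Hello! Ask me anything.'}
    ]

query = st.chat_input("Ask a question")
if query:
    st.session_state.messages.append({'role': 'user', 'content': query})
    answer = f"You said: {query}"  # stand-in for embedding(text_chunks, query)
    st.session_state.messages.append({'role': 'assistant', 'content': answer})

# Streamlit reruns the whole script on every interaction; replaying the list
# here is what makes earlier turns persist on screen.
for message in st.session_state.messages:
    with st.chat_message(message['role']):
        st.write(message['content'])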