Pavan178 commited on
Commit
432a54a
·
verified ·
1 Parent(s): f13caea

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -19
app.py CHANGED
@@ -19,11 +19,10 @@ class AdvancedPdfChatbot:
19
  os.environ["OPENAI_API_KEY"] = openai_api_key
20
  self.embeddings = OpenAIEmbeddings()
21
  self.text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
22
- self.llm = ChatOpenAI(temperature=0.5,model_name='gpt-4o',max_tokens=3000)
23
 
24
  self.memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
25
  self.qa_chain = None
26
- self.pdf_path = None
27
  self.template = """
28
  You are a file-based knowledge assistant that interacts with users like ChatGPT. Your primary source of knowledge comes from user-uploaded files, such as PDFs. You do not rely on general knowledge or the internet. Instead, you extract, analyze, and synthesize information directly from the content of the provided file(s).
29
  **1. Personality and Tone**
@@ -71,7 +70,7 @@ NOTE : DESCRIBE/SUMMARY should always return the overall summary of the document
71
  Question: {question}
72
  Answer:
73
  """
74
-
75
  self.prompt = PromptTemplate(template=self.template, input_variables=["context", "question"])
76
 
77
  def load_and_process_pdf(self, pdf_path):
@@ -79,7 +78,6 @@ NOTE : DESCRIBE/SUMMARY should always return the overall summary of the document
79
  documents = loader.load()
80
  texts = self.text_splitter.split_documents(documents)
81
  self.db = FAISS.from_documents(texts, self.embeddings)
82
- self.pdf_path = pdf_path
83
  self.setup_conversation_chain()
84
 
85
  def setup_conversation_chain(self):
@@ -96,13 +94,6 @@ NOTE : DESCRIBE/SUMMARY should always return the overall summary of the document
96
  result = self.qa_chain({"question": query})
97
  return result['answer']
98
 
99
- def get_pdf_path(self):
100
- # Return the stored PDF path
101
- if self.pdf_path:
102
- return self.pdf_path
103
- else:
104
- return "No PDF uploaded yet."
105
-
106
  # Initialize the chatbot
107
  pdf_chatbot = AdvancedPdfChatbot(openai_api_key)
108
 
@@ -111,7 +102,7 @@ def upload_pdf(pdf_file):
111
  return "Please upload a PDF file."
112
  file_path = pdf_file.name
113
  pdf_chatbot.load_and_process_pdf(file_path)
114
- return file_path
115
 
116
  def respond(message, history):
117
  bot_message = pdf_chatbot.chat(message)
@@ -122,10 +113,6 @@ def clear_chatbot():
122
  pdf_chatbot.memory.clear()
123
  return []
124
 
125
- def get_pdf_path():
126
- # Call the method to return the current PDF path
127
- return pdf_chatbot.get_pdf_path()
128
-
129
  # Create the Gradio interface
130
  with gr.Blocks() as demo:
131
  gr.Markdown("# PDF Chatbot")
@@ -136,15 +123,18 @@ with gr.Blocks() as demo:
136
 
137
  upload_status = gr.Textbox(label="Upload Status")
138
  upload_button.click(upload_pdf, inputs=[pdf_upload], outputs=[upload_status])
139
- path_button = gr.Button("Get PDF Path")
140
- pdf_path_display = gr.Textbox(label="Current PDF Path")
141
  chatbot_interface = gr.Chatbot()
142
  msg = gr.Textbox()
143
  clear = gr.Button("Clear")
144
 
145
  msg.submit(respond, inputs=[msg, chatbot_interface], outputs=[msg, chatbot_interface])
146
  clear.click(clear_chatbot, outputs=[chatbot_interface])
147
- path_button.click(get_pdf_path, outputs=[pdf_path_display])
148
 
149
  if __name__ == "__main__":
150
  demo.launch()
 
 
 
 
 
 
19
  os.environ["OPENAI_API_KEY"] = openai_api_key
20
  self.embeddings = OpenAIEmbeddings()
21
  self.text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
22
+ self.llm = ChatOpenAI(temperature=0.5,model_name='gpt-4o',max_tokens=3000)
23
 
24
  self.memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
25
  self.qa_chain = None
 
26
  self.template = """
27
  You are a file-based knowledge assistant that interacts with users like ChatGPT. Your primary source of knowledge comes from user-uploaded files, such as PDFs. You do not rely on general knowledge or the internet. Instead, you extract, analyze, and synthesize information directly from the content of the provided file(s).
28
  **1. Personality and Tone**
 
70
  Question: {question}
71
  Answer:
72
  """
73
+
74
  self.prompt = PromptTemplate(template=self.template, input_variables=["context", "question"])
75
 
76
  def load_and_process_pdf(self, pdf_path):
 
78
  documents = loader.load()
79
  texts = self.text_splitter.split_documents(documents)
80
  self.db = FAISS.from_documents(texts, self.embeddings)
 
81
  self.setup_conversation_chain()
82
 
83
  def setup_conversation_chain(self):
 
94
  result = self.qa_chain({"question": query})
95
  return result['answer']
96
 
 
 
 
 
 
 
 
97
  # Initialize the chatbot
98
  pdf_chatbot = AdvancedPdfChatbot(openai_api_key)
99
 
 
102
  return "Please upload a PDF file."
103
  file_path = pdf_file.name
104
  pdf_chatbot.load_and_process_pdf(file_path)
105
+ return "PDF uploaded and processed successfully. You can now start chatting!"
106
 
107
  def respond(message, history):
108
  bot_message = pdf_chatbot.chat(message)
 
113
  pdf_chatbot.memory.clear()
114
  return []
115
 
 
 
 
 
116
  # Create the Gradio interface
117
  with gr.Blocks() as demo:
118
  gr.Markdown("# PDF Chatbot")
 
123
 
124
  upload_status = gr.Textbox(label="Upload Status")
125
  upload_button.click(upload_pdf, inputs=[pdf_upload], outputs=[upload_status])
126
+
 
127
  chatbot_interface = gr.Chatbot()
128
  msg = gr.Textbox()
129
  clear = gr.Button("Clear")
130
 
131
  msg.submit(respond, inputs=[msg, chatbot_interface], outputs=[msg, chatbot_interface])
132
  clear.click(clear_chatbot, outputs=[chatbot_interface])
 
133
 
134
  if __name__ == "__main__":
135
  demo.launch()
136
+
137
+
138
+
139
+
140
+