Pavan178 committed on
Commit
58027e2
·
verified ·
1 Parent(s): 298792b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -3
app.py CHANGED
@@ -10,6 +10,8 @@ from langchain.chains import LLMChain
10
  from langchain.memory import ConversationBufferMemory
11
  from langchain.prompts import PromptTemplate
12
  from PyPDF2 import PdfReader
 
 
13
 
14
  class ContextAwareResponseGenerator:
15
  def __init__(self, llm):
@@ -25,7 +27,7 @@ Chat History: {chat_history}
25
  Choose the most appropriate response structure and generate the response directly, without explicit guidance on which format to use. Your response should be based on the query and context provided."""
26
  )
27
  self.response_chain = LLMChain(llm=self.llm, prompt=self.response_prompt)
28
-
29
  def generate_response(self, context, query, chat_history=''):
30
  try:
31
  # Generate the response content with structure handled by the LLM itself
@@ -43,6 +45,7 @@ Choose the most appropriate response structure and generate the response directl
43
  return f"I couldn't generate a response for: {query}"
44
 
45
  class AdvancedPdfChatbot:
 
46
  def __init__(self, openai_api_key):
47
  os.environ["OPENAI_API_KEY"] = openai_api_key
48
  self.llm = ChatOpenAI(temperature=0.2, model_name='gpt-4o')
@@ -55,7 +58,8 @@ class AdvancedPdfChatbot:
55
 
56
  self.db = None
57
  self.document_context = ""
58
-
 
59
  def load_and_process_pdf(self, pdf_path):
60
  try:
61
  reader = PdfReader(pdf_path)
@@ -75,7 +79,8 @@ class AdvancedPdfChatbot:
75
  except Exception as e:
76
  logging.error(f"PDF processing error: {e}")
77
  return False
78
-
 
79
  def chat(self, query, is_new_question=False):
80
  if not self.db:
81
  return "Please upload a PDF first."
 
10
  from langchain.memory import ConversationBufferMemory
11
  from langchain.prompts import PromptTemplate
12
  from PyPDF2 import PdfReader
13
+ import spaces
14
+
15
 
16
  class ContextAwareResponseGenerator:
17
  def __init__(self, llm):
 
27
  Choose the most appropriate response structure and generate the response directly, without explicit guidance on which format to use. Your response should be based on the query and context provided."""
28
  )
29
  self.response_chain = LLMChain(llm=self.llm, prompt=self.response_prompt)
30
+ @spaces.GPU
31
  def generate_response(self, context, query, chat_history=''):
32
  try:
33
  # Generate the response content with structure handled by the LLM itself
 
45
  return f"I couldn't generate a response for: {query}"
46
 
47
  class AdvancedPdfChatbot:
48
+ @spaces.GPU
49
  def __init__(self, openai_api_key):
50
  os.environ["OPENAI_API_KEY"] = openai_api_key
51
  self.llm = ChatOpenAI(temperature=0.2, model_name='gpt-4o')
 
58
 
59
  self.db = None
60
  self.document_context = ""
61
+
62
+ @spaces.GPU
63
  def load_and_process_pdf(self, pdf_path):
64
  try:
65
  reader = PdfReader(pdf_path)
 
79
  except Exception as e:
80
  logging.error(f"PDF processing error: {e}")
81
  return False
82
+
83
+ @spaces.GPU
84
  def chat(self, query, is_new_question=False):
85
  if not self.db:
86
  return "Please upload a PDF first."