Pavan178 committed on
Commit
4277202
·
verified ·
1 Parent(s): 6a6fbcd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -4
app.py CHANGED
@@ -1,5 +1,6 @@
1
  import os
2
  import gradio as gr
 
3
  from langchain.document_loaders import PyPDFLoader
4
  from langchain.text_splitter import RecursiveCharacterTextSplitter
5
  from langchain.embeddings import OpenAIEmbeddings
@@ -8,10 +9,16 @@ from langchain.chat_models import ChatOpenAI
8
  from langchain.chains import ConversationalRetrievalChain, LLMChain
9
  from langchain.memory import ConversationBufferMemory
10
  from langchain.prompts import PromptTemplate
 
 
 
 
 
 
11
 
12
  class QueryRefiner:
13
  def __init__(self):
14
- self.refinement_llm = ChatOpenAI(temperature=0.2, model_name='gpt-3.5-turbo')
15
  self.refinement_prompt = PromptTemplate(
16
  input_variables=['query', 'context'],
17
  template="""Refine and enhance the following query for maximum clarity and precision:
@@ -32,6 +39,7 @@ Refined Query:"""
32
  prompt=self.refinement_prompt
33
  )
34
 
 
35
  def refine_query(self, original_query, context_hints=''):
36
  try:
37
  refined_query = self.refinement_chain.run({
@@ -40,15 +48,15 @@ Refined Query:"""
40
  })
41
  return refined_query.strip()
42
  except Exception as e:
43
- print(f"Query refinement error: {e}")
44
  return original_query
45
 
46
  class AdvancedPdfChatbot:
47
  def __init__(self, openai_api_key):
48
  os.environ["OPENAI_API_KEY"] = openai_api_key
49
- self.embeddings = OpenAIEmbeddings()
50
  self.text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
51
- self.llm = ChatOpenAI(temperature=0, model_name='gpt-4')
52
 
53
  self.memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
54
  self.query_refiner = QueryRefiner()
@@ -66,6 +74,7 @@ If the answer isn't directly available, explain why.""",
66
  input_variables=["context", "question"]
67
  )
68
 
 
69
  def load_and_process_pdf(self, pdf_path):
70
  loader = PyPDFLoader(pdf_path)
71
  documents = loader.load()
@@ -79,6 +88,7 @@ If the answer isn't directly available, explain why.""",
79
  combine_docs_chain_kwargs={"prompt": self.qa_prompt}
80
  )
81
 
 
82
  def chat(self, query):
83
  if not self.chain:
84
  return "Please upload a PDF first."
@@ -113,6 +123,7 @@ def upload_pdf(pdf_file):
113
  pdf_chatbot.load_and_process_pdf(file_path)
114
  return f"PDF processed successfully: {file_path}"
115
  except Exception as e:
 
116
  return f"Error processing PDF: {str(e)}"
117
 
118
  def respond(message, history):
@@ -123,6 +134,7 @@ def respond(message, history):
123
  history.append((message, bot_message))
124
  return "", history
125
  except Exception as e:
 
126
  return f"Error: {str(e)}", history
127
 
128
  def clear_chatbot():
 
1
  import os
2
  import gradio as gr
3
+ import logging
4
  from langchain.document_loaders import PyPDFLoader
5
  from langchain.text_splitter import RecursiveCharacterTextSplitter
6
  from langchain.embeddings import OpenAIEmbeddings
 
9
  from langchain.chains import ConversationalRetrievalChain, LLMChain
10
  from langchain.memory import ConversationBufferMemory
11
  from langchain.prompts import PromptTemplate
12
+ import concurrent.futures
13
+ import timeout_decorator
14
+
15
+ # Configure logging
16
+ logging.basicConfig(level=logging.INFO)
17
+ logger = logging.getLogger(__name__)
18
 
19
  class QueryRefiner:
20
  def __init__(self):
21
+ self.refinement_llm = ChatOpenAI(temperature=0.2, model_name='gpt-3.5-turbo', request_timeout=30)
22
  self.refinement_prompt = PromptTemplate(
23
  input_variables=['query', 'context'],
24
  template="""Refine and enhance the following query for maximum clarity and precision:
 
39
  prompt=self.refinement_prompt
40
  )
41
 
42
+ @timeout_decorator.timeout(30) # 30 seconds timeout
43
  def refine_query(self, original_query, context_hints=''):
44
  try:
45
  refined_query = self.refinement_chain.run({
 
48
  })
49
  return refined_query.strip()
50
  except Exception as e:
51
+ logger.error(f"Query refinement error: {e}")
52
  return original_query
53
 
54
  class AdvancedPdfChatbot:
55
  def __init__(self, openai_api_key):
56
  os.environ["OPENAI_API_KEY"] = openai_api_key
57
+ self.embeddings = OpenAIEmbeddings(request_timeout=30)
58
  self.text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
59
+ self.llm = ChatOpenAI(temperature=0, model_name='gpt-4', request_timeout=30)
60
 
61
  self.memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
62
  self.query_refiner = QueryRefiner()
 
74
  input_variables=["context", "question"]
75
  )
76
 
77
+ @timeout_decorator.timeout(60) # 1 minute timeout for PDF processing
78
  def load_and_process_pdf(self, pdf_path):
79
  loader = PyPDFLoader(pdf_path)
80
  documents = loader.load()
 
88
  combine_docs_chain_kwargs={"prompt": self.qa_prompt}
89
  )
90
 
91
+ @timeout_decorator.timeout(30) # 30 seconds timeout for chat
92
  def chat(self, query):
93
  if not self.chain:
94
  return "Please upload a PDF first."
 
123
  pdf_chatbot.load_and_process_pdf(file_path)
124
  return f"PDF processed successfully: {file_path}"
125
  except Exception as e:
126
+ logger.error(f"PDF processing error: {e}")
127
  return f"Error processing PDF: {str(e)}"
128
 
129
  def respond(message, history):
 
134
  history.append((message, bot_message))
135
  return "", history
136
  except Exception as e:
137
+ logger.error(f"Chat response error: {e}")
138
  return f"Error: {str(e)}", history
139
 
140
  def clear_chatbot():