Pavan178 committed (verified)
Commit 343938a · Parent: b10e9f4

Update app.py

Files changed (1): app.py (+5, -15)
app.py CHANGED
@@ -6,7 +6,7 @@ from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain.embeddings import OpenAIEmbeddings
 from langchain.vectorstores import FAISS
 from langchain.chat_models import ChatOpenAI
-from langchain.chains import ConversationalRetrievalChain, LLMChain
+from langchain.chains import LLMChain
 from langchain.memory import ConversationBufferMemory
 from langchain.prompts import PromptTemplate
 from PyPDF2 import PdfReader
@@ -16,35 +16,25 @@ class ContextAwareResponseGenerator:
         self.llm = llm
         self.response_prompt = PromptTemplate(
             input_variables=['context', 'query', 'chat_history'],
-            template="""Based on the context, query, and chat history, generate a clear, concise, and helpful response.
+            template="""Given the context, query, and chat history, generate the best response that is clear and helpful.
 
 Context: {context}
 Query: {query}
 Chat History: {chat_history}
 
-Response Structure Selection Criteria (internal):
-1. Technical academic breakdown
-2. Concise summary with key points
-3. Markdown with hierarchical insights
-4. Narrative explanation
-5. Comparative analysis
-
-Generate the response based on the appropriate structure, but do not display structure selection to the user. Only show the final response.""" # This internal prompt generates the response.
+Choose the most appropriate response structure and generate the response directly, without explicit guidance on which format to use. Your response should be based on the query and context provided."""
         )
         self.response_chain = LLMChain(llm=self.llm, prompt=self.response_prompt)
 
     def generate_response(self, context, query, chat_history=''):
         try:
-            # Generate structured response internally
+            # Generate the response content with structure handled by the LLM itself
            response = self.response_chain.run({
                 'context': context,
                 'query': query,
                 'chat_history': chat_history or "No previous context"
             })
-
-            # Optionally process response internally (e.g., format it based on structure)
-            # but only return the final formatted response.
-            return response.strip()
+            return response.strip()  # LLM decides on the structure
         except Exception as e:
             logging.error(f"Response generation error: {e}")
             return self._default_response(query)
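
For context, a minimal usage sketch of the class after this change. It assumes ContextAwareResponseGenerator(llm) is the constructor signature (implied by self.llm = llm in the diff) and that OPENAI_API_KEY is set in the environment; the model name, temperature, and sample inputs are illustrative assumptions, not part of the commit.

# Hypothetical usage sketch (not part of this commit): wire the updated
# ContextAwareResponseGenerator to a chat model and ask one question.
# Only generate_response(context, query, chat_history) comes from the
# diff above; everything else here is an assumed setup.
import logging

from langchain.chat_models import ChatOpenAI

logging.basicConfig(level=logging.INFO)

# Assumed model settings; requires OPENAI_API_KEY in the environment.
llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
generator = ContextAwareResponseGenerator(llm)

answer = generator.generate_response(
    context="FAISS is a library for efficient similarity search over dense vectors.",
    query="What is FAISS used for?",
    chat_history="",  # empty history falls back to "No previous context" inside the chain
)
print(answer)

Because the structure-selection criteria were folded into a single instruction, the caller no longer sees or controls the chosen format; the model picks it per query, which is the behavioral change this commit introduces.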