Update app.py
Browse files
app.py
CHANGED
@@ -16,39 +16,41 @@ class ContextAwareResponseGenerator:
|
|
16 |
self.llm = llm
|
17 |
self.response_prompt = PromptTemplate(
|
18 |
input_variables=['context', 'query', 'chat_history'],
|
19 |
-
template="""
|
20 |
|
21 |
Context: {context}
|
22 |
Query: {query}
|
23 |
Chat History: {chat_history}
|
24 |
|
25 |
-
Response Structure Selection Criteria:
|
26 |
1. Technical academic breakdown
|
27 |
2. Concise summary with key points
|
28 |
3. Markdown with hierarchical insights
|
29 |
4. Narrative explanation
|
30 |
5. Comparative analysis
|
31 |
|
32 |
-
|
33 |
)
|
34 |
self.response_chain = LLMChain(llm=self.llm, prompt=self.response_prompt)
|
35 |
|
36 |
def generate_response(self, context, query, chat_history=''):
|
37 |
try:
|
38 |
-
# Generate structured response
|
39 |
-
|
40 |
'context': context,
|
41 |
'query': query,
|
42 |
'chat_history': chat_history or "No previous context"
|
43 |
})
|
44 |
|
45 |
-
#
|
46 |
-
|
47 |
-
|
48 |
-
return response_content
|
49 |
except Exception as e:
|
50 |
logging.error(f"Response generation error: {e}")
|
51 |
-
return
|
|
|
|
|
|
|
52 |
|
53 |
class AdvancedPdfChatbot:
|
54 |
def __init__(self, openai_api_key):
|
@@ -135,4 +137,4 @@ with gr.Blocks() as demo:
|
|
135 |
msg.submit(respond, inputs=[msg, chatbot_interface], outputs=[msg, chatbot_interface])
|
136 |
|
137 |
if __name__ == "__main__":
|
138 |
-
demo.launch()
|
|
|
16 |
self.llm = llm
|
17 |
self.response_prompt = PromptTemplate(
|
18 |
input_variables=['context', 'query', 'chat_history'],
|
19 |
+
template="""Based on the context, query, and chat history, generate a clear, concise, and helpful response.
|
20 |
|
21 |
Context: {context}
|
22 |
Query: {query}
|
23 |
Chat History: {chat_history}
|
24 |
|
25 |
+
Response Structure Selection Criteria (internal):
|
26 |
1. Technical academic breakdown
|
27 |
2. Concise summary with key points
|
28 |
3. Markdown with hierarchical insights
|
29 |
4. Narrative explanation
|
30 |
5. Comparative analysis
|
31 |
|
32 |
+
Generate the response based on the appropriate structure, but do not display structure selection to the user. Only show the final response.""" # This internal prompt generates the response.
|
33 |
)
|
34 |
self.response_chain = LLMChain(llm=self.llm, prompt=self.response_prompt)
|
35 |
|
36 |
def generate_response(self, context, query, chat_history=''):
    """Run the response chain and return the final answer text.

    Args:
        context: Retrieved document context injected into the prompt.
        query: The user's question.
        chat_history: Prior conversation text; an empty value is replaced
            by the placeholder "No previous context" so the prompt variable
            is never blank.

    Returns:
        The stripped response string on success, or the fallback message
        from ``_default_response`` if the chain raises.
    """
    try:
        # Generate the structured response internally; the prompt instructs
        # the model to hide its structure-selection reasoning from the user.
        response = self.response_chain.run({
            'context': context,
            'query': query,
            'chat_history': chat_history or "No previous context"
        })
        return response.strip()
    except Exception:
        # logging.exception records the full traceback (the original
        # logging.error with an f-string discarded it) and defers string
        # formatting to the logging framework.
        logging.exception("Response generation error")
        return self._default_response(query)
|
51 |
+
|
52 |
+
def _default_response(self, query):
|
53 |
+
return f"I couldn't generate a response for: {query}"
|
54 |
|
55 |
class AdvancedPdfChatbot:
|
56 |
def __init__(self, openai_api_key):
|
|
|
137 |
msg.submit(respond, inputs=[msg, chatbot_interface], outputs=[msg, chatbot_interface])
|
138 |
|
139 |
# Entry point: start the Gradio UI only when run as a script,
# not when this module is imported.
if __name__ == "__main__":
    demo.launch()
|