bupa1018 committed
Commit c606b41 · 1 Parent(s): d8207a8

Update kadi_apy_bot.py

Files changed (1)
  1. kadi_apy_bot.py +67 -30
kadi_apy_bot.py CHANGED
@@ -10,7 +10,7 @@ class KadiAPYBot:
 
 
 
-    def process_query(self, query, chat_history):
+    # def process_query(self, query, chat_history):
         """
         Process a user query, handle history, retrieve contexts, and generate a response.
         """
@@ -48,7 +48,44 @@ class KadiAPYBot:
 
         return response
 
+    def process_query(self, query):
+        """
+        Process a user query, handle history, retrieve contexts, and generate a response.
+        """
+        # Add the user query to the conversation history
+        self.add_to_conversation(user_query=query)
+
+        # Rewrite query
+        rewritten_query = self.rewrite_query(query)
+        print("RRRRRRRRRREEEEEEEEEEWRITEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE: ",rewritten_query)
+        # Predict library usage
+        code_library_usage_prediction = self.predict_library_usage(query)
+
+        # Retrieve contexts
+        doc_contexts = self.retrieve_contexts(query, k=3, filter={"directory": "doc/"})
+        code_contexts = self.retrieve_contexts(rewritten_query, k=5, filter={"usage": code_library_usage_prediction})
+
 
+        # Vanilla
+        #doc_contexts = self.retrieve_contexts(query, k=3, filter={"directory": "doc/"})
+        #code_contexts = self.retrieve_contexts(query, k=5, filter={"directory": "kadi_apy/"})
+
+        # doc_contexts = self.retrieve_contexts(query, k=3, filter={"directory": "doc/"})
+        # code_contexts = self.retrieve_contexts(rewritten_query, k=5, filter={"directory": "kadi_apy/"})
+
+
+        # Format contexts
+        formatted_doc_contexts = self.format_documents(doc_contexts)
+        formatted_code_contexts = self.format_documents(code_contexts)
+
+        # Generate response
+        response = self.generate_response(query, formatted_doc_contexts, formatted_code_contexts)
+
+        # Add the response to the existing query in the conversation history
+        #self.add_to_conversation(llm_response=response)
+
+        return response
+
     #not supported yet, need session handling in app.py
     def add_to_conversation(self, user_query=None, llm_response=None):
         """
@@ -132,7 +169,7 @@ class KadiAPYBot:
         context = self.vector_store.similarity_search(query = query, k=k, filter=filter)
         return context
 
-    def generate_response(self, query, history, doc_context, code_context):
+    def generate_response(self, query, doc_context, code_context):
         """
         Generate a response using the retrieved contexts and the LLM.
         """
@@ -165,40 +202,40 @@ class KadiAPYBot:
         return self.llm.invoke(prompt).content
 
 
-    def generate_response(self, query, history, doc_context, code_context):
-        """
-        Generate a response using the retrieved contexts and the LLM.
-        """
-        formatted_history = format_history(history)
-
-        # Update the prompt with history included
-        prompt = f"""
-        You are a Python programming assistant specialized in the "Kadi-APY" library.
-        The "Kadi-APY" library is a Python package designed to facilitate interaction with the REST-like API of a software platform called Kadi4Mat.
-        Your task is to answer the user's query based on the guidelines, and if needed, combine understanding provided by
-        "Document Snippets" with the implementation details provided by "Code Snippets."
+    # def generate_response(self, query, history, doc_context, code_context):
+    #     """
+    #     Generate a response using the retrieved contexts and the LLM.
+    #     """
+    #     formatted_history = format_history(history)
+
+    #     # Update the prompt with history included
+    #     prompt = f"""
+    #     You are a Python programming assistant specialized in the "Kadi-APY" library.
+    #     The "Kadi-APY" library is a Python package designed to facilitate interaction with the REST-like API of a software platform called Kadi4Mat.
+    #     Your task is to answer the user's query based on the guidelines, and if needed, combine understanding provided by
+    #     "Document Snippets" with the implementation details provided by "Code Snippets."
 
-        Guidelines if generating code:
-        - Display the complete code first, followed by a concise explanation in no more than 5 sentences.
+    #     Guidelines if generating code:
+    #     - Display the complete code first, followed by a concise explanation in no more than 5 sentences.
 
-        General Guidelines:
-        - Refer to the "Conversation History" if it provides context that could enhance your understanding of the user's query.
-        - If the user's query cannot be fulfilled based on the provided snippets, reply with "The API does not support the requested functionality."
-        - If the user's query does not implicate any task, reply with a question asking the user to elaborate.
+    #     General Guidelines:
+    #     - Refer to the "Conversation History" if it provides context that could enhance your understanding of the user's query.
+    #     - If the user's query cannot be fulfilled based on the provided snippets, reply with "The API does not support the requested functionality."
+    #     - If the user's query does not implicate any task, reply with a question asking the user to elaborate.
 
-        Conversation History:
-        {formatted_history}
+    #     Conversation History:
+    #     {formatted_history}
 
-        Document Snippets:
-        {doc_context}
+    #     Document Snippets:
+    #     {doc_context}
 
-        Code Snippets:
-        {code_context}
+    #     Code Snippets:
+    #     {code_context}
 
-        Query:
-        {query}
-        """
-        return self.llm.invoke(prompt).content
+    #     Query:
+    #     {query}
+    #     """
+    #     return self.llm.invoke(prompt).content
 
 
 
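
For reference, a minimal usage sketch of the flow after this commit: process_query now takes only the query string, and history handling stays internal via add_to_conversation (disabled until app.py gains session handling). Everything outside the call itself is assumed here: the KadiAPYBot constructor arguments, the example query text, and the surrounding script are illustrative, not part of this diff.

# Illustrative sketch, not part of the commit: drives the single-argument
# process_query() added above. Constructor arguments are an assumption, since
# the bot's setup (vector store, LLM) is not shown in this diff.
from kadi_apy_bot import KadiAPYBot

bot = KadiAPYBot()  # assumed: real initialization happens elsewhere (e.g. app.py)

# chat_history is no longer passed in; the bot rewrites the query, predicts
# library usage, retrieves doc/code contexts, and generates the answer itself.
answer = bot.process_query("How do I create a record using the Kadi-APY library?")
print(answer)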