Update kadiApy_ragchain.py
kadiApy_ragchain.py  CHANGED  (+32 -2)
@@ -134,7 +134,7 @@ class KadiApyRagchain:
         context = self.vector_store.similarity_search(query = query, k=k, filter=filter)
         return context

-    def generate_response(self, query, chat_history, doc_context, code_context):
+    def generate_response2(self, query, chat_history, doc_context, code_context):
         """
         Generate a response using the retrieved contexts and the LLM.
         """
@@ -170,7 +170,37 @@
         """
         return self.llm.invoke(prompt).content

-
+    def generate_response(self, query, chat_history, doc_context, code_context):
+        """
+        Generate a response using the retrieved contexts and the LLM.
+        """
+
+        # Update the prompt with history included
+        prompt = f"""
+        You are a Python programming assistant specialized in the "Kadi-APY" library.
+        The "Kadi-APY" library is a Python package designed to facilitate interaction with the REST-like API of a software platform called Kadi4Mat.
+        Your task is to answer the user's query based on the guidelines, and if needed, combine understanding provided by
+        "Document Snippets" with the implementation details provided by "Code Snippets."
+
+        Guidelines if generating code:
+        - Display the complete code first, followed by a concise explanation in no more than 5 sentences.
+
+        General Guidelines:
+        - Refer to the chat history if it provides context that could enhance your understanding of the user's query.
+        - Always include the chat history if relevant to the user's query for continuity and clarity in responses.
+        - If the user's query cannot be fulfilled based on the provided snippets, reply with "The API does not support the requested functionality."
+        - If the user's query does not implicate any task, reply with a question asking the user to elaborate.
+
+        Document Snippets:
+        {doc_context}
+
+        Code Snippets:
+        {code_context}
+
+        Query:
+        {query}
+        """
+        return self.llm.invoke(prompt).content

     def format_documents(self, documents):
         formatted_docs = []
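
For context, a minimal usage sketch of the new generate_response method follows. This is not part of the commit: the KadiApyRagchain constructor signature is not visible in this diff, so the llm/vector_store keyword arguments, the import path, the model choice, and the toy FAISS index below are all assumptions for illustration.

# Usage sketch only; everything not visible in the diff is an assumption.
# KadiApyRagchain is assumed to take `llm` and `vector_store` keyword
# arguments, and the import path is inferred from the file name.
from langchain_community.vectorstores import FAISS
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

from kadiApy_ragchain import KadiApyRagchain

# Toy stand-in for the real Kadi-APY documentation/code index.
vector_store = FAISS.from_texts(
    ["record.upload_file(path) uploads a local file to a Kadi4Mat record."],
    OpenAIEmbeddings(),
    metadatas=[{"source": "docs"}],
)

chain = KadiApyRagchain(llm=ChatOpenAI(model="gpt-4o-mini"), vector_store=vector_store)

query = "How do I upload a file to a record with kadi-apy?"
docs = vector_store.similarity_search(query, k=3, filter={"source": "docs"})

answer = chain.generate_response(
    query=query,
    chat_history=[],                           # no prior turns in this sketch
    doc_context=chain.format_documents(docs),
    code_context="",                           # no code snippets retrieved here
)
print(answer)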