Update kadiApy_ragchain.py
kadiApy_ragchain.py  CHANGED  (+53 -52)
@@ -9,7 +9,7 @@ class KadiApyRagchain:
         self.conversation = []
 
 
-    def process_query(self, query):
+    def process_query(self, query, chat_history):
         """
         Process a user query, handle history, retrieve contexts, and generate a response.
         """
@@ -40,7 +40,7 @@ class KadiApyRagchain:
         formatted_code_contexts = self.format_documents(code_contexts)
 
         # Generate response
-        response = self.generate_response(query, formatted_doc_contexts, formatted_code_contexts)
+        response = self.generate_response(query, chat_history, formatted_doc_contexts, formatted_code_contexts)
 
         # Add the response to the existing query in the conversation history
         #self.add_to_conversation(llm_response=response)
@@ -130,73 +130,74 @@ class KadiApyRagchain:
         context = self.vector_store.similarity_search(query = query, k=k, filter=filter)
         return context
 
-    def generate_response(self, query, doc_context, code_context):
-        """
-        Generate a response using the retrieved contexts and the LLM.
-        """
-
-        prompt = f"""You are a Python programming assistant specialized in the "Kadi-APY" library.
-        The "Kadi-APY" library is a Python package designed to facilitate interaction with the REST-like API of a software platform called Kadi4Mat.
-        Your task is to answer the user's query based on the guidelines and if needed the combine understanding provided by
-        "Document snippets" with the implementation details provided by "Code Snippets."
-
-        Guidelines if generating code:
-        - Display the complete code first, followed by a concise explanation in no more than 5 sentences.
-
-        General Guideline:
-        - If the user's query can not be fullfilled based on the provided snippets, reply with "The API does not support the requested functionality"
-        - If the user's query does not implicate any task, reply with a question asking the user to elaborate.
-
-        "Document Snippets": These contain documentation excerpts and code examples that explain how to use the "Kadi-APY" library
-        Document Snippets:
-        {doc_context}
-
-        "Code Snippets": These are raw source code fragments from the implementation of the "Kadi-APY" library.
-        Code Snippets:
-        {code_context}
-
-        Query:
-        {query}
-        """
-        return self.llm.invoke(prompt).content
+    # def generate_response(self, query, doc_context, code_context):
+    #     """
+    #     Generate a response using the retrieved contexts and the LLM.
+    #     """
+
+    #     prompt = f"""You are a Python programming assistant specialized in the "Kadi-APY" library.
+    #     The "Kadi-APY" library is a Python package designed to facilitate interaction with the REST-like API of a software platform called Kadi4Mat.
+    #     Your task is to answer the user's query based on the guidelines and if needed the combine understanding provided by
+    #     "Document snippets" with the implementation details provided by "Code Snippets."
+
+    #     Guidelines if generating code:
+    #     - Display the complete code first, followed by a concise explanation in no more than 5 sentences.
+
+    #     General Guideline:
+    #     - If the user's query can not be fullfilled based on the provided snippets, reply with "The API does not support the requested functionality"
+    #     - If the user's query does not implicate any task, reply with a question asking the user to elaborate.
+
+    #     "Document Snippets": These contain documentation excerpts and code examples that explain how to use the "Kadi-APY" library
+    #     Document Snippets:
+    #     {doc_context}
+
+    #     "Code Snippets": These are raw source code fragments from the implementation of the "Kadi-APY" library.
+    #     Code Snippets:
+    #     {code_context}
+
+    #     Query:
+    #     {query}
+    #     """
+    #     return self.llm.invoke(prompt).content
+
+    def generate_response(self, query, chat_history, doc_context, code_context):
+        """
+        Generate a response using the retrieved contexts and the LLM.
+        """
+        formatted_history = format_history(chat_history)
+
+        # Update the prompt with history included
+        prompt = f"""
+        You are a Python programming assistant specialized in the "Kadi-APY" library.
+        The "Kadi-APY" library is a Python package designed to facilitate interaction with the REST-like API of a software platform called Kadi4Mat.
+        Your task is to answer the user's query based on the guidelines, and if needed, combine understanding provided by
+        "Document Snippets" with the implementation details provided by "Code Snippets."
+
+        Guidelines if generating code:
+        - Display the complete code first, followed by a concise explanation in no more than 5 sentences.
+
+        General Guidelines:
+        - Refer to the "Chat History" if it provides context that could enhance your understanding of the user's query.
+        - Always include the "Chat History" if relevant to the user's query for continuity and clarity in responses.
+        - If the user's query cannot be fulfilled based on the provided snippets, reply with "The API does not support the requested functionality."
+        - If the user's query does not implicate any task, reply with a question asking the user to elaborate.
+
+        Chat History:
+        {formatted_history}
+
+        Document Snippets:
+        {doc_context}
+
+        Code Snippets:
+        {code_context}
+
+        Query:
+        {query}
+        """
+        return self.llm.invoke(prompt).content
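
The updated generate_response calls a format_history helper that is not part of this diff, so its definition, its location (module-level function or class method), and the exact shape of chat_history are not visible here. A minimal sketch of what such a helper might look like, assuming chat_history is a list of (user_message, assistant_message) pairs (both the placement and the history format are assumptions):

# Hypothetical helper, not part of this commit: flattens the chat history
# into a plain-text transcript that can be interpolated into the prompt.
# Assumes chat_history is a list of (user_message, assistant_message) pairs.
def format_history(chat_history):
    if not chat_history:
        return "No previous conversation."
    lines = []
    for user_msg, assistant_msg in chat_history:
        lines.append(f"User: {user_msg}")
        lines.append(f"Assistant: {assistant_msg}")
    return "\n".join(lines)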
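
For completeness, here is a sketch of how a caller could thread chat_history through the updated process_query. The Space's front-end code is not part of this commit, so the constructor arguments, the history format, and the assumption that process_query returns the generated response are all illustrative:

# Hypothetical caller, not shown in this diff.
ragchain = KadiApyRagchain(llm, vector_store)  # constructor arguments assumed

chat_history = []  # assumed format: list of (user_message, assistant_message) pairs

def chat_turn(user_query):
    # Assumes process_query returns the generated response for this turn.
    response = ragchain.process_query(user_query, chat_history)
    chat_history.append((user_query, response))
    return response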