Update app.py
app.py
CHANGED
@@ -225,25 +225,54 @@ prompt_template = ChatPromptTemplate.from_messages([
     ("human", "{question}")
 ])
 
-def process_question(question: str) -> tuple[str, str]:
-    # Check cache first
-    if question in question_cache:
-        return question_cache[question]
+# def process_question(question: str) -> tuple[str, str]:
+#     # Check cache first
+#     if question in question_cache:
+#         return question_cache[question]
 
-    relevant_docs = retriever(question)
-    context = "\n".join([doc.page_content for doc in relevant_docs])
+#     relevant_docs = retriever(question)
+#     context = "\n".join([doc.page_content for doc in relevant_docs])
 
-    prompt = prompt_template.format_messages(
-        context=context,
-        question=question
-    )
+#     prompt = prompt_template.format_messages(
+#         context=context,
+#         question=question
+#     )
 
-    response = llm(prompt)
-    result = (response.content, context)
+#     response = llm(prompt)
+#     result = (response.content, context)
 
-    # Cache the result
-    question_cache[question] = result
-    return result
+#     # Cache the result
+#     question_cache[question] = result
+#     return result
+
+def process_question(question: str):
+    """
+    Process the question and yield the answer progressively.
+    """
+    # Check cache first
+    if question in question_cache:
+        return question_cache[question]  # Return directly from the cache if available
+
+    relevant_docs = retriever(question)
+    context = "\n".join([doc.page_content for doc in relevant_docs])
+
+    prompt = prompt_template.format_messages(
+        context=context,
+        question=question
+    )
+
+    response = ""  # Initialize the response
+    # Here we assume that 'llm.stream' is a generator that returns chunks
+    for chunk in llm.stream(prompt):  # assumes llm.stream yields response chunks
+        if isinstance(chunk, str):
+            response += chunk  # Accumulate the response if it is already a string
+        else:
+            response += chunk.content  # Otherwise, take the chunk's content (if the chunk is a specific object type)
+
+        yield response, context  # Yield the updated response and the context
+
+    # Cache the result at the end
+    question_cache[question] = (response, context)
 
 # Custom CSS for right-aligned text in textboxes
 custom_css = """
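
Because process_question is now a generator that yields an updated (answer, context) pair after each streamed chunk, the caller has to iterate it rather than call it once. A minimal consumption sketch, assuming the rest of app.py wires the function into a Gradio Blocks UI (the component names and layout below are hypothetical and not part of this commit):

import gradio as gr

# Hypothetical wiring: Gradio treats a generator handler as a streaming
# function and pushes each yielded (answer, context) pair into the outputs.
with gr.Blocks(css=custom_css) as demo:
    question_box = gr.Textbox(label="Question")
    answer_box = gr.Textbox(label="Answer")
    context_box = gr.Textbox(label="Retrieved context")

    question_box.submit(
        process_question,               # the generator defined in the diff above
        inputs=question_box,
        outputs=[answer_box, context_box],
    )

demo.launch()

One caveat visible in the new code: on a cache hit the generator returns instead of yielding, so a streaming consumer receives no update for cached questions; yielding the cached pair once would keep both paths uniform.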