# colibri.assistant.ai / utils/async_flowise.py
import asyncio
import aiohttp
import logging
from typing import Dict, Optional, Any, AsyncGenerator
import json
from datetime import timedelta
from utils.logging_utils import log_to_file
import os
import hashlib
from dotenv import load_dotenv
from utils.rate_limiter import RateLimiter
from utils.circuit_breaker import AsyncCircuitBreaker
from utils.cache_manager import CacheManager

# Load environment variables from a .env file so the Flowise endpoints below are populated
load_dotenv()

FLOWISE_API_URL_SANTE = os.getenv("FLOWISE_API_URL_SANTE")
FLOWISE_API_URL_CAR = os.getenv("FLOWISE_API_URL_CAR")
FLOWISE_API_URL_BTP = os.getenv("FLOWISE_API_URL_BTP")
FLOWISE_API_URL_RH = os.getenv("FLOWISE_API_URL_RH")
FLOWISE_API_URL_PILOTAGE = os.getenv("FLOWISE_API_URL_PILOTAGE")
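
# Each of the URLs above is expected to point to a Flowise prediction endpoint,
# typically of the form https://<host>/api/v1/prediction/<chatflow-id>
# (check_health() relies on this shape to recover the base URL).
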
class AsyncFlowiseClient:
def __init__(self):
"""Initialise le client asynchrone pour Flowise"""
self.api_urls = {
'insuranceSANTE': FLOWISE_API_URL_SANTE,
'insuranceCAR': FLOWISE_API_URL_CAR,
'insuranceBTP': FLOWISE_API_URL_BTP,
'RH': FLOWISE_API_URL_RH,
'Pilotage': FLOWISE_API_URL_PILOTAGE
}
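        # Assistant types whose environment variable is not set map to None here;
        # check_health() will then simply report them as unhealthy.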
self.session = None
self.rate_limiter = RateLimiter(requests_per_second=5, burst_limit=10)
self.circuit_breaker = AsyncCircuitBreaker(
failure_threshold=5,
recovery_timeout=60,
half_open_timeout=30
)
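        # Note: the circuit breaker is configured here but is not yet applied to
        # the outgoing requests in this module; query_assistant() nevertheless
        # contains handling for a "Circuit breaker ouvert" error message.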
self.cache_manager = CacheManager()
def _generate_cache_key(self, question: str, assistant_type: str) -> str:
"""Génère une clé de cache unique pour une question"""
question_hash = hashlib.md5(question.encode()).hexdigest()
return f"assistant_response:{assistant_type}:{question_hash}"
def _clean_question(self, question: str) -> str:
"""Nettoie la question des métadonnées"""
try:
data = json.loads(question)
if isinstance(data, dict) and "next_inputs" in data and "query" in data["next_inputs"]:
return data["next_inputs"]["query"]
except json.JSONDecodeError:
pass
return question
def _process_response(self, response_data: Any) -> Dict[str, str]:
"""Extrait uniquement le texte de la réponse"""
try:
            # If the response is a string, try to parse it as JSON
if isinstance(response_data, str):
try:
data = json.loads(response_data)
if isinstance(data, dict) and "text" in data:
return {"answer": data["text"]}
except json.JSONDecodeError:
return {"answer": response_data}
            # If it is already a dictionary
if isinstance(response_data, dict) and "text" in response_data:
return {"answer": response_data["text"]}
            # If the text cannot be extracted, return an error
return {"error": "Format de réponse non reconnu"}
except Exception as e:
log_to_file(f"Erreur lors du traitement de la réponse : {str(e)}", level=logging.ERROR)
return {"error": "Format de réponse non reconnu"}
async def query_assistant_stream(self, question: str, assistant_type: str, user_id: Optional[str] = None) -> AsyncGenerator[str, None]:
"""Envoie une requête en streaming à Flowise"""
if assistant_type not in self.api_urls:
log_to_file(f"Type d'assistant non valide : {assistant_type}", level=logging.ERROR)
yield json.dumps({"error": f"Type d'assistant non valide : {assistant_type}"})
return
if not await self.rate_limiter.acquire(user_id):
yield json.dumps({"error": "Trop de requêtes. Veuillez réessayer dans quelques instants."})
return
if not self.session:
self.session = aiohttp.ClientSession()
try:
url = self.api_urls[assistant_type]
clean_question = self._clean_question(question)
payload = {"question": clean_question}
async with self.session.post(url, json=payload) as response:
if response.status != 200:
error_text = await response.text()
log_to_file(f"Erreur lors de la requête à {assistant_type}: {error_text}", level=logging.ERROR)
yield json.dumps({"error": f"Erreur {response.status}: {error_text}"})
return
async for chunk in response.content.iter_any():
if chunk:
try:
text = chunk.decode('utf-8')
processed_response = self._process_response(text)
if "answer" in processed_response:
yield processed_response["answer"]
except Exception as e:
log_to_file(f"Erreur de décodage du chunk : {str(e)}", level=logging.ERROR)
continue
except Exception as e:
error_message = str(e)
log_to_file(f"Erreur lors du streaming pour {assistant_type}: {error_message}", level=logging.ERROR)
yield json.dumps({"error": error_message})
async def query_assistant(self, question: str, assistant_type: str, user_id: Optional[str] = None) -> Dict[str, Any]:
"""Envoie une requête asynchrone à Flowise avec cache"""
if assistant_type not in self.api_urls:
log_to_file(f"Type d'assistant non valide : {assistant_type}", level=logging.ERROR)
return {"error": f"Type d'assistant non valide : {assistant_type}"}
if not await self.rate_limiter.acquire(user_id):
return {"error": "Trop de requêtes. Veuillez réessayer dans quelques instants."}
cache_key = self._generate_cache_key(question, assistant_type)
cached_response = await self.cache_manager.get(cache_key)
if cached_response:
log_to_file(f"Réponse trouvée dans le cache pour {assistant_type}", level=logging.INFO)
return cached_response
if not self.session:
self.session = aiohttp.ClientSession()
try:
url = self.api_urls[assistant_type]
clean_question = self._clean_question(question)
payload = {"question": clean_question}
async with self.session.post(url, json=payload) as response:
if response.status != 200:
error_text = await response.text()
log_to_file(f"Erreur lors de la requête à {assistant_type}: {error_text}", level=logging.ERROR)
return {"error": f"Erreur {response.status}: {error_text}"}
response_text = await response.text()
processed_response = self._process_response(response_text)
if "error" not in processed_response:
await self.cache_manager.set(
cache_key,
processed_response,
expiry=timedelta(hours=1)
)
log_to_file(f"Réponse mise en cache pour {assistant_type}", level=logging.INFO)
return processed_response
except Exception as e:
error_message = str(e)
if "Circuit breaker ouvert" in error_message:
log_to_file(f"Service {assistant_type} temporairement indisponible (Circuit breaker)", level=logging.ERROR)
return {"error": "Service temporairement indisponible. Veuillez réessayer plus tard."}
log_to_file(f"Erreur lors de la requête à {assistant_type}: {error_message}", level=logging.ERROR)
return {"error": error_message}
async def check_health(self) -> Dict[str, bool]:
"""Vérifie la santé des connexions aux différents assistants"""
if not self.session:
self.session = aiohttp.ClientSession()
results = {}
for assistant_type, url in self.api_urls.items():
try:
                # Drop the last four URL path segments to recover the base service URL
                base_url = '/'.join(url.split('/')[:-4])
async with self.session.get(
f"{base_url}/health",
timeout=aiohttp.ClientTimeout(total=5)
) as response:
results[assistant_type] = response.status == 200
log_to_file(f"Statut santé {assistant_type}: {response.status}", level=logging.INFO)
except Exception as e:
log_to_file(f"Erreur de santé pour {assistant_type}: {str(e)}", level=logging.ERROR)
results[assistant_type] = False
return results
async def close(self):
"""Ferme la session client"""
if self.session:
await self.session.close()
self.session = None
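

# ---------------------------------------------------------------------------
# Minimal usage sketch (assumptions: a .env file providing the FLOWISE_API_URL_*
# variables above and a reachable Flowise instance; the "insuranceSANTE" key,
# the question text and the user id are placeholder values). The real
# application is expected to manage the client's lifecycle itself.
# ---------------------------------------------------------------------------
if __name__ == "__main__":

    async def _demo() -> None:
        client = AsyncFlowiseClient()
        try:
            # Check which Flowise endpoints answer on /health
            print("Health:", await client.check_health())

            # Single (non-streaming) question; successful answers are cached for one hour
            answer = await client.query_assistant(
                "Quelles garanties couvre ce contrat ?",
                assistant_type="insuranceSANTE",
                user_id="demo-user",
            )
            print("Answer:", answer)

            # Same question, streamed chunk by chunk
            async for chunk in client.query_assistant_stream(
                "Quelles garanties couvre ce contrat ?",
                assistant_type="insuranceSANTE",
                user_id="demo-user",
            ):
                print(chunk, end="", flush=True)
        finally:
            await client.close()

    asyncio.run(_demo())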