"""
Flare – Chat Handler (v1.7 · parameter parsing fix)
==========================================
"""
import os
import re, json, sys, httpx
from datetime import datetime
from typing import Dict, List, Optional
from fastapi import APIRouter, HTTPException, Header
from pydantic import BaseModel
import requests
from prompt_builder import build_intent_prompt, build_parameter_prompt, build_smart_parameter_question_prompt, extract_params_from_question
from utils import log
from api_executor import call_api as execute_api
from validation_engine import validate
from session import session_store, Session
from llm_interface import LLMInterface, SparkLLM, GPT4oLLM
from config_provider import ConfigProvider
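# High-level flow implemented in this module (derived from the handlers below):
#   1. POST /start_session creates a Session for a published project version and
#      returns a greeting.
#   2. POST /chat routes on session.state: "await_param" goes to parameter
#      follow-up, anything else to intent detection via the configured LLM.
#   3. Once all required parameters are collected, the intent's API is executed
#      ("call_api") and the JSON result is humanized ("humanize") by the LLM.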
# ───────────────────────── CONFIG ───────────────────────── #
# Global config reference
cfg = None
def get_config():
    """Always get fresh config"""
    global cfg
    cfg = ConfigProvider.get()
    return cfg

# Initialize on module load
cfg = get_config()
# Global LLM instance
llm_provider: Optional[LLMInterface] = None
# ───────────────────────── HELPERS ───────────────────────── #
def _trim_response(raw: str) -> str:
    """
    Remove everything after the first logical assistant block or intent tag.
    Also strips trailing 'assistant' artifacts and prompt injections.
    """
    # Stop at our own rules if model leaked them
    for stop in ["#DETECTED_INTENT", "⚠️", "\nassistant", "assistant\n", "assistant"]:
        idx = raw.find(stop)
        if idx != -1:
            raw = raw[:idx]
    # Normalise the greeting
    raw = re.sub(r"Hoş[\s-]?geldin(iz)?", "Hoş geldiniz", raw, flags=re.IGNORECASE)
    return raw.strip()

def _safe_intent_parse(raw: str) -> tuple[str, str]:
    """Extract intent name and extra tail."""
    m = re.search(r"#DETECTED_INTENT:\s*([A-Za-z0-9_-]+)", raw)
    if not m:
        return "", raw
    name = m.group(1)
    # Remove 'assistant' suffix if exists
    if name.endswith("assistant"):
        name = name[:-9]  # Remove last 9 chars ("assistant")
        log(f"🔧 Removed 'assistant' suffix from intent name")
    tail = raw[m.end():]
    log(f"🎯 Parsed intent: {name}")
    return name, tail

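# Illustrative example (not executed): for a raw reply like
#   "#DETECTED_INTENT: flight-bookingassistant ...tail..."
# _safe_intent_parse() returns ("flight-booking", " ...tail..."), stripping the
# leaked "assistant" suffix from the captured intent name.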
# ───────────────────────── LLM SETUP ───────────────────────── #
def setup_llm_provider():
    """Initialize LLM provider based on internal_prompt config"""
    global llm_provider
    cfg = ConfigProvider.get()
    internal_prompt = cfg.global_config.internal_prompt
    if not internal_prompt:
        log("⚠️ No internal_prompt configured, using default Spark")
        # Get Spark token
        spark_token = _get_spark_token()
        if not spark_token:
            log("❌ SPARK_TOKEN not found")
            raise ValueError("SPARK_TOKEN not configured")
        spark_endpoint = str(cfg.global_config.spark_endpoint).rstrip("/")
        work_mode = cfg.global_config.work_mode
        log(f"🔌 Initializing SparkLLM: {spark_endpoint}")
        log(f"🔧 Work mode: {work_mode}")
        llm_provider = SparkLLM(
            spark_endpoint=spark_endpoint,
            spark_token=spark_token,
            work_mode=work_mode
        )
        log("✅ SparkLLM initialized")
        return
    # Check if it's a GPT-4o config
    if internal_prompt.get("provider") == "gpt-4o":
        api_key = internal_prompt.get("api_key")
        if not api_key:
            if cfg.global_config.is_cloud_mode():
                api_key = os.environ.get("OPENAI_API_KEY")
            else:
                from dotenv import load_dotenv
                load_dotenv()
                api_key = os.getenv("OPENAI_API_KEY")
        if not api_key:
            raise ValueError("OpenAI API key not configured")
        llm_provider = GPT4oLLM(
            api_key=api_key,
            model=internal_prompt.get("model", "gpt-4o"),
            max_tokens=internal_prompt.get("max_tokens", 4096),
            temperature=internal_prompt.get("temperature", 0.7)
        )
        log("✅ GPT-4o LLM initialized")
    else:
        # Default to Spark
        spark_token = _get_spark_token()
        if not spark_token:
            raise ValueError("SPARK_TOKEN not configured")
        spark_endpoint = str(cfg.global_config.spark_endpoint).rstrip("/")
        work_mode = cfg.global_config.work_mode
        llm_provider = SparkLLM(
            spark_endpoint=spark_endpoint,
            spark_token=spark_token,
            work_mode=work_mode
        )
        log("✅ SparkLLM initialized (via internal_prompt)")

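# Note: internal_prompt is treated as a dict-like config above; the GPT-4o branch
# reads the keys "provider", "api_key", "model", "max_tokens" and "temperature",
# e.g. (illustrative values only, defaults taken from the code above):
#   {"provider": "gpt-4o", "model": "gpt-4o", "max_tokens": 4096, "temperature": 0.7}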
def _get_spark_token() -> Optional[str]:
    """Get Spark token based on work mode"""
    cfg = ConfigProvider.get()
    if cfg.global_config.is_cloud_mode():
        # Cloud mode - use HuggingFace secrets
        token = os.environ.get("SPARK_TOKEN")
        if token:
            log("🔑 Using SPARK_TOKEN from environment")
        return token
    else:
        # On-premise mode - use .env file
        from dotenv import load_dotenv
        load_dotenv()
        return os.getenv("SPARK_TOKEN")

# ───────────────────────── SPARK/LLM CALL ───────────────────────── #
async def spark_generate(s: Session, prompt: str, user_msg: str) -> str:
    """Call LLM (Spark or configured provider) with proper error handling"""
    global llm_provider
    if llm_provider is None:
        setup_llm_provider()
    try:
        # Get version config from session
        version = s.get_version_config()
        if not version:
            # Fallback: get from project config
            project = next((p for p in cfg.projects if p.name == s.project_name), None)
            if not project:
                raise ValueError(f"Project not found: {s.project_name}")
            version = next((v for v in project.versions if v.published), None)
            if not version:
                raise ValueError("No published version found")
        log(f"🚀 Calling LLM for session {s.session_id[:8]}...")
        log(f"📋 Prompt preview (first 200 chars): {prompt[:200]}...")
        # Call the configured LLM provider
        raw = await llm_provider.generate(
            project_name=s.project_name,
            user_input=user_msg,
            system_prompt=prompt,
            context=s.chat_history[-10:],
            version_config=version
        )
        log(f"🪄 LLM raw response: {raw[:120]!r}")
        return raw
    except httpx.TimeoutException:
        log(f"⏱️ LLM timeout for session {s.session_id[:8]}")
        raise
    except Exception as e:
        log(f"❌ LLM error: {e}")
        raise

# ───────────────────────── ALLOWED INTENTS ───────────────────────── #
ALLOWED_INTENTS = {"flight-booking", "flight-info", "booking-cancel"}
# ───────────────────────── FASTAPI ───────────────────────── #
router = APIRouter()
@router.get("/")
def health():
    return {"status": "ok", "sessions": len(session_store._sessions)}

class StartRequest(BaseModel):
    project_name: str

class ChatRequest(BaseModel):
    user_input: str

class ChatResponse(BaseModel):
    session_id: str
    answer: str

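# Illustrative client flow (assuming this router is mounted at the app root):
#   POST /start_session with {"project_name": "<project>"}  -> {"session_id", "answer"}
#   POST /chat with {"user_input": "..."} and header "X-Session-ID: <session_id>"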
@router.post("/start_session", response_model=ChatResponse)
async def start_session(req: StartRequest):
    """Create new session"""
    try:
        # Validate project exists
        project = next((p for p in cfg.projects if p.name == req.project_name and p.enabled), None)
        if not project:
            raise HTTPException(404, f"Project '{req.project_name}' not found or disabled")
        # Find published version
        version = next((v for v in project.versions if v.published), None)
        if not version:
            raise HTTPException(404, f"No published version for project '{req.project_name}'")
        # Create session with version config
        session = session_store.create_session(req.project_name, version)
        greeting = "Hoş geldiniz! Size nasıl yardımcı olabilirim?"
        session.add_turn("assistant", greeting)
        return ChatResponse(session_id=session.session_id, answer=greeting)
    except HTTPException:
        # Preserve intended status codes (404 etc.) instead of converting them to 500
        raise
    except Exception as e:
        log(f"❌ Error creating session: {e}")
        raise HTTPException(500, str(e))

@router.post("/chat", response_model=ChatResponse)
async def chat(body: ChatRequest, x_session_id: str = Header(...)):
    """Process chat message"""
    try:
        # Get session
        session = session_store.get_session(x_session_id)
        if not session:
            raise HTTPException(404, "Session not found")
        user_input = body.user_input.strip()
        if not user_input:
            raise HTTPException(400, "Empty message")
        log(f"💬 User input: {user_input}")
        log(f"📊 Session state: {session.state}, last_intent: {session.last_intent}")
        log(f"📊 Session version: {session.version_number}")
        session.add_turn("user", user_input)
        # Get version config from session
        version = session.get_version_config()
        if not version:
            raise HTTPException(500, "Version configuration lost")
        # Handle based on state
        if session.state == "await_param":
            log(f"🔄 Handling parameter followup for missing: {session.awaiting_parameters}")
            answer = await _handle_parameter_followup(session, user_input)
        else:
            log("🆕 Handling new message")
            answer = await _handle_new_message(session, user_input)
        session.add_turn("assistant", answer)
        return ChatResponse(session_id=session.session_id, answer=answer)
    except HTTPException:
        raise
    except Exception as e:
        log(f"❌ Chat error: {e}")
        session.reset_flow()
        error_msg = "Bir hata oluştu. Lütfen tekrar deneyin."
        session.add_turn("assistant", error_msg)
        return ChatResponse(session_id=x_session_id, answer=error_msg)

# ───────────────────────── MESSAGE HANDLERS ───────────────────────── #
async def _handle_new_message(session: Session, user_input: str) -> str:
    """Handle new message (not parameter followup)"""
    # Get version config from session
    version = session.get_version_config()
    if not version:
        log("❌ Version config not found")
        return "Bir hata oluştu. Lütfen tekrar deneyin."
    # Build intent detection prompt
    prompt = build_intent_prompt(
        version.general_prompt,
        session.chat_history,
        user_input,
        version.intents
    )
    # Get LLM response
    raw = await spark_generate(session, prompt, user_input)
    # Empty response fallback
    if not raw:
        log("⚠️ Empty response from LLM")
        return "Üzgünüm, mesajınızı anlayamadım. Lütfen tekrar dener misiniz?"
    # Check for intent
    if not raw.startswith("#DETECTED_INTENT"):
        # Small talk response
        log("💬 No intent detected, returning small talk")
        return _trim_response(raw)
    # Parse intent
    intent_name, tail = _safe_intent_parse(raw)
    # Validate intent
    if intent_name not in ALLOWED_INTENTS:
        log(f"⚠️ Invalid intent: {intent_name}")
        return _trim_response(tail) if tail else "Size nasıl yardımcı olabilirim?"
    # Short message guard (less than 3 words usually means incomplete request)
    if len(user_input.split()) < 3 and intent_name != "flight-info":
        log(f"⚠️ Message too short ({len(user_input.split())} words) for intent {intent_name}")
        return _trim_response(tail) if tail else "Lütfen talebinizi biraz daha detaylandırır mısınız?"
    # Find intent config
    intent_config = next((i for i in version.intents if i.name == intent_name), None)
    if not intent_config:
        log(f"❌ Intent config not found for: {intent_name}")
        return "Üzgünüm, bu işlemi gerçekleştiremiyorum."
    # Set intent in session
    session.last_intent = intent_name
    log(f"✅ Intent set: {intent_name}")
    # Log intent parameters
    log(f"📋 Intent parameters: {[p.name for p in intent_config.parameters]}")
    # Extract parameters
    return await _extract_parameters(session, intent_config, user_input)

async def _handle_parameter_followup(session: Session, user_input: str) -> str:
    """Handle parameter collection followup"""
    if not session.last_intent:
        log("⚠️ No last intent in session")
        session.reset_flow()
        return "Üzgünüm, hangi işlem için bilgi istediğimi unuttum. Baştan başlayalım."
    # Get version config from session
    version = session.get_version_config()
    if not version:
        log("❌ Version config not found")
        session.reset_flow()
        return "Bir hata oluştu. Lütfen tekrar deneyin."
    # Get intent config
    intent_config = next((i for i in version.intents if i.name == session.last_intent), None)
    if not intent_config:
        log(f"❌ Intent config not found for: {session.last_intent}")
        session.reset_flow()
        return "Bir hata oluştu. Lütfen tekrar deneyin."
    # Smart parameter collection
    if cfg.global_config.parameter_collection_config.smart_grouping:
        return await _handle_smart_parameter_collection(session, intent_config, user_input)
    else:
        return await _handle_simple_parameter_collection(session, intent_config, user_input)

async def _handle_simple_parameter_collection(session: Session, intent_config, user_input: str) -> str:
    """Original simple parameter collection logic"""
    # Try to extract missing parameters
    missing = session.awaiting_parameters
    log(f"🔍 Trying to extract missing params: {missing}")
    prompt = build_parameter_prompt(intent_config, missing, user_input, session.chat_history, intent_config.locale)
    raw = await spark_generate(session, prompt, user_input)
    # Try parsing with or without #PARAMETERS: prefix
    success = _process_parameters(session, intent_config, raw)
    if not success:
        # Increment miss count
        session.missing_ask_count += 1
        log(f"⚠️ No parameters extracted, miss count: {session.missing_ask_count}")
        if session.missing_ask_count >= 3:
            session.reset_flow()
            return "Üzgünüm, istediğiniz bilgileri anlayamadım. Başka bir konuda yardımcı olabilir miyim?"
        return "Üzgünüm, anlayamadım. Lütfen tekrar söyler misiniz?"
    # Check if we have all required parameters
    missing = _get_missing_parameters(session, intent_config)
    log(f"📊 Still missing params: {missing}")
    if missing:
        session.awaiting_parameters = missing
        param = next(p for p in intent_config.parameters if p.name == missing[0])
        return f"{param.caption} bilgisini alabilir miyim?"
    # All parameters collected, call API
    log("✅ All parameters collected, calling API")
    session.state = "call_api"
    return await _execute_api_call(session, intent_config)

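# The smart path below differs from the simple path above: it tracks which
# parameters were asked but left unanswered and groups up to
# max_params_per_question parameters into a single LLM-generated question.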
async def _handle_smart_parameter_collection(session: Session, intent_config, user_input: str) -> str:
    """Smart parameter collection with grouping and retry logic"""
    # Try to extract missing parameters
    missing = session.awaiting_parameters
    log(f"🔍 Trying to extract missing params: {missing}")
    prompt = build_parameter_prompt(intent_config, missing, user_input, session.chat_history, intent_config.locale)
    raw = await spark_generate(session, prompt, user_input)
    # Try parsing with or without #PARAMETERS: prefix
    success = _process_parameters(session, intent_config, raw)
    # Which parameters are still missing?
    still_missing = _get_missing_parameters(session, intent_config)
    # Identify parameters that were asked but not answered
    asked_but_not_answered = []
    for param in session.awaiting_parameters:
        if param in still_missing:
            asked_but_not_answered.append(param)
    # Record the unanswered ones in the session
    if asked_but_not_answered:
        session.mark_parameters_unanswered(asked_but_not_answered)
        log(f"❓ Parameters not answered: {asked_but_not_answered}")
    # Mark the answered ones
    for param in session.awaiting_parameters:
        if param not in still_missing:
            session.mark_parameter_answered(param)
            log(f"✅ Parameter answered: {param}")
    if still_missing:
        # Maximum retry check
        if session.missing_ask_count >= 3:
            session.reset_flow()
            return "Üzgünüm, istediğiniz bilgileri anlayamadım. Başka bir konuda yardımcı olabilir miyim?"
        # Build the smart parameter question
        return await _generate_smart_parameter_question(session, intent_config, still_missing)
    # All parameters collected
    log("✅ All parameters collected, calling API")
    session.state = "call_api"
    return await _execute_api_call(session, intent_config)

async def _generate_smart_parameter_question(session: Session, intent_config, missing_params: List[str]) -> str:
    """Generate smart parameter collection question"""
    # Decide how many parameters to ask for in a single question
    max_params = cfg.global_config.parameter_collection_config.max_params_per_question
    # Select parameters in priority order
    params_to_ask = []
    # First, parameters that have not been asked yet
    for param in missing_params:
        if session.get_parameter_ask_count(param) == 0:
            params_to_ask.append(param)
            if len(params_to_ask) >= max_params:
                break
    # If there is still room, parameters asked before but left unanswered
    if len(params_to_ask) < max_params and cfg.global_config.parameter_collection_config.retry_unanswered:
        for param in session.unanswered_parameters:
            if param in missing_params and param not in params_to_ask:
                params_to_ask.append(param)
                if len(params_to_ask) >= max_params:
                    break
    # If there is still room, the remaining parameters
    if len(params_to_ask) < max_params:
        for param in missing_params:
            if param not in params_to_ask:
                params_to_ask.append(param)
                if len(params_to_ask) >= max_params:
                    break
    # Record the question in the session
    session.record_parameter_question(params_to_ask)
    session.awaiting_parameters = params_to_ask
    session.missing_ask_count += 1
    # Build smart question prompt
    collected_params = {
        p.name: session.variables.get(p.variable_name, "")
        for p in intent_config.parameters
        if p.variable_name in session.variables
    }
    question_prompt = build_smart_parameter_question_prompt(
        intent_config,
        params_to_ask,
        session.chat_history,
        collected_params,
        session.unanswered_parameters,
        cfg.global_config.parameter_collection_config.collection_prompt
    )
    # Generate natural question
    question = await spark_generate(session, question_prompt, "")
    # Clean up the response
    question = _trim_response(question)
    log(f"🤖 Generated smart question for {params_to_ask}: {question}")
    return question

# ───────────────────────── PARAMETER HANDLING ───────────────────────── #
async def _extract_parameters(session: Session, intent_config, user_input: str) -> str:
    """Extract parameters from user input"""
    missing = _get_missing_parameters(session, intent_config)
    log(f"🔍 Missing parameters: {missing}")
    if not missing:
        # All parameters already available
        log("✅ All parameters already available")
        return await _execute_api_call(session, intent_config)
    # Build parameter extraction prompt
    prompt = build_parameter_prompt(intent_config, missing, user_input, session.chat_history)
    raw = await spark_generate(session, prompt, user_input)
    # Try processing with flexible parsing
    success = _process_parameters(session, intent_config, raw)
    if success:
        missing = _get_missing_parameters(session, intent_config)
        log(f"📊 After extraction, missing: {missing}")
    else:
        log("⚠️ Failed to extract parameters from response")
    if missing:
        # Smart parameter collection
        if cfg.global_config.parameter_collection_config.smart_grouping:
            # Reset parameter tracking for new intent
            session.reset_parameter_tracking()
            return await _generate_smart_parameter_question(session, intent_config, missing)
        else:
            # Simple parameter collection
            session.state = "await_param"
            session.awaiting_parameters = missing
            session.missing_ask_count = 0
            param = next(p for p in intent_config.parameters if p.name == missing[0])
            log(f"❓ Asking for parameter: {param.name} ({param.caption})")
            return f"{param.caption} bilgisini alabilir miyim?"
    # All parameters collected
    log("✅ All parameters collected after extraction")
    return await _execute_api_call(session, intent_config)

def _get_missing_parameters(session: Session, intent_config) -> List[str]:
    """Get list of missing required parameters"""
    missing = [
        p.name for p in intent_config.parameters
        if p.required and p.variable_name not in session.variables
    ]
    log(f"📊 Session variables: {list(session.variables.keys())}")
    return missing

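# Expected extraction payload for _process_parameters below (shape implied by the
# parsing code; names/values here are illustrative only):
#   #PARAMETERS: {"extracted": [{"name": "origin", "value": "IST"}]}
# The "#PARAMETERS:" prefix is optional and any text after the first balanced
# JSON object is discarded.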
def _process_parameters(session: Session, intent_config, raw: str) -> bool:
    """Process parameter extraction response with flexible parsing"""
    try:
        # Try to parse JSON, handling both with and without #PARAMETERS: prefix
        json_str = raw
        if raw.startswith("#PARAMETERS:"):
            json_str = raw[len("#PARAMETERS:"):]
            log(f"🔍 Found #PARAMETERS: prefix, removing it")
        # Clean up any trailing content after JSON
        # Find the closing brace for the JSON object
        brace_count = 0
        json_end = -1
        in_string = False
        escape_next = False
        for i, char in enumerate(json_str):
            if escape_next:
                escape_next = False
                continue
            if char == '\\':
                escape_next = True
                continue
            if char == '"' and not escape_next:
                in_string = not in_string
                continue
            if not in_string:
                if char == '{':
                    brace_count += 1
                elif char == '}':
                    brace_count -= 1
                    if brace_count == 0:
                        json_end = i + 1
                        break
        if json_end > 0:
            json_str = json_str[:json_end]
            log(f"🔍 Cleaned JSON string: {json_str[:200]}")
        data = json.loads(json_str)
        extracted = data.get("extracted", [])
        log(f"📦 Extracted data: {extracted}")
        any_valid = False
        for param_data in extracted:
            param_name = param_data.get("name")
            param_value = param_data.get("value")
            if not param_name or not param_value:
                log(f"⚠️ Invalid param data: {param_data}")
                continue
            # Find parameter config
            param_config = next(
                (p for p in intent_config.parameters if p.name == param_name),
                None
            )
            if not param_config:
                log(f"⚠️ Parameter config not found for: {param_name}")
                continue
            # Validate parameter
            if validate(str(param_value), param_config):
                session.variables[param_config.variable_name] = str(param_value)
                any_valid = True
                log(f"✅ Extracted {param_name}={param_value} → {param_config.variable_name}")
            else:
                log(f"❌ Invalid {param_name}={param_value}")
        return any_valid
    except json.JSONDecodeError as e:
        log(f"❌ JSON parsing error: {e}")
        log(f"❌ Failed to parse: {raw[:200]}")
        return False
    except Exception as e:
        log(f"❌ Parameter processing error: {e}")
        return False

# ───────────────────────── API EXECUTION ───────────────────────── #
async def _execute_api_call(session: Session, intent_config) -> str:
    """Execute API call and return humanized response"""
    try:
        session.state = "call_api"
        api_name = intent_config.action
        api_config = cfg.get_api(api_name)
        if not api_config:
            log(f"❌ API config not found: {api_name}")
            session.reset_flow()
            return intent_config.fallback_error_prompt or "İşlem başarısız oldu."
        log(f"📑 Calling API: {api_name}")
        log(f"📦 API variables: {session.variables}")
        # Execute API call with session
        response = execute_api(api_config, session)
        api_json = response.json()
        log(f"✅ API response: {api_json}")
        # Humanize response
        session.state = "humanize"
        if api_config.response_prompt:
            prompt = api_config.response_prompt.replace(
                "{{api_response}}",
                json.dumps(api_json, ensure_ascii=False)
            )
            human_response = await spark_generate(session, prompt, json.dumps(api_json))
            session.reset_flow()
            return human_response if human_response else f"İşlem sonucu: {api_json}"
        else:
            session.reset_flow()
            return f"İşlem tamamlandı: {api_json}"
    except requests.exceptions.Timeout:
        log(f"⏱️ API timeout: {api_name}")
        session.reset_flow()
        return intent_config.fallback_timeout_prompt or "İşlem zaman aşımına uğradı."
    except Exception as e:
        log(f"❌ API call error: {e}")
        session.reset_flow()
        return intent_config.fallback_error_prompt or "İşlem sırasında bir hata oluştu."

# Initialize LLM on module load
setup_llm_provider()
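# Illustrative wiring (an assumption -- the actual FastAPI app is defined elsewhere
# in the repo):
#   from fastapi import FastAPI
#   from chat_handler import router
#   app = FastAPI()
#   app.include_router(router)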