ciyidogan committed on
Commit
341a67b
·
verified Β·
1 Parent(s): 23c8ffc

Update chat_handler.py

Browse files
Files changed (1) hide show
  1. chat_handler.py +226 -267
chat_handler.py CHANGED
@@ -18,9 +18,20 @@ from validation_engine import validate
18
  from session import session_store, Session
19
  from llm_interface import LLMInterface, SparkLLM, GPT4oLLM
20
  from config_provider import ConfigProvider
21
- from locale_manager import LocaleManager
22
 
23
- # ───────────────────────── GLOBAL ───────────────────────── #
 
 
 
 
 
 
 
 
 
 
 
 
24
  # Global LLM instance
25
  llm_provider: Optional[LLMInterface] = None
26
 
@@ -52,83 +63,93 @@ def _safe_intent_parse(raw: str) -> tuple[str, str]:
52
  tail = raw[m.end():]
53
  log(f"🎯 Parsed intent: {name}")
54
  return name, tail
55
-
56
- # ───────────────────────── SPARK ───────────────────────── #
57
- def initialize_llm(force_reload=False):
58
- """Initialize LLM provider based on work_mode"""
59
  global llm_provider
60
 
61
- cfg = ConfigProvider.get()
62
-
63
- work_mode = cfg.global_config.work_mode
64
-
65
- if cfg.global_config.is_gpt_mode():
66
- # GPT mode
67
- api_key = cfg.global_config.get_plain_token()
 
 
 
 
 
 
 
 
 
 
 
 
68
  if not api_key:
69
- raise ValueError("OpenAI API key not configured")
 
 
70
 
71
- model = cfg.global_config.get_gpt_model()
72
  llm_provider = GPT4oLLM(api_key, model)
73
- log(f"βœ… Initialized {model} provider")
74
  else:
75
- # Spark mode
76
- spark_token = _get_spark_token()
77
- if not spark_token:
78
- raise ValueError("Spark token not configured")
79
-
80
- spark_endpoint = str(cfg.global_config.spark_endpoint)
81
- llm_provider = SparkLLM(spark_endpoint, spark_token)
82
- log("βœ… Initialized Spark provider")
83
-
84
- # ───────────────────────── SPARK ───────────────────────── #
85
- def _get_spark_token() -> Optional[str]:
86
- """Get Spark token based on work_mode"""
87
- cfg = ConfigProvider.get()
88
-
89
- work_mode = cfg.global_config.work_mode
90
-
91
- if cfg.global_config.is_cloud_mode():
92
- token = os.getenv("SPARK_TOKEN")
93
- if not token and not cfg.global_config.is_gpt_mode():
94
- # GPT modlarΔ± iΓ§in SPARK_TOKEN gerekmez
95
- log("❌ SPARK_TOKEN not found in cloud Secrets!")
96
- return token
97
- else:
98
- # On-premise mode - use .env file
99
- from dotenv import load_dotenv
100
- load_dotenv()
101
- return os.getenv("SPARK_TOKEN")
102
 
 
103
  async def spark_generate(s: Session, prompt: str, user_msg: str) -> str:
104
- """Call LLM provider with proper error handling"""
 
 
 
 
 
105
  try:
106
- # Always reinitialize to get fresh config
107
- initialize_llm(force_reload=True)
108
-
109
- if not llm_provider:
110
- raise ValueError("Failed to initialize LLM provider")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
111
 
112
- # Use the abstract interface
113
- raw = await llm_provider.generate(prompt, user_msg, s.chat_history)
114
  log(f"πŸͺ„ LLM raw response: {raw[:120]!r}")
115
  return raw
116
-
 
 
 
117
  except Exception as e:
118
  log(f"❌ LLM error: {e}")
119
  raise
120
 
 
 
 
121
  # ───────────────────────── FASTAPI ───────────────────────── #
122
  router = APIRouter()
123
 
124
- @router.get("/health")
125
- def health_check():
126
- """Health check endpoint for monitoring"""
127
- return {
128
- "status": "ok",
129
- "sessions": len(session_store._sessions),
130
- "timestamp": datetime.now().isoformat()
131
- }
132
 
133
  class StartRequest(BaseModel):
134
  project_name: str
@@ -144,19 +165,17 @@ class ChatResponse(BaseModel):
144
  async def start_session(req: StartRequest):
145
  """Create new session"""
146
  try:
147
- cfg = ConfigProvider.get()
148
-
149
  # Validate project exists
150
  project = next((p for p in cfg.projects if p.name == req.project_name and p.enabled), None)
151
  if not project:
152
  raise HTTPException(404, f"Project '{req.project_name}' not found or disabled")
153
-
154
- # Get published version
155
  version = next((v for v in project.versions if v.published), None)
156
  if not version:
157
- raise HTTPException(404, f"No published version found for project '{req.project_name}'")
158
-
159
- # Create session
160
  session = session_store.create_session(req.project_name, version)
161
  greeting = "Hoş geldiniz! Size nasıl yardımcı olabilirim?"
162
  session.add_turn("assistant", greeting)
@@ -194,10 +213,10 @@ async def chat(body: ChatRequest, x_session_id: str = Header(...)):
194
  # Handle based on state
195
  if session.state == "await_param":
196
  log(f"πŸ”„ Handling parameter followup for missing: {session.awaiting_parameters}")
197
- answer = await _handle_parameter_followup(session, user_input) # version parametresi kaldΔ±rΔ±ldΔ±
198
  else:
199
  log("πŸ†• Handling new message")
200
- answer = await _handle_new_message(session, user_input) # version parametresi kaldΔ±rΔ±ldΔ±
201
 
202
  session.add_turn("assistant", answer)
203
  return ChatResponse(session_id=session.session_id, answer=answer)
@@ -226,8 +245,7 @@ async def _handle_new_message(session: Session, user_input: str) -> str:
226
  version.general_prompt,
227
  session.chat_history,
228
  user_input,
229
- version.intents,
230
- session.project_name
231
  )
232
 
233
  # Get LLM response
@@ -247,10 +265,9 @@ async def _handle_new_message(session: Session, user_input: str) -> str:
247
  # Parse intent
248
  intent_name, tail = _safe_intent_parse(raw)
249
 
250
- # Validate intent against version's intents
251
- valid_intents = {intent.name for intent in version.intents}
252
- if intent_name not in valid_intents:
253
- log(f"⚠️ Invalid intent: {intent_name} (valid: {valid_intents})")
254
  return _trim_response(tail) if tail else "Size nasıl yardımcı olabilirim?"
255
 
256
  # Short message guard (less than 3 words usually means incomplete request)
@@ -274,8 +291,8 @@ async def _handle_new_message(session: Session, user_input: str) -> str:
274
  # Extract parameters
275
  return await _extract_parameters(session, intent_config, user_input)
276
 
277
- async def _handle_parameter_followup(session: Session, user_input: str, version) -> str:
278
- """Handle parameter collection followup with smart question generation"""
279
  if not session.last_intent:
280
  log("⚠️ No last intent in session")
281
  session.reset_flow()
@@ -295,6 +312,51 @@ async def _handle_parameter_followup(session: Session, user_input: str, version)
295
  session.reset_flow()
296
  return "Bir hata oluştu. Lütfen tekrar deneyin."
297
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
298
  # Try to extract missing parameters
299
  missing = session.awaiting_parameters
300
  log(f"πŸ” Trying to extract missing params: {missing}")
@@ -331,40 +393,80 @@ async def _handle_parameter_followup(session: Session, user_input: str, version)
331
  session.reset_flow()
332
  return "Üzgünüm, istediğiniz bilgileri anlayamadım. Başka bir konuda yardımcı olabilir miyim?"
333
 
334
- session.missing_ask_count += 1
335
-
336
- # AkΔ±llΔ± soru ΓΌret
337
- question = await _generate_smart_parameter_question(
338
- session, intent_config, still_missing, version
339
- )
340
-
341
- # Sorulan parametreleri tahmin et ve kaydet
342
- params_in_question = extract_params_from_question(question, still_missing, intent_config)
343
- session.record_parameter_question(params_in_question)
344
- session.awaiting_parameters = params_in_question
345
-
346
- log(f"πŸ€– Smart question generated for params: {params_in_question}")
347
- return question
348
 
349
- # All parameters collected, call API
350
  log("βœ… All parameters collected, calling API")
351
  session.state = "call_api"
352
  return await _execute_api_call(session, intent_config)
353
 
354
- # ───────────────────────── PARAMETER HANDLING ───────────────────────── #
355
- async def _extract_parameters(session: Session, intent_config, user_input: str) -> str:
356
- """Extract parameters from user input with smart question generation"""
357
-
358
- # Version config'i al
359
- version = session.get_version_config()
360
- if not version:
361
- log("❌ Version config not found")
362
- return "Bir hata oluştu. Lütfen tekrar deneyin."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
363
 
364
- # Yeni intent iΓ§in parametre takibini sΔ±fΔ±rla
365
- if session.parameter_ask_rounds == 0:
366
- session.reset_parameter_tracking()
367
 
 
 
 
 
 
 
 
 
 
 
368
  missing = _get_missing_parameters(session, intent_config)
369
  log(f"πŸ” Missing parameters: {missing}")
370
 
@@ -374,7 +476,7 @@ async def _extract_parameters(session: Session, intent_config, user_input: str)
374
  return await _execute_api_call(session, intent_config)
375
 
376
  # Build parameter extraction prompt
377
- prompt = build_parameter_prompt(intent_config, missing, user_input, session.chat_history, intent_config.locale)
378
  raw = await spark_generate(session, prompt, user_input)
379
 
380
  # Try processing with flexible parsing
@@ -387,133 +489,24 @@ async def _extract_parameters(session: Session, intent_config, user_input: str)
387
  log("⚠️ Failed to extract parameters from response")
388
 
389
  if missing:
390
- # Still missing parameters - generate smart question
391
- session.state = "await_param"
392
- session.missing_ask_count = 0
393
-
394
- # AkΔ±llΔ± soru ΓΌret
395
- question = await _generate_smart_parameter_question(
396
- session, intent_config, missing, version
397
- )
398
-
399
- # Sorulan parametreleri tahmin et ve kaydet
400
- params_in_question = extract_params_from_question(question, missing, intent_config)
401
- session.record_parameter_question(params_in_question)
402
- session.awaiting_parameters = params_in_question
403
-
404
- log(f"πŸ€– Smart question generated for initial params: {params_in_question}")
405
- return question
406
 
407
  # All parameters collected
408
  log("βœ… All parameters collected after extraction")
409
  return await _execute_api_call(session, intent_config)
410
 
411
- async def _generate_smart_parameter_question(
412
- session: Session,
413
- intent_config,
414
- missing_params: List[str],
415
- version
416
- ) -> str:
417
- """LLM kullanarak doğal parametre sorusu üret"""
418
- cfg = ConfigProvider.get()
419
-
420
- # Config'i al
421
- collection_config = cfg.global_config.parameter_collection_config
422
-
423
- # Γ–ncelik sΔ±ralamasΔ±: ΓΆnce cevaplanmayanlar
424
- prioritized_params = []
425
-
426
- # 1. Daha ΓΆnce sorulup cevaplanmayanlar
427
- for param in session.unanswered_parameters:
428
- if param in missing_params:
429
- prioritized_params.append(param)
430
- log(f"πŸ” Priority param (unanswered): {param}")
431
-
432
- # 2. Hiç sorulmamışlar
433
- for param in missing_params:
434
- if param not in prioritized_params:
435
- prioritized_params.append(param)
436
- log(f"βž• Additional param (not asked): {param}")
437
-
438
- # Maksimum parametre sayΔ±sΔ±nΔ± belirle
439
- max_params = min(
440
- len(prioritized_params),
441
- collection_config.max_params_per_question
442
- )
443
- params_to_ask = prioritized_params[:max_params]
444
-
445
- log(f"πŸ“‹ Params to ask in this round: {params_to_ask}")
446
-
447
- # Proje dilini belirle
448
- project = next((p for p in cfg.projects if p.name == session.project_name), None)
449
- if project and hasattr(project, 'default_language'):
450
- # default_language locale code'dur (tr-TR, en-US vb.)
451
- project_locale_code = project.default_language
452
- # Locale'den dil adΔ±nΔ± al
453
- locale_data = LocaleManager.get_locale(project_locale_code)
454
- project_language = locale_data.get('name', 'Turkish')
455
- else:
456
- project_locale_code = 'tr-TR'
457
- project_language = 'Turkish'
458
-
459
- log(f"πŸ“‹ Project locale: {project_locale_code}, language: {project_language}")
460
-
461
- # Prompt oluştur
462
- prompt = build_smart_parameter_question_prompt(
463
- collection_config,
464
- intent_config,
465
- params_to_ask, # Sadece bu turda sorulacak parametreler
466
- session,
467
- project_language,
468
- intent_config.locale # Locale code'u da gΓΆnder
469
- )
470
-
471
- # LLM'den soru ΓΌret
472
- response = await spark_generate(session, prompt, "")
473
-
474
- # Güvenlik: Eğer LLM boş veya hatalı response verirse fallback
475
- if not response or len(response.strip()) < 10:
476
- log("⚠️ Empty or invalid response from LLM, using fallback")
477
- # En yΓΌksek ΓΆncelikli parametre iΓ§in fallback soru
478
- param = params_to_ask[0]
479
- param_config = next(p for p in intent_config.parameters if p.name == param)
480
-
481
- # Intent'in locale'ini kullan
482
- intent_locale_code = getattr(intent_config, 'locale', project_locale_code)
483
- locale_data = LocaleManager.get_locale(intent_locale_code)
484
-
485
- # Parametrenin kaç kez sorulduğuna göre farklı fallback
486
- ask_count = session.get_parameter_ask_count(param)
487
- fallback_questions = locale_data.get('parameter_collection', {}).get('fallback_questions', {})
488
-
489
- # Caption'Δ± kΓΌΓ§ΓΌk harfe Γ§evir (TΓΌrkΓ§e iΓ§in ΓΆzel)
490
- caption = param_config.caption
491
- if intent_locale_code.startswith('tr'):
492
- caption_lower = caption.lower()
493
- else:
494
- caption_lower = caption
495
-
496
- if ask_count == 0:
497
- template = fallback_questions.get('first_ask', '{caption} bilgisini alabilir miyim?')
498
- return template.replace('{caption}', caption)
499
- elif ask_count == 1:
500
- template = fallback_questions.get('second_ask', 'Lütfen {caption} bilgisini paylaşır mısınız?')
501
- return template.replace('{caption}', caption_lower)
502
- else:
503
- template = fallback_questions.get('third_ask', 'Devam edebilmem iΓ§in {caption} bilgisine ihtiyacΔ±m var.')
504
- return template.replace('{caption}', caption_lower)
505
-
506
- # Response'u temizle
507
- clean_response = response.strip()
508
-
509
- # Eğer response yanlışlıkla başka şeyler içeriyorsa temizle
510
- if "#" in clean_response or "assistant:" in clean_response.lower():
511
- # Δ°lk satΔ±rΔ± al
512
- clean_response = clean_response.split('\n')[0].strip()
513
-
514
- log(f"πŸ’¬ Generated smart question: {clean_response[:100]}...")
515
- return clean_response
516
-
517
  def _get_missing_parameters(session: Session, intent_config) -> List[str]:
518
  """Get list of missing required parameters"""
519
  missing = [
@@ -588,16 +581,6 @@ def _process_parameters(session: Session, intent_config, raw: str) -> bool:
588
  if not param_config:
589
  log(f"⚠️ Parameter config not found for: {param_name}")
590
  continue
591
-
592
- # Date tipi iΓ§in ΓΆzel kontrol
593
- if param_config.type == "date":
594
- try:
595
- # ISO format kontrolΓΌ
596
- from datetime import datetime
597
- datetime.strptime(str(param_value), "%Y-%m-%d")
598
- except ValueError:
599
- log(f"❌ Invalid date format for {param_name}: {param_value}")
600
- continue
601
 
602
  # Validate parameter
603
  if validate(str(param_value), param_config):
@@ -612,42 +595,17 @@ def _process_parameters(session: Session, intent_config, raw: str) -> bool:
612
  except json.JSONDecodeError as e:
613
  log(f"❌ JSON parsing error: {e}")
614
  log(f"❌ Failed to parse: {raw[:200]}")
615
-
616
- # Fallback: Try to extract simple values from user input
617
- # This is especially useful for single parameter responses
618
- if session.state == "await_param" and len(session.awaiting_parameters) > 0:
619
- # Get the first missing parameter
620
- first_missing = session.awaiting_parameters[0]
621
- param_config = next(
622
- (p for p in intent_config.parameters if p.name == first_missing),
623
- None
624
- )
625
-
626
- if param_config and session.chat_history:
627
- # Get the last user input
628
- last_user_input = session.chat_history[-1].get("content", "").strip()
629
-
630
- # For simple inputs like city names, try direct assignment
631
- if param_config.type in ["str", "string"] and len(last_user_input.split()) <= 3:
632
- if validate(last_user_input, param_config):
633
- session.variables[param_config.variable_name] = last_user_input
634
- log(f"βœ… Fallback extraction: {first_missing}={last_user_input}")
635
- return True
636
-
637
  return False
638
  except Exception as e:
639
  log(f"❌ Parameter processing error: {e}")
640
  return False
641
-
642
  # ───────────────────────── API EXECUTION ───────────────────────── #
643
  async def _execute_api_call(session: Session, intent_config) -> str:
644
  """Execute API call and return humanized response"""
645
  try:
646
  session.state = "call_api"
647
  api_name = intent_config.action
648
-
649
- cfg = ConfigProvider.get()
650
-
651
  api_config = cfg.get_api(api_name)
652
 
653
  if not api_config:
@@ -671,10 +629,8 @@ async def _execute_api_call(session: Session, intent_config) -> str:
671
  json.dumps(api_json, ensure_ascii=False)
672
  )
673
  human_response = await spark_generate(session, prompt, json.dumps(api_json))
674
- # Trim response to remove any trailing "assistant" artifacts
675
- trimmed_response = _trim_response(human_response)
676
  session.reset_flow()
677
- return trimmed_response if trimmed_response else f"İşlem sonucu: {api_json}"
678
  else:
679
  session.reset_flow()
680
  return f"İşlem tamamlandΔ±: {api_json}"
@@ -686,4 +642,7 @@ async def _execute_api_call(session: Session, intent_config) -> str:
686
  except Exception as e:
687
  log(f"❌ API call error: {e}")
688
  session.reset_flow()
689
- return intent_config.fallback_error_prompt or "İşlem sırasında bir hata oluştu."
 
 
 
 
18
  from session import session_store, Session
19
  from llm_interface import LLMInterface, SparkLLM, GPT4oLLM
20
  from config_provider import ConfigProvider
 
21
 
22
+ # ───────────────────────── CONFIG ───────────────────────── #
23
+ # Global config reference
24
+ cfg = None
25
+
26
+ def get_config():
27
+ """Always get fresh config"""
28
+ global cfg
29
+ cfg = ConfigProvider.get()
30
+ return cfg
31
+
32
+ # Initialize on module load
33
+ cfg = get_config()
34
+
35
  # Global LLM instance
36
  llm_provider: Optional[LLMInterface] = None
37
 
 
63
  tail = raw[m.end():]
64
  log(f"🎯 Parsed intent: {name}")
65
  return name, tail
66
+
67
+ # ───────────────────────── LLM SETUP ───────────────────────── #
68
+ def setup_llm_provider():
69
+ """Initialize LLM provider based on internal_prompt config"""
70
  global llm_provider
71
 
72
+ cfg = ConfigProvider.get()  # Her zaman güncel config'i al
73
+ internal_prompt = cfg.global_config.internal_prompt
74
+ if not internal_prompt:
75
+ log("⚠️ No internal_prompt configured, using default Spark")
76
+ llm_provider = SparkLLM(cfg)
77
+ return
78
+
79
+ # Parse internal prompt format: "provider:model"
80
+ parts = internal_prompt.split(":", 1)
81
+ if len(parts) != 2:
82
+ log(f"⚠️ Invalid internal_prompt format: {internal_prompt}, using Spark")
83
+ llm_provider = SparkLLM(cfg)
84
+ return
85
+
86
+ provider, model = parts[0].lower(), parts[1]
87
+
88
+ if provider == "openai":
89
+ # Get API key from environment
90
+ api_key = os.getenv("OPENAI_API_KEY")
91
  if not api_key:
92
+ log("❌ OPENAI_API_KEY not found in environment")
93
+ llm_provider = SparkLLM(cfg)
94
+ return
95
 
96
+ log(f"πŸ€– Using OpenAI with model: {model}")
97
  llm_provider = GPT4oLLM(api_key, model)
 
98
  else:
99
+ log(f"⚠️ Unknown provider: {provider}, using Spark")
100
+ llm_provider = SparkLLM(cfg)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
101
 
102
+ # ───────────────────────── SPARK/LLM CALL ───────────────────────── #
103
  async def spark_generate(s: Session, prompt: str, user_msg: str) -> str:
104
+ """Call LLM (Spark or configured provider) with proper error handling"""
105
+ global llm_provider
106
+
107
+ if llm_provider is None:
108
+ setup_llm_provider()
109
+
110
  try:
111
+ # Get version config from session
112
+ version = s.get_version_config()
113
+ if not version:
114
+ # Fallback: get from project config
115
+ project = next((p for p in cfg.projects if p.name == s.project_name), None)
116
+ if not project:
117
+ raise ValueError(f"Project not found: {s.project_name}")
118
+ version = next((v for v in project.versions if v.published), None)
119
+ if not version:
120
+ raise ValueError("No published version found")
121
+
122
+ log(f"πŸš€ Calling LLM for session {s.session_id[:8]}...")
123
+ log(f"πŸ“‹ Prompt preview (first 200 chars): {prompt[:200]}...")
124
+
125
+ # Call the configured LLM provider
126
+ raw = await llm_provider.generate(
127
+ project_name=s.project_name,
128
+ user_input=user_msg,
129
+ system_prompt=prompt,
130
+ context=s.chat_history[-10:],
131
+ version_config=version
132
+ )
133
 
 
 
134
  log(f"πŸͺ„ LLM raw response: {raw[:120]!r}")
135
  return raw
136
+
137
+ except httpx.TimeoutException:
138
+ log(f"⏱️ LLM timeout for session {s.session_id[:8]}")
139
+ raise
140
  except Exception as e:
141
  log(f"❌ LLM error: {e}")
142
  raise
143
 
144
+ # ───────────────────────── ALLOWED INTENTS ───────────────────────── #
145
+ ALLOWED_INTENTS = {"flight-booking", "flight-info", "booking-cancel"}
146
+
147
  # ───────────────────────── FASTAPI ───────────────────────── #
148
  router = APIRouter()
149
 
150
+ @router.get("/")
151
+ def health():
152
+ return {"status": "ok", "sessions": len(session_store._sessions)}
 
 
 
 
 
153
 
154
  class StartRequest(BaseModel):
155
  project_name: str
 
165
  async def start_session(req: StartRequest):
166
  """Create new session"""
167
  try:
 
 
168
  # Validate project exists
169
  project = next((p for p in cfg.projects if p.name == req.project_name and p.enabled), None)
170
  if not project:
171
  raise HTTPException(404, f"Project '{req.project_name}' not found or disabled")
172
+
173
+ # Find published version
174
  version = next((v for v in project.versions if v.published), None)
175
  if not version:
176
+ raise HTTPException(404, f"No published version for project '{req.project_name}'")
177
+
178
+ # Create session with version config
179
  session = session_store.create_session(req.project_name, version)
180
  greeting = "Hoş geldiniz! Size nasıl yardımcı olabilirim?"
181
  session.add_turn("assistant", greeting)
 
213
  # Handle based on state
214
  if session.state == "await_param":
215
  log(f"πŸ”„ Handling parameter followup for missing: {session.awaiting_parameters}")
216
+ answer = await _handle_parameter_followup(session, user_input)
217
  else:
218
  log("πŸ†• Handling new message")
219
+ answer = await _handle_new_message(session, user_input)
220
 
221
  session.add_turn("assistant", answer)
222
  return ChatResponse(session_id=session.session_id, answer=answer)
 
245
  version.general_prompt,
246
  session.chat_history,
247
  user_input,
248
+ version.intents
 
249
  )
250
 
251
  # Get LLM response
 
265
  # Parse intent
266
  intent_name, tail = _safe_intent_parse(raw)
267
 
268
+ # Validate intent
269
+ if intent_name not in ALLOWED_INTENTS:
270
+ log(f"⚠️ Invalid intent: {intent_name}")
 
271
  return _trim_response(tail) if tail else "Size nasΔ±l yardΔ±mcΔ± olabilirim?"
272
 
273
  # Short message guard (less than 3 words usually means incomplete request)
 
291
  # Extract parameters
292
  return await _extract_parameters(session, intent_config, user_input)
293
 
294
+ async def _handle_parameter_followup(session: Session, user_input: str) -> str:
295
+ """Handle parameter collection followup"""
296
  if not session.last_intent:
297
  log("⚠️ No last intent in session")
298
  session.reset_flow()
 
312
  session.reset_flow()
313
  return "Bir hata oluştu. Lütfen tekrar deneyin."
314
 
315
+ # Smart parameter collection
316
+ if cfg.global_config.parameter_collection_config.smart_grouping:
317
+ return await _handle_smart_parameter_collection(session, intent_config, user_input)
318
+ else:
319
+ return await _handle_simple_parameter_collection(session, intent_config, user_input)
320
+
321
+ async def _handle_simple_parameter_collection(session: Session, intent_config, user_input: str) -> str:
322
+ """Original simple parameter collection logic"""
323
+ # Try to extract missing parameters
324
+ missing = session.awaiting_parameters
325
+ log(f"πŸ” Trying to extract missing params: {missing}")
326
+
327
+ prompt = build_parameter_prompt(intent_config, missing, user_input, session.chat_history, intent_config.locale)
328
+ raw = await spark_generate(session, prompt, user_input)
329
+
330
+ # Try parsing with or without #PARAMETERS: prefix
331
+ success = _process_parameters(session, intent_config, raw)
332
+
333
+ if not success:
334
+ # Increment miss count
335
+ session.missing_ask_count += 1
336
+ log(f"⚠️ No parameters extracted, miss count: {session.missing_ask_count}")
337
+
338
+ if session.missing_ask_count >= 3:
339
+ session.reset_flow()
340
+ return "Üzgünüm, istediğiniz bilgileri anlayamadım. Başka bir konuda yardımcı olabilir miyim?"
341
+ return "Üzgünüm, anlayamadım. Lütfen tekrar söyler misiniz?"
342
+
343
+ # Check if we have all required parameters
344
+ missing = _get_missing_parameters(session, intent_config)
345
+ log(f"πŸ“Š Still missing params: {missing}")
346
+
347
+ if missing:
348
+ session.awaiting_parameters = missing
349
+ param = next(p for p in intent_config.parameters if p.name == missing[0])
350
+ return f"{param.caption} bilgisini alabilir miyim?"
351
+
352
+ # All parameters collected, call API
353
+ log("βœ… All parameters collected, calling API")
354
+ session.state = "call_api"
355
+ return await _execute_api_call(session, intent_config)
356
+
357
+ async def _handle_smart_parameter_collection(session: Session, intent_config, user_input: str) -> str:
358
+ """Smart parameter collection with grouping and retry logic"""
359
+
360
  # Try to extract missing parameters
361
  missing = session.awaiting_parameters
362
  log(f"πŸ” Trying to extract missing params: {missing}")
 
393
  session.reset_flow()
394
  return "Üzgünüm, istediğiniz bilgileri anlayamadım. Başka bir konuda yardımcı olabilir miyim?"
395
 
396
+ # Smart parameter question oluştur
397
+ return await _generate_smart_parameter_question(session, intent_config, still_missing)
 
 
 
 
 
 
 
 
 
 
 
 
398
 
399
+ # TΓΌm parametreler toplandΔ±
400
  log("βœ… All parameters collected, calling API")
401
  session.state = "call_api"
402
  return await _execute_api_call(session, intent_config)
403
 
404
+ async def _generate_smart_parameter_question(session: Session, intent_config, missing_params: List[str]) -> str:
405
+ """Generate smart parameter collection question"""
406
+
407
+ # Kaç parametre soracağımızı belirle
408
+ max_params = cfg.global_config.parameter_collection_config.max_params_per_question
409
+
410
+ # Γ–ncelik sΔ±rasΔ±na gΓΆre parametreleri seΓ§
411
+ params_to_ask = []
412
+
413
+ # Önce daha önce sorulmamış parametreler
414
+ for param in missing_params:
415
+ if session.get_parameter_ask_count(param) == 0:
416
+ params_to_ask.append(param)
417
+ if len(params_to_ask) >= max_params:
418
+ break
419
+
420
+ # Hala yer varsa, daha önce sorulmuş ama cevaplanmamış parametreler
421
+ if len(params_to_ask) < max_params and cfg.global_config.parameter_collection_config.retry_unanswered:
422
+ for param in session.unanswered_parameters:
423
+ if param in missing_params and param not in params_to_ask:
424
+ params_to_ask.append(param)
425
+ if len(params_to_ask) >= max_params:
426
+ break
427
+
428
+ # Hala yer varsa, kalan parametreler
429
+ if len(params_to_ask) < max_params:
430
+ for param in missing_params:
431
+ if param not in params_to_ask:
432
+ params_to_ask.append(param)
433
+ if len(params_to_ask) >= max_params:
434
+ break
435
+
436
+ # Parametreleri session'a kaydet
437
+ session.record_parameter_question(params_to_ask)
438
+ session.awaiting_parameters = params_to_ask
439
+ session.missing_ask_count += 1
440
+
441
+ # Build smart question prompt
442
+ collected_params = {
443
+ p.name: session.variables.get(p.variable_name, "")
444
+ for p in intent_config.parameters
445
+ if p.variable_name in session.variables
446
+ }
447
+
448
+ question_prompt = build_smart_parameter_question_prompt(
449
+ intent_config,
450
+ params_to_ask,
451
+ session.chat_history,
452
+ collected_params,
453
+ session.unanswered_parameters,
454
+ cfg.global_config.parameter_collection_config.collection_prompt
455
+ )
456
 
457
+ # Generate natural question
458
+ question = await spark_generate(session, question_prompt, "")
 
459
 
460
+ # Clean up the response
461
+ question = _trim_response(question)
462
+
463
+ log(f"πŸ€– Generated smart question for {params_to_ask}: {question}")
464
+
465
+ return question
466
+
467
+ # ───────────────────────── PARAMETER HANDLING ───────────────────────── #
468
+ async def _extract_parameters(session: Session, intent_config, user_input: str) -> str:
469
+ """Extract parameters from user input"""
470
  missing = _get_missing_parameters(session, intent_config)
471
  log(f"πŸ” Missing parameters: {missing}")
472
 
 
476
  return await _execute_api_call(session, intent_config)
477
 
478
  # Build parameter extraction prompt
479
+ prompt = build_parameter_prompt(intent_config, missing, user_input, session.chat_history)
480
  raw = await spark_generate(session, prompt, user_input)
481
 
482
  # Try processing with flexible parsing
 
489
  log("⚠️ Failed to extract parameters from response")
490
 
491
  if missing:
492
+ # Smart parameter collection
493
+ if cfg.global_config.parameter_collection_config.smart_grouping:
494
+ # Reset parameter tracking for new intent
495
+ session.reset_parameter_tracking()
496
+ return await _generate_smart_parameter_question(session, intent_config, missing)
497
+ else:
498
+ # Simple parameter collection
499
+ session.state = "await_param"
500
+ session.awaiting_parameters = missing
501
+ session.missing_ask_count = 0
502
+ param = next(p for p in intent_config.parameters if p.name == missing[0])
503
+ log(f"❓ Asking for parameter: {param.name} ({param.caption})")
504
+ return f"{param.caption} bilgisini alabilir miyim?"
 
 
 
505
 
506
  # All parameters collected
507
  log("βœ… All parameters collected after extraction")
508
  return await _execute_api_call(session, intent_config)
509
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
510
  def _get_missing_parameters(session: Session, intent_config) -> List[str]:
511
  """Get list of missing required parameters"""
512
  missing = [
 
581
  if not param_config:
582
  log(f"⚠️ Parameter config not found for: {param_name}")
583
  continue
 
 
 
 
 
 
 
 
 
 
584
 
585
  # Validate parameter
586
  if validate(str(param_value), param_config):
 
595
  except json.JSONDecodeError as e:
596
  log(f"❌ JSON parsing error: {e}")
597
  log(f"❌ Failed to parse: {raw[:200]}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
598
  return False
599
  except Exception as e:
600
  log(f"❌ Parameter processing error: {e}")
601
  return False
602
+
603
  # ───────────────────────── API EXECUTION ───────────────────────── #
604
  async def _execute_api_call(session: Session, intent_config) -> str:
605
  """Execute API call and return humanized response"""
606
  try:
607
  session.state = "call_api"
608
  api_name = intent_config.action
 
 
 
609
  api_config = cfg.get_api(api_name)
610
 
611
  if not api_config:
 
629
  json.dumps(api_json, ensure_ascii=False)
630
  )
631
  human_response = await spark_generate(session, prompt, json.dumps(api_json))
 
 
632
  session.reset_flow()
633
+ return human_response if human_response else f"İşlem sonucu: {api_json}"
634
  else:
635
  session.reset_flow()
636
  return f"İşlem tamamlandΔ±: {api_json}"
 
642
  except Exception as e:
643
  log(f"❌ API call error: {e}")
644
  session.reset_flow()
645
+ return intent_config.fallback_error_prompt or "İşlem sırasında bir hata oluştu."
646
+
647
+ # Initialize LLM on module load
648
+ setup_llm_provider()