"""
Flare – Chat Handler  (v1.7 · parameter parsing fix)
==========================================
"""

import os
import re, json, sys, httpx
from datetime import datetime
from typing import Dict, List, Optional
from fastapi import APIRouter, HTTPException, Header
from pydantic import BaseModel
import requests

from prompt_builder import build_intent_prompt, build_parameter_prompt, build_smart_parameter_question_prompt, extract_params_from_question
from utils import log
from api_executor import call_api as execute_api  
from validation_engine import validate
from session import session_store, Session
from llm_interface import LLMInterface, SparkLLM, GPT4oLLM
from config_provider import ConfigProvider

# ─────────────────────────  CONFIG ───────────────────────── #
# Global config reference
cfg = None

def get_config():
    """Always get fresh config"""
    global cfg
    cfg = ConfigProvider.get()
    return cfg

# Initialize on module load
cfg = get_config()

# Global LLM instance
llm_provider: Optional[LLMInterface] = None

# ─────────────────────────  HELPERS ───────────────────────── #
def _trim_response(raw: str) -> str:
    """
    Remove everything after the first logical assistant block or intent tag.
    Also strips trailing 'assistant' artifacts and prompt injections.
    """
    # Stop at our own rules if model leaked them
    for stop in ["#DETECTED_INTENT", "⚠️", "\nassistant", "assistant\n", "assistant"]:
        idx = raw.find(stop)
        if idx != -1:
            raw = raw[:idx]
    # Normalise greeting
    raw = re.sub(r"Hoş[\s-]?geldin(iz)?", "Hoş geldiniz", raw, flags=re.IGNORECASE)
    return raw.strip()
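
# Example for _trim_response (illustrative input):
#   _trim_response("Hoş geldin!\nassistant ...") -> "Hoş geldiniz!"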

def _safe_intent_parse(raw: str) -> tuple[str, str]:
    """Extract intent name and extra tail."""
    m = re.search(r"#DETECTED_INTENT:\s*([A-Za-z0-9_-]+)", raw)
    if not m:
        return "", raw
    name = m.group(1)
    # Remove 'assistant' suffix if exists
    if name.endswith("assistant"):
        name = name[:-9]  # Remove last 9 chars ("assistant")
        log(f"🔧 Removed 'assistant' suffix from intent name")
    tail = raw[m.end():]
    log(f"🎯 Parsed intent: {name}")
    return name, tail
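
# Example for _safe_intent_parse (illustrative input):
#   _safe_intent_parse("#DETECTED_INTENT: flight-booking\n<tail>") -> ("flight-booking", "\n<tail>")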

# ─────────────────────────  LLM SETUP ───────────────────────── #
def setup_llm_provider():
    """Initialize LLM provider based on internal_prompt config"""
    global llm_provider
    
    cfg = ConfigProvider.get()
    internal_prompt = cfg.global_config.internal_prompt
    
    if not internal_prompt:
        log("⚠️ No internal_prompt configured, using default Spark")
        # Get Spark token
        spark_token = _get_spark_token()
        if not spark_token:
            log("❌ SPARK_TOKEN not found")
            raise ValueError("SPARK_TOKEN not configured")
        
        spark_endpoint = str(cfg.global_config.spark_endpoint).rstrip("/")
        work_mode = cfg.global_config.work_mode
        
        log(f"🔌 Initializing SparkLLM: {spark_endpoint}")
        log(f"🔧 Work mode: {work_mode}")
        
        llm_provider = SparkLLM(
            spark_endpoint=spark_endpoint,
            spark_token=spark_token,
            work_mode=work_mode
        )
        
        log("✅ SparkLLM initialized")
        return
    
    # Check if it's a GPT-4o config
    if internal_prompt.get("provider") == "gpt-4o":
        api_key = internal_prompt.get("api_key")
        if not api_key:
            if cfg.global_config.is_cloud_mode():
                api_key = os.environ.get("OPENAI_API_KEY")
            else:
                from dotenv import load_dotenv
                load_dotenv()
                api_key = os.getenv("OPENAI_API_KEY")
        
        if not api_key:
            raise ValueError("OpenAI API key not configured")
        
        llm_provider = GPT4oLLM(
            api_key=api_key,
            model=internal_prompt.get("model", "gpt-4o"),
            max_tokens=internal_prompt.get("max_tokens", 4096),
            temperature=internal_prompt.get("temperature", 0.7)
        )
        log("✅ GPT-4o LLM initialized")
    else:
        # Default to Spark
        spark_token = _get_spark_token()
        if not spark_token:
            raise ValueError("SPARK_TOKEN not configured")
        
        spark_endpoint = str(cfg.global_config.spark_endpoint).rstrip("/")
        work_mode = cfg.global_config.work_mode
        
        llm_provider = SparkLLM(
            spark_endpoint=spark_endpoint,
            spark_token=spark_token,
            work_mode=work_mode
        )
        log("✅ SparkLLM initialized (via internal_prompt)")

def _get_spark_token() -> Optional[str]:
    """Get Spark token based on work mode"""
    cfg = ConfigProvider.get()
    
    if cfg.global_config.is_cloud_mode():
        # Cloud mode - use HuggingFace secrets
        token = os.environ.get("SPARK_TOKEN")
        if token:
            log("🔑 Using SPARK_TOKEN from environment")
        return token
    else:
        # On-premise mode - use .env file
        from dotenv import load_dotenv
        load_dotenv()
        return os.getenv("SPARK_TOKEN")

# ─────────────────────────  SPARK/LLM CALL ───────────────────────── #
async def spark_generate(s: Session, prompt: str, user_msg: str) -> str:
    """Call LLM (Spark or configured provider) with proper error handling"""
    global llm_provider
    
    if llm_provider is None:
        setup_llm_provider()
    
    try:
        # Get version config from session
        version = s.get_version_config()
        if not version:
            # Fallback: get from project config
            project = next((p for p in cfg.projects if p.name == s.project_name), None)
            if not project:
                raise ValueError(f"Project not found: {s.project_name}")
            version = next((v for v in project.versions if v.published), None)
            if not version:
                raise ValueError("No published version found")
        
        log(f"🚀 Calling LLM for session {s.session_id[:8]}...")
        log(f"📋 Prompt preview (first 200 chars): {prompt[:200]}...")
        
        # Call the configured LLM provider
        raw = await llm_provider.generate(
            project_name=s.project_name,
            user_input=user_msg,
            system_prompt=prompt,
            context=s.chat_history[-10:],
            version_config=version
        )
        
        log(f"🪄 LLM raw response: {raw[:120]!r}")
        return raw
            
    except httpx.TimeoutException:
        log(f"⏱️ LLM timeout for session {s.session_id[:8]}")
        raise
    except Exception as e:
        log(f"❌ LLM error: {e}")
        raise

# ─────────────────────────  ALLOWED INTENTS ───────────────────────── #
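# Whitelist guarding against hallucinated intent names from the LLM; anything outside
# this set gets a plain reply instead of starting a parameter-collection flow.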
ALLOWED_INTENTS = {"flight-booking", "flight-info", "booking-cancel"}

# ─────────────────────────  FASTAPI ───────────────────────── #
router = APIRouter()

@router.get("/")
def health(): 
    return {"status": "ok", "sessions": len(session_store._sessions)}

class StartRequest(BaseModel): 
    project_name: str

class ChatRequest(BaseModel):  
    user_input: str

class ChatResponse(BaseModel):  
    session_id: str
    answer: str

@router.post("/start_session", response_model=ChatResponse)
async def start_session(req: StartRequest):
    """Create new session"""
    try:
        # Validate project exists
        project = next((p for p in cfg.projects if p.name == req.project_name and p.enabled), None)
        if not project:
            raise HTTPException(404, f"Project '{req.project_name}' not found or disabled")
        
        # Find published version
        version = next((v for v in project.versions if v.published), None)
        if not version:
            raise HTTPException(404, f"No published version for project '{req.project_name}'")
        
        # Create session with version config
        session = session_store.create_session(req.project_name, version)
        greeting = "Hoş geldiniz! Size nasıl yardımcı olabilirim?"
        session.add_turn("assistant", greeting)
        
        return ChatResponse(session_id=session.session_id, answer=greeting)
        
    except HTTPException:
        raise
    except Exception as e:
        log(f"❌ Error creating session: {e}")
        raise HTTPException(500, str(e))

@router.post("/chat", response_model=ChatResponse)
async def chat(body: ChatRequest, x_session_id: str = Header(...)):
    """Process chat message"""
    session = None
    try:
        # Get session
        session = session_store.get_session(x_session_id)
        if not session:
            raise HTTPException(404, "Session not found")
        
        user_input = body.user_input.strip()
        if not user_input:
            raise HTTPException(400, "Empty message")
            
        log(f"💬 User input: {user_input}")
        log(f"📊 Session state: {session.state}, last_intent: {session.last_intent}")
        log(f"📊 Session version: {session.version_number}")
        
        session.add_turn("user", user_input)
        
        # Get version config from session
        version = session.get_version_config()
        if not version:
            raise HTTPException(500, "Version configuration lost")

        # Handle based on state
        if session.state == "await_param":
            log(f"🔄 Handling parameter followup for missing: {session.awaiting_parameters}")
            answer = await _handle_parameter_followup(session, user_input)
        else:
            log("🆕 Handling new message")
            answer = await _handle_new_message(session, user_input)
        
        session.add_turn("assistant", answer)
        return ChatResponse(session_id=session.session_id, answer=answer)
        
    except HTTPException:
        raise
    except Exception as e:
        log(f"❌ Chat error: {e}")
        error_msg = "Bir hata oluştu. Lütfen tekrar deneyin."
        if session:
            session.reset_flow()
            session.add_turn("assistant", error_msg)
        return ChatResponse(session_id=x_session_id, answer=error_msg)
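
# Illustrative client flow (endpoint paths are as registered on this router; the
# project name below is a made-up example):
#   POST /start_session  {"project_name": "demo-project"}
#     -> {"session_id": "<id>", "answer": "Hoş geldiniz! Size nasıl yardımcı olabilirim?"}
#   POST /chat  {"user_input": "..."}  with header  x-session-id: <id>
#     -> {"session_id": "<id>", "answer": "..."}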

# ─────────────────────────  MESSAGE HANDLERS ───────────────────────── #
async def _handle_new_message(session: Session, user_input: str) -> str:
    """Handle new message (not parameter followup)"""
    
    # Get version config from session
    version = session.get_version_config()
    if not version:
        log("❌ Version config not found")
        return "Bir hata oluştu. Lütfen tekrar deneyin."
    
    # Build intent detection prompt
    prompt = build_intent_prompt(
        version.general_prompt,
        session.chat_history,
        user_input,
        version.intents
    )
    
    # Get LLM response
    raw = await spark_generate(session, prompt, user_input)
    
    # Empty response fallback
    if not raw:
        log("⚠️ Empty response from LLM")
        return "Üzgünüm, mesajınızı anlayamadım. Lütfen tekrar dener misiniz?"
    
    # Check for intent
    if not raw.startswith("#DETECTED_INTENT"):
        # Small talk response
        log("💬 No intent detected, returning small talk")
        return _trim_response(raw)
    
    # Parse intent
    intent_name, tail = _safe_intent_parse(raw)
    
    # Validate intent
    if intent_name not in ALLOWED_INTENTS:
        log(f"⚠️ Invalid intent: {intent_name}")
        return _trim_response(tail) if tail else "Size nasıl yardımcı olabilirim?"
    
    # Short message guard (less than 3 words usually means incomplete request)
    if len(user_input.split()) < 3 and intent_name != "flight-info":
        log(f"⚠️ Message too short ({len(user_input.split())} words) for intent {intent_name}")
        return _trim_response(tail) if tail else "Lütfen talebinizi biraz daha detaylandırır mısınız?"
    
    # Find intent config
    intent_config = next((i for i in version.intents if i.name == intent_name), None)
    if not intent_config:
        log(f"❌ Intent config not found for: {intent_name}")
        return "Üzgünüm, bu işlemi gerçekleştiremiyorum."
    
    # Set intent in session
    session.last_intent = intent_name
    log(f"✅ Intent set: {intent_name}")
    
    # Log intent parameters
    log(f"📋 Intent parameters: {[p.name for p in intent_config.parameters]}")
    
    # Extract parameters
    return await _extract_parameters(session, intent_config, user_input)

async def _handle_parameter_followup(session: Session, user_input: str) -> str:
    """Handle parameter collection followup"""
    if not session.last_intent:
        log("⚠️ No last intent in session")
        session.reset_flow()
        return "Üzgünüm, hangi işlem için bilgi istediğimi unuttum. Baştan başlayalım."

    # Get version config from session
    version = session.get_version_config()
    if not version:
        log("❌ Version config not found")
        session.reset_flow()
        return "Bir hata oluştu. Lütfen tekrar deneyin."    
    
    # Get intent config
    intent_config = next((i for i in version.intents if i.name == session.last_intent), None)
    if not intent_config:
        log(f"❌ Intent config not found for: {session.last_intent}")
        session.reset_flow()
        return "Bir hata oluştu. Lütfen tekrar deneyin."
    
    # Smart parameter collection
    if cfg.global_config.parameter_collection_config.smart_grouping:
        return await _handle_smart_parameter_collection(session, intent_config, user_input)
    else:
        return await _handle_simple_parameter_collection(session, intent_config, user_input)

async def _handle_simple_parameter_collection(session: Session, intent_config, user_input: str) -> str:
    """Original simple parameter collection logic"""
    # Try to extract missing parameters
    missing = session.awaiting_parameters
    log(f"🔍 Trying to extract missing params: {missing}")
    
    prompt = build_parameter_prompt(intent_config, missing, user_input, session.chat_history, intent_config.locale)
    raw = await spark_generate(session, prompt, user_input)
    
    # Try parsing with or without #PARAMETERS: prefix
    success = _process_parameters(session, intent_config, raw)
    
    if not success:
        # Increment miss count
        session.missing_ask_count += 1
        log(f"⚠️ No parameters extracted, miss count: {session.missing_ask_count}")
        
        if session.missing_ask_count >= 3:
            session.reset_flow()
            return "Üzgünüm, istediğiniz bilgileri anlayamadım. Başka bir konuda yardımcı olabilir miyim?"
        return "Üzgünüm, anlayamadım. Lütfen tekrar söyler misiniz?"
    
    # Check if we have all required parameters
    missing = _get_missing_parameters(session, intent_config)
    log(f"📊 Still missing params: {missing}")
    
    if missing:
        session.awaiting_parameters = missing
        param = next(p for p in intent_config.parameters if p.name == missing[0])
        return f"{param.caption} bilgisini alabilir miyim?"
    
    # All parameters collected, call API
    log("✅ All parameters collected, calling API")
    session.state = "call_api"
    return await _execute_api_call(session, intent_config)

async def _handle_smart_parameter_collection(session: Session, intent_config, user_input: str) -> str:
    """Smart parameter collection with grouping and retry logic"""
    
    # Try to extract missing parameters
    missing = session.awaiting_parameters
    log(f"🔍 Trying to extract missing params: {missing}")
    
    prompt = build_parameter_prompt(intent_config, missing, user_input, session.chat_history, intent_config.locale)
    raw = await spark_generate(session, prompt, user_input)
    
    # Try parsing with or without #PARAMETERS: prefix
    success = _process_parameters(session, intent_config, raw)
    
    # Which parameters are still missing?
    still_missing = _get_missing_parameters(session, intent_config)
    
    # Determine which asked parameters were left unanswered
    asked_but_not_answered = []
    for param in session.awaiting_parameters:
        if param in still_missing:
            asked_but_not_answered.append(param)
    
    # Record the unanswered ones in the session
    if asked_but_not_answered:
        session.mark_parameters_unanswered(asked_but_not_answered)
        log(f"❓ Parameters not answered: {asked_but_not_answered}")
    
    # Mark the ones that were answered
    for param in session.awaiting_parameters:
        if param not in still_missing:
            session.mark_parameter_answered(param)
            log(f"✅ Parameter answered: {param}")
    
    if still_missing:
        # Enforce the maximum number of attempts
        if session.missing_ask_count >= 3:
            session.reset_flow()
            return "Üzgünüm, istediğiniz bilgileri anlayamadım. Başka bir konuda yardımcı olabilir miyim?"
        
        # Build a smart parameter question
        return await _generate_smart_parameter_question(session, intent_config, still_missing)
    
    # All parameters collected
    log("✅ All parameters collected, calling API")
    session.state = "call_api"
    return await _execute_api_call(session, intent_config)

async def _generate_smart_parameter_question(session: Session, intent_config, missing_params: List[str]) -> str:
    """Generate smart parameter collection question"""
    
    # Decide how many parameters to ask for in one question
    max_params = cfg.global_config.parameter_collection_config.max_params_per_question
    
    # Select parameters in priority order
    params_to_ask = []
    
    # First: parameters that have never been asked
    for param in missing_params:
        if session.get_parameter_ask_count(param) == 0:
            params_to_ask.append(param)
            if len(params_to_ask) >= max_params:
                break
    
    # If there is still room: previously asked but unanswered parameters
    if len(params_to_ask) < max_params and cfg.global_config.parameter_collection_config.retry_unanswered:
        for param in session.unanswered_parameters:
            if param in missing_params and param not in params_to_ask:
                params_to_ask.append(param)
                if len(params_to_ask) >= max_params:
                    break
    
    # If there is still room: any remaining parameters
    if len(params_to_ask) < max_params:
        for param in missing_params:
            if param not in params_to_ask:
                params_to_ask.append(param)
                if len(params_to_ask) >= max_params:
                    break
    
    # Record the asked parameters in the session
    session.record_parameter_question(params_to_ask)
    session.awaiting_parameters = params_to_ask
    session.missing_ask_count += 1
    
    # Build smart question prompt
    collected_params = {
        p.name: session.variables.get(p.variable_name, "")
        for p in intent_config.parameters
        if p.variable_name in session.variables
    }
    
    question_prompt = build_smart_parameter_question_prompt(
        intent_config,
        params_to_ask,
        session.chat_history,
        collected_params,
        session.unanswered_parameters,
        cfg.global_config.parameter_collection_config.collection_prompt
    )
    
    # Generate natural question
    question = await spark_generate(session, question_prompt, "")
    
    # Clean up the response
    question = _trim_response(question)
    
    log(f"🤖 Generated smart question for {params_to_ask}: {question}")
    
    return question

# ─────────────────────────  PARAMETER HANDLING ───────────────────────── #
async def _extract_parameters(session: Session, intent_config, user_input: str) -> str:
    """Extract parameters from user input"""
    missing = _get_missing_parameters(session, intent_config)
    log(f"🔍 Missing parameters: {missing}")
    
    if not missing:
        # All parameters already available
        log("✅ All parameters already available")
        return await _execute_api_call(session, intent_config)
    
    # Build parameter extraction prompt
    prompt = build_parameter_prompt(intent_config, missing, user_input, session.chat_history, intent_config.locale)
    raw = await spark_generate(session, prompt, user_input)
    
    # Try processing with flexible parsing
    success = _process_parameters(session, intent_config, raw)
    
    if success:
        missing = _get_missing_parameters(session, intent_config)
        log(f"📊 After extraction, missing: {missing}")
    else:
        log("⚠️ Failed to extract parameters from response")
    
    if missing:
        # Smart parameter collection
        if cfg.global_config.parameter_collection_config.smart_grouping:
            # Reset parameter tracking for new intent
            session.reset_parameter_tracking()
            return await _generate_smart_parameter_question(session, intent_config, missing)
        else:
            # Simple parameter collection
            session.state = "await_param"
            session.awaiting_parameters = missing
            session.missing_ask_count = 0
            param = next(p for p in intent_config.parameters if p.name == missing[0])
            log(f"❓ Asking for parameter: {param.name} ({param.caption})")
            return f"{param.caption} bilgisini alabilir miyim?"
    
    # All parameters collected
    log("✅ All parameters collected after extraction")
    return await _execute_api_call(session, intent_config)

def _get_missing_parameters(session: Session, intent_config) -> List[str]:
    """Get list of missing required parameters"""
    missing = [
        p.name for p in intent_config.parameters 
        if p.required and p.variable_name not in session.variables
    ]
    log(f"📊 Session variables: {list(session.variables.keys())}")
    return missing

def _process_parameters(session: Session, intent_config, raw: str) -> bool:
    """Process parameter extraction response with flexible parsing"""
    try:
        # Try to parse JSON, handling both with and without #PARAMETERS: prefix
        json_str = raw
        if raw.startswith("#PARAMETERS:"):
            json_str = raw[len("#PARAMETERS:"):]
            log(f"🔍 Found #PARAMETERS: prefix, removing it")
        
        # Clean up any trailing content after JSON
        # Find the closing brace for the JSON object
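        # Track quote/escape state so braces inside JSON string values don't skew the count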
        brace_count = 0
        json_end = -1
        in_string = False
        escape_next = False
        
        for i, char in enumerate(json_str):
            if escape_next:
                escape_next = False
                continue
                
            if char == '\\':
                escape_next = True
                continue
                
            if char == '"' and not escape_next:
                in_string = not in_string
                continue
                
            if not in_string:
                if char == '{':
                    brace_count += 1
                elif char == '}':
                    brace_count -= 1
                    if brace_count == 0:
                        json_end = i + 1
                        break
        
        if json_end > 0:
            json_str = json_str[:json_end]
            log(f"🔍 Cleaned JSON string: {json_str[:200]}")
        
        data = json.loads(json_str)
        
        extracted = data.get("extracted", [])
        log(f"📦 Extracted data: {extracted}")
        
        any_valid = False
        
        for param_data in extracted:
            param_name = param_data.get("name")
            param_value = param_data.get("value")
            
            if not param_name or not param_value:
                log(f"⚠️ Invalid param data: {param_data}")
                continue
            
            # Find parameter config
            param_config = next(
                (p for p in intent_config.parameters if p.name == param_name),
                None
            )
            if not param_config:
                log(f"⚠️ Parameter config not found for: {param_name}")
                continue
            
            # Validate parameter
            if validate(str(param_value), param_config):
                session.variables[param_config.variable_name] = str(param_value)
                any_valid = True
                log(f"✅ Extracted {param_name}={param_value}{param_config.variable_name}")
            else:
                log(f"❌ Invalid {param_name}={param_value}")
        
        return any_valid
        
    except json.JSONDecodeError as e:
        log(f"❌ JSON parsing error: {e}")
        log(f"❌ Failed to parse: {raw[:200]}")
        return False
    except Exception as e:
        log(f"❌ Parameter processing error: {e}")
        return False

# ─────────────────────────  API EXECUTION ───────────────────────── #
async def _execute_api_call(session: Session, intent_config) -> str:
    """Execute API call and return humanized response"""
    try:
        session.state = "call_api"
        api_name = intent_config.action
        api_config = cfg.get_api(api_name)
        
        if not api_config:
            log(f"❌ API config not found: {api_name}")
            session.reset_flow()
            return intent_config.fallback_error_prompt or "İşlem başarısız oldu."
        
        log(f"📡 Calling API: {api_name}")
        log(f"📦 API variables: {session.variables}")
        
        # Execute API call with session
        response = execute_api(api_config, session)
        api_json = response.json()
        log(f"✅ API response: {api_json}")
        
        # Humanize response
        session.state = "humanize"
        if api_config.response_prompt:
            prompt = api_config.response_prompt.replace(
                "{{api_response}}", 
                json.dumps(api_json, ensure_ascii=False)
            )
            human_response = await spark_generate(session, prompt, json.dumps(api_json))
            session.reset_flow()
            return human_response if human_response else f"İşlem sonucu: {api_json}"
        else:
            session.reset_flow()
            return f"İşlem tamamlandı: {api_json}"
            
    except requests.exceptions.Timeout:
        log(f"⏱️ API timeout: {api_name}")
        session.reset_flow()
        return intent_config.fallback_timeout_prompt or "İşlem zaman aşımına uğradı."
    except Exception as e:
        log(f"❌ API call error: {e}")
        session.reset_flow()
        return intent_config.fallback_error_prompt or "İşlem sırasında bir hata oluştu."

# Initialize LLM on module load
setup_llm_provider()