ciyidogan committed on
Commit 9fa5636 · verified · 1 Parent(s): ab7e98d

Update chat_handler.py

Files changed (1)
  1. chat_handler.py +38 -55
chat_handler.py CHANGED
@@ -17,6 +17,10 @@ from api_executor import call_api as execute_api
 from config_provider import ConfigProvider
 from validation_engine import validate
 from session import session_store, Session
+from llm_interface import LLMInterface, SparkLLM, GPT4oLLM
+
+# Global LLM instance
+llm_provider: Optional[LLMInterface] = None
 
 # ───────────────────────── HELPERS ───────────────────────── #
 def _trim_response(raw: str) -> str:
@@ -52,6 +56,32 @@ cfg = ConfigProvider.get()
 SPARK_URL = str(cfg.global_config.spark_endpoint).rstrip("/")
 ALLOWED_INTENTS = {"flight-booking", "flight-info", "booking-cancel"}
 
+# ───────────────────────── SPARK ───────────────────────── #
+def initialize_llm():
+    """Initialize LLM provider based on work_mode"""
+    global llm_provider
+
+    work_mode = cfg.global_config.work_mode
+
+    if cfg.global_config.is_gpt_mode():
+        # GPT mode
+        api_key = cfg.global_config.get_plain_token()
+        if not api_key:
+            raise ValueError("OpenAI API key not configured")
+
+        model = cfg.global_config.get_gpt_model()
+        llm_provider = GPT4oLLM(api_key, model)
+        log(f"✅ Initialized {model} provider")
+    else:
+        # Spark mode
+        spark_token = _get_spark_token()
+        if not spark_token:
+            raise ValueError("Spark token not configured")
+
+        spark_endpoint = str(cfg.global_config.spark_endpoint)
+        llm_provider = SparkLLM(spark_endpoint, spark_token)
+        log("✅ Initialized Spark provider")
+
 # ───────────────────────── SPARK ───────────────────────── #
 def _get_spark_token() -> Optional[str]:
     """Get Spark token based on work_mode"""
@@ -68,65 +98,18 @@ def _get_spark_token() -> Optional[str]:
     return os.getenv("SPARK_TOKEN")
 
 async def spark_generate(s: Session, prompt: str, user_msg: str) -> str:
-    """Call Spark with proper error handling"""
+    """Call LLM provider with proper error handling"""
     try:
-        project = next((p for p in cfg.projects if p.name == s.project_name), None)
-        if not project:
-            raise ValueError(f"Project not found: {s.project_name}")
-
-        version = next((v for v in project.versions if v.published), None)
-        if not version:
-            raise ValueError("No published version found")
-
-        # Get Spark token
-        spark_token = _get_spark_token()
-        if not spark_token:
-            log("❌ SPARK_TOKEN not configured!")
-            raise ValueError("Spark authentication token not configured")
-
-        # Prepare headers with authorization
-        headers = {
-            "Authorization": f"Bearer {spark_token}",
-            "Content-Type": "application/json"
-        }
-
-        # Prepare the payload to send to Spark
-        payload = {
-            "project_name": s.project_name,
-            "system_prompt": prompt,
-            "user_input": user_msg,
-            "context": s.chat_history[-10:]
-        }
+        if not llm_provider:
+            initialize_llm()
 
-        log(f"🚀 Calling Spark for session {s.session_id[:8]}...")
-        log(f"📋 Prompt preview (first 200 chars): {prompt[:200]}...")
+        # Use the abstract interface
+        raw = await llm_provider.generate(prompt, user_msg, s.chat_history)
+        log(f"🪄 LLM raw response: {raw[:120]!r}")
+        return raw
 
-        # Send the request to the correct endpoint
-        spark_url = SPARK_URL + "/generate"
-        log(f"🌐 Spark URL: {spark_url}")
-
-        async with httpx.AsyncClient(timeout=60) as client:
-            response = await client.post(spark_url, json=payload, headers=headers)
-            response.raise_for_status()
-            data = response.json()
-
-        # Parse the response from Spark
-        raw = data.get("model_answer", "").strip()
-        if not raw:
-            # Fallback to other possible fields
-            raw = (data.get("assistant") or data.get("text", "")).strip()
-
-        log(f"🪄 Spark raw: {raw[:120]!r}")
-        return raw
-
-    except httpx.HTTPStatusError as e:
-        log(f"❌ Spark HTTP error: {e.response.status_code} - {e.response.text}")
-        raise
-    except httpx.TimeoutException:
-        log(f"⏱️ Spark timeout for session {s.session_id[:8]}")
-        raise
     except Exception as e:
-        log(f"❌ Spark error: {e}")
+        log(f"❌ LLM error: {e}")
         raise
 
 # ───────────────────────── FASTAPI ───────────────────────── #
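
Note on the new dependency: llm_interface is imported here but is not part of this commit, so the shape of LLMInterface, SparkLLM, and GPT4oLLM has to be inferred from how they are constructed and called above. Below is a minimal sketch of what that module could look like, assuming chat_history entries are already role/content dicts, that SparkLLM keeps the old POST <spark_endpoint>/generate contract, and that GPT4oLLM wraps the official openai async client; apart from the three class names and the generate() signature implied by the diff, every detail is an assumption, not the actual implementation.

# llm_interface.py -- hypothetical sketch, not part of this commit
from abc import ABC, abstractmethod
from typing import Dict, List

import httpx
from openai import AsyncOpenAI


class LLMInterface(ABC):
    """Common contract used by chat_handler.spark_generate()."""

    @abstractmethod
    async def generate(self, system_prompt: str, user_input: str, context: List[Dict]) -> str:
        """Return the raw model answer for a single turn."""


class SparkLLM(LLMInterface):
    """Adapter for the Spark /generate endpoint, mirroring the removed inline code."""

    def __init__(self, spark_endpoint: str, spark_token: str):
        self.endpoint = spark_endpoint.rstrip("/")
        self.token = spark_token

    async def generate(self, system_prompt: str, user_input: str, context: List[Dict]) -> str:
        # NOTE: the removed inline code also sent "project_name"; the real SparkLLM
        # may need it passed via __init__ since generate() no longer sees the session.
        payload = {
            "system_prompt": system_prompt,
            "user_input": user_input,
            "context": context[-10:],  # same 10-message window the old code sent
        }
        headers = {
            "Authorization": f"Bearer {self.token}",
            "Content-Type": "application/json",
        }
        async with httpx.AsyncClient(timeout=60) as client:
            response = await client.post(f"{self.endpoint}/generate", json=payload, headers=headers)
            response.raise_for_status()
            data = response.json()
        # Same fallback chain as before: model_answer -> assistant -> text
        return (data.get("model_answer") or data.get("assistant") or data.get("text", "")).strip()


class GPT4oLLM(LLMInterface):
    """Adapter for the OpenAI Chat Completions API."""

    def __init__(self, api_key: str, model: str):
        self.client = AsyncOpenAI(api_key=api_key)
        self.model = model

    async def generate(self, system_prompt: str, user_input: str, context: List[Dict]) -> str:
        messages = [{"role": "system", "content": system_prompt}]
        messages.extend(context[-10:])  # assumes history items are {"role": ..., "content": ...}
        messages.append({"role": "user", "content": user_input})
        resp = await self.client.chat.completions.create(model=self.model, messages=messages)
        return (resp.choices[0].message.content or "").strip()

Under that assumption the refactor reads cleanly: initialize_llm() decides once, based on work_mode, which concrete provider to build, and spark_generate() stays provider-agnostic, lazily initializing the provider on first use if nothing did so at startup.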