ciyidogan committed on
Commit
535efdd
·
verified ·
1 Parent(s): 97d64d5

Update chat_handler.py

Browse files
Files changed (1) hide show
  1. chat_handler.py +40 -22
chat_handler.py CHANGED
@@ -52,39 +52,52 @@ def _safe_intent_parse(raw: str) -> tuple[str, str]:
52
  return name, tail
53
 
54
  # ───────────────────────── CONFIG ───────────────────────── #
55
- cfg = ConfigProvider.get()
56
  SPARK_URL = str(cfg.global_config.spark_endpoint).rstrip("/")
57
 
 
 
 
 
 
 
 
 
58
  # ───────────────────────── SPARK ───────────────────────── #
59
- def initialize_llm():
60
  """Initialize LLM provider based on work_mode"""
61
  global llm_provider
62
 
63
- work_mode = cfg.global_config.work_mode
64
-
65
- if cfg.global_config.is_gpt_mode():
66
- # GPT mode
67
- api_key = cfg.global_config.get_plain_token()
68
- if not api_key:
69
- raise ValueError("OpenAI API key not configured")
70
 
71
- model = cfg.global_config.get_gpt_model()
72
- llm_provider = GPT4oLLM(api_key, model)
73
- log(f"βœ… Initialized {model} provider")
74
- else:
75
- # Spark mode
76
- spark_token = _get_spark_token()
77
- if not spark_token:
78
- raise ValueError("Spark token not configured")
79
 
80
- spark_endpoint = str(cfg.global_config.spark_endpoint)
81
- llm_provider = SparkLLM(spark_endpoint, spark_token)
82
- log("βœ… Initialized Spark provider")
 
 
 
 
 
 
 
 
 
83
 
84
  # ───────────────────────── SPARK ───────────────────────── #
85
  def _get_spark_token() -> Optional[str]:
86
  """Get Spark token based on work_mode"""
87
- if cfg.global_config.is_cloud_mode():
 
 
 
88
  # Cloud mode - use HuggingFace Secrets
89
  token = os.getenv("SPARK_TOKEN")
90
  if not token:
@@ -99,8 +112,11 @@ def _get_spark_token() -> Optional[str]:
99
  async def spark_generate(s: Session, prompt: str, user_msg: str) -> str:
100
  """Call LLM provider with proper error handling"""
101
  try:
 
 
 
102
  if not llm_provider:
103
- initialize_llm()
104
 
105
  # Use the abstract interface
106
  raw = await llm_provider.generate(prompt, user_msg, s.chat_history)
@@ -203,6 +219,8 @@ async def chat(body: ChatRequest, x_session_id: str = Header(...)):
203
  # ───────────────────────── MESSAGE HANDLERS ───────────────────────── #
204
  async def _handle_new_message(session: Session, user_input: str, version) -> str:
205
  """Handle new message (not parameter followup)"""
 
 
206
  # Build intent detection prompt
207
  prompt = build_intent_prompt(
208
  version.general_prompt,
 
52
  return name, tail
53
 
54
  # ───────────────────────── CONFIG ───────────────────────── #
 
55
  SPARK_URL = str(cfg.global_config.spark_endpoint).rstrip("/")
56
 
57
# Fetch a fresh config on every use (translated from Turkish:
# "Her kullanımda fresh config al").
def get_fresh_config():
    """Drop the cached config singleton and return a freshly loaded one.

    Used so that admin-side config changes (work_mode, tokens, endpoints)
    take effect without restarting the process.
    """
    from config_provider import ConfigProvider

    # NOTE(review): clears ConfigProvider's private singleton slot to force
    # a reload — assumes no public reload/invalidate API exists; confirm.
    ConfigProvider._instance = None
    return ConfigProvider.get()
65
  # ───────────────────────── SPARK ───────────────────────── #
66
def initialize_llm(force_reload=False):
    """Initialize the module-global LLM provider based on work_mode.

    Reads a fresh config on each (re)initialization so mode/key changes
    take effect without a restart.

    Args:
        force_reload: When True, rebuild the provider even if one already
            exists (picks up config changes made since the last init).

    Raises:
        ValueError: If the API key (GPT mode) or Spark token (Spark mode)
            is not configured.
    """
    global llm_provider

    # Skip work when a provider exists, unless the caller forces a rebuild.
    if force_reload or llm_provider is None:
        # Fresh config each time — do not reuse the module-level snapshot.
        cfg = get_fresh_config()
        # (fix: dropped dead local `work_mode` — it was assigned from
        # cfg.global_config.work_mode but never read.)

        if cfg.global_config.is_gpt_mode():
            # GPT mode — key and model both come from config.
            api_key = cfg.global_config.get_plain_token()
            if not api_key:
                raise ValueError("OpenAI API key not configured")

            model = cfg.global_config.get_gpt_model()
            llm_provider = GPT4oLLM(api_key, model)
            log(f"βœ… Initialized {model} provider")
        else:
            # Spark mode — token resolution depends on cloud/on-prem mode.
            spark_token = _get_spark_token()
            if not spark_token:
                raise ValueError("Spark token not configured")

            spark_endpoint = str(cfg.global_config.spark_endpoint)
            llm_provider = SparkLLM(spark_endpoint, spark_token)
            log("βœ… Initialized Spark provider")
93
 
94
  # ───────────────────────── SPARK ───────────────────────── #
95
  def _get_spark_token() -> Optional[str]:
96
  """Get Spark token based on work_mode"""
97
+ cfg = get_fresh_config() # Fresh config al
98
+ work_mode = cfg.global_config.work_mode
99
+
100
+ if work_mode in ("hfcloud", "cloud"):
101
  # Cloud mode - use HuggingFace Secrets
102
  token = os.getenv("SPARK_TOKEN")
103
  if not token:
 
112
  async def spark_generate(s: Session, prompt: str, user_msg: str) -> str:
113
  """Call LLM provider with proper error handling"""
114
  try:
115
+ # Always reinitialize to get fresh config
116
+ initialize_llm(force_reload=True)
117
+
118
  if not llm_provider:
119
+ raise ValueError("Failed to initialize LLM provider")
120
 
121
  # Use the abstract interface
122
  raw = await llm_provider.generate(prompt, user_msg, s.chat_history)
 
219
  # ───────────────────────── MESSAGE HANDLERS ───────────────────────── #
220
  async def _handle_new_message(session: Session, user_input: str, version) -> str:
221
  """Handle new message (not parameter followup)"""
222
+ cfg = get_fresh_config() # Fresh config
223
+
224
  # Build intent detection prompt
225
  prompt = build_intent_prompt(
226
  version.general_prompt,