ciyidogan commited on
Commit
8b942e9
·
verified ·
1 Parent(s): 341a67b

Update chat_handler.py

Browse files
Files changed (1) hide show
  1. chat_handler.py +46 -5
chat_handler.py CHANGED
@@ -69,18 +69,32 @@ def setup_llm_provider():
69
  """Initialize LLM provider based on internal_prompt config"""
70
  global llm_provider
71
 
72
- cfg = ConfigProvider.get() # Her zaman güncel config'i al
73
  internal_prompt = cfg.global_config.internal_prompt
 
74
  if not internal_prompt:
75
  log("⚠️ No internal_prompt configured, using default Spark")
76
- llm_provider = SparkLLM(cfg)
 
 
 
 
 
 
 
77
  return
78
 
79
  # Parse internal prompt format: "provider:model"
80
  parts = internal_prompt.split(":", 1)
81
  if len(parts) != 2:
82
  log(f"⚠️ Invalid internal_prompt format: {internal_prompt}, using Spark")
83
- llm_provider = SparkLLM(cfg)
 
 
 
 
 
 
84
  return
85
 
86
  provider, model = parts[0].lower(), parts[1]
@@ -90,14 +104,41 @@ def setup_llm_provider():
90
  api_key = os.getenv("OPENAI_API_KEY")
91
  if not api_key:
92
  log("❌ OPENAI_API_KEY not found in environment")
93
- llm_provider = SparkLLM(cfg)
 
 
 
 
 
 
94
  return
95
 
96
  log(f"🤖 Using OpenAI with model: {model}")
97
  llm_provider = GPT4oLLM(api_key, model)
98
  else:
99
  log(f"⚠️ Unknown provider: {provider}, using Spark")
100
- llm_provider = SparkLLM(cfg)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
101
 
102
  # ───────────────────────── SPARK/LLM CALL ───────────────────────── #
103
  async def spark_generate(s: Session, prompt: str, user_msg: str) -> str:
 
69
  """Initialize LLM provider based on internal_prompt config"""
70
  global llm_provider
71
 
72
+ cfg = ConfigProvider.get()
73
  internal_prompt = cfg.global_config.internal_prompt
74
+
75
  if not internal_prompt:
76
  log("⚠️ No internal_prompt configured, using default Spark")
77
+ # Get Spark token
78
+ spark_token = _get_spark_token()
79
+ if not spark_token:
80
+ log("❌ SPARK_TOKEN not found")
81
+ raise ValueError("SPARK_TOKEN not configured")
82
+
83
+ spark_endpoint = str(cfg.global_config.spark_endpoint)
84
+ llm_provider = SparkLLM(spark_endpoint, spark_token)
85
  return
86
 
87
  # Parse internal prompt format: "provider:model"
88
  parts = internal_prompt.split(":", 1)
89
  if len(parts) != 2:
90
  log(f"⚠️ Invalid internal_prompt format: {internal_prompt}, using Spark")
91
+ spark_token = _get_spark_token()
92
+ if not spark_token:
93
+ log("❌ SPARK_TOKEN not found")
94
+ raise ValueError("SPARK_TOKEN not configured")
95
+
96
+ spark_endpoint = str(cfg.global_config.spark_endpoint)
97
+ llm_provider = SparkLLM(spark_endpoint, spark_token)
98
  return
99
 
100
  provider, model = parts[0].lower(), parts[1]
 
104
  api_key = os.getenv("OPENAI_API_KEY")
105
  if not api_key:
106
  log("❌ OPENAI_API_KEY not found in environment")
107
+ # Fallback to Spark
108
+ spark_token = _get_spark_token()
109
+ if not spark_token:
110
+ raise ValueError("Neither OPENAI_API_KEY nor SPARK_TOKEN configured")
111
+
112
+ spark_endpoint = str(cfg.global_config.spark_endpoint)
113
+ llm_provider = SparkLLM(spark_endpoint, spark_token)
114
  return
115
 
116
  log(f"🤖 Using OpenAI with model: {model}")
117
  llm_provider = GPT4oLLM(api_key, model)
118
  else:
119
  log(f"⚠️ Unknown provider: {provider}, using Spark")
120
+ spark_token = _get_spark_token()
121
+ if not spark_token:
122
+ raise ValueError("SPARK_TOKEN not configured")
123
+
124
+ spark_endpoint = str(cfg.global_config.spark_endpoint)
125
+ llm_provider = SparkLLM(spark_endpoint, spark_token)
126
+
127
+ def _get_spark_token() -> Optional[str]:
128
+ """Get Spark token based on work_mode"""
129
+ cfg = ConfigProvider.get()
130
+
131
+ if cfg.global_config.is_cloud_mode():
132
+ # Cloud mode - use HuggingFace Secrets
133
+ token = os.getenv("SPARK_TOKEN")
134
+ if not token:
135
+ log("❌ SPARK_TOKEN not found in HuggingFace Secrets!")
136
+ return token
137
+ else:
138
+ # On-premise mode - use .env file
139
+ from dotenv import load_dotenv
140
+ load_dotenv()
141
+ return os.getenv("SPARK_TOKEN")
142
 
143
  # ───────────────────────── SPARK/LLM CALL ───────────────────────── #
144
  async def spark_generate(s: Session, prompt: str, user_msg: str) -> str: