dragg2 committed on
Commit 92e7d88 · verified · 1 Parent(s): d0caf24

Update app.py

Files changed (1)
  1. app.py +260 -207
app.py CHANGED
@@ -26,8 +26,8 @@ class Logger:
26
  )
27
 
28
  logger.add(
29
- sys.stderr,
30
- level=level,
31
  format=format,
32
  colorize=colorize,
33
  backtrace=True,
@@ -43,33 +43,33 @@ class Logger:
43
  full_path = caller_frame.f_code.co_filename
44
  function = caller_frame.f_code.co_name
45
  lineno = caller_frame.f_lineno
46
-
47
  filename = os.path.basename(full_path)
48
-
49
  return {
50
  'filename': filename,
51
  'function': function,
52
  'lineno': lineno
53
  }
54
  finally:
55
- del frame
56
 
57
  def info(self, message, source="API"):
58
  caller_info = self._get_caller_info()
59
  self.logger.bind(**caller_info).info(f"[{source}] {message}")
60
-
61
  def error(self, message, source="API"):
62
  caller_info = self._get_caller_info()
63
-
64
  if isinstance(message, Exception):
65
  self.logger.bind(**caller_info).exception(f"[{source}] {str(message)}")
66
  else:
67
  self.logger.bind(**caller_info).error(f"[{source}] {message}")
68
-
69
  def warning(self, message, source="API"):
70
  caller_info = self._get_caller_info()
71
  self.logger.bind(**caller_info).warning(f"[{source}] {message}")
72
-
73
  def debug(self, message, source="API"):
74
  caller_info = self._get_caller_info()
75
  self.logger.bind(**caller_info).debug(f"[{source}] {message}")
@@ -93,6 +93,7 @@ CONFIG = {
93
  "grok-3-reasoning": "grok-3"
94
  },
95
  "API": {
 
96
  "IS_CUSTOM_SSO": os.environ.get("IS_CUSTOM_SSO", "false").lower() == "true",
97
  "BASE_URL": "https://grok.com",
98
  "API_KEY": os.environ.get("API_KEY", "sk-123456"),
@@ -124,9 +125,10 @@ DEFAULT_HEADERS = {
124
  'Connection': 'keep-alive',
125
  'Origin': 'https://grok.com',
126
  'Priority': 'u=1, i',
127
- 'Sec-Ch-Ua': '"Chromium";v="130", "Google Chrome";v="130", "Not?A_Brand";v="99"',
 
128
  'Sec-Ch-Ua-Mobile': '?0',
129
- 'Sec-Ch-Ua-Platform': '"Windows"',
130
  'Sec-Fetch-Dest': 'empty',
131
  'Sec-Fetch-Mode': 'cors',
132
  'Sec-Fetch-Site': 'same-origin',
@@ -167,9 +169,9 @@ class AuthTokenManager:
167
  self.token_model_map[model] = []
168
  if sso not in self.token_status_map:
169
  self.token_status_map[sso] = {}
170
-
171
  existing_token_entry = next((entry for entry in self.token_model_map[model] if entry["token"] == token), None)
172
-
173
  if not existing_token_entry:
174
  self.token_model_map[model].append({
175
  "token": token,
@@ -177,14 +179,14 @@ class AuthTokenManager:
177
  "AddedTime": int(time.time() * 1000),
178
  "StartCallTime": None
179
  })
180
-
181
  if model not in self.token_status_map[sso]:
182
  self.token_status_map[sso][model] = {
183
  "isValid": True,
184
  "invalidatedTime": None,
185
  "totalRequestCount": 0
186
  }
187
-
188
  def set_token(self, token):
189
  models = list(self.model_config.keys())
190
  self.token_model_map = {model: [{
@@ -193,73 +195,105 @@ class AuthTokenManager:
193
  "AddedTime": int(time.time() * 1000),
194
  "StartCallTime": None
195
  }] for model in models}
196
-
197
  sso = token.split("sso=")[1].split(";")[0]
198
  self.token_status_map[sso] = {model: {
199
  "isValid": True,
200
  "invalidatedTime": None,
201
  "totalRequestCount": 0
202
  } for model in models}
203
-
204
  def delete_token(self, token):
205
  try:
206
  sso = token.split("sso=")[1].split(";")[0]
207
  for model in self.token_model_map:
208
  self.token_model_map[model] = [entry for entry in self.token_model_map[model] if entry["token"] != token]
209
-
210
  if sso in self.token_status_map:
211
  del self.token_status_map[sso]
212
-
213
  logger.info(f"令牌已成功移除: {token}", "TokenManager")
214
  return True
215
  except Exception as error:
216
  logger.error(f"令牌删除失败: {str(error)}")
217
  return False
218
-
219
  def get_next_token_for_model(self, model_id):
220
  normalized_model = self.normalize_model_name(model_id)
221
-
222
  if normalized_model not in self.token_model_map or not self.token_model_map[normalized_model]:
223
  return None
224
-
225
  token_entry = self.token_model_map[normalized_model][0]
226
-
227
  if token_entry:
228
  if token_entry["StartCallTime"] is None:
229
  token_entry["StartCallTime"] = int(time.time() * 1000)
230
-
231
  if not self.token_reset_switch:
232
  self.start_token_reset_process()
233
  self.token_reset_switch = True
234
-
235
  token_entry["RequestCount"] += 1
236
-
237
  if token_entry["RequestCount"] > self.model_config[normalized_model]["RequestFrequency"]:
238
  self.remove_token_from_model(normalized_model, token_entry["token"])
239
  next_token_entry = self.token_model_map[normalized_model][0] if self.token_model_map[normalized_model] else None
240
  return next_token_entry["token"] if next_token_entry else None
241
-
242
  sso = token_entry["token"].split("sso=")[1].split(";")[0]
243
  if sso in self.token_status_map and normalized_model in self.token_status_map[sso]:
244
  if token_entry["RequestCount"] == self.model_config[normalized_model]["RequestFrequency"]:
245
  self.token_status_map[sso][normalized_model]["isValid"] = False
246
  self.token_status_map[sso][normalized_model]["invalidatedTime"] = int(time.time() * 1000)
247
  self.token_status_map[sso][normalized_model]["totalRequestCount"] += 1
248
-
249
  return token_entry["token"]
250
-
251
  return None
252
-
253
  def remove_token_from_model(self, model_id, token):
254
  normalized_model = self.normalize_model_name(model_id)
255
-
256
  if normalized_model not in self.token_model_map:
257
  logger.error(f"模型 {normalized_model} 不存在", "TokenManager")
258
  return False
259
-
260
  model_tokens = self.token_model_map[normalized_model]
261
  token_index = next((i for i, entry in enumerate(model_tokens) if entry["token"] == token), -1)
262
-
263
  if token_index != -1:
264
  removed_token_entry = model_tokens.pop(token_index)
265
  self.expired_tokens.add((
@@ -267,86 +301,86 @@ class AuthTokenManager:
267
  normalized_model,
268
  int(time.time() * 1000)
269
  ))
270
-
271
  if not self.token_reset_switch:
272
  self.start_token_reset_process()
273
  self.token_reset_switch = True
274
-
275
  logger.info(f"模型{model_id}的令牌已失效,已成功移除令牌: {token}", "TokenManager")
276
  return True
277
-
278
  logger.error(f"在模型 {normalized_model} 中未找到 token: {token}", "TokenManager")
279
  return False
280
-
281
  def get_expired_tokens(self):
282
  return list(self.expired_tokens)
283
-
284
  def normalize_model_name(self, model):
285
  if model.startswith('grok-') and 'deepsearch' not in model and 'reasoning' not in model:
286
  return '-'.join(model.split('-')[:2])
287
  return model
288
-
289
  def get_token_count_for_model(self, model_id):
290
  normalized_model = self.normalize_model_name(model_id)
291
  return len(self.token_model_map.get(normalized_model, []))
292
-
293
  def get_remaining_token_request_capacity(self):
294
  remaining_capacity_map = {}
295
-
296
  for model in self.model_config.keys():
297
  model_tokens = self.token_model_map.get(model, [])
298
  model_request_frequency = self.model_config[model]["RequestFrequency"]
299
-
300
  total_used_requests = sum(token_entry.get("RequestCount", 0) for token_entry in model_tokens)
301
-
302
  remaining_capacity = (len(model_tokens) * model_request_frequency) - total_used_requests
303
  remaining_capacity_map[model] = max(0, remaining_capacity)
304
-
305
  return remaining_capacity_map
306
-
307
  def get_token_array_for_model(self, model_id):
308
  normalized_model = self.normalize_model_name(model_id)
309
  return self.token_model_map.get(normalized_model, [])
310
-
311
  def start_token_reset_process(self):
312
  def reset_expired_tokens():
313
  now = int(time.time() * 1000)
314
-
315
  tokens_to_remove = set()
316
  for token_info in self.expired_tokens:
317
  token, model, expired_time = token_info
318
  expiration_time = self.model_config[model]["ExpirationTime"]
319
-
320
  if now - expired_time >= expiration_time:
321
  if not any(entry["token"] == token for entry in self.token_model_map.get(model, [])):
322
  if model not in self.token_model_map:
323
  self.token_model_map[model] = []
324
-
325
  self.token_model_map[model].append({
326
  "token": token,
327
  "RequestCount": 0,
328
  "AddedTime": now,
329
  "StartCallTime": None
330
  })
331
-
332
  sso = token.split("sso=")[1].split(";")[0]
333
  if sso in self.token_status_map and model in self.token_status_map[sso]:
334
  self.token_status_map[sso][model]["isValid"] = True
335
  self.token_status_map[sso][model]["invalidatedTime"] = None
336
  self.token_status_map[sso][model]["totalRequestCount"] = 0
337
-
338
  tokens_to_remove.add(token_info)
339
-
340
  self.expired_tokens -= tokens_to_remove
341
-
342
  for model in self.model_config.keys():
343
  if model not in self.token_model_map:
344
  continue
345
-
346
  for token_entry in self.token_model_map[model]:
347
  if not token_entry.get("StartCallTime"):
348
  continue
349
-
350
  expiration_time = self.model_config[model]["ExpirationTime"]
351
  if now - token_entry["StartCallTime"] >= expiration_time:
352
  sso = token_entry["token"].split("sso=")[1].split(";")[0]
@@ -354,67 +388,72 @@ class AuthTokenManager:
354
  self.token_status_map[sso][model]["isValid"] = True
355
  self.token_status_map[sso][model]["invalidatedTime"] = None
356
  self.token_status_map[sso][model]["totalRequestCount"] = 0
357
-
358
  token_entry["RequestCount"] = 0
359
  token_entry["StartCallTime"] = None
360
-
361
  import threading
362
  # 启动一个线程执行定时任务,每小时执行一次
363
  def run_timer():
364
  while True:
365
  reset_expired_tokens()
366
  time.sleep(3600)
367
-
368
  timer_thread = threading.Thread(target=run_timer)
369
  timer_thread.daemon = True
370
  timer_thread.start()
371
-
372
  def get_all_tokens(self):
373
  all_tokens = set()
374
  for model_tokens in self.token_model_map.values():
375
  for entry in model_tokens:
376
  all_tokens.add(entry["token"])
377
  return list(all_tokens)
378
-
379
  def get_token_status_map(self):
380
  return self.token_status_map
381
-
382
  class Utils:
383
  @staticmethod
384
  def organize_search_results(search_results):
385
  if not search_results or 'results' not in search_results:
386
  return ''
387
-
388
  results = search_results['results']
389
  formatted_results = []
390
-
391
  for index, result in enumerate(results):
392
  title = result.get('title', '未知标题')
393
  url = result.get('url', '#')
394
  preview = result.get('preview', '无预览内容')
395
-
396
  formatted_result = f"\r\n<details><summary>资料[{index}]: {title}</summary>\r\n{preview}\r\n\n[Link]({url})\r\n</details>"
397
  formatted_results.append(formatted_result)
398
-
399
  return '\n\n'.join(formatted_results)
400
-
401
  @staticmethod
402
  def create_auth_headers(model):
403
  return token_manager.get_next_token_for_model(model)
404
-
405
  @staticmethod
406
  def get_proxy_options():
407
  proxy = CONFIG["API"]["PROXY"]
408
  proxy_options = {}
409
-
410
  if proxy:
411
  logger.info(f"使用代理: {proxy}", "Server")
412
- proxy_options["proxies"] = {"https": proxy, "http": proxy}
413
-
414
  if proxy.startswith("socks5://"):
415
  proxy_options["proxies"] = {"https": proxy, "http": proxy}
416
- proxy_options["proxy_type"] = "socks5"
417
-
418
  return proxy_options
419
 
420
  class GrokApiClient:
@@ -422,12 +461,12 @@ class GrokApiClient:
422
  if model_id not in CONFIG["MODELS"]:
423
  raise ValueError(f"不支持的模型: {model_id}")
424
  self.model_id = CONFIG["MODELS"][model_id]
425
-
426
  def process_message_content(self, content):
427
  if isinstance(content, str):
428
  return content
429
  return None
430
-
431
  def get_image_type(self, base64_string):
432
  mime_type = 'image/jpeg'
433
  if 'data:image' in base64_string:
@@ -435,26 +474,26 @@ class GrokApiClient:
435
  matches = re.search(r'data:([a-zA-Z0-9]+\/[a-zA-Z0-9-.+]+);base64,', base64_string)
436
  if matches:
437
  mime_type = matches.group(1)
438
-
439
  extension = mime_type.split('/')[1]
440
  file_name = f"image.{extension}"
441
-
442
  return {
443
  "mimeType": mime_type,
444
  "fileName": file_name
445
  }
446
-
447
  def upload_base64_image(self, base64_data, url):
448
  try:
449
  if 'data:image' in base64_data:
450
  image_buffer = base64_data.split(',')[1]
451
  else:
452
  image_buffer = base64_data
453
-
454
  image_info = self.get_image_type(base64_data)
455
  mime_type = image_info["mimeType"]
456
  file_name = image_info["fileName"]
457
-
458
  upload_data = {
459
  "rpc": "uploadFile",
460
  "req": {
@@ -463,9 +502,9 @@ class GrokApiClient:
463
  "content": image_buffer
464
  }
465
  }
466
-
467
  logger.info("发送图片请求", "Server")
468
-
469
  proxy_options = Utils.get_proxy_options()
470
  response = curl_requests.post(
471
  url,
@@ -474,48 +513,48 @@ class GrokApiClient:
474
  "Cookie": CONFIG["API"]["SIGNATURE_COOKIE"]
475
  },
476
  json=upload_data,
477
- impersonate="chrome120",
478
  **proxy_options
479
  )
480
-
481
  if response.status_code != 200:
482
  logger.error(f"上传图片失败,状态码:{response.status_code}", "Server")
483
  return ''
484
-
485
  result = response.json()
486
  logger.info(f"上传图片成功: {result}", "Server")
487
  return result.get("fileMetadataId", "")
488
-
489
  except Exception as error:
490
  logger.error(str(error), "Server")
491
  return ''
492
-
493
  def prepare_chat_request(self, request):
494
- if ((request["model"] == 'grok-2-imageGen' or request["model"] == 'grok-3-imageGen') and
495
- not CONFIG["API"]["PICGO_KEY"] and not CONFIG["API"]["TUMY_KEY"] and
496
  request.get("stream", False)):
497
  raise ValueError("该模型流式输出需要配置PICGO或者TUMY图床密钥!")
498
-
499
  todo_messages = request["messages"]
500
  if request["model"] in ['grok-2-imageGen', 'grok-3-imageGen', 'grok-3-deepsearch']:
501
  last_message = todo_messages[-1]
502
  if last_message["role"] != 'user':
503
  raise ValueError('此模型最后一条消息必须是用户消息!')
504
  todo_messages = [last_message]
505
-
506
  file_attachments = []
507
  messages = ''
508
  last_role = None
509
  last_content = ''
510
  search = request["model"] in ['grok-2-search', 'grok-3-search']
511
-
512
  # 移除<think>标签及其内容和base64图片
513
  def remove_think_tags(text):
514
  import re
515
  text = re.sub(r'<think>[\s\S]*?<\/think>', '', text).strip()
516
  text = re.sub(r'!\[image\]\(data:.*?base64,.*?\)', '[图片]', text)
517
  return text
518
-
519
  def process_content(content):
520
  if isinstance(content, list):
521
  text_content = ''
@@ -531,11 +570,11 @@ class GrokApiClient:
531
  elif content["type"] == 'text':
532
  return remove_think_tags(content["text"])
533
  return remove_think_tags(self.process_message_content(content))
534
-
535
  for current in todo_messages:
536
  role = 'assistant' if current["role"] == 'assistant' else 'user'
537
  is_last_message = current == todo_messages[-1]
538
-
539
  if is_last_message and "content" in current:
540
  if isinstance(current["content"], list):
541
  for item in current["content"]:
@@ -553,10 +592,10 @@ class GrokApiClient:
553
  )
554
  if processed_image:
555
  file_attachments.append(processed_image)
556
-
557
 
558
  text_content = process_content(current.get("content", ""))
559
-
560
  if text_content or (is_last_message and file_attachments):
561
  if role == last_role and text_content:
562
  last_content += '\n' + text_content
@@ -565,7 +604,7 @@ class GrokApiClient:
565
  messages += f"{role.upper()}: {text_content or '[图片]'}\n"
566
  last_content = text_content
567
  last_role = role
568
-
569
  return {
570
  "temporary": CONFIG["API"].get("IS_TEMP_CONVERSATION", False),
571
  "modelName": self.model_id,
@@ -603,7 +642,7 @@ class MessageProcessor:
603
  "created": int(time.time()),
604
  "model": model
605
  }
606
-
607
  if is_stream:
608
  return {
609
  **base_response,
@@ -615,7 +654,7 @@ class MessageProcessor:
615
  }
616
  }]
617
  }
618
-
619
  return {
620
  **base_response,
621
  "object": "chat.completion",
@@ -632,12 +671,12 @@ class MessageProcessor:
632
 
633
  def process_model_response(response, model):
634
  result = {"token": None, "imageUrl": None}
635
-
636
  if CONFIG["IS_IMG_GEN"]:
637
  if response.get("cachedImageGenerationResponse") and not CONFIG["IS_IMG_GEN2"]:
638
  result["imageUrl"] = response["cachedImageGenerationResponse"]["imageUrl"]
639
  return result
640
-
641
  if model == 'grok-2':
642
  result["token"] = response.get("token")
643
  elif model in ['grok-2-search', 'grok-3-search']:
@@ -657,11 +696,11 @@ def process_model_response(response, model):
657
  result["token"] = "</think>" + response.get("token", "")
658
  CONFIG["IS_THINKING"] = False
659
  elif (response.get("messageStepId") and CONFIG["IS_THINKING"] and response.get("messageTag") == "assistant") or response.get("messageTag") == "final":
660
- result["token"] = response.get("token")
661
  elif model == 'grok-3-reasoning':
662
  if response.get("isThinking") and not CONFIG["SHOW_THINKING"]:
663
  return result
664
-
665
  if response.get("isThinking") and not CONFIG["IS_THINKING"]:
666
  result["token"] = "<think>" + response.get("token", "")
667
  CONFIG["IS_THINKING"] = True
@@ -670,14 +709,14 @@ def process_model_response(response, model):
670
  CONFIG["IS_THINKING"] = False
671
  else:
672
  result["token"] = response.get("token")
673
-
674
  return result
675
 
676
  def handle_image_response(image_url):
677
  max_retries = 2
678
  retry_count = 0
679
  image_base64_response = None
680
-
681
  while retry_count < max_retries:
682
  try:
683
  proxy_options = Utils.get_proxy_options()
@@ -690,52 +729,52 @@ def handle_image_response(image_url):
690
  impersonate="chrome120",
691
  **proxy_options
692
  )
693
-
694
  if image_base64_response.status_code == 200:
695
  break
696
-
697
  retry_count += 1
698
  if retry_count == max_retries:
699
  raise Exception(f"上游服务请求失败! status: {image_base64_response.status_code}")
700
-
701
  time.sleep(CONFIG["API"]["RETRY_TIME"] / 1000 * retry_count)
702
-
703
  except Exception as error:
704
  logger.error(str(error), "Server")
705
  retry_count += 1
706
  if retry_count == max_retries:
707
  raise
708
-
709
  time.sleep(CONFIG["API"]["RETRY_TIME"] / 1000 * retry_count)
710
-
711
  image_buffer = image_base64_response.content
712
-
713
  if not CONFIG["API"]["PICGO_KEY"] and not CONFIG["API"]["TUMY_KEY"]:
714
  base64_image = base64.b64encode(image_buffer).decode('utf-8')
715
  image_content_type = image_base64_response.headers.get('content-type', 'image/jpeg')
716
  return f"![image](data:{image_content_type};base64,{base64_image})"
717
-
718
  logger.info("开始上传图床", "Server")
719
-
720
  if CONFIG["API"]["PICGO_KEY"]:
721
  files = {'source': ('image.jpg', image_buffer, 'image/jpeg')}
722
  headers = {
723
  "X-API-Key": CONFIG["API"]["PICGO_KEY"]
724
  }
725
-
726
  response_url = requests.post(
727
  "https://www.picgo.net/api/1/upload",
728
  files=files,
729
  headers=headers
730
  )
731
-
732
  if response_url.status_code != 200:
733
  return "生图失败,请查看PICGO图床密钥是否设置正确"
734
  else:
735
  logger.info("生图成功", "Server")
736
  result = response_url.json()
737
  return f"![image]({result['image']['url']})"
738
-
739
 
740
  elif CONFIG["API"]["TUMY_KEY"]:
741
  files = {'file': ('image.jpg', image_buffer, 'image/jpeg')}
@@ -743,13 +782,13 @@ def handle_image_response(image_url):
743
  "Accept": "application/json",
744
  'Authorization': f"Bearer {CONFIG['API']['TUMY_KEY']}"
745
  }
746
-
747
  response_url = requests.post(
748
  "https://tu.my/api/v1/upload",
749
  files=files,
750
  headers=headers
751
  )
752
-
753
  if response_url.status_code != 200:
754
  return "生图失败,请查看TUMY图床密钥是否设置正确"
755
  else:
@@ -761,101 +800,94 @@ def handle_image_response(image_url):
761
  logger.error(str(error), "Server")
762
  return "生图失败,请查看TUMY图床密钥是否设置正确"
763
 
764
-
765
  def handle_non_stream_response(response, model):
766
  try:
767
- content = response.text
768
- lines = content.split('\n')
 
769
  full_response = ""
770
 
771
  CONFIG["IS_THINKING"] = False
772
  CONFIG["IS_IMG_GEN"] = False
773
  CONFIG["IS_IMG_GEN2"] = False
774
-
775
- logger.info("开始处理非流式响应", "Server")
776
-
777
- for line in lines:
778
- if not line.strip():
779
  continue
780
-
781
  try:
782
- line_json = json.loads(line.strip())
783
  if line_json.get("error"):
784
  logger.error(json.dumps(line_json, indent=2), "Server")
785
- if line_json.get("error", {}).get("name") == "RateLimitError":
786
- CONFIG["API"]["TEMP_COOKIE"] = None
787
- raise ValueError("RateLimitError")
788
-
789
  response_data = line_json.get("result", {}).get("response")
790
  if not response_data:
791
  continue
792
-
793
  if response_data.get("doImgGen") or response_data.get("imageAttachmentInfo"):
794
  CONFIG["IS_IMG_GEN"] = True
795
-
796
  result = process_model_response(response_data, model)
797
-
798
  if result["token"]:
799
  full_response += result["token"]
800
-
801
  if result["imageUrl"]:
802
  CONFIG["IS_IMG_GEN2"] = True
803
  return handle_image_response(result["imageUrl"])
804
-
805
  except json.JSONDecodeError:
806
  continue
807
  except Exception as e:
808
- logger.error(f"处理响应行时出错: {str(e)}", "Server")
809
  continue
810
-
811
  return full_response
812
-
813
  except Exception as error:
814
  logger.error(str(error), "Server")
815
  raise
816
-
817
  def handle_stream_response(response, model):
818
  def generate():
819
- stream = response.iter_lines()
820
 
 
821
  CONFIG["IS_THINKING"] = False
822
  CONFIG["IS_IMG_GEN"] = False
823
  CONFIG["IS_IMG_GEN2"] = False
824
- logger.info("开始处理流式响应", "Server")
825
 
826
  for chunk in stream:
827
  if not chunk:
828
  continue
829
  try:
830
- line_json = json.loads(chunk.decode("utf-8").strip())
831
  if line_json.get("error"):
832
  logger.error(json.dumps(line_json, indent=2), "Server")
833
  yield json.dumps({"error": "RateLimitError"}) + "\n\n"
834
  return
835
-
836
  response_data = line_json.get("result", {}).get("response")
837
  if not response_data:
838
  continue
839
-
840
  if response_data.get("doImgGen") or response_data.get("imageAttachmentInfo"):
841
  CONFIG["IS_IMG_GEN"] = True
842
-
843
  result = process_model_response(response_data, model)
844
-
845
  if result["token"]:
846
  yield f"data: {json.dumps(MessageProcessor.create_chat_response(result['token'], model, True))}\n\n"
847
-
848
  if result["imageUrl"]:
849
  CONFIG["IS_IMG_GEN2"] = True
850
  image_data = handle_image_response(result["imageUrl"])
851
  yield f"data: {json.dumps(MessageProcessor.create_chat_response(image_data, model, True))}\n\n"
852
-
853
  except json.JSONDecodeError:
854
  continue
855
  except Exception as e:
856
  logger.error(f"处理流式响应行时出错: {str(e)}", "Server")
857
  continue
858
-
859
  yield "data: [DONE]\n\n"
860
  return generate()
861
 
@@ -863,15 +895,15 @@ def initialization():
863
  sso_array = os.environ.get("SSO", "").split(',')
864
  logger.info("开始加载令牌", "Server")
865
  for sso in sso_array:
866
- if sso:
867
  token_manager.add_token(f"sso-rw={sso};sso={sso}")
868
-
869
  logger.info(f"成功加载令牌: {json.dumps(token_manager.get_all_tokens(), indent=2)}", "Server")
870
  logger.info(f"令牌加载完成,共加载: {len(token_manager.get_all_tokens())}个令牌", "Server")
871
-
872
  if CONFIG["API"]["PROXY"]:
873
  logger.info(f"代理已设置: {CONFIG['API']['PROXY']}", "Server")
874
-
875
  logger.info("初始化完成", "Server")
876
 
877
 
@@ -890,7 +922,7 @@ def get_tokens():
890
  return jsonify({"error": '自定义的SSO令牌模式无法获取轮询sso令牌状态'}), 403
891
  elif auth_token != CONFIG["API"]["API_KEY"]:
892
  return jsonify({"error": 'Unauthorized'}), 401
893
-
894
  return jsonify(token_manager.get_token_status_map())
895
 
896
  @app.route('/add/token', methods=['POST'])
@@ -900,7 +932,7 @@ def add_token():
900
  return jsonify({"error": '自定义的SSO令牌模式无法添加sso令牌'}), 403
901
  elif auth_token != CONFIG["API"]["API_KEY"]:
902
  return jsonify({"error": 'Unauthorized'}), 401
903
-
904
  try:
905
  sso = request.json.get('sso')
906
  token_manager.add_token(f"sso-rw={sso};sso={sso}")
@@ -916,7 +948,7 @@ def delete_token():
916
  return jsonify({"error": '自定义的SSO令牌模式无法删除sso令牌'}), 403
917
  elif auth_token != CONFIG["API"]["API_KEY"]:
918
  return jsonify({"error": 'Unauthorized'}), 401
919
-
920
  try:
921
  sso = request.json.get('sso')
922
  token_manager.delete_token(f"sso-rw={sso};sso={sso}")
@@ -935,7 +967,7 @@ def get_models():
935
  "object": "model",
936
  "created": int(time.time()),
937
  "owned_by": "grok"
938
- }
939
  for model in CONFIG["MODELS"].keys()
940
  ]
941
  })
@@ -943,7 +975,8 @@ def get_models():
943
  @app.route('/v1/chat/completions', methods=['POST'])
944
  def chat_completions():
945
  try:
946
- auth_token = request.headers.get('Authorization', '').replace('Bearer ', '')
 
947
  if auth_token:
948
  if CONFIG["API"]["IS_CUSTOM_SSO"]:
949
  result = f"sso={auth_token};sso-rw={auth_token}"
@@ -952,97 +985,117 @@ def chat_completions():
952
  return jsonify({"error": 'Unauthorized'}), 401
953
  else:
954
  return jsonify({"error": 'API_KEY缺失'}), 401
955
-
956
  data = request.json
957
  model = data.get("model")
958
  stream = data.get("stream", False)
959
-
960
  retry_count = 0
961
  grok_client = GrokApiClient(model)
962
  request_payload = grok_client.prepare_chat_request(data)
963
-
 
964
  while retry_count < CONFIG["RETRY"]["MAX_ATTEMPTS"]:
965
  retry_count += 1
966
- CONFIG["API"]["SIGNATURE_COOKIE"] = Utils.create_auth_headers(model)
967
-
 
968
  if not CONFIG["API"]["SIGNATURE_COOKIE"]:
969
  raise ValueError('该模型无可用令牌')
970
-
971
- logger.info(f"当前令牌: {json.dumps(CONFIG['API']['SIGNATURE_COOKIE'], indent=2)}", "Server")
972
- logger.info(f"当前可用模型的全部可用数量: {json.dumps(token_manager.get_remaining_token_request_capacity(), indent=2)}", "Server")
973
-
974
  try:
975
  proxy_options = Utils.get_proxy_options()
976
  response = curl_requests.post(
977
  f"{CONFIG['API']['BASE_URL']}/rest/app-chat/conversations/new",
978
  headers={
979
- "Accept": "text/event-stream",
980
- "Baggage": "sentry-public_key=b311e0f2690c81f25e2c4cf6d4f7ce1c",
981
- "Content-Type": "text/plain;charset=UTF-8",
982
- "Connection": "keep-alive",
983
- "Cookie": CONFIG["API"]["SIGNATURE_COOKIE"]
984
  },
985
  data=json.dumps(request_payload),
986
- impersonate="chrome120",
987
  stream=True,
988
- **proxy_options
989
- )
990
-
991
  if response.status_code == 200:
 
992
  logger.info("请求成功", "Server")
993
- logger.info(f"当前{model}剩余可用令牌数: {token_manager.get_token_count_for_model(model)}", "Server")
994
-
995
  try:
996
  if stream:
997
- return Response(
998
- stream_with_context(handle_stream_response(response, model)),
999
- content_type='text/event-stream'
1000
- )
1001
  else:
1002
- content = handle_non_stream_response(response, model)
1003
- return jsonify(MessageProcessor.create_chat_response(content, model))
1004
-
1005
  except Exception as error:
1006
  logger.error(str(error), "Server")
1007
  if CONFIG["API"]["IS_CUSTOM_SSO"]:
1008
  raise ValueError(f"自定义SSO令牌当前模型{model}的请求次数已失效")
1009
-
1010
- token_manager.remove_token_from_model(model, CONFIG["API"]["SIGNATURE_COOKIE"])
 
1011
  if token_manager.get_token_count_for_model(model) == 0:
1012
  raise ValueError(f"{model} 次数已达上限,请切换其他模型或者重新对话")
1013
-
1014
  elif response.status_code == 429:
1015
  if CONFIG["API"]["IS_CUSTOM_SSO"]:
1016
  raise ValueError(f"自定义SSO令牌当前模型{model}的请求次数已失效")
1017
-
1018
- token_manager.remove_token_from_model(model, CONFIG["API"]["SIGNATURE_COOKIE"])
 
1019
  if token_manager.get_token_count_for_model(model) == 0:
1020
  raise ValueError(f"{model} 次数已达上限,请切换其他模型或者重新对话")
1021
-
1022
  else:
1023
  if CONFIG["API"]["IS_CUSTOM_SSO"]:
1024
  raise ValueError(f"自定义SSO令牌当前模型{model}的请求次数已失效")
1025
-
1026
- logger.error(f"令牌异常错误状态!status: {response.status_code}", "Server")
1027
- token_manager.remove_token_from_model(model, CONFIG["API"]["SIGNATURE_COOKIE"])
1028
- logger.info(f"当前{model}剩余可用令牌数: {token_manager.get_token_count_for_model(model)}", "Server")
1029
-
1030
  except Exception as e:
1031
  logger.error(f"请求处理异常: {str(e)}", "Server")
1032
  if CONFIG["API"]["IS_CUSTOM_SSO"]:
1033
  raise
1034
  continue
1035
-
1036
- raise ValueError('当前模型所有令牌都已耗尽')
1037
-
1038
  except Exception as error:
1039
  logger.error(str(error), "ChatAPI")
1040
- return jsonify({
1041
- "error": {
1042
  "message": str(error),
1043
  "type": "server_error"
1044
- }
1045
- }), 500
1046
 
1047
  @app.route('/', defaults={'path': ''})
1048
  @app.route('/<path:path>')
@@ -1052,7 +1105,7 @@ def catch_all(path):
1052
  if __name__ == '__main__':
1053
  token_manager = AuthTokenManager()
1054
  initialization()
1055
-
1056
  app.run(
1057
  host='0.0.0.0',
1058
  port=CONFIG["SERVER"]["PORT"],
 
26
  )
27
 
28
  logger.add(
29
+ sys.stderr,
30
+ level=level,
31
  format=format,
32
  colorize=colorize,
33
  backtrace=True,
 
43
  full_path = caller_frame.f_code.co_filename
44
  function = caller_frame.f_code.co_name
45
  lineno = caller_frame.f_lineno
46
+
47
  filename = os.path.basename(full_path)
48
+
49
  return {
50
  'filename': filename,
51
  'function': function,
52
  'lineno': lineno
53
  }
54
  finally:
55
+ del frame
56
 
57
  def info(self, message, source="API"):
58
  caller_info = self._get_caller_info()
59
  self.logger.bind(**caller_info).info(f"[{source}] {message}")
60
+
61
  def error(self, message, source="API"):
62
  caller_info = self._get_caller_info()
63
+
64
  if isinstance(message, Exception):
65
  self.logger.bind(**caller_info).exception(f"[{source}] {str(message)}")
66
  else:
67
  self.logger.bind(**caller_info).error(f"[{source}] {message}")
68
+
69
  def warning(self, message, source="API"):
70
  caller_info = self._get_caller_info()
71
  self.logger.bind(**caller_info).warning(f"[{source}] {message}")
72
+
73
  def debug(self, message, source="API"):
74
  caller_info = self._get_caller_info()
75
  self.logger.bind(**caller_info).debug(f"[{source}] {message}")
 
93
  "grok-3-reasoning": "grok-3"
94
  },
95
  "API": {
96
+ "IS_TEMP_CONVERSATION": os.environ.get("IS_TEMP_CONVERSATION", "true").lower() == "true",
97
  "IS_CUSTOM_SSO": os.environ.get("IS_CUSTOM_SSO", "false").lower() == "true",
98
  "BASE_URL": "https://grok.com",
99
  "API_KEY": os.environ.get("API_KEY", "sk-123456"),
 
125
  'Connection': 'keep-alive',
126
  'Origin': 'https://grok.com',
127
  'Priority': 'u=1, i',
128
+ 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36',
129
+ 'Sec-Ch-Ua': '"Not(A:Brand";v="99", "Google Chrome";v="133", "Chromium";v="133"',
130
  'Sec-Ch-Ua-Mobile': '?0',
131
+ 'Sec-Ch-Ua-Platform': '"macOS"',
132
  'Sec-Fetch-Dest': 'empty',
133
  'Sec-Fetch-Mode': 'cors',
134
  'Sec-Fetch-Site': 'same-origin',
 
169
  self.token_model_map[model] = []
170
  if sso not in self.token_status_map:
171
  self.token_status_map[sso] = {}
172
+
173
  existing_token_entry = next((entry for entry in self.token_model_map[model] if entry["token"] == token), None)
174
+
175
  if not existing_token_entry:
176
  self.token_model_map[model].append({
177
  "token": token,
 
179
  "AddedTime": int(time.time() * 1000),
180
  "StartCallTime": None
181
  })
182
+
183
  if model not in self.token_status_map[sso]:
184
  self.token_status_map[sso][model] = {
185
  "isValid": True,
186
  "invalidatedTime": None,
187
  "totalRequestCount": 0
188
  }
189
+
190
  def set_token(self, token):
191
  models = list(self.model_config.keys())
192
  self.token_model_map = {model: [{
 
195
  "AddedTime": int(time.time() * 1000),
196
  "StartCallTime": None
197
  }] for model in models}
198
+
199
  sso = token.split("sso=")[1].split(";")[0]
200
  self.token_status_map[sso] = {model: {
201
  "isValid": True,
202
  "invalidatedTime": None,
203
  "totalRequestCount": 0
204
  } for model in models}
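# Illustrative trace of the sso extraction used throughout AuthTokenManager
# (standalone sketch with a hypothetical cookie value, not part of the diff):
cookie = "sso-rw=abc123;sso=abc123"
sso = cookie.split("sso=")[1].split(";")[0]
assert sso == "abc123"  # "sso-rw=" does not contain the substring "sso=", so the split lands on the real sso cookie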
205
+
206
  def delete_token(self, token):
207
  try:
208
  sso = token.split("sso=")[1].split(";")[0]
209
  for model in self.token_model_map:
210
  self.token_model_map[model] = [entry for entry in self.token_model_map[model] if entry["token"] != token]
211
+
212
  if sso in self.token_status_map:
213
  del self.token_status_map[sso]
214
+
215
  logger.info(f"令牌已成功移除: {token}", "TokenManager")
216
  return True
217
  except Exception as error:
218
  logger.error(f"令牌删除失败: {str(error)}")
219
  return False
220
+ def reduce_token_request_count(self, model_id, count):
221
+ try:
222
+ normalized_model = self.normalize_model_name(model_id)
223
+
224
+ if normalized_model not in self.token_model_map:
225
+ logger.error(f"模型 {normalized_model} 不存在", "TokenManager")
226
+ return False
227
+
228
+ if not self.token_model_map[normalized_model]:
229
+ logger.error(f"模型 {normalized_model} 没有可用的token", "TokenManager")
230
+ return False
231
+
232
+ token_entry = self.token_model_map[normalized_model][0]
233
+
234
+ # 确保RequestCount不会小于0
235
+ new_count = max(0, token_entry["RequestCount"] - count)
236
+ reduction = token_entry["RequestCount"] - new_count
237
+
238
+ token_entry["RequestCount"] = new_count
239
+
240
+ # 更新token状态
241
+ if token_entry["token"]:
242
+ sso = token_entry["token"].split("sso=")[1].split(";")[0]
243
+ if sso in self.token_status_map and normalized_model in self.token_status_map[sso]:
244
+ self.token_status_map[sso][normalized_model]["totalRequestCount"] = max(
245
+ 0,
246
+ self.token_status_map[sso][normalized_model]["totalRequestCount"] - reduction
247
+ )
248
+ return True
249
+
250
+ except Exception as error:
251
+ logger.error(f"重置校对token请求次数时发生错误: {str(error)}", "TokenManager")
252
+ return False
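# Standalone sketch of the clamping behaviour above (hypothetical numbers):
# a failed upstream call hands its request slot back without ever driving
# RequestCount or totalRequestCount below zero.
entry = {"RequestCount": 1}
new_count = max(0, entry["RequestCount"] - 1)   # 0: the failed request is refunded
reduction = entry["RequestCount"] - new_count   # 1: same amount subtracted from totalRequestCount
entry["RequestCount"] = new_count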
253
  def get_next_token_for_model(self, model_id):
254
  normalized_model = self.normalize_model_name(model_id)
255
+
256
  if normalized_model not in self.token_model_map or not self.token_model_map[normalized_model]:
257
  return None
258
+
259
  token_entry = self.token_model_map[normalized_model][0]
260
+
261
  if token_entry:
262
  if token_entry["StartCallTime"] is None:
263
  token_entry["StartCallTime"] = int(time.time() * 1000)
264
+
265
  if not self.token_reset_switch:
266
  self.start_token_reset_process()
267
  self.token_reset_switch = True
268
+
269
  token_entry["RequestCount"] += 1
270
+
271
  if token_entry["RequestCount"] > self.model_config[normalized_model]["RequestFrequency"]:
272
  self.remove_token_from_model(normalized_model, token_entry["token"])
273
  next_token_entry = self.token_model_map[normalized_model][0] if self.token_model_map[normalized_model] else None
274
  return next_token_entry["token"] if next_token_entry else None
275
+
276
  sso = token_entry["token"].split("sso=")[1].split(";")[0]
277
  if sso in self.token_status_map and normalized_model in self.token_status_map[sso]:
278
  if token_entry["RequestCount"] == self.model_config[normalized_model]["RequestFrequency"]:
279
  self.token_status_map[sso][normalized_model]["isValid"] = False
280
  self.token_status_map[sso][normalized_model]["invalidatedTime"] = int(time.time() * 1000)
281
  self.token_status_map[sso][normalized_model]["totalRequestCount"] += 1
282
+
283
  return token_entry["token"]
284
+
285
  return None
286
+
287
  def remove_token_from_model(self, model_id, token):
288
  normalized_model = self.normalize_model_name(model_id)
289
+
290
  if normalized_model not in self.token_model_map:
291
  logger.error(f"模型 {normalized_model} 不存在", "TokenManager")
292
  return False
293
+
294
  model_tokens = self.token_model_map[normalized_model]
295
  token_index = next((i for i, entry in enumerate(model_tokens) if entry["token"] == token), -1)
296
+
297
  if token_index != -1:
298
  removed_token_entry = model_tokens.pop(token_index)
299
  self.expired_tokens.add((
 
301
  normalized_model,
302
  int(time.time() * 1000)
303
  ))
304
+
305
  if not self.token_reset_switch:
306
  self.start_token_reset_process()
307
  self.token_reset_switch = True
308
+
309
  logger.info(f"模型{model_id}的令牌已失效,已成功移除令牌: {token}", "TokenManager")
310
  return True
311
+
312
  logger.error(f"在模型 {normalized_model} 中未找到 token: {token}", "TokenManager")
313
  return False
314
+
315
  def get_expired_tokens(self):
316
  return list(self.expired_tokens)
317
+
318
  def normalize_model_name(self, model):
319
  if model.startswith('grok-') and 'deepsearch' not in model and 'reasoning' not in model:
320
  return '-'.join(model.split('-')[:2])
321
  return model
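# Illustrative expectations for normalize_model_name (standalone sketch):
# variant suffixes such as -search or -imageGen share the base model's token pool,
# while deepsearch and reasoning variants keep their own entries in token_model_map.
assert '-'.join('grok-3-search'.split('-')[:2]) == 'grok-3'
assert '-'.join('grok-2-imageGen'.split('-')[:2]) == 'grok-2'
# 'grok-3-deepsearch' and 'grok-3-reasoning' are returned unchanged.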
322
+
323
  def get_token_count_for_model(self, model_id):
324
  normalized_model = self.normalize_model_name(model_id)
325
  return len(self.token_model_map.get(normalized_model, []))
326
+
327
  def get_remaining_token_request_capacity(self):
328
  remaining_capacity_map = {}
329
+
330
  for model in self.model_config.keys():
331
  model_tokens = self.token_model_map.get(model, [])
332
  model_request_frequency = self.model_config[model]["RequestFrequency"]
333
+
334
  total_used_requests = sum(token_entry.get("RequestCount", 0) for token_entry in model_tokens)
335
+
336
  remaining_capacity = (len(model_tokens) * model_request_frequency) - total_used_requests
337
  remaining_capacity_map[model] = max(0, remaining_capacity)
338
+
339
  return remaining_capacity_map
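# Worked example of the capacity formula above (hypothetical numbers):
# two tokens for a model whose RequestFrequency is 20, with 7 and 3 calls used.
tokens = [{"RequestCount": 7}, {"RequestCount": 3}]
request_frequency = 20
used = sum(t["RequestCount"] for t in tokens)               # 10
remaining = max(0, len(tokens) * request_frequency - used)  # 2 * 20 - 10 = 30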
340
+
341
  def get_token_array_for_model(self, model_id):
342
  normalized_model = self.normalize_model_name(model_id)
343
  return self.token_model_map.get(normalized_model, [])
344
+
345
  def start_token_reset_process(self):
346
  def reset_expired_tokens():
347
  now = int(time.time() * 1000)
348
+
349
  tokens_to_remove = set()
350
  for token_info in self.expired_tokens:
351
  token, model, expired_time = token_info
352
  expiration_time = self.model_config[model]["ExpirationTime"]
353
+
354
  if now - expired_time >= expiration_time:
355
  if not any(entry["token"] == token for entry in self.token_model_map.get(model, [])):
356
  if model not in self.token_model_map:
357
  self.token_model_map[model] = []
358
+
359
  self.token_model_map[model].append({
360
  "token": token,
361
  "RequestCount": 0,
362
  "AddedTime": now,
363
  "StartCallTime": None
364
  })
365
+
366
  sso = token.split("sso=")[1].split(";")[0]
367
  if sso in self.token_status_map and model in self.token_status_map[sso]:
368
  self.token_status_map[sso][model]["isValid"] = True
369
  self.token_status_map[sso][model]["invalidatedTime"] = None
370
  self.token_status_map[sso][model]["totalRequestCount"] = 0
371
+
372
  tokens_to_remove.add(token_info)
373
+
374
  self.expired_tokens -= tokens_to_remove
375
+
376
  for model in self.model_config.keys():
377
  if model not in self.token_model_map:
378
  continue
379
+
380
  for token_entry in self.token_model_map[model]:
381
  if not token_entry.get("StartCallTime"):
382
  continue
383
+
384
  expiration_time = self.model_config[model]["ExpirationTime"]
385
  if now - token_entry["StartCallTime"] >= expiration_time:
386
  sso = token_entry["token"].split("sso=")[1].split(";")[0]
 
388
  self.token_status_map[sso][model]["isValid"] = True
389
  self.token_status_map[sso][model]["invalidatedTime"] = None
390
  self.token_status_map[sso][model]["totalRequestCount"] = 0
391
+
392
  token_entry["RequestCount"] = 0
393
  token_entry["StartCallTime"] = None
394
+
395
  import threading
396
  # 启动一个线程执行定时任务,每小时执行一次
397
  def run_timer():
398
  while True:
399
  reset_expired_tokens()
400
  time.sleep(3600)
401
+
402
  timer_thread = threading.Thread(target=run_timer)
403
  timer_thread.daemon = True
404
  timer_thread.start()
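# Minimal standalone sketch of the reset-thread pattern used above: a daemon
# thread wakes once per interval and runs a housekeeping callback, so it never
# blocks shutdown (names here are illustrative, not part of app.py).
import threading
import time

def start_hourly_task(task, interval_seconds=3600):
    def run_timer():
        while True:
            task()                       # e.g. reset_expired_tokens()
            time.sleep(interval_seconds)
    timer = threading.Thread(target=run_timer)
    timer.daemon = True                  # do not keep the process alive on exit
    timer.start()
    return timer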
405
+
406
  def get_all_tokens(self):
407
  all_tokens = set()
408
  for model_tokens in self.token_model_map.values():
409
  for entry in model_tokens:
410
  all_tokens.add(entry["token"])
411
  return list(all_tokens)
412
+
413
  def get_token_status_map(self):
414
  return self.token_status_map
415
+
416
  class Utils:
417
  @staticmethod
418
  def organize_search_results(search_results):
419
  if not search_results or 'results' not in search_results:
420
  return ''
421
+
422
  results = search_results['results']
423
  formatted_results = []
424
+
425
  for index, result in enumerate(results):
426
  title = result.get('title', '未知标题')
427
  url = result.get('url', '#')
428
  preview = result.get('preview', '无预览内容')
429
+
430
  formatted_result = f"\r\n<details><summary>资料[{index}]: {title}</summary>\r\n{preview}\r\n\n[Link]({url})\r\n</details>"
431
  formatted_results.append(formatted_result)
432
+
433
  return '\n\n'.join(formatted_results)
434
+
435
  @staticmethod
436
  def create_auth_headers(model):
437
  return token_manager.get_next_token_for_model(model)
438
+
439
  @staticmethod
440
  def get_proxy_options():
441
  proxy = CONFIG["API"]["PROXY"]
442
  proxy_options = {}
443
+
444
  if proxy:
445
  logger.info(f"使用代理: {proxy}", "Server")
 
 
446
  if proxy.startswith("socks5://"):
447
+ proxy_options["proxy"] = proxy
448
+
449
+ if '@' in proxy:
450
+ auth_part = proxy.split('@')[0].split('://')[1]
451
+ if ':' in auth_part:
452
+ username, password = auth_part.split(':')
453
+ proxy_options["proxy_auth"] = (username, password)
454
+ else:
455
  proxy_options["proxies"] = {"https": proxy, "http": proxy}
456
+ print(proxy_options)
 
457
  return proxy_options
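# Illustrative shapes returned by get_proxy_options (hypothetical proxy URLs):
# a socks5 proxy with credentials is passed to curl_cffi as `proxy` plus `proxy_auth`,
# while any other proxy is passed as a `proxies` mapping.
#   "socks5://user:pass@127.0.0.1:1080"
#       -> {"proxy": "socks5://user:pass@127.0.0.1:1080", "proxy_auth": ("user", "pass")}
#   "http://127.0.0.1:7890"
#       -> {"proxies": {"https": "http://127.0.0.1:7890", "http": "http://127.0.0.1:7890"}}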
458
 
459
  class GrokApiClient:
 
461
  if model_id not in CONFIG["MODELS"]:
462
  raise ValueError(f"不支持的模型: {model_id}")
463
  self.model_id = CONFIG["MODELS"][model_id]
464
+
465
  def process_message_content(self, content):
466
  if isinstance(content, str):
467
  return content
468
  return None
469
+
470
  def get_image_type(self, base64_string):
471
  mime_type = 'image/jpeg'
472
  if 'data:image' in base64_string:
 
474
  matches = re.search(r'data:([a-zA-Z0-9]+\/[a-zA-Z0-9-.+]+);base64,', base64_string)
475
  if matches:
476
  mime_type = matches.group(1)
477
+
478
  extension = mime_type.split('/')[1]
479
  file_name = f"image.{extension}"
480
+
481
  return {
482
  "mimeType": mime_type,
483
  "fileName": file_name
484
  }
485
+
486
  def upload_base64_image(self, base64_data, url):
487
  try:
488
  if 'data:image' in base64_data:
489
  image_buffer = base64_data.split(',')[1]
490
  else:
491
  image_buffer = base64_data
492
+
493
  image_info = self.get_image_type(base64_data)
494
  mime_type = image_info["mimeType"]
495
  file_name = image_info["fileName"]
496
+
497
  upload_data = {
498
  "rpc": "uploadFile",
499
  "req": {
 
502
  "content": image_buffer
503
  }
504
  }
505
+
506
  logger.info("发送图片请求", "Server")
507
+
508
  proxy_options = Utils.get_proxy_options()
509
  response = curl_requests.post(
510
  url,
 
513
  "Cookie": CONFIG["API"]["SIGNATURE_COOKIE"]
514
  },
515
  json=upload_data,
516
+ impersonate="chrome133a",
517
  **proxy_options
518
  )
519
+
520
  if response.status_code != 200:
521
  logger.error(f"上传图片失败,状态码:{response.status_code}", "Server")
522
  return ''
523
+
524
  result = response.json()
525
  logger.info(f"上传图片成功: {result}", "Server")
526
  return result.get("fileMetadataId", "")
527
+
528
  except Exception as error:
529
  logger.error(str(error), "Server")
530
  return ''
531
+
532
  def prepare_chat_request(self, request):
533
+ if ((request["model"] == 'grok-2-imageGen' or request["model"] == 'grok-3-imageGen') and
534
+ not CONFIG["API"]["PICGO_KEY"] and not CONFIG["API"]["TUMY_KEY"] and
535
  request.get("stream", False)):
536
  raise ValueError("该模型流式输出需要配置PICGO或者TUMY图床密钥!")
537
+
538
  todo_messages = request["messages"]
539
  if request["model"] in ['grok-2-imageGen', 'grok-3-imageGen', 'grok-3-deepsearch']:
540
  last_message = todo_messages[-1]
541
  if last_message["role"] != 'user':
542
  raise ValueError('此模型最后一条消息必须是用户消息!')
543
  todo_messages = [last_message]
544
+
545
  file_attachments = []
546
  messages = ''
547
  last_role = None
548
  last_content = ''
549
  search = request["model"] in ['grok-2-search', 'grok-3-search']
550
+
551
  # 移除<think>标签及其内容和base64图片
552
  def remove_think_tags(text):
553
  import re
554
  text = re.sub(r'<think>[\s\S]*?<\/think>', '', text).strip()
555
  text = re.sub(r'!\[image\]\(data:.*?base64,.*?\)', '[图片]', text)
556
  return text
557
+
558
  def process_content(content):
559
  if isinstance(content, list):
560
  text_content = ''
 
570
  elif content["type"] == 'text':
571
  return remove_think_tags(content["text"])
572
  return remove_think_tags(self.process_message_content(content))
573
+
574
  for current in todo_messages:
575
  role = 'assistant' if current["role"] == 'assistant' else 'user'
576
  is_last_message = current == todo_messages[-1]
577
+
578
  if is_last_message and "content" in current:
579
  if isinstance(current["content"], list):
580
  for item in current["content"]:
 
592
  )
593
  if processed_image:
594
  file_attachments.append(processed_image)
595
+
596
 
597
  text_content = process_content(current.get("content", ""))
598
+
599
  if text_content or (is_last_message and file_attachments):
600
  if role == last_role and text_content:
601
  last_content += '\n' + text_content
 
604
  messages += f"{role.upper()}: {text_content or '[图片]'}\n"
605
  last_content = text_content
606
  last_role = role
607
+
608
  return {
609
  "temporary": CONFIG["API"].get("IS_TEMP_CONVERSATION", False),
610
  "modelName": self.model_id,
 
642
  "created": int(time.time()),
643
  "model": model
644
  }
645
+
646
  if is_stream:
647
  return {
648
  **base_response,
 
654
  }
655
  }]
656
  }
657
+
658
  return {
659
  **base_response,
660
  "object": "chat.completion",
 
671
 
672
  def process_model_response(response, model):
673
  result = {"token": None, "imageUrl": None}
674
+
675
  if CONFIG["IS_IMG_GEN"]:
676
  if response.get("cachedImageGenerationResponse") and not CONFIG["IS_IMG_GEN2"]:
677
  result["imageUrl"] = response["cachedImageGenerationResponse"]["imageUrl"]
678
  return result
679
+
680
  if model == 'grok-2':
681
  result["token"] = response.get("token")
682
  elif model in ['grok-2-search', 'grok-3-search']:
 
696
  result["token"] = "</think>" + response.get("token", "")
697
  CONFIG["IS_THINKING"] = False
698
  elif (response.get("messageStepId") and CONFIG["IS_THINKING"] and response.get("messageTag") == "assistant") or response.get("messageTag") == "final":
699
+ result["token"] = response.get("token")
700
  elif model == 'grok-3-reasoning':
701
  if response.get("isThinking") and not CONFIG["SHOW_THINKING"]:
702
  return result
703
+
704
  if response.get("isThinking") and not CONFIG["IS_THINKING"]:
705
  result["token"] = "<think>" + response.get("token", "")
706
  CONFIG["IS_THINKING"] = True
 
709
  CONFIG["IS_THINKING"] = False
710
  else:
711
  result["token"] = response.get("token")
712
+
713
  return result
714
 
715
  def handle_image_response(image_url):
716
  max_retries = 2
717
  retry_count = 0
718
  image_base64_response = None
719
+
720
  while retry_count < max_retries:
721
  try:
722
  proxy_options = Utils.get_proxy_options()
 
729
  impersonate="chrome120",
730
  **proxy_options
731
  )
732
+
733
  if image_base64_response.status_code == 200:
734
  break
735
+
736
  retry_count += 1
737
  if retry_count == max_retries:
738
  raise Exception(f"上游服务请求失败! status: {image_base64_response.status_code}")
739
+
740
  time.sleep(CONFIG["API"]["RETRY_TIME"] / 1000 * retry_count)
741
+
742
  except Exception as error:
743
  logger.error(str(error), "Server")
744
  retry_count += 1
745
  if retry_count == max_retries:
746
  raise
747
+
748
  time.sleep(CONFIG["API"]["RETRY_TIME"] / 1000 * retry_count)
749
+
750
  image_buffer = image_base64_response.content
751
+
752
  if not CONFIG["API"]["PICGO_KEY"] and not CONFIG["API"]["TUMY_KEY"]:
753
  base64_image = base64.b64encode(image_buffer).decode('utf-8')
754
  image_content_type = image_base64_response.headers.get('content-type', 'image/jpeg')
755
  return f"![image](data:{image_content_type};base64,{base64_image})"
756
+
757
  logger.info("开始上传图床", "Server")
758
+
759
  if CONFIG["API"]["PICGO_KEY"]:
760
  files = {'source': ('image.jpg', image_buffer, 'image/jpeg')}
761
  headers = {
762
  "X-API-Key": CONFIG["API"]["PICGO_KEY"]
763
  }
764
+
765
  response_url = requests.post(
766
  "https://www.picgo.net/api/1/upload",
767
  files=files,
768
  headers=headers
769
  )
770
+
771
  if response_url.status_code != 200:
772
  return "生图失败,请查看PICGO图床密钥是否设置正确"
773
  else:
774
  logger.info("生图成功", "Server")
775
  result = response_url.json()
776
  return f"![image]({result['image']['url']})"
777
+
778
 
779
  elif CONFIG["API"]["TUMY_KEY"]:
780
  files = {'file': ('image.jpg', image_buffer, 'image/jpeg')}
 
782
  "Accept": "application/json",
783
  'Authorization': f"Bearer {CONFIG['API']['TUMY_KEY']}"
784
  }
785
+
786
  response_url = requests.post(
787
  "https://tu.my/api/v1/upload",
788
  files=files,
789
  headers=headers
790
  )
791
+
792
  if response_url.status_code != 200:
793
  return "生图失败,请查看TUMY图床密钥是否设置正确"
794
  else:
 
800
  logger.error(str(error), "Server")
801
  return "生图失败,请查看TUMY图床密钥是否设置正确"
802
 
 
803
  def handle_non_stream_response(response, model):
804
  try:
805
+ logger.info("开始处理非流式响应", "Server")
806
+
807
+ stream = response.iter_lines()
808
  full_response = ""
809
 
810
  CONFIG["IS_THINKING"] = False
811
  CONFIG["IS_IMG_GEN"] = False
812
  CONFIG["IS_IMG_GEN2"] = False
813
+
814
+ for chunk in stream:
815
+ if not chunk:
 
 
816
  continue
 
817
  try:
818
+ line_json = json.loads(chunk.decode("utf-8").strip())
819
  if line_json.get("error"):
820
  logger.error(json.dumps(line_json, indent=2), "Server")
821
+ return json.dumps({"error": "RateLimitError"}) + "\n\n"
822
+
 
 
823
  response_data = line_json.get("result", {}).get("response")
824
  if not response_data:
825
  continue
826
+
827
  if response_data.get("doImgGen") or response_data.get("imageAttachmentInfo"):
828
  CONFIG["IS_IMG_GEN"] = True
829
+
830
  result = process_model_response(response_data, model)
831
+
832
  if result["token"]:
833
  full_response += result["token"]
834
+
835
  if result["imageUrl"]:
836
  CONFIG["IS_IMG_GEN2"] = True
837
  return handle_image_response(result["imageUrl"])
838
+
839
  except json.JSONDecodeError:
840
  continue
841
  except Exception as e:
842
+ logger.error(f"处理流式响应行时出错: {str(e)}", "Server")
843
  continue
844
+
845
  return full_response
 
846
  except Exception as error:
847
  logger.error(str(error), "Server")
848
  raise
 
849
  def handle_stream_response(response, model):
850
  def generate():
851
+ logger.info("开始处理流式响应", "Server")
852
 
853
+ stream = response.iter_lines()
854
  CONFIG["IS_THINKING"] = False
855
  CONFIG["IS_IMG_GEN"] = False
856
  CONFIG["IS_IMG_GEN2"] = False
 
857
 
858
  for chunk in stream:
859
  if not chunk:
860
  continue
861
  try:
862
+ line_json = json.loads(chunk.decode("utf-8").strip())
863
  if line_json.get("error"):
864
  logger.error(json.dumps(line_json, indent=2), "Server")
865
  yield json.dumps({"error": "RateLimitError"}) + "\n\n"
866
  return
867
+
868
  response_data = line_json.get("result", {}).get("response")
869
  if not response_data:
870
  continue
871
+
872
  if response_data.get("doImgGen") or response_data.get("imageAttachmentInfo"):
873
  CONFIG["IS_IMG_GEN"] = True
874
+
875
  result = process_model_response(response_data, model)
876
+
877
  if result["token"]:
878
  yield f"data: {json.dumps(MessageProcessor.create_chat_response(result['token'], model, True))}\n\n"
879
+
880
  if result["imageUrl"]:
881
  CONFIG["IS_IMG_GEN2"] = True
882
  image_data = handle_image_response(result["imageUrl"])
883
  yield f"data: {json.dumps(MessageProcessor.create_chat_response(image_data, model, True))}\n\n"
884
+
885
  except json.JSONDecodeError:
886
  continue
887
  except Exception as e:
888
  logger.error(f"处理流式响应行时出错: {str(e)}", "Server")
889
  continue
890
+
891
  yield "data: [DONE]\n\n"
892
  return generate()
893
 
 
895
  sso_array = os.environ.get("SSO", "").split(',')
896
  logger.info("开始加载令牌", "Server")
897
  for sso in sso_array:
898
+ if sso:
899
  token_manager.add_token(f"sso-rw={sso};sso={sso}")
900
+
901
  logger.info(f"成功加载令牌: {json.dumps(token_manager.get_all_tokens(), indent=2)}", "Server")
902
  logger.info(f"令牌加载完成,共加载: {len(token_manager.get_all_tokens())}个令牌", "Server")
903
+
904
  if CONFIG["API"]["PROXY"]:
905
  logger.info(f"代理已设置: {CONFIG['API']['PROXY']}", "Server")
906
+
907
  logger.info("初始化完成", "Server")
908
 
909
 
 
922
  return jsonify({"error": '自定义的SSO令牌模式无法获取轮询sso令牌状态'}), 403
923
  elif auth_token != CONFIG["API"]["API_KEY"]:
924
  return jsonify({"error": 'Unauthorized'}), 401
925
+
926
  return jsonify(token_manager.get_token_status_map())
927
 
928
  @app.route('/add/token', methods=['POST'])
 
932
  return jsonify({"error": '自定义的SSO令牌模式无法添加sso令牌'}), 403
933
  elif auth_token != CONFIG["API"]["API_KEY"]:
934
  return jsonify({"error": 'Unauthorized'}), 401
935
+
936
  try:
937
  sso = request.json.get('sso')
938
  token_manager.add_token(f"sso-rw={sso};sso={sso}")
 
948
  return jsonify({"error": '自定义的SSO令牌模式无法删除sso令牌'}), 403
949
  elif auth_token != CONFIG["API"]["API_KEY"]:
950
  return jsonify({"error": 'Unauthorized'}), 401
951
+
952
  try:
953
  sso = request.json.get('sso')
954
  token_manager.delete_token(f"sso-rw={sso};sso={sso}")
 
967
  "object": "model",
968
  "created": int(time.time()),
969
  "owned_by": "grok"
970
+ }
971
  for model in CONFIG["MODELS"].keys()
972
  ]
973
  })
 
975
  @app.route('/v1/chat/completions', methods=['POST'])
976
  def chat_completions():
977
  try:
978
+ auth_token = request.headers.get('Authorization',
979
+ '').replace('Bearer ', '')
980
  if auth_token:
981
  if CONFIG["API"]["IS_CUSTOM_SSO"]:
982
  result = f"sso={auth_token};sso-rw={auth_token}"
 
985
  return jsonify({"error": 'Unauthorized'}), 401
986
  else:
987
  return jsonify({"error": 'API_KEY缺失'}), 401
988
+
989
  data = request.json
990
  model = data.get("model")
991
  stream = data.get("stream", False)
992
+
993
  retry_count = 0
994
  grok_client = GrokApiClient(model)
995
  request_payload = grok_client.prepare_chat_request(data)
996
+ response_status_code = 500
997
+
998
  while retry_count < CONFIG["RETRY"]["MAX_ATTEMPTS"]:
999
  retry_count += 1
1000
+ CONFIG["API"]["SIGNATURE_COOKIE"] = Utils.create_auth_headers(
1001
+ model)
1002
+
1003
  if not CONFIG["API"]["SIGNATURE_COOKIE"]:
1004
  raise ValueError('该模型无可用令牌')
1005
+
1006
+ logger.info(
1007
+ f"当前令牌: {json.dumps(CONFIG['API']['SIGNATURE_COOKIE'], indent=2)}",
1008
+ "Server")
1009
+ logger.info(
1010
+ f"当前可用模型的全部可用数量: {json.dumps(token_manager.get_remaining_token_request_capacity(), indent=2)}",
1011
+ "Server")
1012
+
1013
  try:
1014
  proxy_options = Utils.get_proxy_options()
1015
  response = curl_requests.post(
1016
  f"{CONFIG['API']['BASE_URL']}/rest/app-chat/conversations/new",
1017
  headers={
1018
+ **DEFAULT_HEADERS, "Cookie":
1019
+ CONFIG["API"]["SIGNATURE_COOKIE"]
 
 
 
1020
  },
1021
  data=json.dumps(request_payload),
1022
+ impersonate="chrome133a",
1023
  stream=True,
1024
+ **proxy_options)
1025
  if response.status_code == 200:
1026
+ response_status_code = 200
1027
  logger.info("请求成功", "Server")
1028
+ logger.info(
1029
+ f"当前{model}剩余可用令牌数: {token_manager.get_token_count_for_model(model)}",
1030
+ "Server")
1031
+
1032
  try:
1033
  if stream:
1034
+ return Response(stream_with_context(
1035
+ handle_stream_response(response, model)),
1036
+ content_type='text/event-stream')
 
1037
  else:
1038
+ content = handle_non_stream_response(
1039
+ response, model)
1040
+ return jsonify(
1041
+ MessageProcessor.create_chat_response(
1042
+ content, model))
1043
+
1044
  except Exception as error:
1045
  logger.error(str(error), "Server")
1046
  if CONFIG["API"]["IS_CUSTOM_SSO"]:
1047
  raise ValueError(f"自定义SSO令牌当前模型{model}的请求次数已失效")
1048
+
1049
+ token_manager.remove_token_from_model(
1050
+ model, CONFIG["API"]["SIGNATURE_COOKIE"])
1051
  if token_manager.get_token_count_for_model(model) == 0:
1052
  raise ValueError(f"{model} 次数已达上限,请切换其他模型或者重新对话")
1053
+ elif response.status_code == 403:
1054
+ response_status_code = 403
1055
+ token_manager.reduce_token_request_count(model,1)#重置去除当前因为错误未成功请求的次数,确保不会因为错误未成功请求的次数导致次数上限
1056
+ if token_manager.get_token_count_for_model(model) == 0:
1057
+ raise ValueError(f"{model} 次数已达上限,请切换其他模型或者重新对话")
1058
+ raise ValueError(f"IP暂时被封黑无法破盾,请稍后重试或者更换ip")
1059
  elif response.status_code == 429:
1060
+ response_status_code = 429
1061
+ token_manager.reduce_token_request_count(model,1)
1062
  if CONFIG["API"]["IS_CUSTOM_SSO"]:
1063
  raise ValueError(f"自定义SSO令牌当前模型{model}的请求次数已失效")
1064
+
1065
+ token_manager.remove_token_from_model(
1066
+ model, CONFIG["API"]["SIGNATURE_COOKIE"])
1067
  if token_manager.get_token_count_for_model(model) == 0:
1068
  raise ValueError(f"{model} 次数已达上限,请切换其他模型或者重新对话")
1069
+
1070
  else:
1071
  if CONFIG["API"]["IS_CUSTOM_SSO"]:
1072
  raise ValueError(f"自定义SSO令牌当前模型{model}的请求次数已失效")
1073
+
1074
+ logger.error(f"令牌异常错误状态!status: {response.status_code}",
1075
+ "Server")
1076
+ token_manager.remove_token_from_model(
1077
+ model, CONFIG["API"]["SIGNATURE_COOKIE"])
1078
+ logger.info(
1079
+ f"当前{model}剩余可用令牌数: {token_manager.get_token_count_for_model(model)}",
1080
+ "Server")
1081
+
1082
  except Exception as e:
1083
  logger.error(f"请求处理异常: {str(e)}", "Server")
1084
  if CONFIG["API"]["IS_CUSTOM_SSO"]:
1085
  raise
1086
  continue
1087
+ if response_status_code == 403:
1088
+ raise ValueError('IP暂时被封黑无法破盾,请稍后重试或者更换ip')
1089
+ elif response_status_code == 500:
1090
+ raise ValueError('当前模型所有令牌暂无可用,请稍后重试')
1091
+
1092
  except Exception as error:
1093
  logger.error(str(error), "ChatAPI")
1094
+ return jsonify(
1095
+ {"error": {
1096
  "message": str(error),
1097
  "type": "server_error"
1098
+ }}), response_status_code
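# Shape of the JSON body returned above when every retry fails (illustrative values);
# the HTTP status mirrors the last upstream failure tracked in response_status_code.
# {
#     "error": {
#         "message": "当前模型所有令牌暂无可用,请稍后重试",
#         "type": "server_error"
#     }
# }          -> returned with status 500 (or 403/429 for the corresponding failures)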
 
1099
 
1100
  @app.route('/', defaults={'path': ''})
1101
  @app.route('/<path:path>')
 
1105
  if __name__ == '__main__':
1106
  token_manager = AuthTokenManager()
1107
  initialization()
1108
+
1109
  app.run(
1110
  host='0.0.0.0',
1111
  port=CONFIG["SERVER"]["PORT"],