Niansuh committed on
Commit
69e4e8b
·
verified ·
1 Parent(s): 849a7bc

Update api/utils.py

Browse files
Files changed (1) hide show
  1. api/utils.py +138 -78
api/utils.py CHANGED
@@ -1,24 +1,35 @@
1
  from datetime import datetime
2
- from http.client import HTTPException
3
  import json
4
- from typing import Any, Dict, Optional
5
  import uuid
 
 
 
 
6
 
7
  import httpx
8
- from api import validate
9
- from api.config import MODEL_MAPPING, headers
10
- from fastapi import Depends, security
11
- from fastapi.security import HTTPAuthorizationCredentials
12
-
13
- from api.config import APP_SECRET, BASE_URL
14
- from api.config import MODEL_MAPPING, headers, agentMode, trendingAgentMode
 
 
 
 
 
15
  from api.models import ChatRequest
16
-
17
  from api.logger import setup_logger
18
 
19
  logger = setup_logger(__name__)
20
 
 
 
 
 
21
 
 
22
  def create_chat_completion_data(
23
  content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
24
  ) -> Dict[str, Any]:
@@ -37,52 +48,79 @@ def create_chat_completion_data(
37
  "usage": None,
38
  }
39
 
40
-
41
- def verify_app_secret(credentials: HTTPAuthorizationCredentials = Depends(security)):
42
- if credentials.credentials != APP_SECRET:
43
- raise HTTPException(status_code=403, detail="Invalid APP_SECRET")
44
- return credentials.credentials
45
-
46
-
47
- def message_to_dict(message):
48
- if isinstance(message.content, str):
49
- return {"role": message.role, "content": message.content}
50
- elif isinstance(message.content, list) and len(message.content) == 2:
51
  return {
52
  "role": message.role,
53
- "content": message.content[0]["text"],
54
  "data": {
55
  "imageBase64": message.content[1]["image_url"]["url"],
56
  "fileText": "",
57
- "title": "snapshoot",
58
  },
59
  }
60
- else:
61
- return {"role": message.role, "content": message.content}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
62
 
 
 
 
 
 
 
 
 
 
 
 
63
 
64
- async def process_streaming_response(request: ChatRequest):
65
  json_data = {
66
- "messages": [message_to_dict(msg) for msg in request.messages],
67
- "previewToken": None,
68
- "userId": None,
 
69
  "codeModelMode": True,
70
- "agentMode": agentMode,
71
- "trendingAgentMode": trendingAgentMode,
 
72
  "isMicMode": False,
73
- "userSystemPrompt": None,
74
  "maxTokens": request.max_tokens,
75
- "playgroundTopP": request.top_p,
 
76
  "playgroundTemperature": request.temperature,
77
- "isChromeExt": False,
78
- "githubToken": None,
79
- "clickedAnswer2": False,
80
- "clickedAnswer3": False,
81
- "clickedForceWebSearch": False,
 
 
82
  "visitFromDelta": False,
83
- "mobileClient": False,
84
- "userSelectedModel": MODEL_MAPPING.get(request.model),
85
- "validated": validate.getHid()
86
  }
87
 
88
  async with httpx.AsyncClient() as client:
@@ -90,7 +128,7 @@ async def process_streaming_response(request: ChatRequest):
90
  async with client.stream(
91
  "POST",
92
  f"{BASE_URL}/api/chat",
93
- headers=headers,
94
  json=json_data,
95
  timeout=100,
96
  ) as response:
@@ -98,62 +136,84 @@ async def process_streaming_response(request: ChatRequest):
98
  async for line in response.aiter_lines():
99
  timestamp = int(datetime.now().timestamp())
100
  if line:
101
- content = line + "\n"
102
- if "https://www.blackbox.ai" in content:
103
- validate.getHid(True)
104
- content = "hid已刷新,重新对话即可\n"
105
- yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
106
- break
107
  if content.startswith("$@$v=undefined-rv1$@$"):
108
- yield f"data: {json.dumps(create_chat_completion_data(content[21:], request.model, timestamp))}\n\n"
109
- else:
110
- yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
111
 
112
  yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
113
  yield "data: [DONE]\n\n"
114
  except httpx.HTTPStatusError as e:
115
- logger.error(f"HTTP error occurred: {e}")
116
  raise HTTPException(status_code=e.response.status_code, detail=str(e))
117
  except httpx.RequestError as e:
118
- logger.error(f"Error occurred during request: {e}")
119
  raise HTTPException(status_code=500, detail=str(e))
120
 
121
-
122
  async def process_non_streaming_response(request: ChatRequest):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
123
  json_data = {
124
- "messages": [message_to_dict(msg) for msg in request.messages],
125
- "previewToken": None,
126
- "userId": None,
 
127
  "codeModelMode": True,
128
- "agentMode": agentMode,
129
- "trendingAgentMode": trendingAgentMode,
 
130
  "isMicMode": False,
131
- "userSystemPrompt": None,
132
  "maxTokens": request.max_tokens,
133
- "playgroundTopP": request.top_p,
 
134
  "playgroundTemperature": request.temperature,
135
- "isChromeExt": False,
136
- "githubToken": None,
137
- "clickedAnswer2": False,
138
- "clickedAnswer3": False,
139
- "clickedForceWebSearch": False,
 
 
140
  "visitFromDelta": False,
141
- "mobileClient": False,
142
- "userSelectedModel": MODEL_MAPPING.get(request.model),
143
- "validated": validate.getHid()
144
  }
 
145
  full_response = ""
146
  async with httpx.AsyncClient() as client:
147
- async with client.stream(
148
- method="POST", url=f"{BASE_URL}/api/chat", headers=headers, json=json_data
149
- ) as response:
150
- async for chunk in response.aiter_text():
151
- full_response += chunk
152
- if "https://www.blackbox.ai" in full_response:
153
- validate.getHid(True)
154
- full_response = "hid已刷新,重新对话即可"
 
 
 
 
 
155
  if full_response.startswith("$@$v=undefined-rv1$@$"):
156
  full_response = full_response[21:]
 
 
 
157
  return {
158
  "id": f"chatcmpl-{uuid.uuid4()}",
159
  "object": "chat.completion",
@@ -162,7 +222,7 @@ async def process_non_streaming_response(request: ChatRequest):
162
  "choices": [
163
  {
164
  "index": 0,
165
- "message": {"role": "assistant", "content": full_response},
166
  "finish_reason": "stop",
167
  }
168
  ],
 
1
  from datetime import datetime
 
2
  import json
 
3
  import uuid
4
+ import asyncio
5
+ import random
6
+ import string
7
+ from typing import Any, Dict, Optional
8
 
9
  import httpx
10
+ from fastapi import HTTPException
11
+ from api import validate # Import validate to use getHid
12
+ from api.config import (
13
+ MODEL_MAPPING,
14
+ get_headers_api_chat,
15
+ get_headers_chat,
16
+ BASE_URL,
17
+ AGENT_MODE,
18
+ TRENDING_AGENT_MODE,
19
+ MODEL_PREFIXES,
20
+ MODEL_REFERERS
21
+ )
22
  from api.models import ChatRequest
 
23
  from api.logger import setup_logger
24
 
25
  logger = setup_logger(__name__)
26
 
27
+ # Helper function to create a random alphanumeric chat ID
28
def generate_chat_id(length: int = 7) -> str:
    """Return a random chat ID of *length* ASCII letters and digits."""
    alphabet = string.ascii_letters + string.digits
    return "".join(random.choices(alphabet, k=length))
31
 
32
+ # Helper function to create chat completion data
33
  def create_chat_completion_data(
34
  content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
35
  ) -> Dict[str, Any]:
 
48
  "usage": None,
49
  }
50
 
51
+ # Function to convert message to dictionary format, ensuring base64 data and optional model prefix
52
def message_to_dict(message, model_prefix: Optional[str] = None):
    """Convert a chat message into the payload dict sent to the upstream API.

    Args:
        message: Object with ``role`` and ``content`` attributes. ``content``
            is either a plain string or a two-element list of parts,
            presumably ``[{"text": ...}, {"image_url": {"url": ...}}]`` —
            TODO confirm against the request model.
        model_prefix: Optional tag prepended to the text content.

    Returns:
        A dict with ``role`` and ``content`` keys; when an image part is
        present, an additional ``data`` key carrying the base64 image.
    """
    if isinstance(message.content, str):
        content = message.content
    elif message.content:
        # Multi-part content: the first part is expected to carry the text.
        # .get() guards against a text-less first part (was a KeyError).
        content = message.content[0].get("text", "")
    else:
        # Fix: an empty content list previously raised IndexError.
        content = ""
    if model_prefix:
        content = f"{model_prefix} {content}"
    if (
        isinstance(message.content, list)
        and len(message.content) == 2
        and "image_url" in message.content[1]
    ):
        # Ensure base64 image data is forwarded alongside the text.
        return {
            "role": message.role,
            "content": content,
            "data": {
                "imageBase64": message.content[1]["image_url"]["url"],
                "fileText": "",
                "title": "snapshot",
            },
        }
    return {"role": message.role, "content": content}
68
+
69
+ # Function to strip model prefix from content if present
70
def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
    """Remove the model prefix from the response content if present."""
    has_prefix = bool(model_prefix) and content.startswith(model_prefix)
    if not has_prefix:
        return content
    logger.debug(f"Stripping prefix '{model_prefix}' from content.")
    return content[len(model_prefix):].strip()
76
+
77
+ # Function to get the correct referer URL for logging
78
def get_referer_url(chat_id: str, model: str) -> str:
    """Generate the referer URL based on specific models listed in MODEL_REFERERS."""
    if model not in MODEL_REFERERS:
        return BASE_URL
    return f"{BASE_URL}/chat/{chat_id}?model={model}"
83
+
84
+ # Process streaming response with headers from config.py
85
+ async def process_streaming_response(request: ChatRequest):
86
+ chat_id = generate_chat_id()
87
+ referer_url = get_referer_url(chat_id, request.model)
88
+ logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")
89
 
90
+ agent_mode = AGENT_MODE.get(request.model, {})
91
+ trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
92
+ model_prefix = MODEL_PREFIXES.get(request.model, "")
93
+
94
+ headers_api_chat = get_headers_api_chat(referer_url)
95
+ validated_token = validate.getHid() # Get the validated token from validate.py
96
+
97
+ if request.model == 'o1-preview':
98
+ delay_seconds = random.randint(1, 60)
99
+ logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
100
+ await asyncio.sleep(delay_seconds)
101
 
 
102
  json_data = {
103
+ "agentMode": agent_mode,
104
+ "clickedAnswer2": False,
105
+ "clickedAnswer3": False,
106
+ "clickedForceWebSearch": False,
107
  "codeModelMode": True,
108
+ "githubToken": None,
109
+ "id": chat_id,
110
+ "isChromeExt": False,
111
  "isMicMode": False,
 
112
  "maxTokens": request.max_tokens,
113
+ "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
114
+ "mobileClient": False,
115
  "playgroundTemperature": request.temperature,
116
+ "playgroundTopP": request.top_p,
117
+ "previewToken": None,
118
+ "trendingAgentMode": trending_agent_mode,
119
+ "userId": None,
120
+ "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
121
+ "userSystemPrompt": None,
122
+ "validated": validated_token,
123
  "visitFromDelta": False,
 
 
 
124
  }
125
 
126
  async with httpx.AsyncClient() as client:
 
128
  async with client.stream(
129
  "POST",
130
  f"{BASE_URL}/api/chat",
131
+ headers=headers_api_chat,
132
  json=json_data,
133
  timeout=100,
134
  ) as response:
 
136
  async for line in response.aiter_lines():
137
  timestamp = int(datetime.now().timestamp())
138
  if line:
139
+ content = line
 
 
 
 
 
140
  if content.startswith("$@$v=undefined-rv1$@$"):
141
+ content = content[21:]
142
+ cleaned_content = strip_model_prefix(content, model_prefix)
143
+ yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"
144
 
145
  yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
146
  yield "data: [DONE]\n\n"
147
  except httpx.HTTPStatusError as e:
148
+ logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}")
149
  raise HTTPException(status_code=e.response.status_code, detail=str(e))
150
  except httpx.RequestError as e:
151
+ logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
152
  raise HTTPException(status_code=500, detail=str(e))
153
 
154
+ # Process non-streaming response with headers from config.py
155
  async def process_non_streaming_response(request: ChatRequest):
156
+ chat_id = generate_chat_id()
157
+ referer_url = get_referer_url(chat_id, request.model)
158
+ logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")
159
+
160
+ agent_mode = AGENT_MODE.get(request.model, {})
161
+ trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
162
+ model_prefix = MODEL_PREFIXES.get(request.model, "")
163
+
164
+ headers_api_chat = get_headers_api_chat(referer_url)
165
+ headers_chat = get_headers_chat(referer_url, next_action=str(uuid.uuid4()), next_router_state_tree=json.dumps([""]))
166
+ validated_token = validate.getHid() # Get the validated token from validate.py
167
+
168
+ if request.model == 'o1-preview':
169
+ delay_seconds = random.randint(20, 60)
170
+ logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
171
+ await asyncio.sleep(delay_seconds)
172
+
173
  json_data = {
174
+ "agentMode": agent_mode,
175
+ "clickedAnswer2": False,
176
+ "clickedAnswer3": False,
177
+ "clickedForceWebSearch": False,
178
  "codeModelMode": True,
179
+ "githubToken": None,
180
+ "id": chat_id,
181
+ "isChromeExt": False,
182
  "isMicMode": False,
 
183
  "maxTokens": request.max_tokens,
184
+ "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
185
+ "mobileClient": False,
186
  "playgroundTemperature": request.temperature,
187
+ "playgroundTopP": request.top_p,
188
+ "previewToken": None,
189
+ "trendingAgentMode": trending_agent_mode,
190
+ "userId": None,
191
+ "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
192
+ "userSystemPrompt": None,
193
+ "validated": validated_token,
194
  "visitFromDelta": False,
 
 
 
195
  }
196
+
197
  full_response = ""
198
  async with httpx.AsyncClient() as client:
199
+ try:
200
+ async with client.stream(
201
+ method="POST", url=f"{BASE_URL}/api/chat", headers=headers_api_chat, json=json_data
202
+ ) as response:
203
+ response.raise_for_status()
204
+ async for chunk in response.aiter_text():
205
+ full_response += chunk
206
+ except httpx.HTTPStatusError as e:
207
+ logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}")
208
+ raise HTTPException(status_code=e.response.status_code, detail=str(e))
209
+ except httpx.RequestError as e:
210
+ logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
211
+ raise HTTPException(status_code=500, detail=str(e))
212
  if full_response.startswith("$@$v=undefined-rv1$@$"):
213
  full_response = full_response[21:]
214
+
215
+ cleaned_full_response = strip_model_prefix(full_response, model_prefix)
216
+
217
  return {
218
  "id": f"chatcmpl-{uuid.uuid4()}",
219
  "object": "chat.completion",
 
222
  "choices": [
223
  {
224
  "index": 0,
225
+ "message": {"role": "assistant", "content": cleaned_full_response},
226
  "finish_reason": "stop",
227
  }
228
  ],