Niansuh committed on
Commit
7b0af3f
·
verified ·
1 Parent(s): 9e790d4

Update api/utils.py

Browse files
Files changed (1) hide show
  1. api/utils.py +85 -184
api/utils.py CHANGED
@@ -1,36 +1,25 @@
1
- # utils.py
2
-
3
  from datetime import datetime
 
4
  import json
5
  from typing import Any, Dict, Optional
 
6
 
7
  import httpx
8
- from fastapi import Depends, HTTPException
9
- from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
10
-
11
  from api import validate
12
- from api.config import (
13
- MODEL_MAPPING,
14
- headers,
15
- AGENT_MODE,
16
- TRENDING_AGENT_MODE,
17
- MODEL_PREFIXES,
18
- BASE_URL
19
- )
20
  from api.models import ChatRequest
21
 
22
  from api.logger import setup_logger
23
 
24
  logger = setup_logger(__name__)
25
 
26
- # Initialize the HTTPBearer security scheme
27
- bearer_scheme = HTTPBearer()
28
-
29
-
30
  def create_chat_completion_data(
31
  content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
32
  ) -> Dict[str, Any]:
33
- """Create a dictionary representing a chat completion chunk."""
34
  return {
35
  "id": f"chatcmpl-{uuid.uuid4()}",
36
  "object": "chat.completion.chunk",
@@ -46,122 +35,58 @@ def create_chat_completion_data(
46
  "usage": None,
47
  }
48
 
49
-
50
def verify_app_secret(credentials: HTTPAuthorizationCredentials = Depends(bearer_scheme)):
    """Check the bearer token against APP_SECRET and return it when valid.

    Raises:
        HTTPException: 403 when the presented token does not match APP_SECRET.
    """
    token = credentials.credentials
    if token == APP_SECRET:
        logger.debug("APP_SECRET verified successfully.")
        return token
    logger.warning("Invalid APP_SECRET provided.")
    raise HTTPException(status_code=403, detail="Invalid APP_SECRET")
57
 
58
-
59
def message_to_dict(message):
    """Translate a message object into the payload dict expected by the API.

    String content is forwarded directly; a [text, image] two-element list is
    split into ``content`` plus snapshot ``data``; malformed lists fall back to
    a JSON dump or an error placeholder; any other type is stringified.
    """
    result = {"role": message.role}
    content = message.content

    if isinstance(content, str):
        result["content"] = content
    elif isinstance(content, list):
        try:
            if len(content) < 2:
                # List without the expected [text, image] structure.
                result["content"] = json.dumps(content)
            else:
                # First element carries the text, second the image URL.
                result["content"] = content[0].get("text", "")
                result["data"] = {
                    "imageBase64": content[1].get("image_url", {}).get("url", ""),
                    "fileText": "",
                    "title": "snapshot",
                }
        except (AttributeError, KeyError, TypeError) as e:
            logger.error(f"Error parsing message content: {e}")
            result["content"] = "Invalid message format."
    else:
        # Fallback for unexpected content types.
        result["content"] = str(content)

    return result
92
-
93
-
94
def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
    """Return *content* with a leading model prefix removed, if one is set and present."""
    if not model_prefix or not content.startswith(model_prefix):
        return content
    logger.debug(f"Stripping prefix '{model_prefix}' from content.")
    return content[len(model_prefix):].strip()
100
-
101
-
102
def get_agent_mode_config(model: str) -> Dict[str, Any]:
    """Look up the agent-mode settings for *model*.

    Logs a warning and returns an empty dict when no (truthy) configuration exists.
    """
    config = AGENT_MODE.get(model)
    if config:
        return config
    logger.warning(f"No AGENT_MODE configuration found for model: {model}")
    return {}
111
-
112
-
113
def get_trending_agent_mode_config(model: str) -> Dict[str, Any]:
    """Look up the trending-agent-mode settings for *model*.

    Logs a warning and returns an empty dict when no (truthy) configuration exists.
    """
    config = TRENDING_AGENT_MODE.get(model)
    if config:
        return config
    logger.warning(f"No TRENDING_AGENT_MODE configuration found for model: {model}")
    return {}
122
-
123
 
124
  async def process_streaming_response(request: ChatRequest):
125
- """Process a streaming response for a chat completion request."""
126
- # No chat_id generation
127
- # referer_url is not used without MODEL_REFERERS
128
 
129
- logger.info(f"Processing streaming request for model: '{request.model}'")
130
-
131
- agent_mode = get_agent_mode_config(request.model)
132
- trending_agent_mode = get_trending_agent_mode_config(request.model)
133
- model_prefix = MODEL_PREFIXES.get(request.model, "")
134
-
135
- # Use headers from config.py
136
- headers_api_chat = headers
137
-
138
- # Introduce delay for specific models if needed
139
- if request.model == 'o1-preview':
140
- delay_seconds = random.randint(1, 60)
141
- logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview'")
142
- await asyncio.sleep(delay_seconds)
143
 
144
  json_data = {
145
- "agentMode": agent_mode,
146
- "clickedAnswer2": False,
147
- "clickedAnswer3": False,
148
- "clickedForceWebSearch": False,
149
- "codeModelMode": True,
150
- "githubToken": None,
151
- "isChromeExt": False,
152
- "isMicMode": False,
153
- "maxTokens": request.max_tokens,
154
  "messages": [message_to_dict(msg) for msg in request.messages],
155
- "mobileClient": False,
156
- "playgroundTemperature": request.temperature,
157
- "playgroundTopP": request.top_p,
158
  "previewToken": None,
159
- "trendingAgentMode": trending_agent_mode,
160
  "userId": None,
161
- "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
 
 
 
162
  "userSystemPrompt": None,
163
- "validated": validate.getHid(),
 
 
 
 
 
 
 
164
  "visitFromDelta": False,
 
 
 
165
  }
166
 
167
  async with httpx.AsyncClient() as client:
@@ -169,103 +94,79 @@ async def process_streaming_response(request: ChatRequest):
169
  async with client.stream(
170
  "POST",
171
  f"{BASE_URL}/api/chat",
172
- headers=headers_api_chat,
173
  json=json_data,
174
- timeout=100.0,
175
  ) as response:
176
  response.raise_for_status()
177
- timestamp = int(datetime.now().timestamp())
178
  async for line in response.aiter_lines():
 
179
  if line:
180
- content = line.strip() + "\n"
181
  if "https://www.blackbox.ai" in content:
182
  validate.getHid(True)
183
  content = "hid已刷新,重新对话即可\n"
184
  yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
185
  break
186
  if content.startswith("$@$v=undefined-rv1$@$"):
187
- content = content[21:]
188
- cleaned_content = strip_model_prefix(content, model_prefix)
189
- yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"
190
 
191
- # Indicate the end of the stream
192
  yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
193
  yield "data: [DONE]\n\n"
194
  except httpx.HTTPStatusError as e:
195
- logger.error(f"HTTP error occurred: {e.response.status_code} - {e.response.text}")
196
- raise HTTPException(status_code=e.response.status_code, detail="Error from upstream service.")
197
  except httpx.RequestError as e:
198
- logger.error(f"Request error occurred: {e}")
199
- raise HTTPException(status_code=500, detail="Internal server error.")
200
- except Exception as e:
201
- logger.error(f"Unexpected error: {e}")
202
- raise HTTPException(status_code=500, detail="Internal server error.")
203
-
204
 
205
  async def process_non_streaming_response(request: ChatRequest):
206
- """Process a non-streaming response for a chat completion request."""
207
- logger.info(f"Processing non-streaming request for model: '{request.model}'")
208
-
209
- agent_mode = get_agent_mode_config(request.model)
210
- trending_agent_mode = get_trending_agent_mode_config(request.model)
211
- model_prefix = MODEL_PREFIXES.get(request.model, "")
212
 
213
- # Use headers from config.py
214
- headers_api_chat = headers
215
-
216
- # Introduce delay for specific models if needed
217
- if request.model == 'o1-preview':
218
- delay_seconds = random.randint(20, 60)
219
- logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview'")
220
- await asyncio.sleep(delay_seconds)
221
 
222
  json_data = {
223
- "agentMode": agent_mode,
224
- "clickedAnswer2": False,
225
- "clickedAnswer3": False,
226
- "clickedForceWebSearch": False,
227
- "codeModelMode": True,
228
- "githubToken": None,
229
- "isChromeExt": False,
230
- "isMicMode": False,
231
- "maxTokens": request.max_tokens,
232
  "messages": [message_to_dict(msg) for msg in request.messages],
233
- "mobileClient": False,
234
- "playgroundTemperature": request.temperature,
235
- "playgroundTopP": request.top_p,
236
  "previewToken": None,
237
- "trendingAgentMode": trending_agent_mode,
238
  "userId": None,
239
- "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
 
 
 
240
  "userSystemPrompt": None,
241
- "validated": validate.getHid(),
 
 
 
 
 
 
 
242
  "visitFromDelta": False,
 
 
 
243
  }
244
 
245
  full_response = ""
246
  async with httpx.AsyncClient() as client:
247
- try:
248
- async with client.stream(
249
- method="POST", url=f"{BASE_URL}/api/chat", headers=headers_api_chat, json=json_data
250
- ) as response:
251
- response.raise_for_status()
252
- async for chunk in response.aiter_text():
253
- full_response += chunk
254
- except httpx.HTTPStatusError as e:
255
- logger.error(f"HTTP error occurred: {e.response.status_code} - {e.response.text}")
256
- raise HTTPException(status_code=e.response.status_code, detail="Error from upstream service.")
257
- except httpx.RequestError as e:
258
- logger.error(f"Request error occurred: {e}")
259
- raise HTTPException(status_code=500, detail="Internal server error.")
260
- except Exception as e:
261
- logger.error(f"Unexpected error: {e}")
262
- raise HTTPException(status_code=500, detail="Internal server error.")
263
-
264
  if full_response.startswith("$@$v=undefined-rv1$@$"):
265
  full_response = full_response[21:]
266
-
267
- cleaned_full_response = strip_model_prefix(full_response, model_prefix)
268
-
269
  return {
270
  "id": f"chatcmpl-{uuid.uuid4()}",
271
  "object": "chat.completion",
@@ -274,9 +175,9 @@ async def process_non_streaming_response(request: ChatRequest):
274
  "choices": [
275
  {
276
  "index": 0,
277
- "message": {"role": "assistant", "content": cleaned_full_response},
278
  "finish_reason": "stop",
279
  }
280
  ],
281
  "usage": None,
282
- }
 
 
 
1
# Standard library
from datetime import datetime
from http.client import HTTPException
import json
import uuid
from typing import Any, Dict, Optional

# Third-party
import httpx
from fastapi import Depends, security
from fastapi import HTTPException  # deliberately shadows http.client's: FastAPI error responses need fastapi.HTTPException
from fastapi.security import HTTPAuthorizationCredentials
from fastapi.security import HTTPBearer

# Local
from api import validate
from api.config import MODEL_MAPPING, headers, AGENT_MODE, TRENDING_AGENT_MODE
from api.config import APP_SECRET, BASE_URL
from api.logger import setup_logger
from api.models import ChatRequest
17
 
18
  logger = setup_logger(__name__)
19
 
 
 
 
 
20
  def create_chat_completion_data(
21
  content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
22
  ) -> Dict[str, Any]:
 
23
  return {
24
  "id": f"chatcmpl-{uuid.uuid4()}",
25
  "object": "chat.completion.chunk",
 
35
  "usage": None,
36
  }
37
 
38
def verify_app_secret(credentials: HTTPAuthorizationCredentials = Depends(HTTPBearer())):
    """Verify that the request's bearer token matches APP_SECRET.

    Fix: `Depends(security)` injected the `fastapi.security` *module*, which
    FastAPI cannot resolve as a dependency; an HTTPBearer security-scheme
    instance is required so the Authorization header is actually parsed.

    Returns:
        The validated token string.

    Raises:
        HTTPException: 403 when the presented token does not match APP_SECRET.
    """
    if credentials.credentials != APP_SECRET:
        logger.warning("Invalid APP_SECRET provided.")
        raise HTTPException(status_code=403, detail="Invalid APP_SECRET")
    return credentials.credentials
42
 
 
43
def message_to_dict(message):
    """Convert a chat message object into the dict payload the upstream API expects.

    Plain-string content maps directly to {"role", "content"}.  A two-element
    list is treated as [text part, image part] and the image URL is forwarded
    as snapshot data.  Anything else is passed through unchanged.

    Fix: the previous direct indexing (`content[0]["text"]`,
    `content[1]["image_url"]["url"]`) raised KeyError/TypeError on any
    unexpected list payload; use `.get` with fallbacks instead of crashing.
    """
    content = message.content
    if isinstance(content, list) and len(content) == 2:
        try:
            text = content[0].get("text", "")
            image_url = content[1].get("image_url", {}).get("url", "")
        except (AttributeError, TypeError):
            # Elements were not dicts: fall back to passing content through.
            return {"role": message.role, "content": content}
        return {
            "role": message.role,
            "content": text,
            "data": {
                "imageBase64": image_url,
                "fileText": "",
                "title": "snapshot",
            },
        }
    # Strings and any other content types are forwarded as-is.
    return {"role": message.role, "content": content}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
58
 
59
  async def process_streaming_response(request: ChatRequest):
60
+ agent_mode = AGENT_MODE.get(request.model, {})
61
+ trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
 
62
 
63
+ # Log reduced information
64
+ logger.info(
65
+ f"Streaming request for model: '{request.model}', "
66
+ f"agent mode: {agent_mode}, trending agent mode: {trending_agent_mode}"
67
+ )
 
 
 
 
 
 
 
 
 
68
 
69
  json_data = {
 
 
 
 
 
 
 
 
 
70
  "messages": [message_to_dict(msg) for msg in request.messages],
 
 
 
71
  "previewToken": None,
 
72
  "userId": None,
73
+ "codeModelMode": True,
74
+ "agentMode": agent_mode,
75
+ "trendingAgentMode": trending_agent_mode,
76
+ "isMicMode": False,
77
  "userSystemPrompt": None,
78
+ "maxTokens": request.max_tokens,
79
+ "playgroundTopP": request.top_p,
80
+ "playgroundTemperature": request.temperature,
81
+ "isChromeExt": False,
82
+ "githubToken": None,
83
+ "clickedAnswer2": False,
84
+ "clickedAnswer3": False,
85
+ "clickedForceWebSearch": False,
86
  "visitFromDelta": False,
87
+ "mobileClient": False,
88
+ "userSelectedModel": MODEL_MAPPING.get(request.model),
89
+ "validated": validate.getHid()
90
  }
91
 
92
  async with httpx.AsyncClient() as client:
 
94
  async with client.stream(
95
  "POST",
96
  f"{BASE_URL}/api/chat",
97
+ headers=headers,
98
  json=json_data,
99
+ timeout=100,
100
  ) as response:
101
  response.raise_for_status()
 
102
  async for line in response.aiter_lines():
103
+ timestamp = int(datetime.now().timestamp())
104
  if line:
105
+ content = line + "\n"
106
  if "https://www.blackbox.ai" in content:
107
  validate.getHid(True)
108
  content = "hid已刷新,重新对话即可\n"
109
  yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
110
  break
111
  if content.startswith("$@$v=undefined-rv1$@$"):
112
+ yield f"data: {json.dumps(create_chat_completion_data(content[21:], request.model, timestamp))}\n\n"
113
+ else:
114
+ yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
115
 
 
116
  yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
117
  yield "data: [DONE]\n\n"
118
  except httpx.HTTPStatusError as e:
119
+ logger.error(f"HTTP error occurred: {e}")
120
+ raise HTTPException(status_code=e.response.status_code, detail=str(e))
121
  except httpx.RequestError as e:
122
+ logger.error(f"Error occurred during request: {e}")
123
+ raise HTTPException(status_code=500, detail=str(e))
 
 
 
 
124
 
125
  async def process_non_streaming_response(request: ChatRequest):
126
+ agent_mode = AGENT_MODE.get(request.model, {})
127
+ trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
 
 
 
 
128
 
129
+ # Log reduced information
130
+ logger.info(
131
+ f"Non-streaming request for model: '{request.model}', "
132
+ f"agent mode: {agent_mode}, trending agent mode: {trending_agent_mode}"
133
+ )
 
 
 
134
 
135
  json_data = {
 
 
 
 
 
 
 
 
 
136
  "messages": [message_to_dict(msg) for msg in request.messages],
 
 
 
137
  "previewToken": None,
 
138
  "userId": None,
139
+ "codeModelMode": True,
140
+ "agentMode": agent_mode,
141
+ "trendingAgentMode": trending_agent_mode,
142
+ "isMicMode": False,
143
  "userSystemPrompt": None,
144
+ "maxTokens": request.max_tokens,
145
+ "playgroundTopP": request.top_p,
146
+ "playgroundTemperature": request.temperature,
147
+ "isChromeExt": False,
148
+ "githubToken": None,
149
+ "clickedAnswer2": False,
150
+ "clickedAnswer3": False,
151
+ "clickedForceWebSearch": False,
152
  "visitFromDelta": False,
153
+ "mobileClient": False,
154
+ "userSelectedModel": MODEL_MAPPING.get(request.model),
155
+ "validated": validate.getHid()
156
  }
157
 
158
  full_response = ""
159
  async with httpx.AsyncClient() as client:
160
+ async with client.stream(
161
+ method="POST", url=f"{BASE_URL}/api/chat", headers=headers, json=json_data
162
+ ) as response:
163
+ async for chunk in response.aiter_text():
164
+ full_response += chunk
165
+ if "https://www.blackbox.ai" in full_response:
166
+ validate.getHid(True)
167
+ full_response = "hid已刷新,重新对话即可"
 
 
 
 
 
 
 
 
 
168
  if full_response.startswith("$@$v=undefined-rv1$@$"):
169
  full_response = full_response[21:]
 
 
 
170
  return {
171
  "id": f"chatcmpl-{uuid.uuid4()}",
172
  "object": "chat.completion",
 
175
  "choices": [
176
  {
177
  "index": 0,
178
+ "message": {"role": "assistant", "content": full_response},
179
  "finish_reason": "stop",
180
  }
181
  ],
182
  "usage": None,
183
+ }