Niansuh committed on
Commit
5514c6a
·
verified ·
1 Parent(s): cb310c2

Update api/utils.py

Browse files
Files changed (1) hide show
  1. api/utils.py +24 -106
api/utils.py CHANGED
@@ -3,7 +3,6 @@ import json
3
  import uuid
4
  import asyncio
5
  import random
6
- import string
7
  import os
8
  from typing import Any, Dict, Optional
9
 
@@ -12,7 +11,6 @@ from fastapi import HTTPException
12
  from api.config import (
13
  MODEL_MAPPING,
14
  get_headers_api_chat,
15
- get_headers_chat,
16
  BASE_URL,
17
  AGENT_MODE,
18
  TRENDING_AGENT_MODE,
@@ -25,16 +23,10 @@ from api.validate import getHid # Import the asynchronous getHid function
25
 
26
  logger = setup_logger(__name__)
27
 
28
- # Define the blocked message
29
  BLOCKED_MESSAGE = "Generated by BLACKBOX.AI, try unlimited chat https://www.blackbox.ai"
30
-
31
- # Fetch the advertisement text from environment variable
32
  ADVERTISEMENT_TEXT = os.getenv("ADVERTISEMENT_TEXT", "")
33
 
34
- # Helper function to create chat completion data
35
- def create_chat_completion_data(
36
- content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
37
- ) -> Dict[str, Any]:
38
  return {
39
  "id": f"chatcmpl-{uuid.uuid4()}",
40
  "object": "chat.completion.chunk",
@@ -50,43 +42,19 @@ def create_chat_completion_data(
50
  "usage": None,
51
  }
52
 
53
- # Function to convert message to dictionary format, ensuring base64 data and optional model prefix
54
def message_to_dict(message, model_prefix: Optional[str] = None):
    """Convert a chat message object into the dict shape the upstream API expects.

    Args:
        message: An object exposing ``role`` and ``content`` attributes.
            ``content`` is either a plain string, or a list whose first
            element is a dict carrying the text under the ``"text"`` key.
            NOTE(review): the list form is assumed from the indexing pattern
            ``message.content[0]["text"]`` — confirm against the ChatRequest
            message schema.
        model_prefix: Optional model-specific prefix prepended to the text.

    Returns:
        A ``{"role": ..., "content": ...}`` dict suitable for the request
        payload's ``messages`` list.
    """
    if isinstance(message.content, str):
        content = message.content
    else:
        # Structured (multimodal-style) content: take the first text part.
        content = message.content[0]["text"]
    if model_prefix:
        content = f"{model_prefix} {content}"
    return {"role": message.role, "content": content}
78
 
79
- # Function to strip model prefix from content if present
80
def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
    """Remove *model_prefix* from the start of *content*, if present.

    Args:
        content: Response text that may begin with an injected model prefix.
        model_prefix: The prefix to strip; falsy values disable stripping.

    Returns:
        ``content`` with the leading prefix removed and surrounding
        whitespace stripped, or the original string when no prefix matches.
    """
    if model_prefix and content.startswith(model_prefix):
        # `logger` is the module-level logger created via setup_logger(__name__).
        logger.debug(f"Stripping prefix '{model_prefix}' from content.")
        return content[len(model_prefix):].strip()
    return content
86
 
87
- # Process streaming response with headers from config.py
88
  async def process_streaming_response(request: ChatRequest):
89
- # Generate a unique ID for this request
90
  request_id = f"chatcmpl-{uuid.uuid4()}"
91
  logger.info(f"Processing request with ID: {request_id} - Model: {request.model}")
92
 
@@ -94,24 +62,17 @@ async def process_streaming_response(request: ChatRequest):
94
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
95
  model_prefix = MODEL_PREFIXES.get(request.model, "")
96
 
97
- # Adjust headers_api_chat since referer_url is removed
98
  headers_api_chat = get_headers_api_chat(BASE_URL)
99
 
100
  if request.model == 'o1-preview':
101
  delay_seconds = random.randint(1, 60)
102
- logger.info(
103
- f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' "
104
- f"(Request ID: {request_id})"
105
- )
106
  await asyncio.sleep(delay_seconds)
107
 
108
- # Fetch the h-value for the 'validated' field
109
  h_value = await getHid()
110
  if not h_value:
111
  logger.error("Failed to retrieve h-value for validation.")
112
- raise HTTPException(
113
- status_code=500, detail="Validation failed due to missing h-value."
114
- )
115
 
116
  json_data = {
117
  "agentMode": agent_mode,
@@ -120,13 +81,11 @@ async def process_streaming_response(request: ChatRequest):
120
  "clickedForceWebSearch": False,
121
  "codeModelMode": True,
122
  "githubToken": None,
123
- "id": None, # Using request_id instead of chat_id
124
  "isChromeExt": False,
125
  "isMicMode": False,
126
  "maxTokens": request.max_tokens,
127
- "messages": [
128
- message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages
129
- ],
130
  "mobileClient": False,
131
  "playgroundTemperature": request.temperature,
132
  "playgroundTopP": request.top_p,
@@ -135,24 +94,18 @@ async def process_streaming_response(request: ChatRequest):
135
  "userId": None,
136
  "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
137
  "userSystemPrompt": None,
138
- "validated": h_value, # Dynamically set the validated field
139
  "visitFromDelta": False,
140
  "webSearchModePrompt": False,
141
- "imageGenerationMode": False, # Added this line
142
  }
143
 
144
- response_content = "" # Variable to hold the full response content
145
- advertisement_added = False # Track if advertisement is added
146
 
147
  async with httpx.AsyncClient() as client:
148
  try:
149
- async with client.stream(
150
- "POST",
151
- f"{BASE_URL}/api/chat",
152
- headers=headers_api_chat,
153
- json=json_data,
154
- timeout=100,
155
- ) as response:
156
  response.raise_for_status()
157
 
158
  timestamp = int(datetime.now().timestamp())
@@ -162,44 +115,37 @@ async def process_streaming_response(request: ChatRequest):
162
  if content.startswith("$@$v=undefined-rv1$@$"):
163
  content = content[21:] # Remove unwanted prefix
164
 
165
- # Remove blocked message if present
166
  if BLOCKED_MESSAGE in content:
167
  logger.info(f"Blocked message detected in response for Request ID {request_id}.")
168
  content = content.replace(BLOCKED_MESSAGE, '').strip()
169
 
170
  if not content:
171
- continue # Skip if content is empty after removal
172
 
173
- # Clean up the content
174
  cleaned_content = strip_model_prefix(content, model_prefix)
175
 
176
- # Add the chunk to the full response content
177
  response_content += cleaned_content
178
 
179
- # Yield the cleaned chunk as part of the stream
180
  yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"
181
 
182
- # After all chunks are processed, append the advertisement text at the end if it's not already included
183
  if ADVERTISEMENT_TEXT and not advertisement_added:
184
  response_content += "\n\n" + ADVERTISEMENT_TEXT
185
  advertisement_added = True
186
 
187
- # Now yield the final chunk with the advertisement text appended at the end
188
  yield f"data: {json.dumps(create_chat_completion_data(response_content, request.model, timestamp, 'stop'))}\n\n"
189
-
190
- # Add the final "done" marker
191
  yield "data: [DONE]\n\n"
192
 
193
  except httpx.HTTPStatusError as e:
194
  logger.error(f"HTTP error occurred for Request ID {request_id}: {e}")
195
  raise HTTPException(status_code=e.response.status_code, detail=str(e))
196
  except httpx.RequestError as e:
197
- logger.error(f"Error occurred during request for Request ID {request_id}: {e}")
198
  raise HTTPException(status_code=500, detail=str(e))
199
 
200
- # Process non-streaming response with headers from config.py
201
  async def process_non_streaming_response(request: ChatRequest):
202
- # Generate a unique ID for this request
203
  request_id = f"chatcmpl-{uuid.uuid4()}"
204
  logger.info(f"Processing request with ID: {request_id} - Model: {request.model}")
205
 
@@ -207,29 +153,17 @@ async def process_non_streaming_response(request: ChatRequest):
207
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
208
  model_prefix = MODEL_PREFIXES.get(request.model, "")
209
 
210
- # Adjust headers_api_chat and headers_chat since referer_url is removed
211
  headers_api_chat = get_headers_api_chat(BASE_URL)
212
- headers_chat = get_headers_chat(
213
- BASE_URL,
214
- next_action=str(uuid.uuid4()),
215
- next_router_state_tree=json.dumps([""]),
216
- )
217
 
218
  if request.model == 'o1-preview':
219
  delay_seconds = random.randint(20, 60)
220
- logger.info(
221
- f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' "
222
- f"(Request ID: {request_id})"
223
- )
224
  await asyncio.sleep(delay_seconds)
225
 
226
- # Fetch the h-value for the 'validated' field
227
  h_value = await getHid()
228
  if not h_value:
229
  logger.error("Failed to retrieve h-value for validation.")
230
- raise HTTPException(
231
- status_code=500, detail="Validation failed due to missing h-value."
232
- )
233
 
234
  json_data = {
235
  "agentMode": agent_mode,
@@ -238,13 +172,11 @@ async def process_non_streaming_response(request: ChatRequest):
238
  "clickedForceWebSearch": False,
239
  "codeModelMode": True,
240
  "githubToken": None,
241
- "id": None, # Using request_id instead of chat_id
242
  "isChromeExt": False,
243
  "isMicMode": False,
244
  "maxTokens": request.max_tokens,
245
- "messages": [
246
- message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages
247
- ],
248
  "mobileClient": False,
249
  "playgroundTemperature": request.temperature,
250
  "playgroundTopP": request.top_p,
@@ -253,21 +185,16 @@ async def process_non_streaming_response(request: ChatRequest):
253
  "userId": None,
254
  "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
255
  "userSystemPrompt": None,
256
- "validated": h_value, # Dynamically set the validated field
257
  "visitFromDelta": False,
258
  "webSearchModePrompt": False,
259
- "imageGenerationMode": False, # Added this line
260
  }
261
 
262
  full_response = ""
263
  async with httpx.AsyncClient() as client:
264
  try:
265
- async with client.stream(
266
- method="POST",
267
- url=f"{BASE_URL}/api/chat",
268
- headers=headers_api_chat,
269
- json=json_data,
270
- ) as response:
271
  response.raise_for_status()
272
  async for chunk in response.aiter_text():
273
  full_response += chunk
@@ -275,28 +202,19 @@ async def process_non_streaming_response(request: ChatRequest):
275
  logger.error(f"HTTP error occurred for Request ID {request_id}: {e}")
276
  raise HTTPException(status_code=e.response.status_code, detail=str(e))
277
  except httpx.RequestError as e:
278
- logger.error(
279
- f"Error occurred during request for Request ID {request_id}: {e}"
280
- )
281
  raise HTTPException(status_code=500, detail=str(e))
282
 
283
  if full_response.startswith("$@$v=undefined-rv1$@$"):
284
  full_response = full_response[21:]
285
 
286
- # Remove the blocked message if present
287
  if BLOCKED_MESSAGE in full_response:
288
- logger.info(
289
- f"Blocked message detected in response for Request ID {request_id}."
290
- )
291
  full_response = full_response.replace(BLOCKED_MESSAGE, '').strip()
292
  if not full_response:
293
- raise HTTPException(
294
- status_code=500, detail="Blocked message detected in response."
295
- )
296
 
297
  cleaned_full_response = strip_model_prefix(full_response, model_prefix)
298
 
299
- # Append the advertisement text only once at the end
300
  if ADVERTISEMENT_TEXT:
301
  cleaned_full_response += "\n\n" + ADVERTISEMENT_TEXT
302
 
 
3
  import uuid
4
  import asyncio
5
  import random
 
6
  import os
7
  from typing import Any, Dict, Optional
8
 
 
11
  from api.config import (
12
  MODEL_MAPPING,
13
  get_headers_api_chat,
 
14
  BASE_URL,
15
  AGENT_MODE,
16
  TRENDING_AGENT_MODE,
 
23
 
24
  logger = setup_logger(__name__)
25
 
 
26
  BLOCKED_MESSAGE = "Generated by BLACKBOX.AI, try unlimited chat https://www.blackbox.ai"
 
 
27
  ADVERTISEMENT_TEXT = os.getenv("ADVERTISEMENT_TEXT", "")
28
 
29
+ def create_chat_completion_data(content: str, model: str, timestamp: int, finish_reason: Optional[str] = None) -> Dict[str, Any]:
 
 
 
30
  return {
31
  "id": f"chatcmpl-{uuid.uuid4()}",
32
  "object": "chat.completion.chunk",
 
42
  "usage": None,
43
  }
44
 
 
45
  def message_to_dict(message, model_prefix: Optional[str] = None):
46
  content = message.content if isinstance(message.content, str) else message.content[0]["text"]
47
  if model_prefix:
48
  content = f"{model_prefix} {content}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
49
  return {"role": message.role, "content": content}
50
 
 
51
  def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
 
52
  if model_prefix and content.startswith(model_prefix):
53
  logger.debug(f"Stripping prefix '{model_prefix}' from content.")
54
  return content[len(model_prefix):].strip()
55
  return content
56
 
 
57
  async def process_streaming_response(request: ChatRequest):
 
58
  request_id = f"chatcmpl-{uuid.uuid4()}"
59
  logger.info(f"Processing request with ID: {request_id} - Model: {request.model}")
60
 
 
62
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
63
  model_prefix = MODEL_PREFIXES.get(request.model, "")
64
 
 
65
  headers_api_chat = get_headers_api_chat(BASE_URL)
66
 
67
  if request.model == 'o1-preview':
68
  delay_seconds = random.randint(1, 60)
69
+ logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Request ID: {request_id})")
 
 
 
70
  await asyncio.sleep(delay_seconds)
71
 
 
72
  h_value = await getHid()
73
  if not h_value:
74
  logger.error("Failed to retrieve h-value for validation.")
75
+ raise HTTPException(status_code=500, detail="Validation failed due to missing h-value.")
 
 
76
 
77
  json_data = {
78
  "agentMode": agent_mode,
 
81
  "clickedForceWebSearch": False,
82
  "codeModelMode": True,
83
  "githubToken": None,
84
+ "id": request_id,
85
  "isChromeExt": False,
86
  "isMicMode": False,
87
  "maxTokens": request.max_tokens,
88
+ "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
 
 
89
  "mobileClient": False,
90
  "playgroundTemperature": request.temperature,
91
  "playgroundTopP": request.top_p,
 
94
  "userId": None,
95
  "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
96
  "userSystemPrompt": None,
97
+ "validated": h_value,
98
  "visitFromDelta": False,
99
  "webSearchModePrompt": False,
100
+ "imageGenerationMode": False,
101
  }
102
 
103
+ response_content = ""
104
+ advertisement_added = False
105
 
106
  async with httpx.AsyncClient() as client:
107
  try:
108
+ async with client.stream("POST", f"{BASE_URL}/api/chat", headers=headers_api_chat, json=json_data, timeout=100) as response:
 
 
 
 
 
 
109
  response.raise_for_status()
110
 
111
  timestamp = int(datetime.now().timestamp())
 
115
  if content.startswith("$@$v=undefined-rv1$@$"):
116
  content = content[21:] # Remove unwanted prefix
117
 
 
118
  if BLOCKED_MESSAGE in content:
119
  logger.info(f"Blocked message detected in response for Request ID {request_id}.")
120
  content = content.replace(BLOCKED_MESSAGE, '').strip()
121
 
122
  if not content:
123
+ continue # Skip empty content
124
 
 
125
  cleaned_content = strip_model_prefix(content, model_prefix)
126
 
127
+ # Add the chunk to the response content
128
  response_content += cleaned_content
129
 
130
+ # Yield each chunk of content
131
  yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"
132
 
133
+ # Append advertisement at the end
134
  if ADVERTISEMENT_TEXT and not advertisement_added:
135
  response_content += "\n\n" + ADVERTISEMENT_TEXT
136
  advertisement_added = True
137
 
 
138
  yield f"data: {json.dumps(create_chat_completion_data(response_content, request.model, timestamp, 'stop'))}\n\n"
 
 
139
  yield "data: [DONE]\n\n"
140
 
141
  except httpx.HTTPStatusError as e:
142
  logger.error(f"HTTP error occurred for Request ID {request_id}: {e}")
143
  raise HTTPException(status_code=e.response.status_code, detail=str(e))
144
  except httpx.RequestError as e:
145
+ logger.error(f"Request error occurred for Request ID {request_id}: {e}")
146
  raise HTTPException(status_code=500, detail=str(e))
147
 
 
148
  async def process_non_streaming_response(request: ChatRequest):
 
149
  request_id = f"chatcmpl-{uuid.uuid4()}"
150
  logger.info(f"Processing request with ID: {request_id} - Model: {request.model}")
151
 
 
153
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
154
  model_prefix = MODEL_PREFIXES.get(request.model, "")
155
 
 
156
  headers_api_chat = get_headers_api_chat(BASE_URL)
 
 
 
 
 
157
 
158
  if request.model == 'o1-preview':
159
  delay_seconds = random.randint(20, 60)
160
+ logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Request ID: {request_id})")
 
 
 
161
  await asyncio.sleep(delay_seconds)
162
 
 
163
  h_value = await getHid()
164
  if not h_value:
165
  logger.error("Failed to retrieve h-value for validation.")
166
+ raise HTTPException(status_code=500, detail="Validation failed due to missing h-value.")
 
 
167
 
168
  json_data = {
169
  "agentMode": agent_mode,
 
172
  "clickedForceWebSearch": False,
173
  "codeModelMode": True,
174
  "githubToken": None,
175
+ "id": request_id,
176
  "isChromeExt": False,
177
  "isMicMode": False,
178
  "maxTokens": request.max_tokens,
179
+ "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
 
 
180
  "mobileClient": False,
181
  "playgroundTemperature": request.temperature,
182
  "playgroundTopP": request.top_p,
 
185
  "userId": None,
186
  "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
187
  "userSystemPrompt": None,
188
+ "validated": h_value,
189
  "visitFromDelta": False,
190
  "webSearchModePrompt": False,
191
+ "imageGenerationMode": False,
192
  }
193
 
194
  full_response = ""
195
  async with httpx.AsyncClient() as client:
196
  try:
197
+ async with client.stream("POST", f"{BASE_URL}/api/chat", headers=headers_api_chat, json=json_data) as response:
 
 
 
 
 
198
  response.raise_for_status()
199
  async for chunk in response.aiter_text():
200
  full_response += chunk
 
202
  logger.error(f"HTTP error occurred for Request ID {request_id}: {e}")
203
  raise HTTPException(status_code=e.response.status_code, detail=str(e))
204
  except httpx.RequestError as e:
205
+ logger.error(f"Request error occurred for Request ID {request_id}: {e}")
 
 
206
  raise HTTPException(status_code=500, detail=str(e))
207
 
208
  if full_response.startswith("$@$v=undefined-rv1$@$"):
209
  full_response = full_response[21:]
210
 
 
211
  if BLOCKED_MESSAGE in full_response:
 
 
 
212
  full_response = full_response.replace(BLOCKED_MESSAGE, '').strip()
213
  if not full_response:
214
+ raise HTTPException(status_code=500, detail="Blocked message detected in response.")
 
 
215
 
216
  cleaned_full_response = strip_model_prefix(full_response, model_prefix)
217
 
 
218
  if ADVERTISEMENT_TEXT:
219
  cleaned_full_response += "\n\n" + ADVERTISEMENT_TEXT
220