rkihacker committed on
Commit
27a18c4
·
verified ·
1 Parent(s): cb78194

Update api/utils.py

Browse files
Files changed (1) hide show
  1. api/utils.py +16 -102
api/utils.py CHANGED
@@ -4,7 +4,7 @@ import uuid
4
  import asyncio
5
  import random
6
  import string
7
- from typing import Any, Dict, Optional, List
8
 
9
  import httpx
10
  from fastapi import HTTPException
@@ -23,57 +23,11 @@ from api.logger import setup_logger
23
  from api.validate import getHid # Import the asynchronous getHid function
24
  import tiktoken
25
 
26
- # ---------------- NEW IMPORTS FOR CLOUDFLARE R2 UPLOAD ----------------
27
- import boto3
28
- import re
29
-
30
  logger = setup_logger(__name__)
31
 
32
  # Define the blocked message
33
  BLOCKED_MESSAGE = "Generated by BLACKBOX.AI, try unlimited chat https://www.blackbox.ai and for API requests replace https://www.blackbox.ai with https://api.blackbox.ai"
34
 
35
- # ---------------- R2 CONFIG EXAMPLE ----------------
36
- R2_ACCESS_KEY_ID = "df9c9eb87e850a8eb27afd3968077b42"
37
- R2_SECRET_ACCESS_KEY = "14b08b0855263bb63d2618da3a6537e1b0446d89d51da03a568620b1e5342ea8"
38
- R2_ENDPOINT_URL = "https://f2f92ac53fae792c4155f6e93a514989.r2.cloudflarestorage.com"
39
- R2_BUCKET_NAME = "snapzion"
40
- R2_REPLACED_URLS_KEY_PREFIX = "nai" # or however you want to name your uploads
41
-
42
- # Initialize your R2 client
43
- s3 = boto3.client(
44
- "s3",
45
- endpoint_url=R2_ENDPOINT_URL,
46
- aws_access_key_id=R2_ACCESS_KEY_ID,
47
- aws_secret_access_key=R2_SECRET_ACCESS_KEY,
48
- )
49
-
50
- # Function to upload replaced URLs into R2 as a text file
51
- def upload_replaced_urls_to_r2(urls: List[str], request_id: str) -> None:
52
- """
53
- Given a list of replaced URLs, upload them as a .txt file to your R2 bucket.
54
- The file name includes the request_id so it's unique for each request.
55
- """
56
- if not urls:
57
- logger.info(f"No replaced URLs found for request {request_id}. Skipping upload.")
58
- return
59
-
60
- # Join all replaced URLs with a newline
61
- content_body = "\n".join(urls)
62
-
63
- # Create an object key, for example: replaced-urls/chatcmpl-<uuid>.txt
64
- object_key = f"{R2_REPLACED_URLS_KEY_PREFIX}/{request_id}.txt"
65
-
66
- try:
67
- s3.put_object(
68
- Bucket=R2_BUCKET_NAME,
69
- Key=object_key,
70
- Body=content_body.encode("utf-8"),
71
- )
72
- logger.info(f"Uploaded replaced URLs to R2: {object_key}")
73
- except Exception as e:
74
- logger.error(f"Failed to upload replaced URLs to R2: {e}")
75
-
76
-
77
  # Function to calculate tokens using tiktoken
78
  def calculate_tokens(text: str, model: str) -> int:
79
  try:
@@ -146,8 +100,7 @@ def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
146
  return content[len(model_prefix):].strip()
147
  return content
148
 
149
-
150
- # --------------------- PROCESS STREAMING RESPONSE ---------------------
151
  async def process_streaming_response(request: ChatRequest):
152
  # Generate a unique ID for this request
153
  request_id = f"chatcmpl-{uuid.uuid4()}"
@@ -170,7 +123,7 @@ async def process_streaming_response(request: ChatRequest):
170
  await asyncio.sleep(delay_seconds)
171
 
172
  # Fetch the h-value for the 'validated' field
173
- h_value = "00f37b34-a166-4efb-bce5-1312d87f2f94"
174
  if not h_value:
175
  logger.error("Failed to retrieve h-value for validation.")
176
  raise HTTPException(
@@ -215,10 +168,6 @@ async def process_streaming_response(request: ChatRequest):
215
  prompt_tokens += calculate_tokens(message['data']['imageBase64'], request.model)
216
 
217
  completion_tokens = 0
218
-
219
- # We'll keep track of any replaced URLs in this list
220
- replaced_urls = []
221
-
222
  async with httpx.AsyncClient() as client:
223
  try:
224
  async with client.stream(
@@ -232,39 +181,23 @@ async def process_streaming_response(request: ChatRequest):
232
  async for chunk in response.aiter_text():
233
  timestamp = int(datetime.now().timestamp())
234
  if chunk:
235
- # First handle any prefix from the server, if present
236
- if chunk.startswith("$@$v=undefined-rv1$@$"):
237
- chunk = chunk[21:]
238
-
239
  # Remove the blocked message if present
240
- if BLOCKED_MESSAGE in chunk:
241
  logger.info(
242
  f"Blocked message detected in response for Request ID {request_id}."
243
  )
244
- chunk = chunk.replace(BLOCKED_MESSAGE, '').strip()
245
- if not chunk:
246
  continue # Skip if content is empty after removal
247
-
248
- # ---------------- REPLACE STORAGE URLS ----------------
249
- # We'll search for "https://storage.googleapis.com" and replace
250
- # with "https://cdn.snapzion.com"
251
- # Also collect them for uploading to R2
252
- found_urls = re.findall(r"https://storage\.googleapis\.com\S*", chunk)
253
- if found_urls:
254
- replaced_urls.extend(found_urls)
255
- chunk = chunk.replace("https://storage.googleapis.com",
256
- "https://cdn.snapzion.com")
257
-
258
- cleaned_content = strip_model_prefix(chunk, model_prefix)
259
  completion_tokens += calculate_tokens(cleaned_content, request.model)
260
-
261
  yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp, request_id))}\n\n"
262
 
263
- # At the end of the stream, finalize the response
264
- # yield final piece with finish_reason="stop"
265
  yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, request_id, prompt_tokens, completion_tokens, 'stop'))}\n\n"
266
  yield "data: [DONE]\n\n"
267
-
268
  except httpx.HTTPStatusError as e:
269
  logger.error(f"HTTP error occurred for Request ID {request_id}: {e}")
270
  error_message = f"HTTP error occurred: {e}"
@@ -276,7 +209,7 @@ async def process_streaming_response(request: ChatRequest):
276
 
277
  yield f"data: {json.dumps(create_chat_completion_data(error_message, request.model, timestamp, request_id, prompt_tokens, completion_tokens, 'error'))}\n\n"
278
  yield "data: [DONE]\n\n"
279
-
280
  except httpx.RequestError as e:
281
  logger.error(
282
  f"Error occurred during request for Request ID {request_id}: {e}"
@@ -284,18 +217,15 @@ async def process_streaming_response(request: ChatRequest):
284
  error_message = f"Request error occurred: {e}"
285
  yield f"data: {json.dumps(create_chat_completion_data(error_message, request.model, timestamp, request_id, prompt_tokens, completion_tokens, 'error'))}\n\n"
286
  yield "data: [DONE]\n\n"
287
-
288
  except Exception as e:
289
  logger.error(f"An unexpected error occurred for Request ID {request_id}: {e}")
290
  error_message = f"An unexpected error occurred: {e}"
291
  yield f"data: {json.dumps(create_chat_completion_data(error_message, request.model, timestamp, request_id, prompt_tokens, completion_tokens, 'error'))}\n\n"
292
  yield "data: [DONE]\n\n"
 
293
 
294
- # Once the entire stream is done, upload any replaced URLs to R2
295
- upload_replaced_urls_to_r2(replaced_urls, request_id)
296
-
297
-
298
- # --------------------- PROCESS NON-STREAMING RESPONSE ---------------------
299
  async def process_non_streaming_response(request: ChatRequest):
300
  # Generate a unique ID for this request
301
  request_id = f"chatcmpl-{uuid.uuid4()}"
@@ -323,7 +253,6 @@ async def process_non_streaming_response(request: ChatRequest):
323
  await asyncio.sleep(delay_seconds)
324
 
325
  # Fetch the h-value for the 'validated' field
326
- # Hard-coded for demonstration
327
  h_value = "00f37b34-a166-4efb-bce5-1312d87f2f94"
328
  if not h_value:
329
  logger.error("Failed to retrieve h-value for validation.")
@@ -369,9 +298,6 @@ async def process_non_streaming_response(request: ChatRequest):
369
  prompt_tokens += calculate_tokens(message['data']['imageBase64'], request.model)
370
 
371
  full_response = ""
372
- # We'll keep track of any replaced URLs here
373
- replaced_urls = []
374
-
375
  async with httpx.AsyncClient() as client:
376
  try:
377
  async with client.stream(
@@ -455,11 +381,10 @@ async def process_non_streaming_response(request: ChatRequest):
455
  },
456
  }
457
 
458
- # Clean up server prefix if needed
459
  if full_response.startswith("$@$v=undefined-rv1$@$"):
460
  full_response = full_response[21:]
461
 
462
- # Remove blocked message
463
  if BLOCKED_MESSAGE in full_response:
464
  logger.info(
465
  f"Blocked message detected in response for Request ID {request_id}."
@@ -470,21 +395,9 @@ async def process_non_streaming_response(request: ChatRequest):
470
  status_code=500, detail="Blocked message detected in response."
471
  )
472
 
473
- # ---------------- REPLACE STORAGE URLS ----------------
474
- # We'll search for "https://storage.googleapis.com" and replace
475
- # with "https://cdn.snapzion.com"
476
- found_urls = re.findall(r"https://storage\.googleapis\.com\S*", full_response)
477
- if found_urls:
478
- replaced_urls.extend(found_urls)
479
- full_response = full_response.replace("https://storage.googleapis.com", "https://cdn.snapzion.com")
480
-
481
- # Strip model prefix if present
482
  cleaned_full_response = strip_model_prefix(full_response, model_prefix)
483
  completion_tokens = calculate_tokens(cleaned_full_response, request.model)
484
 
485
- # Upload replaced URLs to R2
486
- upload_replaced_urls_to_r2(replaced_urls, request_id)
487
-
488
  return {
489
  "id": request_id,
490
  "object": "chat.completion",
@@ -503,3 +416,4 @@ async def process_non_streaming_response(request: ChatRequest):
503
  "total_tokens": prompt_tokens + completion_tokens,
504
  },
505
  }
 
 
4
  import asyncio
5
  import random
6
  import string
7
+ from typing import Any, Dict, Optional
8
 
9
  import httpx
10
  from fastapi import HTTPException
 
23
  from api.validate import getHid # Import the asynchronous getHid function
24
  import tiktoken
25
 
 
 
 
 
26
  logger = setup_logger(__name__)
27
 
28
  # Define the blocked message
29
  BLOCKED_MESSAGE = "Generated by BLACKBOX.AI, try unlimited chat https://www.blackbox.ai and for API requests replace https://www.blackbox.ai with https://api.blackbox.ai"
30
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
31
  # Function to calculate tokens using tiktoken
32
  def calculate_tokens(text: str, model: str) -> int:
33
  try:
 
100
  return content[len(model_prefix):].strip()
101
  return content
102
 
103
+ # Process streaming response with headers from config.py
 
104
  async def process_streaming_response(request: ChatRequest):
105
  # Generate a unique ID for this request
106
  request_id = f"chatcmpl-{uuid.uuid4()}"
 
123
  await asyncio.sleep(delay_seconds)
124
 
125
  # Fetch the h-value for the 'validated' field
126
+ h_value = await getHid()
127
  if not h_value:
128
  logger.error("Failed to retrieve h-value for validation.")
129
  raise HTTPException(
 
168
  prompt_tokens += calculate_tokens(message['data']['imageBase64'], request.model)
169
 
170
  completion_tokens = 0
 
 
 
 
171
  async with httpx.AsyncClient() as client:
172
  try:
173
  async with client.stream(
 
181
  async for chunk in response.aiter_text():
182
  timestamp = int(datetime.now().timestamp())
183
  if chunk:
184
+ content = chunk
185
+ if content.startswith("$@$v=undefined-rv1$@$"):
186
+ content = content[21:]
 
187
  # Remove the blocked message if present
188
+ if BLOCKED_MESSAGE in content:
189
  logger.info(
190
  f"Blocked message detected in response for Request ID {request_id}."
191
  )
192
+ content = content.replace(BLOCKED_MESSAGE, '').strip()
193
+ if not content:
194
  continue # Skip if content is empty after removal
195
+ cleaned_content = strip_model_prefix(content, model_prefix)
 
 
 
 
 
 
 
 
 
 
 
196
  completion_tokens += calculate_tokens(cleaned_content, request.model)
 
197
  yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp, request_id))}\n\n"
198
 
 
 
199
  yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, request_id, prompt_tokens, completion_tokens, 'stop'))}\n\n"
200
  yield "data: [DONE]\n\n"
 
201
  except httpx.HTTPStatusError as e:
202
  logger.error(f"HTTP error occurred for Request ID {request_id}: {e}")
203
  error_message = f"HTTP error occurred: {e}"
 
209
 
210
  yield f"data: {json.dumps(create_chat_completion_data(error_message, request.model, timestamp, request_id, prompt_tokens, completion_tokens, 'error'))}\n\n"
211
  yield "data: [DONE]\n\n"
212
+ # raise HTTPException(status_code=e.response.status_code, detail=error_message)
213
  except httpx.RequestError as e:
214
  logger.error(
215
  f"Error occurred during request for Request ID {request_id}: {e}"
 
217
  error_message = f"Request error occurred: {e}"
218
  yield f"data: {json.dumps(create_chat_completion_data(error_message, request.model, timestamp, request_id, prompt_tokens, completion_tokens, 'error'))}\n\n"
219
  yield "data: [DONE]\n\n"
220
+ # raise HTTPException(status_code=500, detail=error_message)
221
  except Exception as e:
222
  logger.error(f"An unexpected error occurred for Request ID {request_id}: {e}")
223
  error_message = f"An unexpected error occurred: {e}"
224
  yield f"data: {json.dumps(create_chat_completion_data(error_message, request.model, timestamp, request_id, prompt_tokens, completion_tokens, 'error'))}\n\n"
225
  yield "data: [DONE]\n\n"
226
+ # raise HTTPException(status_code=500, detail=error_message)
227
 
228
+ # Process non-streaming response with headers from config.py
 
 
 
 
229
  async def process_non_streaming_response(request: ChatRequest):
230
  # Generate a unique ID for this request
231
  request_id = f"chatcmpl-{uuid.uuid4()}"
 
253
  await asyncio.sleep(delay_seconds)
254
 
255
  # Fetch the h-value for the 'validated' field
 
256
  h_value = "00f37b34-a166-4efb-bce5-1312d87f2f94"
257
  if not h_value:
258
  logger.error("Failed to retrieve h-value for validation.")
 
298
  prompt_tokens += calculate_tokens(message['data']['imageBase64'], request.model)
299
 
300
  full_response = ""
 
 
 
301
  async with httpx.AsyncClient() as client:
302
  try:
303
  async with client.stream(
 
381
  },
382
  }
383
 
 
384
  if full_response.startswith("$@$v=undefined-rv1$@$"):
385
  full_response = full_response[21:]
386
 
387
+ # Remove the blocked message if present
388
  if BLOCKED_MESSAGE in full_response:
389
  logger.info(
390
  f"Blocked message detected in response for Request ID {request_id}."
 
395
  status_code=500, detail="Blocked message detected in response."
396
  )
397
 
 
 
 
 
 
 
 
 
 
398
  cleaned_full_response = strip_model_prefix(full_response, model_prefix)
399
  completion_tokens = calculate_tokens(cleaned_full_response, request.model)
400
 
 
 
 
401
  return {
402
  "id": request_id,
403
  "object": "chat.completion",
 
416
  "total_tokens": prompt_tokens + completion_tokens,
417
  },
418
  }
419
+