Update main.py
main.py (CHANGED)
@@ -8,12 +8,12 @@ import logging
 import asyncio
 import time
 from collections import defaultdict
-from typing import List, Dict, Any, Optional,
+from typing import List, Dict, Any, Optional, Union
 from datetime import datetime
 
-from aiohttp import ClientSession,
+from aiohttp import ClientSession, ClientResponseError
 from fastapi import FastAPI, HTTPException, Request, Depends, Header
-from fastapi.responses import
+from fastapi.responses import JSONResponse
 from pydantic import BaseModel
 
 # Configure logging
@@ -27,18 +27,11 @@ logger = logging.getLogger(__name__)
 # Load environment variables
 API_KEYS = os.getenv('API_KEYS', '').split(',')  # Comma-separated API keys
 RATE_LIMIT = int(os.getenv('RATE_LIMIT', '60'))  # Requests per minute
-AVAILABLE_MODELS = os.getenv('AVAILABLE_MODELS', '')  # Comma-separated available models
 
 if not API_KEYS or API_KEYS == ['']:
     logger.error("No API keys found. Please set the API_KEYS environment variable.")
     raise Exception("API_KEYS environment variable not set.")
 
-# Process available models
-if AVAILABLE_MODELS:
-    AVAILABLE_MODELS = [model.strip() for model in AVAILABLE_MODELS.split(',') if model.strip()]
-else:
-    AVAILABLE_MODELS = []  # If empty, all models are available
-
 # Simple in-memory rate limiter based solely on IP addresses
 rate_limit_store = defaultdict(lambda: {"count": 0, "timestamp": time.time()})
 
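
Side note on the guard kept above: os.getenv('API_KEYS', '').split(',') never returns an empty list — splitting an empty string yields [''] — which is why the check tests API_KEYS == [''] in addition to plain truthiness. A quick demonstration:

    import os

    os.environ.pop('API_KEYS', None)
    keys = os.getenv('API_KEYS', '').split(',')
    print(keys)          # [''] — truthy, so `not keys` alone would miss it
    print(keys == [''])  # True — the case the guard actually catches
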
@@ -46,27 +39,19 @@ rate_limit_store = defaultdict(lambda: {"count": 0, "timestamp": time.time()})
 CLEANUP_INTERVAL = 60  # seconds
 RATE_LIMIT_WINDOW = 60  # seconds
 
-class ImageResponse:
-    def __init__(self, images: str, alt: str):
-        self.images = images
-        self.alt = alt
-
 class Blackbox:
     label = "Blackbox AI"
     url = "https://www.blackbox.ai"
     api_endpoint = "https://www.blackbox.ai/api/chat"
     working = True
     supports_gpt_4 = True
-    supports_stream = True
     supports_system_message = True
     supports_message_history = True
 
     default_model = 'blackboxai'
-    image_models = ['ImageGeneration']
     models = [
         default_model,
         'blackboxai-pro',
-        *image_models,
         "llama-3.1-8b",
         'llama-3.1-70b',
         'llama-3.1-405b',
@@ -74,25 +59,9 @@ class Blackbox:
         'gemini-pro',
         'gemini-1.5-flash',
         'claude-sonnet-3.5',
-        'PythonAgent',
-        'JavaAgent',
-        'JavaScriptAgent',
-        'HTMLAgent',
-        'GoogleCloudAgent',
-        'AndroidDeveloper',
-        'SwiftDeveloper',
-        'Next.jsAgent',
-        'MongoDBAgent',
-        'PyTorchAgent',
-        'ReactAgent',
-        'XcodeAgent',
-        'AngularJSAgent',
     ]
 
-    agentMode = {
-        'ImageGeneration': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"},
-    }
-
+    agentMode = {}
     trendingAgentMode = {
         "blackboxai": {},
         "gemini-1.5-flash": {'mode': True, 'id': 'Gemini'},
@@ -100,19 +69,6 @@ class Blackbox:
         'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"},
         'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405b"},
         'blackboxai-pro': {'mode': True, 'id': "BLACKBOXAI-PRO"},
-        'PythonAgent': {'mode': True, 'id': "Python Agent"},
-        'JavaAgent': {'mode': True, 'id': "Java Agent"},
-        'JavaScriptAgent': {'mode': True, 'id': "JavaScript Agent"},
-        'HTMLAgent': {'mode': True, 'id': "HTML Agent"},
-        'GoogleCloudAgent': {'mode': True, 'id': "Google Cloud Agent"},
-        'AndroidDeveloper': {'mode': True, 'id': "Android Developer"},
-        'SwiftDeveloper': {'mode': True, 'id': "Swift Developer"},
-        'Next.jsAgent': {'mode': True, 'id': "Next.js Agent"},
-        'MongoDBAgent': {'mode': True, 'id': "MongoDB Agent"},
-        'PyTorchAgent': {'mode': True, 'id': "PyTorch Agent"},
-        'ReactAgent': {'mode': True, 'id': "React Agent"},
-        'XcodeAgent': {'mode': True, 'id': "Xcode Agent"},
-        'AngularJSAgent': {'mode': True, 'id': "AngularJS Agent"},
     }
 
     userSelectedModel = {
@@ -125,21 +81,7 @@ class Blackbox:
         'gpt-4o': '@GPT-4o',
         'gemini-pro': '@Gemini-PRO',
         'claude-sonnet-3.5': '@Claude-Sonnet-3.5',
-        'PythonAgent': '@Python Agent',
-        'JavaAgent': '@Java Agent',
-        'JavaScriptAgent': '@JavaScript Agent',
-        'HTMLAgent': '@HTML Agent',
-        'GoogleCloudAgent': '@Google Cloud Agent',
-        'AndroidDeveloper': '@Android Developer',
-        'SwiftDeveloper': '@Swift Developer',
-        'Next.jsAgent': '@Next.js Agent',
-        'MongoDBAgent': '@MongoDB Agent',
-        'PyTorchAgent': '@PyTorch Agent',
-        'ReactAgent': '@React Agent',
-        'XcodeAgent': '@Xcode Agent',
-        'AngularJSAgent': '@AngularJS Agent',
         'blackboxai-pro': '@BLACKBOXAI-PRO',
-        'ImageGeneration': '@Image Generation',
     }
 
     model_referers = {
@@ -152,7 +94,6 @@ class Blackbox:
     model_aliases = {
         "gemini-flash": "gemini-1.5-flash",
         "claude-3.5-sonnet": "claude-sonnet-3.5",
-        "flux": "ImageGeneration",
     }
 
     @classmethod
@@ -201,30 +142,14 @@ class Blackbox:
         return cleaned_text
 
     @classmethod
-    async def create_async_generator(
+    async def generate_response(
         cls,
         model: str,
         messages: List[Dict[str, str]],
         proxy: Optional[str] = None,
-        web_search_mode: bool = False,
         **kwargs
-    ) ->
-        """
-        Creates an asynchronous generator for streaming responses from Blackbox AI.
-        Parameters:
-            model (str): Model to use for generating responses.
-            messages (List[Dict[str, str]]): Message history.
-            proxy (Optional[str]): Proxy URL, if needed.
-            web_search_mode (bool): Enables or disables web search mode.
-            **kwargs: Additional keyword arguments.
-        Yields:
-            Union[str, ImageResponse]: Segments of the generated response or ImageResponse objects.
-        """
+    ) -> str:
         model = cls.get_model(model)
-        if model is None:
-            logger.error(f"Model {model} is not available.")
-            raise ModelNotWorkingException(model)
-
         chat_id = cls.generate_random_string()
         next_action = cls.generate_next_action()
         next_router_state_tree = cls.generate_next_router_state_tree()
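
With create_async_generator renamed to generate_response and now returning a plain str, callers switch from async iteration to a single await. A minimal sketch of the new call shape (hypothetical usage; assumes the file above is importable as main):

    import asyncio

    from main import Blackbox  # assumption: main.py is on the import path

    async def demo() -> None:
        # Before: async for chunk in Blackbox.create_async_generator(...)
        # After: one awaited call that resolves to the complete reply.
        reply = await Blackbox.generate_response(
            model="blackboxai",
            messages=[{"role": "user", "content": "Hello"}],
        )
        print(reply)

    asyncio.run(demo())
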
@@ -297,22 +222,10 @@ class Blackbox:
             "clickedForceWebSearch": False,
             "visitFromDelta": False,
             "mobileClient": False,
-            "webSearchMode": web_search_mode,
+            "webSearchMode": False,
             "userSelectedModel": cls.userSelectedModel.get(model, model)
         }
 
-        headers_chat = {
-            'Accept': 'text/x-component',
-            'Content-Type': 'text/plain;charset=UTF-8',
-            'Referer': f'{cls.url}/chat/{chat_id}?model={model}',
-            'next-action': next_action,
-            'next-router-state-tree': next_router_state_tree,
-            'next-url': '/'
-        }
-        headers_chat_combined = {**common_headers, **headers_chat}
-
-        data_chat = '[]'
-
         async with ClientSession(headers=common_headers) as session:
             try:
                 async with session.post(
@@ -324,64 +237,7 @@ class Blackbox:
                     response_api_chat.raise_for_status()
                     text = await response_api_chat.text()
                     cleaned_response = cls.clean_response(text)
-
-                    if model in cls.image_models:
-                        match = re.search(r'!\[.*?\]\((https?://[^\)]+)\)', cleaned_response)
-                        if match:
-                            image_url = match.group(1)
-                            image_response = ImageResponse(images=image_url, alt="Generated Image")
-                            yield image_response
-                        else:
-                            yield cleaned_response
-                    else:
-                        if web_search_mode:
-                            match = re.search(r'\$~~~\$(.*?)\$~~~\$', cleaned_response, re.DOTALL)
-                            if match:
-                                source_part = match.group(1).strip()
-                                answer_part = cleaned_response[match.end():].strip()
-                                try:
-                                    sources = json.loads(source_part)
-                                    source_formatted = "**Source:**\n"
-                                    for item in sources:
-                                        title = item.get('title', 'No Title')
-                                        link = item.get('link', '#')
-                                        position = item.get('position', '')
-                                        source_formatted += f"{position}. [{title}]({link})\n"
-                                    final_response = f"{answer_part}\n\n{source_formatted}"
-                                except json.JSONDecodeError:
-                                    final_response = f"{answer_part}\n\nSource information is unavailable."
-                            else:
-                                final_response = cleaned_response
-                        else:
-                            if '$~~~$' in cleaned_response:
-                                final_response = cleaned_response.split('$~~~$')[0].strip()
-                            else:
-                                final_response = cleaned_response
-
-                    yield final_response
-            except ClientResponseError as e:
-                error_text = f"Error {e.status}: {e.message}"
-                try:
-                    error_response = await e.response.text()
-                    cleaned_error = cls.clean_response(error_response)
-                    error_text += f" - {cleaned_error}"
-                except Exception:
-                    pass
-                yield error_text
-            except Exception as e:
-                yield f"Unexpected error during /api/chat request: {str(e)}"
-
-            chat_url = f'{cls.url}/chat/{chat_id}?model={model}'
-
-            try:
-                async with session.post(
-                    chat_url,
-                    headers=headers_chat_combined,
-                    data=data_chat,
-                    proxy=proxy
-                ) as response_chat:
-                    response_chat.raise_for_status()
-                    pass
+                    return cleaned_response
             except ClientResponseError as e:
                 error_text = f"Error {e.status}: {e.message}"
                 try:
@@ -390,9 +246,9 @@ class Blackbox:
                     error_text += f" - {cleaned_error}"
                 except Exception:
                     pass
-                yield error_text
+                return error_text
             except Exception as e:
-                yield f"Unexpected error during /api/chat request: {str(e)}"
+                return f"Unexpected error during /api/chat request: {str(e)}"
 
 # Custom exception for model not working
 class ModelNotWorkingException(Exception):
@@ -486,30 +342,11 @@ class ChatRequest(BaseModel):
     temperature: Optional[float] = 1.0
     top_p: Optional[float] = 1.0
     n: Optional[int] = 1
-    stream: Optional[bool] = False
-    stop: Optional[Union[str, List[str]]] = None
     max_tokens: Optional[int] = None
     presence_penalty: Optional[float] = 0.0
    frequency_penalty: Optional[float] = 0.0
     logit_bias: Optional[Dict[str, float]] = None
     user: Optional[str] = None
-    web_search_mode: Optional[bool] = False  # Custom parameter
-
-def create_response(content: str, model: str, finish_reason: Optional[str] = None) -> Dict[str, Any]:
-    return {
-        "id": f"chatcmpl-{uuid.uuid4()}",
-        "object": "chat.completion.chunk",
-        "created": int(datetime.now().timestamp()),
-        "model": model,
-        "choices": [
-            {
-                "index": 0,
-                "delta": {"content": content, "role": "assistant"},
-                "finish_reason": finish_reason,
-            }
-        ],
-        "usage": None,
-    }
 
 @app.post("/v1/chat/completions", dependencies=[Depends(rate_limiter_per_ip)])
 async def chat_completions(request: ChatRequest, req: Request, api_key: str = Depends(get_api_key)):
@@ -525,65 +362,35 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = Depends(get_api_key)):
             logger.warning(f"Attempt to use unavailable model: {request.model} from IP: {client_ip}")
             raise HTTPException(status_code=400, detail="Requested model is not available.")
 
-        # Process the request with actual message content, but don't log
-        async_generator = Blackbox.create_async_generator(
+        # Process the request with actual message content, but don't log it
+        response_content = await Blackbox.generate_response(
             model=request.model,
-            messages=[{"role": msg.role, "content": msg.content} for msg in request.messages],
-            web_search_mode=request.web_search_mode
+            messages=[{"role": msg.role, "content": msg.content} for msg in request.messages],
+            temperature=request.temperature,
+            max_tokens=request.max_tokens
         )
 
-        # [22 removed lines lost in extraction: the streaming branch taken when request.stream was set]
-        else:
-            response_content = ""
-            async for chunk in async_generator:
-                if isinstance(chunk, ImageResponse):
-                    response_content += f"\n"
-                else:
-                    response_content += chunk
-
-            logger.info(f"Completed non-streaming response generation for API key: {api_key} | IP: {client_ip}")
-            return {
-                "id": f"chatcmpl-{uuid.uuid4()}",
-                "object": "chat.completion",
-                "created": int(datetime.now().timestamp()),
-                "model": request.model,
-                "choices": [
-                    {
-                        "message": {
-                            "role": "assistant",
-                            "content": response_content
-                        },
-                        "finish_reason": "stop",
-                        "index": 0
-                    }
-                ],
-                "usage": {
-                    "prompt_tokens": sum(len(msg.content.split()) for msg in request.messages),
-                    "completion_tokens": len(response_content.split()),
-                    "total_tokens": sum(len(msg.content.split()) for msg in request.messages) + len(response_content.split())
-                },
-            }
+        logger.info(f"Completed response generation for API key: {api_key} | IP: {client_ip}")
+        return {
+            "id": f"chatcmpl-{uuid.uuid4()}",
+            "object": "chat.completion",
+            "created": int(datetime.now().timestamp()),
+            "model": request.model,
+            "choices": [
+                {
+                    "index": 0,
+                    "message": {
+                        "role": "assistant",
+                        "content": response_content
+                    },
+                    "finish_reason": "stop"
+                }
+            ],
+            "usage": {
+                "prompt_tokens": sum(len(msg.content.split()) for msg in request.messages),
+                "completion_tokens": len(response_content.split()),
+                "total_tokens": sum(len(msg.content.split()) for msg in request.messages) + len(response_content.split())
+            },
+        }
     except ModelNotWorkingException as e:
         logger.warning(f"Model not working: {e} | IP: {client_ip}")
         raise HTTPException(status_code=503, detail=str(e))
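
Note that the usage block above counts whitespace-separated words, not real model tokens — the same approximation the removed /v1/tokenizer endpoint used. For example:

    # Word-count "tokens", as computed by the usage block:
    prompt = "Hello there, how are you?"
    completion = "I am fine."
    print(len(prompt.split()), len(completion.split()))  # 5 3
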
@@ -594,18 +401,6 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = Depends(get_api_key)):
     logger.exception(f"An unexpected error occurred while processing the chat completions request from IP: {client_ip}.")
     raise HTTPException(status_code=500, detail=str(e))
 
-# Endpoint: POST /v1/tokenizer
-class TokenizerRequest(BaseModel):
-    text: str
-
-@app.post("/v1/tokenizer", dependencies=[Depends(rate_limiter_per_ip)])
-async def tokenizer(request: TokenizerRequest, req: Request):
-    client_ip = req.client.host
-    text = request.text
-    token_count = len(text.split())
-    logger.info(f"Tokenizer requested from IP: {client_ip} | Text length: {len(text)}")
-    return {"text": text, "tokens": token_count}
-
 # Endpoint: GET /v1/models
 @app.get("/v1/models", dependencies=[Depends(rate_limiter_per_ip)])
 async def get_models(req: Request):
@@ -613,20 +408,6 @@ async def get_models(req: Request):
     logger.info(f"Fetching available models from IP: {client_ip}")
     return {"data": [{"id": model, "object": "model"} for model in Blackbox.models]}
 
-# Endpoint: GET /v1/models/{model}/status
-@app.get("/v1/models/{model}/status", dependencies=[Depends(rate_limiter_per_ip)])
-async def model_status(model: str, req: Request):
-    client_ip = req.client.host
-    logger.info(f"Model status requested for '{model}' from IP: {client_ip}")
-    if model in Blackbox.models:
-        return {"model": model, "status": "available"}
-    elif model in Blackbox.model_aliases and Blackbox.model_aliases[model] in Blackbox.models:
-        actual_model = Blackbox.model_aliases[model]
-        return {"model": actual_model, "status": "available via alias"}
-    else:
-        logger.warning(f"Model not found: {model} from IP: {client_ip}")
-        raise HTTPException(status_code=404, detail="Model not found")
-
 # Endpoint: GET /v1/health
 @app.get("/v1/health", dependencies=[Depends(rate_limiter_per_ip)])
 async def health_check(req: Request):
@@ -634,13 +415,6 @@ async def health_check(req: Request):
     logger.info(f"Health check requested from IP: {client_ip}")
     return {"status": "ok"}
 
-# Endpoint: GET /v1/chat/completions (GET method)
-@app.get("/v1/chat/completions")
-async def chat_completions_get(req: Request):
-    client_ip = req.client.host
-    logger.info(f"GET request made to /v1/chat/completions from IP: {client_ip}, redirecting to 'about:blank'")
-    return RedirectResponse(url='about:blank')
-
 # Custom exception handler to match OpenAI's error format
 @app.exception_handler(HTTPException)
 async def http_exception_handler(request: Request, exc: HTTPException):
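
End to end, a client call against the rewritten endpoint might look like the sketch below. The base URL and the Authorization header are assumptions — get_api_key's expected header is defined in a part of main.py this diff does not show.

    import requests

    resp = requests.post(
        "http://localhost:8000/v1/chat/completions",  # assumed host/port
        headers={"Authorization": "Bearer YOUR_API_KEY"},  # assumed auth header
        json={
            "model": "blackboxai",
            "messages": [{"role": "user", "content": "Hello"}],
        },
        timeout=60,
    )
    resp.raise_for_status()
    print(resp.json()["choices"][0]["message"]["content"])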