Update main.py
main.py (CHANGED)
@@ -1,22 +1,45 @@
-
-
-import asyncio
-import aiohttp
+import os
+import re
 import random
 import string
-import json
 import uuid
-import …
-
+import json
+import logging
+import asyncio
+import time
+from collections import defaultdict
+from typing import List, Dict, Any, Optional, Union, AsyncGenerator
+from datetime import datetime
 
 from aiohttp import ClientSession, ClientResponseError
-
-from …
-from …
-
-
-
-class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
+from fastapi import FastAPI, HTTPException, Request, Depends, Header
+from fastapi.responses import JSONResponse
+from pydantic import BaseModel
+
+# Configure logging
+logging.basicConfig(
+    level=logging.INFO,
+    format="%(asctime)s [%(levelname)s] %(name)s: %(message)s",
+    handlers=[logging.StreamHandler()]
+)
+logger = logging.getLogger(__name__)
+
+# Load environment variables
+API_KEYS = os.getenv('API_KEYS', '').split(',')  # Comma-separated API keys
+RATE_LIMIT = int(os.getenv('RATE_LIMIT', '60'))  # Requests per minute
+
+if not API_KEYS or API_KEYS == ['']:
+    logger.error("No API keys found. Please set the API_KEYS environment variable.")
+    raise Exception("API_KEYS environment variable not set.")
+
+# Simple in-memory rate limiter based solely on IP addresses
+rate_limit_store = defaultdict(lambda: {"count": 0, "timestamp": time.time()})
+
+# Define cleanup interval and window
+CLEANUP_INTERVAL = 60  # seconds
+RATE_LIMIT_WINDOW = 60  # seconds
+
+class Blackbox:
     label = "Blackbox AI"
     url = "https://www.blackbox.ai"
    api_endpoint = "https://www.blackbox.ai/api/chat"
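Note: the startup hook and route dependencies later in this diff reference `cleanup_rate_limit_stores()` and `rate_limiter_per_ip`, which fall outside the changed hunks and are not shown. A minimal sketch consistent with the store and constants defined above (the names exist in the file; the eviction and fixed-window logic here are assumptions, not the file's actual code):

```python
# Hypothetical helpers -- not visible in this diff; reconstructed from the
# rate_limit_store / RATE_LIMIT / RATE_LIMIT_WINDOW / CLEANUP_INTERVAL
# definitions above.
async def cleanup_rate_limit_stores():
    """Periodically evict IP entries whose window has long expired."""
    while True:
        now = time.time()
        stale = [ip for ip, rec in rate_limit_store.items()
                 if now - rec["timestamp"] > RATE_LIMIT_WINDOW * 2]
        for ip in stale:
            del rate_limit_store[ip]
        await asyncio.sleep(CLEANUP_INTERVAL)

async def rate_limiter_per_ip(request: Request):
    """Fixed-window limiter keyed by client IP; used via Depends(...)."""
    rec = rate_limit_store[request.client.host]
    now = time.time()
    if now - rec["timestamp"] > RATE_LIMIT_WINDOW:
        rec["count"], rec["timestamp"] = 0, now
    rec["count"] += 1
    if rec["count"] > RATE_LIMIT:
        raise HTTPException(status_code=429, detail="Rate limit exceeded.")
```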
@@ -166,26 +189,26 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         return cleaned_text
 
     @classmethod
-    async def …
+    async def generate_response(
         cls,
         model: str,
-        messages: …
+        messages: List[Dict[str, str]],
         proxy: Optional[str] = None,
         websearch: bool = False,
         **kwargs
-    ) -> …
+    ) -> Dict[str, Any]:
         """
-        …
+        Generates a response from Blackbox AI for the /v1/chat/completions endpoint.
 
         Parameters:
             model (str): Model to use for generating responses.
-            messages (…
+            messages (List[Dict[str, str]]): Message history.
             proxy (Optional[str]): Proxy URL, if needed.
             websearch (bool): Enables or disables web search mode.
             **kwargs: Additional keyword arguments.
 
-
-
+        Returns:
+            Dict[str, Any]: The response dictionary in the format required by /v1/chat/completions.
         """
         model = cls.get_model(model)
 
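With the old async generator rewritten as a coroutine that returns a completion dict, a call site looks roughly like this (the model id is an assumption; the available ids live in `Blackbox.models`, which this diff does not show):

```python
import asyncio

async def demo():
    completion = await Blackbox.generate_response(
        model="blackboxai",  # assumed model id; list real ones via GET /v1/models
        messages=[{"role": "user", "content": "Hello!"}],
    )
    print(completion["choices"][0]["message"]["content"])

asyncio.run(demo())
```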
@@ -265,18 +288,6 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
             "userSelectedModel": cls.userSelectedModel.get(model, model)
         }
 
-        headers_chat = {
-            'Accept': 'text/x-component',
-            'Content-Type': 'text/plain;charset=UTF-8',
-            'Referer': f'{cls.url}/chat/{chat_id}?model={model}',
-            'next-action': next_action,
-            'next-router-state-tree': next_router_state_tree,
-            'next-url': '/'
-        }
-        headers_chat_combined = {**common_headers, **headers_chat}
-
-        data_chat = '[]'
-
         async with ClientSession(headers=common_headers) as session:
             try:
                 async with session.post(
@@ -289,63 +300,29 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
                     text = await response_api_chat.text()
                     cleaned_response = cls.clean_response(text)
 
-                    …  # earlier lines of this removed block were lost in extraction
-                            source_formatted += f"{position}. [{title}]({link})\n"
-                        final_response = f"{answer_part}\n\n{source_formatted}"
-                    except json.JSONDecodeError:
-                        final_response = f"{answer_part}\n\nSource information is unavailable."
-                    else:
-                        final_response = cleaned_response
-                    else:
-                        if '$~~~$' in cleaned_response:
-                            final_response = cleaned_response.split('$~~~$')[0].strip()
-                        else:
-                            final_response = cleaned_response
-
-                    yield final_response
-            except ClientResponseError as e:
-                error_text = f"Error {e.status}: {e.message}"
-                try:
-                    error_response = await e.response.text()
-                    cleaned_error = cls.clean_response(error_response)
-                    error_text += f" - {cleaned_error}"
-                except Exception:
-                    pass
-                yield error_text
-            except Exception as e:
-                yield f"Unexpected error during /api/chat request: {str(e)}"
-
-        chat_url = f'{cls.url}/chat/{chat_id}?model={model}'
+                    response_data = {
+                        "id": f"chatcmpl-{uuid.uuid4()}",
+                        "object": "chat.completion",
+                        "created": int(datetime.now().timestamp()),
+                        "model": model,
+                        "choices": [
+                            {
+                                "index": 0,
+                                "message": {
+                                    "role": "assistant",
+                                    "content": cleaned_response
+                                },
+                                "finish_reason": "stop"
+                            }
+                        ],
+                        "usage": {
+                            "prompt_tokens": sum(len(msg['content'].split()) for msg in messages),
+                            "completion_tokens": len(cleaned_response.split()),
+                            "total_tokens": sum(len(msg['content'].split()) for msg in messages) + len(cleaned_response.split())
+                        }
+                    }
 
-
-        async with session.post(
-            chat_url,
-            headers=headers_chat_combined,
-            data=data_chat,
-            proxy=proxy
-        ) as response_chat:
-            response_chat.raise_for_status()
-            pass
+                    return response_data
             except ClientResponseError as e:
                 error_text = f"Error {e.status}: {e.message}"
                 try:
@@ -354,6 +331,117 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
                     error_text += f" - {cleaned_error}"
                 except Exception:
                     pass
-                …
+                raise HTTPException(status_code=e.status, detail=error_text)
             except Exception as e:
-                …
+                raise HTTPException(status_code=500, detail=f"Unexpected error during /api/chat request: {str(e)}")
+
+# FastAPI app setup
+app = FastAPI()
+
+# Add the cleanup task when the app starts
+@app.on_event("startup")
+async def startup_event():
+    asyncio.create_task(cleanup_rate_limit_stores())
+    logger.info("Started rate limit store cleanup task.")
+
+# Middleware to enhance security and enforce Content-Type for specific endpoints
+@app.middleware("http")
+async def security_middleware(request: Request, call_next):
+    client_ip = request.client.host
+    # Enforce that POST requests to /v1/chat/completions must have Content-Type: application/json
+    if request.method == "POST" and request.url.path == "/v1/chat/completions":
+        content_type = request.headers.get("Content-Type")
+        if content_type != "application/json":
+            logger.warning(f"Invalid Content-Type from IP: {client_ip} for path: {request.url.path}")
+            return JSONResponse(
+                status_code=400,
+                content={
+                    "error": {
+                        "message": "Content-Type must be application/json",
+                        "type": "invalid_request_error",
+                        "param": None,
+                        "code": None
+                    }
+                },
+            )
+    response = await call_next(request)
+    return response
+
+# Request Models
+class Message(BaseModel):
+    role: str
+    content: str
+
+class ChatRequest(BaseModel):
+    model: str
+    messages: List[Message]
+    temperature: Optional[float] = 1.0
+    top_p: Optional[float] = 1.0
+    n: Optional[int] = 1
+    max_tokens: Optional[int] = None
+    presence_penalty: Optional[float] = 0.0
+    frequency_penalty: Optional[float] = 0.0
+    logit_bias: Optional[Dict[str, float]] = None
+    user: Optional[str] = None
+
+@app.post("/v1/chat/completions", dependencies=[Depends(rate_limiter_per_ip)])
+async def chat_completions(request: ChatRequest, req: Request, api_key: str = Depends(get_api_key)):
+    client_ip = req.client.host
+    # Redact user messages only for logging purposes
+    redacted_messages = [{"role": msg.role, "content": "[redacted]"} for msg in request.messages]
+
+    logger.info(f"Received chat completions request from API key: {api_key} | IP: {client_ip} | Model: {request.model} | Messages: {redacted_messages}")
+
+    try:
+        # Validate that the requested model is available
+        if request.model not in Blackbox.models and request.model not in Blackbox.model_aliases:
+            logger.warning(f"Attempt to use unavailable model: {request.model} from IP: {client_ip}")
+            raise HTTPException(status_code=400, detail="Requested model is not available.")
+
+        # Process the request with actual message content, but don't log it
+        response_content = await Blackbox.generate_response(
+            model=request.model,
+            messages=[{"role": msg.role, "content": msg.content} for msg in request.messages],
+            temperature=request.temperature,
+            max_tokens=request.max_tokens
+        )
+
+        logger.info(f"Completed response generation for API key: {api_key} | IP: {client_ip}")
+        return response_content
+    except HTTPException as he:
+        logger.warning(f"HTTPException: {he.detail} | IP: {client_ip}")
+        raise he
+    except Exception as e:
+        logger.exception(f"An unexpected error occurred while processing the chat completions request from IP: {client_ip}.")
+        raise HTTPException(status_code=500, detail=str(e))
+
+# Endpoint: GET /v1/models
+@app.get("/v1/models", dependencies=[Depends(rate_limiter_per_ip)])
+async def get_models(req: Request):
+    client_ip = req.client.host
+    logger.info(f"Fetching available models from IP: {client_ip}")
+    return {"data": [{"id": model, "object": "model"} for model in Blackbox.models]}
+
+# Endpoint: GET /v1/health
+@app.get("/v1/health", dependencies=[Depends(rate_limiter_per_ip)])
+async def health_check(req: Request):
+    client_ip = req.client.host
+    logger.info(f"Health check requested from IP: {client_ip}")
+    return {"status": "ok"}
+
+# Custom exception handler to match OpenAI's error format
+@app.exception_handler(HTTPException)
+async def http_exception_handler(request: Request, exc: HTTPException):
+    client_ip = request.client.host
+    logger.error(f"HTTPException: {exc.detail} | Path: {request.url.path} | IP: {client_ip}")
+    return JSONResponse(
+        status_code=exc.status_code,
+        content={
+            "error": {
+                "message": exc.detail,
+                "type": "invalid_request_error",
+                "param": None,
+                "code": None
+            }
+        },
+    )
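`chat_completions` above also depends on `get_api_key`, which is outside the changed hunks. A plausible sketch that checks a Bearer token against the `API_KEYS` list from the top of the file (the name is real; the header format is an assumption):

```python
# Hypothetical dependency -- its body is not shown in this diff.
async def get_api_key(authorization: str = Header(None)) -> str:
    """Validate `Authorization: Bearer <key>` against API_KEYS."""
    if not authorization or not authorization.startswith("Bearer "):
        raise HTTPException(status_code=401,
                            detail="Invalid authorization header format.")
    api_key = authorization[len("Bearer "):]
    if api_key not in API_KEYS:
        raise HTTPException(status_code=401, detail="Invalid API key.")
    return api_key
```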
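For a quick smoke test of the new endpoint, run the app (e.g. `uvicorn main:app`) and post a completion request. The key and model id below are placeholders for whatever you configured via `API_KEYS` and whatever `GET /v1/models` reports:

```python
import asyncio
import aiohttp

async def main():
    headers = {"Authorization": "Bearer sk-example-key"}  # a key from API_KEYS
    payload = {
        "model": "blackboxai",  # assumed id; list real ones via GET /v1/models
        "messages": [{"role": "user", "content": "Say hello"}],
    }
    async with aiohttp.ClientSession() as session:
        async with session.post(
            "http://127.0.0.1:8000/v1/chat/completions",
            headers=headers,
            json=payload,  # aiohttp sets Content-Type: application/json,
                           # which the security middleware requires
        ) as resp:
            print(resp.status, await resp.json())

asyncio.run(main())
```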
|