from __future__ import annotations

import re
import random
import string
import uuid
import json
from datetime import datetime
from typing import List, Dict, Any, Optional

from aiohttp import ClientSession
from fastapi import FastAPI, HTTPException
from fastapi.responses import StreamingResponse
from pydantic import BaseModel


# Custom exception raised when a model is marked as not working
class ModelNotWorkingException(Exception):
    def __init__(self, model: str):
        self.model = model
        self.message = f"The model '{model}' is currently not working. Please wait for NiansuhAI to fix this. Thank you for your patience."
        super().__init__(self.message)


# Mock implementations for ImageResponse and to_data_uri
class ImageResponse:
    def __init__(self, url: str, alt: str):
        self.url = url
        self.alt = alt


def to_data_uri(image: Any) -> str:
    return "data:image/png;base64,..."  # Replace with actual base64 data


class AsyncGeneratorProvider:
    pass


class ProviderModelMixin:
    pass


class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://www.blackbox.ai"
    api_endpoint = "https://www.blackbox.ai/api/chat"
    working = True
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = 'blackbox'
    models = [
        'blackbox',
        'gemini-1.5-flash',
        'llama-3.1-8b',
        'llama-3.1-70b',  # Example of a non-working model
        'llama-3.1-405b',
        'ImageGenerationLV45LJp',
        'gpt-4o',
        'gemini-pro',
        'claude-sonnet-3.5',
    ]

    # Working status of each model
    model_status = {
        'blackbox': True,
        'gemini-1.5-flash': True,
        'llama-3.1-8b': True,
        'llama-3.1-70b': False,  # Non-working model
        'llama-3.1-405b': True,
        'ImageGenerationLV45LJp': True,
        'gpt-4o': True,
        'gemini-pro': True,
        'claude-sonnet-3.5': True,
    }

    agentMode = {
        'ImageGenerationLV45LJp': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"},
    }

    trendingAgentMode = {
        'blackbox': {},
        'gemini-1.5-flash': {'mode': True, 'id': 'Gemini'},
        'llama-3.1-8b': {'mode': True, 'id': "llama-3.1-8b"},
        'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"},
        'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405b"},
    }

    userSelectedModel = {
        'gpt-4o': "gpt-4o",
        'gemini-pro': "gemini-pro",
        'claude-sonnet-3.5': "claude-sonnet-3.5",
    }

    model_aliases = {
        "gemini-flash": "gemini-1.5-flash",
        "flux": "ImageGenerationLV45LJp",
    }

    @classmethod
    def get_model(cls, model: str) -> str:
        # Resolve aliases to canonical model names; fall back to the default model
        if model in cls.models:
            return model
        elif model in cls.userSelectedModel:
            return model
        elif model in cls.model_aliases:
            return cls.model_aliases[model]
        else:
            return cls.default_model

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: List[Dict[str, str]],
        proxy: Optional[str] = None,
        image: Optional[Any] = None,
        image_name: Optional[str] = None,
        **kwargs
    ) -> Any:
        model = cls.get_model(model)

        # Check if the model is working
        if not cls.model_status.get(model, False):
            raise ModelNotWorkingException(model)

        headers = {
            "accept": "*/*",
            "accept-language": "en-US,en;q=0.9",
            "cache-control": "no-cache",
            "content-type": "application/json",
            "origin": cls.url,
            "pragma": "no-cache",
            "referer": f"{cls.url}/",
            "sec-ch-ua": '"Not;A=Brand";v="24", "Chromium";v="128"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Linux"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36",
        }

        # User-selectable models are addressed by prefixing the first message with "@<model>"
        if model in cls.userSelectedModel:
            prefix = f"@{cls.userSelectedModel[model]}"
            if not messages[0]['content'].startswith(prefix):
                messages[0]['content'] = f"{prefix} {messages[0]['content']}"

        async with ClientSession(headers=headers) as session:
            if image is not None:
                messages[-1]["data"] = {
                    "fileText": image_name,
                    "imageBase64": to_data_uri(image)
                }

            random_id = ''.join(random.choices(string.ascii_letters + string.digits, k=7))

            data = {
                "messages": messages,
                "id": random_id,
                "previewToken": None,
                "userId": None,
                "codeModelMode": True,
                # Per-model agent configuration; image generation relies on its agentMode entry
                "agentMode": cls.agentMode.get(model, {}),
                "trendingAgentMode": cls.trendingAgentMode.get(model, {}),
                "userSelectedModel": cls.userSelectedModel.get(model),
                "userSystemPrompt": None,
                "isMicMode": False,
                "maxTokens": 4096,
                "playgroundTopP": 0.9,
                "playgroundTemperature": 0.5,
                "isChromeExt": False,
                "githubToken": None,
                "clickedAnswer2": False,
                "clickedAnswer3": False,
                "clickedForceWebSearch": False,
                "visitFromDelta": False,
                "mobileClient": False,
                "webSearchMode": False,
            }

            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
                response.raise_for_status()
                if model == 'ImageGenerationLV45LJp':
                    # Image responses arrive as text containing a storage URL
                    response_text = await response.text()
                    url_match = re.search(r'https://storage\.googleapis\.com/[^\s\)]+', response_text)
                    if url_match:
                        image_url = url_match.group(0)
                        yield ImageResponse(image_url, alt=messages[-1]['content'])
                    else:
                        raise Exception("Image URL not found in the response")
                else:
                    response_content = ""
                    async for chunk in response.content.iter_any():
                        if chunk:
                            decoded_chunk = chunk.decode(errors='ignore')
                            # Strip the provider's "$@$v=...$@$" version banner from the stream
                            decoded_chunk = re.sub(r'\$@\$v=[^$]+\$@\$', '', decoded_chunk)
                            if decoded_chunk.strip():
                                response_content += decoded_chunk

                    # An empty response is treated as the model not working
                    if not response_content.strip():
                        raise ModelNotWorkingException(model)

                    yield response_content
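
# A minimal sketch of driving the provider directly, outside FastAPI. Illustrative
# only: the coroutine name `demo_provider_call` is ours, not part of the provider
# API, and running it requires network access to the Blackbox endpoint.
async def demo_provider_call() -> None:
    messages = [{"role": "user", "content": "Say hello."}]
    # The generator yields either text chunks or ImageResponse objects
    async for chunk in Blackbox.create_async_generator(model="blackbox", messages=messages):
        if isinstance(chunk, ImageResponse):
            print(f"image: {chunk.url}")
        else:
            print(chunk)
# To try it standalone: `import asyncio; asyncio.run(demo_provider_call())`
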
# FastAPI app setup
app = FastAPI()


class Message(BaseModel):
    role: str
    content: str


class ChatRequest(BaseModel):
    model: str
    messages: List[Message]
    stream: Optional[bool] = False  # Add this for streaming


def create_response(content: str, model: str, finish_reason: Optional[str] = None) -> Dict[str, Any]:
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion.chunk",
        "created": int(datetime.now().timestamp()),
        "model": model,
        "choices": [
            {
                "index": 0,
                "delta": {"content": content, "role": "assistant"},
                "finish_reason": finish_reason,
            }
        ],
        "usage": None,
    }
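
# For reference, `create_response("Hi", "blackbox")` serializes via json.dumps
# into an OpenAI-style chat.completion.chunk payload along these lines, with
# id and created varying per call:
#
#   {"id": "chatcmpl-<uuid>",
#    "object": "chat.completion.chunk",
#    "created": 1700000000,
#    "model": "blackbox",
#    "choices": [{"index": 0,
#                 "delta": {"content": "Hi", "role": "assistant"},
#                 "finish_reason": null}],
#    "usage": null}
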
@app.post("/niansuhai/v1/chat/completions")
async def chat_completions(request: ChatRequest):
    # Validate the model (deduplicated, order-preserving)
    valid_models = list(dict.fromkeys(
        Blackbox.models + list(Blackbox.userSelectedModel.keys()) + list(Blackbox.model_aliases.keys())
    ))
    if request.model not in valid_models:
        raise HTTPException(
            status_code=400,
            detail=f"Invalid model name: {request.model}. Valid models are: {valid_models}"
        )

    messages = [{"role": msg.role, "content": msg.content} for msg in request.messages]

    # Check the model status up front. Calling an async-generator function does not
    # execute its body, so ModelNotWorkingException would otherwise surface only on
    # first iteration (as a 500 instead of a 503).
    resolved_model = Blackbox.get_model(request.model)
    if not Blackbox.model_status.get(resolved_model, False):
        raise HTTPException(status_code=503, detail=str(ModelNotWorkingException(resolved_model)))

    async_generator = Blackbox.create_async_generator(
        model=request.model,
        messages=messages,
        image=None,  # Pass the image if required
        image_name=None  # Pass image name if required
    )

    if request.stream:
        async def generate():
            # Note: exceptions raised here terminate the SSE stream mid-response
            async for chunk in async_generator:
                if isinstance(chunk, ImageResponse):
                    image_markdown = f"![image]({chunk.url})"
                    yield f"data: {json.dumps(create_response(image_markdown, request.model))}\n\n"
                else:
                    # Build the content first: a backslash inside an f-string
                    # expression is a syntax error before Python 3.12
                    content = chunk + "\nNiansuhAI"
                    yield f"data: {json.dumps(create_response(content, request.model))}\n\n"
            yield "data: [DONE]\n\n"

        return StreamingResponse(generate(), media_type="text/event-stream")
    else:
        response_content = ""
        try:
            async for chunk in async_generator:
                if isinstance(chunk, ImageResponse):
                    response_content += f"![image]({chunk.url})\n"
                else:
                    response_content += chunk
        except ModelNotWorkingException as e:
            # Raised by the generator when the upstream reply is empty
            raise HTTPException(status_code=503, detail=str(e))

        # Append the "NiansuhAI" signature to the final response content
        response_content += "\n**NiansuhAI**"

        return {
            "id": f"chatcmpl-{uuid.uuid4()}",
            "object": "chat.completion",
            "created": int(datetime.now().timestamp()),
            "model": request.model,
            "choices": [
                {
                    "message": {
                        "role": "assistant",
                        "content": response_content
                    },
                    "finish_reason": "stop",
                    "index": 0
                }
            ],
            "usage": None,
        }


@app.get("/niansuhai/v1/models")
async def get_models():
    return {"models": Blackbox.models}
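
# A minimal sketch for running the app locally. `uvicorn` is an assumed
# development dependency; it is not imported above, and any ASGI server works.
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)

# Example request against the server above (assuming it runs on localhost:8000):
#   curl -N http://localhost:8000/niansuhai/v1/chat/completions \
#     -H "Content-Type: application/json" \
#     -d '{"model": "blackbox", "messages": [{"role": "user", "content": "Hi"}], "stream": true}'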