from fastapi import FastAPI, HTTPException
from fastapi.responses import StreamingResponse, JSONResponse
from pydantic import BaseModel
from typing import List, Optional, Literal
import base64
import json

import g4f
from g4f.Provider import Blackbox, RetryProvider

app = FastAPI()
# Configure the Blackbox provider (patches the provider class globally)
g4f.Provider.Blackbox.url = "https://www.blackbox.ai/api/chat"
g4f.Provider.Blackbox.working = True
# Available models
TEXT_MODELS = [
    "blackboxai", "blackboxai-pro", "gpt-4o-mini", "deepseek-chat",
    "deepseek-v3", "deepseek-r1", "gpt-4o", "o1", "o3-mini",
    "claude-3.7-sonnet", "llama-3.3-70b", "mixtral-small-24b", "qwq-32b"
]

IMAGE_MODELS = [
    "flux", "flux-pro", "dall-e-3", "stable-diffusion-xl"
]
# Pydantic request models
class Message(BaseModel):
    role: Literal["system", "user", "assistant"]
    content: str


class ChatRequest(BaseModel):
    model: str
    messages: List[Message]
    temperature: Optional[float] = 0.7
    max_tokens: Optional[int] = None  # accepted but not forwarded below
    stream: Optional[bool] = False


class ImageRequest(BaseModel):
    model: str
    prompt: str
    size: Optional[str] = "1024x1024"
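# Illustrative request bodies for the endpoints below (field names match the
# Pydantic models above; values are examples only):
#
#   ChatRequest:  {"model": "blackboxai",
#                  "messages": [{"role": "user", "content": "Hello"}],
#                  "stream": true}
#   ImageRequest: {"model": "flux", "prompt": "a lighthouse at dusk"}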
# Route paths below are assumed (OpenAI-style); adjust to your deployment.
@app.get("/models")
async def get_models():
    """Return the available models."""
    return {
        "text_models": TEXT_MODELS,
        "image_models": IMAGE_MODELS
    }
@app.post("/v1/chat/completions")
async def chat_completion(request: ChatRequest):
    """Handle Blackbox and other text-generation requests."""
    if request.model not in TEXT_MODELS:
        raise HTTPException(
            status_code=400,
            detail=f"Invalid model. Available: {TEXT_MODELS}"
        )

    messages = [{"role": msg.role, "content": msg.content} for msg in request.messages]

    try:
        if request.stream:
            async def stream_generator():
                # Blackbox models go straight to the Blackbox provider;
                # everything else falls back through a retry chain.
                # (Provider availability varies across g4f releases.)
                if request.model in ["blackboxai", "blackboxai-pro"]:
                    provider = Blackbox
                else:
                    provider = RetryProvider([
                        g4f.Provider.Blackbox,
                        g4f.Provider.DeepSeek,
                        g4f.Provider.OpenaiChat
                    ])

                response = await g4f.ChatCompletion.create_async(
                    model=request.model,
                    messages=messages,
                    provider=provider,
                    temperature=request.temperature,
                    stream=True
                )

                # Emit Server-Sent Events: one "data:" line per chunk,
                # closed with an OpenAI-style [DONE] sentinel.
                async for chunk in response:
                    if isinstance(chunk, str):
                        yield f"data: {json.dumps({'content': chunk})}\n\n"
                    elif hasattr(chunk, 'choices'):
                        delta = chunk.choices[0].delta.content
                        if delta:  # skip empty/None deltas
                            yield f"data: {json.dumps({'content': delta})}\n\n"
                yield "data: [DONE]\n\n"

            return StreamingResponse(stream_generator(), media_type="text/event-stream")
        else:
            # Non-streaming path; provider=None lets g4f auto-select.
            response = await g4f.ChatCompletion.create_async(
                model=request.model,
                messages=messages,
                provider=Blackbox if request.model in ["blackboxai", "blackboxai-pro"] else None,
                temperature=request.temperature
            )
            return {"content": str(response)}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
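# Illustrative SSE output from the streaming branch above (chunk boundaries
# depend on the upstream provider):
#
#   data: {"content": "Hel"}
#   data: {"content": "lo!"}
#   data: [DONE]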
@app.post("/v1/images/generations")
async def generate_image(request: ImageRequest):
    """Handle Flux and other image generation."""
    if request.model not in IMAGE_MODELS:
        raise HTTPException(
            status_code=400,
            detail=f"Invalid model. Available: {IMAGE_MODELS}"
        )

    try:
        if request.model in ["flux", "flux-pro"]:
            # NOTE: g4f's image entry point differs between releases; this
            # keeps the ImageGeneration.create interface used here.
            image_data = g4f.ImageGeneration.create(
                prompt=request.prompt,
                model=request.model,
                provider=g4f.Provider.Blackbox
            )
            # Raw image bytes must be base64-encoded to form a data URL
            encoded = base64.b64encode(image_data).decode("ascii")
            return JSONResponse({
                "url": f"data:image/png;base64,{encoded}"
            })
        else:
            # Other image providers are not wired up yet
            raise HTTPException(
                status_code=501,
                detail=f"{request.model} implementation pending"
            )
    except HTTPException:
        # Let deliberate HTTP errors (like the 501 above) pass through
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)
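# Quick smoke test once the server is running (host/port as configured above):
#
#   curl http://localhost:8000/models
#   curl -N -X POST http://localhost:8000/v1/chat/completions \
#        -H "Content-Type: application/json" \
#        -d '{"model": "blackboxai", "messages": [{"role": "user", "content": "Hi"}], "stream": true}'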