from fastapi import FastAPI, HTTPException
from fastapi.responses import StreamingResponse, JSONResponse
from pydantic import BaseModel
from typing import List, Optional, Literal
import json

import g4f
from g4f.client import AsyncClient
from g4f.Provider import Blackbox, RetryProvider

app = FastAPI()

# Configure the Blackbox provider. g4f providers expose `url` and `working`
# as class attributes, so they can be monkey-patched at import time.
g4f.Provider.Blackbox.url = "https://www.blackbox.ai/api/chat"
g4f.Provider.Blackbox.working = True

# All available models from the Blackbox provider
TEXT_MODELS = [
    # Blackbox models
    "blackbox", "blackbox-pro", "blackbox-70b", "blackbox-180b",
    # OpenAI-compatible
    "gpt-4", "gpt-4-turbo", "gpt-4o", "gpt-4o-mini", "gpt-3.5-turbo",
    # Anthropic
    "claude-3-opus", "claude-3-sonnet", "claude-3-haiku", "claude-3.5", "claude-3.7-sonnet",
    # Meta
    "llama-3-70b", "llama-3-8b", "llama-3.3-70b", "llama-2-70b",
    # DeepSeek
    "deepseek-chat", "deepseek-v3", "deepseek-r1", "deepseek-coder",
    # Other
    "o1", "o3-mini", "mixtral-8x7b", "mixtral-small-24b", "qwq-32b",
    "command-r-plus", "code-llama-70b", "gemini-pro", "gemini-1.5-flash",
]

IMAGE_MODELS = [
    "flux", "flux-pro", "dall-e-3", "stable-diffusion-xl", "playground-v2.5",
    "kandinsky-3", "deepfloyd-if", "sdxl-turbo",
]


class Message(BaseModel):
    role: Literal["system", "user", "assistant"]
    content: str


class ChatRequest(BaseModel):
    model: str
    messages: List[Message]
    temperature: Optional[float] = 0.7
    max_tokens: Optional[int] = None
    stream: Optional[bool] = False


class ImageRequest(BaseModel):
    model: str
    prompt: str
    size: Optional[str] = "1024x1024"
    quality: Optional[Literal["standard", "hd"]] = "standard"


@app.get("/v1/models")
async def get_models():
    """Return all available models."""
    return {
        "text_models": TEXT_MODELS,
        "image_models": IMAGE_MODELS,
    }
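
# Example request (assumes the server is running locally on the port used by
# the uvicorn entry point at the bottom of this file):
#   curl http://localhost:7860/v1/models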


@app.post("/v1/chat/completions")
async def chat_completion(request: ChatRequest):
    """Handle text generation with Blackbox and other models."""
    if request.model not in TEXT_MODELS:
        raise HTTPException(
            status_code=400,
            detail=f"Invalid model. Available: {TEXT_MODELS}",
        )

    messages = [{"role": msg.role, "content": msg.content} for msg in request.messages]

    try:
        if request.stream:
            async def stream_generator():
                # With stream=True, g4f yields chunks asynchronously.
                response = await g4f.ChatCompletion.create_async(
                    model=request.model,
                    messages=messages,
                    provider=RetryProvider([Blackbox]),
                    temperature=request.temperature,
                    max_tokens=request.max_tokens,
                    stream=True,
                )
                async for chunk in response:
                    if isinstance(chunk, str):
                        yield f"data: {json.dumps({'content': chunk})}\n\n"
                    elif hasattr(chunk, "choices"):
                        # Some providers yield OpenAI-style chunk objects whose
                        # delta may be a dict or an object; handle both so the
                        # stream does not crash on `.get` for object deltas.
                        delta = chunk.choices[0].delta
                        if isinstance(delta, dict):
                            content = delta.get("content") or ""
                        else:
                            content = getattr(delta, "content", None) or ""
                        yield f"data: {json.dumps({'content': content})}\n\n"
                yield "data: [DONE]\n\n"

            return StreamingResponse(stream_generator(), media_type="text/event-stream")
        else:
            response = await g4f.ChatCompletion.create_async(
                model=request.model,
                messages=messages,
                provider=RetryProvider([Blackbox]),
                temperature=request.temperature,
                max_tokens=request.max_tokens,
            )
            return {"content": str(response)}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
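
# Example request (hypothetical prompt; assumes the server is on port 7860):
#   curl -X POST http://localhost:7860/v1/chat/completions \
#     -H "Content-Type: application/json" \
#     -d '{"model": "blackbox", "messages": [{"role": "user", "content": "Hi"}]}'
# Add '"stream": true' to the payload to receive server-sent events instead of
# a single JSON body.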


@app.post("/v1/images/generations")
async def generate_image(request: ImageRequest):
    """Handle image generation with Flux and other models."""
    if request.model not in IMAGE_MODELS:
        raise HTTPException(
            status_code=400,
            detail=f"Invalid model. Available: {IMAGE_MODELS}",
        )

    try:
        if request.model in ["flux", "flux-pro"]:
            # g4f exposes image generation through its client API
            # (g4f.client.AsyncClient) rather than a `g4f.ImageGeneration`
            # class. `response_format="b64_json"` requests base64 output so
            # the image can be returned inline as a data URL. The `size` and
            # `quality` fields are accepted for API compatibility but not
            # forwarded, since provider support for them varies.
            client = AsyncClient(image_provider=Blackbox)
            response = await client.images.generate(
                model=request.model,
                prompt=request.prompt,
                response_format="b64_json",
            )
            return JSONResponse({
                "url": f"data:image/png;base64,{response.data[0].b64_json}",
                "model": request.model,
            })
        else:
            # Implementation for other image providers
            raise HTTPException(
                status_code=501,
                detail=f"{request.model} implementation pending",
            )
    except HTTPException:
        # Re-raise intentional HTTP errors (e.g. the 501 above) instead of
        # letting the generic handler convert them into a 500.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
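
# Example request (hypothetical prompt; flux availability depends on the
# upstream Blackbox provider at runtime):
#   curl -X POST http://localhost:7860/v1/images/generations \
#     -H "Content-Type: application/json" \
#     -d '{"model": "flux", "prompt": "a lighthouse at dusk"}'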


if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)
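
# Minimal Python usage sketch for the non-streaming chat path (assumes the
# server is reachable on localhost:7860 and that `requests` is installed):
#   import requests
#   r = requests.post(
#       "http://localhost:7860/v1/chat/completions",
#       json={"model": "blackbox",
#             "messages": [{"role": "user", "content": "Hello"}]},
#   )
#   print(r.json()["content"])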