Create app.py

app.py (ADDED)
from fastapi import FastAPI, HTTPException
from fastapi.responses import StreamingResponse, JSONResponse
from pydantic import BaseModel
from typing import List, Optional, Literal
import base64
import json
import g4f
from g4f.Provider import Blackbox, RetryProvider
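
# Runs as a Hugging Face Space; needs fastapi, uvicorn, and g4f installed
# (typically declared in the Space's requirements.txt).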

app = FastAPI()

# Configure Blackbox provider
g4f.Provider.Blackbox.url = "https://www.blackbox.ai/api/chat"
g4f.Provider.Blackbox.working = True

# Available Models
TEXT_MODELS = [
    "blackboxai", "blackboxai-pro", "gpt-4o-mini", "deepseek-chat",
    "deepseek-v3", "deepseek-r1", "gpt-4o", "o1", "o3-mini",
    "claude-3.7-sonnet", "llama-3.3-70b", "mixtral-small-24b", "qwq-32b"
]

IMAGE_MODELS = [
    "flux", "flux-pro", "dall-e-3", "stable-diffusion-xl"
]

# Pydantic Models
class Message(BaseModel):
    role: Literal["system", "user", "assistant"]
    content: str

class ChatRequest(BaseModel):
    model: str
    messages: List[Message]
    temperature: Optional[float] = 0.7
    max_tokens: Optional[int] = None  # accepted but not forwarded to g4f below
    stream: Optional[bool] = False

class ImageRequest(BaseModel):
    model: str
    prompt: str
    size: Optional[str] = "1024x1024"  # accepted but not forwarded to g4f below

@app.get("/v1/models")
async def get_models():
    """Return available models"""
    return {
        "text_models": TEXT_MODELS,
        "image_models": IMAGE_MODELS
    }
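
# Example (assuming the server runs locally on the port set at the bottom):
#   curl http://localhost:8000/v1/models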

@app.post("/v1/chat/completions")
async def chat_completion(request: ChatRequest):
    """Handle Blackbox and other text generation requests"""
    if request.model not in TEXT_MODELS:
        raise HTTPException(
            status_code=400,
            detail=f"Invalid model. Available: {TEXT_MODELS}"
        )

    messages = [{"role": msg.role, "content": msg.content} for msg in request.messages]

    try:
        if request.stream:
            async def stream_generator():
                # Special handling for Blackbox models; everything else
                # goes through a retry chain of providers.
                if request.model in ["blackboxai", "blackboxai-pro"]:
                    provider = Blackbox
                else:
                    provider = RetryProvider([
                        g4f.Provider.Blackbox,
                        g4f.Provider.DeepSeek,
                        g4f.Provider.OpenaiChat
                    ])

                response = await g4f.ChatCompletion.create_async(
                    model=request.model,
                    messages=messages,
                    provider=provider,
                    temperature=request.temperature,
                    stream=True
                )

                async for chunk in response:
                    if isinstance(chunk, str):
                        yield f"data: {json.dumps({'content': chunk})}\n\n"
                    elif hasattr(chunk, 'choices'):
                        # The delta can be empty on role/stop chunks; skip those.
                        delta = chunk.choices[0].delta.content
                        if delta:
                            yield f"data: {json.dumps({'content': delta})}\n\n"
                yield "data: [DONE]\n\n"

            return StreamingResponse(stream_generator(), media_type="text/event-stream")

        else:
            # Non-streaming response
            response = await g4f.ChatCompletion.create_async(
                model=request.model,
                messages=messages,
                provider=Blackbox if request.model in ["blackboxai", "blackboxai-pro"] else None,
                temperature=request.temperature
            )

            return {"content": str(response)}

    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
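
# Example (assuming a local server; the model must be in TEXT_MODELS):
#   curl -X POST http://localhost:8000/v1/chat/completions \
#     -H "Content-Type: application/json" \
#     -d '{"model": "blackboxai", "messages": [{"role": "user", "content": "Hello"}]}'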

@app.post("/v1/images/generations")
async def generate_image(request: ImageRequest):
    """Handle Flux and other image generation"""
    if request.model not in IMAGE_MODELS:
        raise HTTPException(
            status_code=400,
            detail=f"Invalid model. Available: {IMAGE_MODELS}"
        )

    try:
        if request.model in ["flux", "flux-pro"]:
            image_data = g4f.ImageGeneration.create(
                prompt=request.prompt,
                model=request.model,
                provider=g4f.Provider.Blackbox
            )
            # image_data is raw image bytes, so base64-encode it before
            # embedding it in a data URL.
            return JSONResponse({
                "url": f"data:image/png;base64,{base64.b64encode(image_data).decode('ascii')}"
            })
        else:
            # Implementation for other image providers
            raise HTTPException(
                status_code=501,
                detail=f"{request.model} implementation pending"
            )
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
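
# Example (assuming a local server; the model must be in IMAGE_MODELS):
#   curl -X POST http://localhost:8000/v1/images/generations \
#     -H "Content-Type: application/json" \
#     -d '{"model": "flux", "prompt": "a lighthouse at dusk"}'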

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)
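
For reference, a minimal client sketch for the streaming endpoint. This is not part of the Space: it assumes the app is running locally on port 8000 and uses the third-party requests library.

import json
import requests

# Request a streamed chat completion and print tokens as they arrive.
resp = requests.post(
    "http://localhost:8000/v1/chat/completions",
    json={
        "model": "blackboxai",  # any entry from TEXT_MODELS
        "messages": [{"role": "user", "content": "Say hello"}],
        "stream": True,
    },
    stream=True,
)
for line in resp.iter_lines():
    if not line:
        continue
    payload = line.decode("utf-8").removeprefix("data: ")
    if payload == "[DONE]":
        break
    print(json.loads(payload)["content"], end="", flush=True)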