snackshell committed
Commit c770256 · verified · 1 Parent(s): eaf04e2

Update app.py

Files changed (1): app.py (+38 -30)
app.py CHANGED
@@ -1,11 +1,10 @@
-from fastapi import FastAPI, HTTPException, Depends, Header
+from fastapi import FastAPI, HTTPException
 from fastapi.responses import StreamingResponse, JSONResponse
 from pydantic import BaseModel
 from typing import List, Optional, Literal
 import json
 import g4f
 from g4f.Provider import Blackbox, RetryProvider
-from g4f.models import ModelUtils
 
 app = FastAPI()
 
@@ -13,18 +12,33 @@ app = FastAPI()
 g4f.Provider.Blackbox.url = "https://www.blackbox.ai/api/chat"
 g4f.Provider.Blackbox.working = True
 
-# Available Models
+# All available models from Blackbox provider
 TEXT_MODELS = [
-    "blackboxai", "blackboxai-pro", "gpt-4o-mini", "deepseek-chat",
-    "deepseek-v3", "deepseek-r1", "gpt-4o", "o1", "o3-mini",
-    "claude-3.7-sonnet", "llama-3.3-70b", "mixtral-small-24b", "qwq-32b"
+    # Blackbox models
+    "blackbox", "blackbox-pro", "blackbox-70b", "blackbox-180b",
+
+    # OpenAI compatible
+    "gpt-4", "gpt-4-turbo", "gpt-4o", "gpt-4o-mini", "gpt-3.5-turbo",
+
+    # Anthropic
+    "claude-3-opus", "claude-3-sonnet", "claude-3-haiku", "claude-3.5", "claude-3.7-sonnet",
+
+    # Meta
+    "llama-3-70b", "llama-3-8b", "llama-3.3-70b", "llama-2-70b",
+
+    # DeepSeek
+    "deepseek-chat", "deepseek-v3", "deepseek-r1", "deepseek-coder",
+
+    # Other
+    "o1", "o3-mini", "mixtral-8x7b", "mixtral-small-24b", "qwq-32b",
+    "command-r-plus", "code-llama-70b", "gemini-pro", "gemini-1.5-flash"
 ]
 
 IMAGE_MODELS = [
-    "flux", "flux-pro", "dall-e-3", "stable-diffusion-xl"
+    "flux", "flux-pro", "dall-e-3", "stable-diffusion-xl", "playground-v2.5",
+    "kandinsky-3", "deepfloyd-if", "sdxl-turbo"
 ]
 
-# Pydantic Models
 class Message(BaseModel):
     role: Literal["system", "user", "assistant"]
     content: str
@@ -40,10 +54,11 @@ class ImageRequest(BaseModel):
     model: str
     prompt: str
     size: Optional[str] = "1024x1024"
+    quality: Optional[Literal["standard", "hd"]] = "standard"
 
 @app.get("/v1/models")
 async def get_models():
-    """Return available models"""
+    """Return all available models"""
     return {
         "text_models": TEXT_MODELS,
         "image_models": IMAGE_MODELS
@@ -51,7 +66,7 @@ async def get_models():
 
 @app.post("/v1/chat/completions")
 async def chat_completion(request: ChatRequest):
-    """Handle Blackbox and other text generation requests"""
+    """Handle text generation with Blackbox and other models"""
     if request.model not in TEXT_MODELS:
         raise HTTPException(
             status_code=400,
@@ -63,21 +78,12 @@ async def chat_completion(request: ChatRequest):
     try:
         if request.stream:
             async def stream_generator():
-                # Special handling for Blackbox models
-                if request.model in ["blackboxai", "blackboxai-pro"]:
-                    provider = Blackbox
-                else:
-                    provider = RetryProvider([
-                        g4f.Provider.Blackbox,
-                        g4f.Provider.DeepSeek,
-                        g4f.Provider.OpenaiChat
-                    ])
-
                 response = await g4f.ChatCompletion.create_async(
                     model=request.model,
                     messages=messages,
-                    provider=provider,
+                    provider=RetryProvider([Blackbox]),
                     temperature=request.temperature,
+                    max_tokens=request.max_tokens,
                     stream=True
                 )
 
@@ -85,20 +91,20 @@ async def chat_completion(request: ChatRequest):
                     if isinstance(chunk, str):
                         yield f"data: {json.dumps({'content': chunk})}\n\n"
                     elif hasattr(chunk, 'choices'):
-                        yield f"data: {json.dumps({'content': chunk.choices[0].delta.content})}\n\n"
+                        content = chunk.choices[0].delta.get('content', '')
+                        yield f"data: {json.dumps({'content': content})}\n\n"
                 yield "data: [DONE]\n\n"
 
             return StreamingResponse(stream_generator(), media_type="text/event-stream")
 
         else:
-            # Non-streaming response
             response = await g4f.ChatCompletion.create_async(
                 model=request.model,
                 messages=messages,
-                provider=Blackbox if request.model in ["blackboxai", "blackboxai-pro"] else None,
-                temperature=request.temperature
+                provider=RetryProvider([Blackbox]),
+                temperature=request.temperature,
+                max_tokens=request.max_tokens
             )
-
             return {"content": str(response)}
 
     except Exception as e:
@@ -106,7 +112,7 @@ async def chat_completion(request: ChatRequest):
 
 @app.post("/v1/images/generations")
 async def generate_image(request: ImageRequest):
-    """Handle Flux and other image generation"""
+    """Handle image generation with Flux and other models"""
    if request.model not in IMAGE_MODELS:
         raise HTTPException(
             status_code=400,
@@ -118,10 +124,12 @@ async def generate_image(request: ImageRequest):
         image_data = g4f.ImageGeneration.create(
             prompt=request.prompt,
             model=request.model,
-            provider=g4f.Provider.Blackbox
+            provider=Blackbox,
+            size=request.size
         )
         return JSONResponse({
-            "url": f"data:image/png;base64,{image_data.decode('utf-8')}"
+            "url": f"data:image/png;base64,{image_data.decode('utf-8')}",
+            "model": request.model
         })
     else:
         # Implementation for other image providers
@@ -134,4 +142,4 @@ async def generate_image(request: ImageRequest):
 
 if __name__ == "__main__":
     import uvicorn
-    uvicorn.run(app, host="0.0.0.0", port=8000)
+    uvicorn.run(app, host="0.0.0.0", port=7860)
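For reference, the reworked endpoints can be exercised with a short client. A minimal sketch, assuming the app is reachable at http://localhost:7860 (the port this commit switches to) and that the requests library is installed:

import requests

BASE_URL = "http://localhost:7860"  # assumed local deployment; adjust as needed

# List the expanded text and image model catalogs served by /v1/models.
resp = requests.get(f"{BASE_URL}/v1/models")
resp.raise_for_status()
models = resp.json()
print(len(models["text_models"]), "text models,", len(models["image_models"]), "image models")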
 
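A sketch of a non-streaming completion call. The payload fields are inferred from how the handler reads request.model, request.temperature, request.max_tokens, and request.stream; the ChatRequest model itself sits outside this diff, and the model name and prompts here are illustrative:

import requests

payload = {
    "model": "blackbox",  # any entry from TEXT_MODELS
    "messages": [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Summarize what this API does."},
    ],
    "temperature": 0.7,   # inferred field; its default is defined outside this diff
    "max_tokens": 256,    # inferred field
    "stream": False,
}
resp = requests.post("http://localhost:7860/v1/chat/completions", json=payload)
resp.raise_for_status()
print(resp.json()["content"])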
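Since stream_generator() emits server-sent events, each a "data: " line carrying a JSON object with a content key and terminated by "data: [DONE]", a client can consume the stream incrementally. A sketch under the same local-deployment assumption:

import json
import requests

payload = {
    "model": "blackbox",
    "messages": [{"role": "user", "content": "Count to five."}],
    "stream": True,
}
with requests.post("http://localhost:7860/v1/chat/completions",
                   json=payload, stream=True) as resp:
    resp.raise_for_status()
    for line in resp.iter_lines(decode_unicode=True):
        if not line or not line.startswith("data: "):
            continue  # skip the blank separator lines between events
        data = line[len("data: "):]
        if data == "[DONE]":
            break
        print(json.loads(data)["content"], end="", flush=True)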
 
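The image endpoint now returns the base64 payload as a data URL along with the model name, so a client has to strip the "data:image/png;base64," prefix before decoding. A sketch, with an illustrative prompt and output filename:

import base64
import requests

payload = {"model": "flux", "prompt": "a lighthouse at dusk", "size": "1024x1024"}
resp = requests.post("http://localhost:7860/v1/images/generations", json=payload)
resp.raise_for_status()
body = resp.json()
b64 = body["url"].split("base64,", 1)[1]  # drop the data-URL prefix
with open("generated.png", "wb") as f:    # filename is illustrative
    f.write(base64.b64decode(b64))
print("saved image generated by", body["model"])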