snackshell committed
Commit 3156680 · verified · 1 Parent(s): 8b706d4

Update app.py

Files changed (1)
  1. app.py +101 -98
app.py CHANGED
@@ -1,56 +1,68 @@
- from fastapi import FastAPI, HTTPException, Depends, Header, Request
  from fastapi.responses import StreamingResponse
  from pydantic import BaseModel
  from typing import List, Optional, Literal
  import json
  import g4f
  from g4f.Provider import OpenaiAccount, RetryProvider
- from g4f.models import ModelUtils

  app = FastAPI()

- # Complete list of available models from G4F
- models = [
-     # OpenAI models
-     "gpt-4", "gpt-4-turbo", "gpt-4o", "gpt-3.5-turbo",
-
-     # Anthropic models
-     "claude-3-opus", "claude-3-sonnet", "claude-3-haiku", "claude-2.1",
-
-     # Google models
-     "gemini-pro", "gemini-1.5-pro", "gemini-1.5-flash",
-
-     # Meta models
-     "llama-2-70b", "llama-2-13b", "llama-2-7b", "llama-3-70b", "llama-3-8b",
-
-     # Other providers
-     "mistral-7b", "mixtral-8x7b", "command-r-plus", "cohere-command-r",
-     "deepseek-chat", "deepseek-coder", "code-llama-34b", "code-llama-70b",
-
-     # Specialized models
-     "grok-1", "grok-1.5", "grok-2", "o1", "o3-mini", "flux", "flux-pro"
- ]
-
- # Configure G4F backend
- class CustomBackend(g4f.Provider.BackendApi):
-     working = True
-     ssl = False
-     url = "https://ahe.hopto.org"
-     headers = {"Authorization": "Basic Z2dnOmc0Zl8="}
-
-     @classmethod
-     async def create_async_generator(
-         cls,
-         model: str,
-         messages: g4f.typing.Messages,
-         **kwargs
-     ) -> g4f.typing.AsyncResult:
-         if model in OpenaiAccount.get_models():
-             kwargs["provider"] = OpenaiAccount
-         async for chunk in super().create_async_generator(model, messages, **kwargs):
-             yield chunk

- # Pydantic models
  class Message(BaseModel):
      role: Literal["system", "user", "assistant"]
      content: str
@@ -61,53 +73,36 @@ class ChatRequest(BaseModel):
      temperature: Optional[float] = 0.7
      max_tokens: Optional[int] = None
      top_p: Optional[float] = 0.9
-     streaming: bool = True
-
- class ChatResponse(BaseModel):
-     role: str = "assistant"
-     content: str
-     model: Optional[str] = None

  class ModelListResponse(BaseModel):
-     models: List[str]
-
- # API Key Verification
- async def verify_api_key(x_api_key: str = Header(...)):
-     if x_api_key != "fb207532285886a5568298b4b4e61124":
-         raise HTTPException(status_code=403, detail="Invalid API key")

- @app.get("/v1/models", response_model=ModelListResponse, tags=["Models"])
  async def get_models():
-     """Get list of all available models"""
-     return ModelListResponse(models=models)

- @app.post("/v1/chat/completions", response_model=Optional[ChatResponse], tags=["Chat"])
- async def chat_completion(
-     request: ChatRequest,
-     api_key: str = Depends(verify_api_key)
- ):
-     """
-     Handle chat completion requests with streaming support.
-
-     Args:
-         request: ChatRequest containing model, messages and parameters
-         api_key: Verified API key
-
-     Returns:
-         Either a StreamingResponse or direct ChatResponse
-     """
-     # Validate model
-     if request.model not in models:
          raise HTTPException(
              status_code=400,
-             detail=f"Invalid model. Available models: {', '.join(models)}"
          )
-
-     # Prepare messages
      messages = [{"role": msg.role, "content": msg.content} for msg in request.messages]

      try:
-         if request.streaming:
              async def stream_generator():
                  response = await g4f.ChatCompletion.create_async(
                      model=request.model,
@@ -115,45 +110,53 @@ async def chat_completion(
                      temperature=request.temperature,
                      top_p=request.top_p,
                      max_tokens=request.max_tokens,
-                     provider=RetryProvider([CustomBackend])
                  )

                  async for chunk in response:
-                     if isinstance(chunk, dict):
-                         yield f"data: {json.dumps(chunk)}\n\n"
-                     else:
-                         yield f"data: {json.dumps({'content': str(chunk)})}\n\n"
                  yield "data: [DONE]\n\n"
-
-             return StreamingResponse(
-                 stream_generator(),
-                 media_type="text/event-stream"
-             )
          else:
              response = await g4f.ChatCompletion.create_async(
                  model=request.model,
                  messages=messages,
                  temperature=request.temperature,
                  top_p=request.top_p,
-                 max_tokens=request.max_tokens,
-                 provider=RetryProvider([CustomBackend])
              )
-
-         if isinstance(response, str):
-             return ChatResponse(content=response, model=request.model)
-         elif isinstance(response, dict):
-             return ChatResponse(
-                 content=response.get("choices", [{}])[0].get("message", {}).get("content", ""),
-                 model=request.model
-             )
-         return ChatResponse(content=str(response), model=request.model)
-
      except Exception as e:
          raise HTTPException(
-             status_code=500,
-             detail=f"Error processing request: {str(e)}"
          )

  if __name__ == "__main__":
      import uvicorn
-     uvicorn.run(app, host="0.0.0.0", port=7860)
+ from fastapi import FastAPI, HTTPException, Depends, Header
  from fastapi.responses import StreamingResponse
  from pydantic import BaseModel
  from typing import List, Optional, Literal
  import json
  import g4f
  from g4f.Provider import OpenaiAccount, RetryProvider

  app = FastAPI()

+ # Organized model list (updated with all G4F available models)
+ MODELS = {
+     # OpenAI
+     "openai": [
+         "gpt-4o", "gpt-4o-mini", "gpt-4", "gpt-4-turbo",
+         "gpt-3.5-turbo", "gpt-3.5-turbo-16k"
+     ],
+
+     # Anthropic
+     "anthropic": [
+         "claude-3-opus", "claude-3-sonnet", "claude-3-haiku",
+         "claude-3.5", "claude-3.7-sonnet", "claude-2.1"
+     ],
+
+     # Google
+     "google": [
+         "gemini-pro", "gemini-1.5-pro", "gemini-1.5-flash",
+         "gemini-2.5-pro-exp-03-25"
+     ],
+
+     # Meta
+     "meta": [
+         "llama-3-70b", "llama-3-8b", "llama-3.1-405b",
+         "llama-2-70b", "llama-2-13b", "llama-2-7b"
+     ],
+
+     # XAI (Grok)
+     "xai": [
+         "grok-1", "grok-1.5", "grok-2", "grok-3"
+     ],
+
+     # Other
+     "other": [
+         "o1", "o3-mini", "mistral-7b", "mixtral-8x7b",
+         "command-r-plus", "deepseek-chat", "code-llama-70b"
+     ],
+
+     # Image Models
+     "image": [
+         "dall-e-3", "stable-diffusion-xl",
+         "flux", "flux-pro", "playground-v2.5"
+     ]
+ }
+
+ # Flattened list for API endpoint
+ ALL_MODELS = [
+     *MODELS["openai"],
+     *MODELS["anthropic"],
+     *MODELS["google"],
+     *MODELS["meta"],
+     *MODELS["xai"],
+     *MODELS["other"]
+ ]

+ # Pydantic Models
  class Message(BaseModel):
      role: Literal["system", "user", "assistant"]
      content: str
      temperature: Optional[float] = 0.7
      max_tokens: Optional[int] = None
      top_p: Optional[float] = 0.9
+     stream: Optional[bool] = True

  class ModelListResponse(BaseModel):
+     openai: List[str]
+     anthropic: List[str]
+     google: List[str]
+     meta: List[str]
+     xai: List[str]
+     other: List[str]
+     image: List[str]

+ # API Endpoints
+ @app.get("/v1/models", response_model=ModelListResponse)
  async def get_models():
+     """Get all available models categorized by provider"""
+     return ModelListResponse(**MODELS)

+ @app.post("/v1/chat/completions")
+ async def chat_completion(request: ChatRequest):
+     """Handle chat completion requests"""
+     if request.model not in ALL_MODELS:
          raise HTTPException(
              status_code=400,
+             detail=f"Invalid model. Available: {ALL_MODELS}"
          )
+
      messages = [{"role": msg.role, "content": msg.content} for msg in request.messages]

      try:
+         if request.stream:
              async def stream_generator():
                  response = await g4f.ChatCompletion.create_async(
                      model=request.model,
                      temperature=request.temperature,
                      top_p=request.top_p,
                      max_tokens=request.max_tokens,
+                     provider=RetryProvider([g4f.Provider.BackendApi])
                  )

                  async for chunk in response:
+                     yield f"data: {json.dumps({'content': str(chunk)})}\n\n"
                  yield "data: [DONE]\n\n"
+
+             return StreamingResponse(stream_generator(), media_type="text/event-stream")
          else:
              response = await g4f.ChatCompletion.create_async(
                  model=request.model,
                  messages=messages,
                  temperature=request.temperature,
                  top_p=request.top_p,
+                 provider=RetryProvider([g4f.Provider.BackendApi])
              )
+             return {"content": str(response)}
+
      except Exception as e:
+         raise HTTPException(status_code=500, detail=str(e))
+
+ # Image Generation Endpoint
+ @app.post("/v1/images/generations")
+ async def generate_image(prompt: str, model: str = "dall-e-3"):
+     if model not in MODELS["image"]:
          raise HTTPException(
+             status_code=400,
+             detail=f"Invalid image model. Available: {MODELS['image']}"
          )
+
+     try:
+         if model in ["flux", "flux-pro"]:
+             image_data = g4f.ImageGeneration.create(
+                 prompt=prompt,
+                 model=model,
+                 provider=g4f.Provider.BackendApi
+             )
+             return {"url": f"data:image/png;base64,{image_data.decode('utf-8')}"}
+         else:
+             # Implementation for other image providers
+             raise HTTPException(
+                 status_code=501,
+                 detail=f"{model} implementation pending"
+             )
+     except Exception as e:
+         raise HTTPException(status_code=500, detail=str(e))

  if __name__ == "__main__":
      import uvicorn
+     uvicorn.run(app, host="0.0.0.0", port=8000)
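
For quick verification, a minimal client sketch against the updated endpoints, assuming the server from this commit is running locally on port 8000; BASE_URL, the model name, and the prompt are illustrative assumptions, not part of the commit:

import json
import requests

BASE_URL = "http://localhost:8000"  # assumption: local deployment of this app

# GET /v1/models returns the models grouped by provider.
models = requests.get(f"{BASE_URL}/v1/models").json()
print(models["openai"])

# POST /v1/chat/completions streams server-sent events: one
# "data: {...}" line per chunk, terminated by "data: [DONE]".
payload = {
    "model": "gpt-4o",  # any entry from ALL_MODELS
    "messages": [{"role": "user", "content": "Hello!"}],
    "stream": True,
}
with requests.post(f"{BASE_URL}/v1/chat/completions", json=payload, stream=True) as resp:
    for line in resp.iter_lines(decode_unicode=True):
        if not line or not line.startswith("data: "):
            continue
        data = line[len("data: "):]
        if data == "[DONE]":
            break
        print(json.loads(data)["content"], end="", flush=True)

# POST /v1/images/generations declares scalar parameters, so FastAPI
# reads prompt and model from the query string.
image = requests.post(
    f"{BASE_URL}/v1/images/generations",
    params={"prompt": "a red fox at dusk", "model": "flux"},
).json()

Note that only the flux models are wired up in this commit; the other image models return 501.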