snackshell committed
Commit 8b706d4 · verified · 1 Parent(s): 1a94ac5

Update app.py

Files changed (1)
  1. app.py +100 -77
app.py CHANGED
@@ -1,135 +1,158 @@
 from fastapi import FastAPI, HTTPException, Depends, Header, Request
 from fastapi.responses import StreamingResponse
 from pydantic import BaseModel
-from typing import List
-from g4f import ChatCompletion
-from g4f.typing import Messages, AsyncResult
-import g4f.Provider
-from g4f.Provider import BackendApi
+from typing import List, Optional, Literal
+import json
+import g4f
+from g4f.Provider import OpenaiAccount, RetryProvider
+from g4f.models import ModelUtils

 app = FastAPI()

-# List of available models
+# Complete list of available models from G4F
 models = [
-    "gpt-4o", "gpt-4o-mini", "gpt-4",
-    "gpt-4-turbo", "gpt-3.5-turbo",
-    "claude-3.7-sonnet", "o3-mini", "o1", "grok-3", "gemini-2.5-pro-exp-03-25", "claude-3.5",
-    "llama-3.1-405b"
+    # OpenAI models
+    "gpt-4", "gpt-4-turbo", "gpt-4o", "gpt-3.5-turbo",
+
+    # Anthropic models
+    "claude-3-opus", "claude-3-sonnet", "claude-3-haiku", "claude-2.1",
+
+    # Google models
+    "gemini-pro", "gemini-1.5-pro", "gemini-1.5-flash",
+
+    # Meta models
+    "llama-2-70b", "llama-2-13b", "llama-2-7b", "llama-3-70b", "llama-3-8b",
+
+    # Other providers
+    "mistral-7b", "mixtral-8x7b", "command-r-plus", "cohere-command-r",
+    "deepseek-chat", "deepseek-coder", "code-llama-34b", "code-llama-70b",
+
+    # Specialized models
+    "grok-1", "grok-1.5", "grok-2", "o1", "o3-mini", "flux", "flux-pro"
 ]

-url = "https://ahe.hopto.org"
-headers = {"Authorization": "Basic Z2dnOmc0Zl8="}
-
-BackendApi.working = True
-BackendApi.ssl = False
-BackendApi.url = url
-BackendApi.headers = headers
-
-class BackendApi(BackendApi):
+# Configure G4F backend
+class CustomBackend(g4f.Provider.BackendApi):
     working = True
     ssl = False
-    url = url
-    headers = headers
-    image_models = ["flux", "flux-pro"]
-    models = ["deepseek-r1", *g4f.Provider.OpenaiAccount.get_models(), "flux", "flux-pro"]
-
+    url = "https://ahe.hopto.org"
+    headers = {"Authorization": "Basic Z2dnOmc0Zl8="}
+
     @classmethod
     async def create_async_generator(
         cls,
         model: str,
-        messages: Messages,
+        messages: g4f.typing.Messages,
         **kwargs
-    ) -> AsyncResult:
-        if model in g4f.Provider.OpenaiAccount.get_models():
-            kwargs["provider"] = "OpenaiAccount"
+    ) -> g4f.typing.AsyncResult:
+        if model in OpenaiAccount.get_models():
+            kwargs["provider"] = OpenaiAccount
         async for chunk in super().create_async_generator(model, messages, **kwargs):
             yield chunk

-# Request model
+# Pydantic models
 class Message(BaseModel):
-    role: str
+    role: Literal["system", "user", "assistant"]
     content: str

 class ChatRequest(BaseModel):
     model: str
     messages: List[Message]
+    temperature: Optional[float] = 0.7
+    max_tokens: Optional[int] = None
+    top_p: Optional[float] = 0.9
     streaming: bool = True

 class ChatResponse(BaseModel):
-    role: str
+    role: str = "assistant"
     content: str
+    model: Optional[str] = None

-# Dependency to check API key
+class ModelListResponse(BaseModel):
+    models: List[str]
+
+# API Key Verification
 async def verify_api_key(x_api_key: str = Header(...)):
     if x_api_key != "fb207532285886a5568298b4b4e61124":
         raise HTTPException(status_code=403, detail="Invalid API key")

-@app.get("/v1/models", tags=["Models"])
+@app.get("/v1/models", response_model=ModelListResponse, tags=["Models"])
 async def get_models():
-    """Endpoint to get the list of available models."""
-    return {"models": models}
+    """Get list of all available models"""
+    return ModelListResponse(models=models)

-@app.post("/v1/chat/completions", tags=["Chat Completion"])
+@app.post("/v1/chat/completions", response_model=Optional[ChatResponse], tags=["Chat"])
 async def chat_completion(
-    chat_request: ChatRequest,
+    request: ChatRequest,
     api_key: str = Depends(verify_api_key)
 ):
     """
-    Handle chat completion requests with optional streaming.
-    Removed rate limiting for unrestricted access.
+    Handle chat completion requests with streaming support.
+
+    Args:
+        request: ChatRequest containing model, messages and parameters
+        api_key: Verified API key
+
+    Returns:
+        Either a StreamingResponse or direct ChatResponse
     """
     # Validate model
-    if chat_request.model not in models:
-        raise HTTPException(status_code=400, detail="Invalid model selected.")
-
-    # Check if messages are provided
-    if not chat_request.messages:
-        raise HTTPException(status_code=400, detail="Messages cannot be empty.")
-
-    # Convert messages to the format expected by ChatCompletion
-    formatted_messages = [{"role": msg.role, "content": msg.content} for msg in chat_request.messages]
-
+    if request.model not in models:
+        raise HTTPException(
+            status_code=400,
+            detail=f"Invalid model. Available models: {', '.join(models)}"
+        )
+
+    # Prepare messages
+    messages = [{"role": msg.role, "content": msg.content} for msg in request.messages]
+
     try:
-        if chat_request.streaming:
-            # Stream the response
-            def event_stream():
-                response = ChatCompletion.create(
-                    model=chat_request.model,
-                    messages=formatted_messages,
-                    stream=True
+        if request.streaming:
+            async def stream_generator():
+                response = await g4f.ChatCompletion.create_async(
+                    model=request.model,
+                    messages=messages,
+                    temperature=request.temperature,
+                    top_p=request.top_p,
+                    max_tokens=request.max_tokens,
+                    provider=RetryProvider([CustomBackend])
                 )

-                for chunk in response:
-                    if isinstance(chunk, dict) and 'choices' in chunk:
-                        for choice in chunk['choices']:
-                            if 'delta' in choice and 'content' in choice['delta']:
-                                yield f"data: {json.dumps({'content': choice['delta']['content']})}\n\n"
-                            elif 'message' in choice:
-                                yield f"data: {json.dumps({'content': choice['message']['content']})}\n\n"
+                async for chunk in response:
+                    if isinstance(chunk, dict):
+                        yield f"data: {json.dumps(chunk)}\n\n"
                     else:
                         yield f"data: {json.dumps({'content': str(chunk)})}\n\n"
+                yield "data: [DONE]\n\n"

-            return StreamingResponse(event_stream(), media_type="text/event-stream")
+            return StreamingResponse(
+                stream_generator(),
+                media_type="text/event-stream"
+            )
         else:
-            # Non-streaming response
-            response = ChatCompletion.create(
-                model=chat_request.model,
-                messages=formatted_messages,
-                stream=False
+            response = await g4f.ChatCompletion.create_async(
+                model=request.model,
+                messages=messages,
+                temperature=request.temperature,
+                top_p=request.top_p,
+                max_tokens=request.max_tokens,
+                provider=RetryProvider([CustomBackend])
             )
-
+
             if isinstance(response, str):
-                return ChatResponse(role="assistant", content=response)
-            elif isinstance(response, dict) and 'choices' in response:
+                return ChatResponse(content=response, model=request.model)
+            elif isinstance(response, dict):
                 return ChatResponse(
-                    role="assistant",
-                    content=response['choices'][0]['message']['content']
+                    content=response.get("choices", [{}])[0].get("message", {}).get("content", ""),
+                    model=request.model
                 )
-            else:
-                raise HTTPException(status_code=500, detail="Unexpected response structure.")
+            return ChatResponse(content=str(response), model=request.model)

     except Exception as e:
-        raise HTTPException(status_code=500, detail=str(e))
+        raise HTTPException(
+            status_code=500,
+            detail=f"Error processing request: {str(e)}"
+        )

 if __name__ == "__main__":
     import uvicorn
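
For reviewers who want to exercise the updated endpoints, here is a minimal client sketch. It is not part of the commit: the base URL http://localhost:8000 is an assumption (the uvicorn.run(...) call falls outside this hunk), requests is an assumed dependency, and the x-api-key value is the one hard-coded in app.py.

# Hypothetical smoke test for the updated API; not part of the commit.
import json
import requests  # assumed dependency

BASE_URL = "http://localhost:8000"  # assumption: host/port are outside this hunk
HEADERS = {"x-api-key": "fb207532285886a5568298b4b4e61124"}  # key hard-coded in app.py

# GET /v1/models now returns a typed ModelListResponse: {"models": [...]}
print(requests.get(f"{BASE_URL}/v1/models", headers=HEADERS).json())

# POST /v1/chat/completions with streaming=True answers with Server-Sent
# Events: one "data: {...}" line per chunk, terminated by "data: [DONE]".
payload = {
    "model": "gpt-4o",
    "messages": [{"role": "user", "content": "Say hello"}],
    "streaming": True,
}
with requests.post(f"{BASE_URL}/v1/chat/completions",
                   headers=HEADERS, json=payload, stream=True) as resp:
    for line in resp.iter_lines(decode_unicode=True):
        if not line or not line.startswith("data: "):
            continue
        data = line[len("data: "):]
        if data == "[DONE]":
            break
        print(json.loads(data).get("content", ""), end="", flush=True)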