import json

import g4f.Provider
from fastapi import FastAPI, HTTPException, Depends, Header
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from typing import List
from g4f import ChatCompletion
from g4f.typing import Messages, AsyncResult
from g4f.Provider import BackendApi

app = FastAPI()

# List of available models
models = [
    "gpt-4o", "gpt-4o-mini", "gpt-4",
    "gpt-4-turbo", "gpt-3.5-turbo",
    "claude-3.7-sonnet", "o3-mini", "o1", "grok-3", "gemini-2.5-pro-exp-03-25", "claude-3.5",
    "llama-3.1-405b", "deepseek-r1", *g4f.Provider.OpenaiAccount.get_models(), "flux", "flux-pro"
]
url = "https://ahe.hopto.org" | |
headers = {"Authorization": "Basic Z2dnOmc0Zl8="} | |
BackendApi.working = True | |
BackendApi.ssl = False | |
BackendApi.url = url | |
BackendApi.headers = headers | |


class BackendApi(BackendApi):
    working = True
    ssl = False
    url = url
    headers = headers
    image_models = ["flux", "flux-pro"]
    models = ["deepseek-r1", *g4f.Provider.OpenaiAccount.get_models(), "flux", "flux-pro"]

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        **kwargs
    ) -> AsyncResult:
        # Route OpenaiAccount models through their native provider
        if model in g4f.Provider.OpenaiAccount.get_models():
            kwargs["provider"] = "OpenaiAccount"
        async for chunk in super().create_async_generator(model, messages, **kwargs):
            yield chunk
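
# Minimal sanity sketch (not part of the app; assumes the backend at `url` is
# reachable). The subclass above shadows the imported BackendApi, so direct
# calls resolve against the patched class:
#
#   import asyncio
#
#   async def _smoke_test():
#       async for chunk in BackendApi.create_async_generator(
#           "deepseek-r1", [{"role": "user", "content": "ping"}]
#       ):
#           print(chunk, end="")
#
#   asyncio.run(_smoke_test())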

# Request/response models
class Message(BaseModel):
    role: str
    content: str


class ChatRequest(BaseModel):
    model: str
    messages: List[Message]
    streaming: bool = True


class ChatResponse(BaseModel):
    role: str
    content: str


# Dependency to check API key
async def verify_api_key(x_api_key: str = Header(...)):
    if x_api_key != "fb207532285886a5568298b4b4e61124":
        raise HTTPException(status_code=403, detail="Invalid API key")
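
# Example call (illustrative): FastAPI maps the "x-api-key" request header to
# the x_api_key parameter above, so clients authenticate like this:
#
#   curl -H "x-api-key: fb207532285886a5568298b4b4e61124" http://localhost:7860/models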


@app.get("/models")  # NOTE: route path is an assumption, inferred from the docstring
async def get_models():
    """Endpoint to get the list of available models."""
    return {"models": models}


@app.post("/v1/chat/completions")  # NOTE: OpenAI-style route path, assumed here
async def chat_completion(
    chat_request: ChatRequest,
    api_key: str = Depends(verify_api_key)
):
    """
    Handle chat completion requests with optional streaming.
    Rate limiting has been removed for unrestricted access.
    """
    # Validate model
    if chat_request.model not in models:
        raise HTTPException(status_code=400, detail="Invalid model selected.")

    # Check if messages are provided
    if not chat_request.messages:
        raise HTTPException(status_code=400, detail="Messages cannot be empty.")

    # Convert messages to the format expected by ChatCompletion
    formatted_messages = [{"role": msg.role, "content": msg.content} for msg in chat_request.messages]

    try:
        if chat_request.streaming:
            # Stream the response as server-sent events
            def event_stream():
                response = ChatCompletion.create(
                    model=chat_request.model,
                    messages=formatted_messages,
                    stream=True
                )
                for chunk in response:
                    if isinstance(chunk, dict) and 'choices' in chunk:
                        for choice in chunk['choices']:
                            if 'delta' in choice and 'content' in choice['delta']:
                                yield f"data: {json.dumps({'content': choice['delta']['content']})}\n\n"
                            elif 'message' in choice:
                                yield f"data: {json.dumps({'content': choice['message']['content']})}\n\n"
                    else:
                        # Plain-string chunks (the common g4f case)
                        yield f"data: {json.dumps({'content': str(chunk)})}\n\n"

            return StreamingResponse(event_stream(), media_type="text/event-stream")
        else:
            # Non-streaming response
            response = ChatCompletion.create(
                model=chat_request.model,
                messages=formatted_messages,
                stream=False
            )
            if isinstance(response, str):
                return ChatResponse(role="assistant", content=response)
            elif isinstance(response, dict) and 'choices' in response:
                return ChatResponse(
                    role="assistant",
                    content=response['choices'][0]['message']['content']
                )
            else:
                raise HTTPException(status_code=500, detail="Unexpected response structure.")
    except HTTPException:
        # Let deliberate HTTP errors pass through unwrapped
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))


if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)
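
# Example chat request (illustrative; the path matches the assumed decorator above):
#
#   curl -N -X POST http://localhost:7860/v1/chat/completions \
#     -H "Content-Type: application/json" \
#     -H "x-api-key: fb207532285886a5568298b4b4e61124" \
#     -d '{"model": "gpt-4o", "messages": [{"role": "user", "content": "Hello"}], "streaming": true}'
#
# With "streaming": true the body arrives as server-sent events
# ("data: {...}\n\n" frames); with false, a single JSON ChatResponse.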