import json
import g4f.api
import g4f.Provider
from fastapi import FastAPI, HTTPException, Depends, Header, Request
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from typing import List
from g4f import ChatCompletion
from g4f.typing import Messages, AsyncResult
from g4f.Provider import BackendApi

app = FastAPI()

# List of available models
models = [
"gpt-4o", "gpt-4o-mini", "gpt-4",
"gpt-4-turbo", "gpt-3.5-turbo",
"claude-3.7-sonnet", "o3-mini", "o1", "grok-3", "gemini-2.5-pro-exp-03-25", "claude-3.5",
"llama-3.1-405b"
]

url = "https://ahe.hopto.org"
headers = {"Authorization": "Basic Z2dnOmc0Zl8="}

# Configure the imported BackendApi provider before subclassing it below.
BackendApi.working = True
BackendApi.ssl = False
BackendApi.url = url
BackendApi.headers = headers

class BackendApi(BackendApi):
    # The subclass shadows the imported name; these attributes repeat the
    # values patched onto the base class above and add model metadata.
    working = True
    ssl = False
    url = url
    headers = headers
    image_models = ["flux", "flux-pro"]
    models = ["deepseek-r1", *g4f.Provider.OpenaiAccount.get_models(), "flux", "flux-pro"]

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        **kwargs
    ) -> AsyncResult:
        # Route models that belong to the OpenAI account through that provider.
        if model in g4f.Provider.OpenaiAccount.get_models():
            kwargs["provider"] = "OpenaiAccount"
        async for chunk in super().create_async_generator(model, messages, **kwargs):
            yield chunk
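
# Usage sketch (assumption, not part of the served app): the subclassed
# provider can be passed explicitly to g4f's ChatCompletion.create; the
# "deepseek-r1" model name is just one of the entries declared above.
#
#   reply = ChatCompletion.create(
#       model="deepseek-r1",
#       messages=[{"role": "user", "content": "Hello"}],
#       provider=BackendApi,
#   )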

# Request and response models
class Message(BaseModel):
    role: str
    content: str

class ChatRequest(BaseModel):
    model: str
    messages: List[Message]
    streaming: bool = True

class ChatResponse(BaseModel):
    role: str
    content: str

# Dependency to check API key
async def verify_api_key(x_api_key: str = Header(...)):
    if x_api_key != "fb207532285886a5568298b4b4e61124":
        raise HTTPException(status_code=403, detail="Invalid API key")
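
# Example request (sketch, assuming the server runs locally on port 7860):
# FastAPI maps the `x_api_key` parameter to the `x-api-key` HTTP header, so
# endpoints guarded by this dependency are called like:
#
#   curl -X POST http://localhost:7860/v1/chat/completions \
#        -H "x-api-key: fb207532285886a5568298b4b4e61124" \
#        -H "Content-Type: application/json" \
#        -d '{"model": "gpt-4o", "messages": [{"role": "user", "content": "Hi"}], "streaming": false}'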

@app.get("/v1/models", tags=["Models"])
async def get_models():
    """Endpoint to get the list of available models."""
    return {"models": models}
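
# Example (sketch, unauthenticated endpoint):
#   curl http://localhost:7860/v1/models
#   -> {"models": ["gpt-4o", "gpt-4o-mini", ...]}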

@app.post("/v1/chat/completions", tags=["Chat Completion"])
async def chat_completion(
    chat_request: ChatRequest,
    api_key: str = Depends(verify_api_key)
):
    """
    Handle chat completion requests with optional streaming.
    Rate limiting has been removed for unrestricted access.
    """
    # Validate model
    if chat_request.model not in models:
        raise HTTPException(status_code=400, detail="Invalid model selected.")

    # Check that messages are provided
    if not chat_request.messages:
        raise HTTPException(status_code=400, detail="Messages cannot be empty.")

    # Convert messages to the format expected by ChatCompletion
    formatted_messages = [{"role": msg.role, "content": msg.content} for msg in chat_request.messages]

    try:
        if chat_request.streaming:
            # Stream the response as server-sent events
            def event_stream():
                response = ChatCompletion.create(
                    model=chat_request.model,
                    messages=formatted_messages,
                    stream=True
                )
                for chunk in response:
                    if isinstance(chunk, dict) and 'choices' in chunk:
                        for choice in chunk['choices']:
                            if 'delta' in choice and 'content' in choice['delta']:
                                yield f"data: {json.dumps({'content': choice['delta']['content']})}\n\n"
                            elif 'message' in choice:
                                yield f"data: {json.dumps({'content': choice['message']['content']})}\n\n"
                    else:
                        # g4f may yield plain string chunks; pass them through as-is
                        yield f"data: {json.dumps({'content': str(chunk)})}\n\n"
            return StreamingResponse(event_stream(), media_type="text/event-stream")
        else:
            # Non-streaming response
            response = ChatCompletion.create(
                model=chat_request.model,
                messages=formatted_messages,
                stream=False
            )
            if isinstance(response, str):
                return ChatResponse(role="assistant", content=response)
            elif isinstance(response, dict) and 'choices' in response:
                return ChatResponse(
                    role="assistant",
                    content=response['choices'][0]['message']['content']
                )
            else:
                raise HTTPException(status_code=500, detail="Unexpected response structure.")
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
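
# Client-side sketch (assumes the server is running locally on port 7860):
# consuming the SSE stream produced above with the `requests` library.
#
#   import requests, json
#   resp = requests.post(
#       "http://localhost:7860/v1/chat/completions",
#       headers={"x-api-key": "fb207532285886a5568298b4b4e61124"},
#       json={"model": "gpt-4o", "messages": [{"role": "user", "content": "Hi"}]},
#       stream=True,
#   )
#   for line in resp.iter_lines():
#       if line.startswith(b"data: "):
#           print(json.loads(line[6:])["content"], end="", flush=True)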

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)