# selamgptmodels / app.py
import json

from fastapi import FastAPI, HTTPException, Depends, Header
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from typing import List
from g4f import ChatCompletion

app = FastAPI()
# List of available models
models = [
    "gpt-4o", "gpt-4o-mini", "gpt-4", "gpt-4-turbo", "gpt-3.5-turbo",
    "claude-3.7-sonnet", "claude-3.5", "o3-mini", "o1",
    "llama-3.1-405b", "gemini-flash", "blackboxai-pro", "blackboxai",
    "openchat-3.5", "glm-4-9B",
]
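
# NOTE: these names are passed straight through to g4f, which routes them to
# free third-party providers, so whether a given model actually answers
# depends on provider availability at runtime.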
# Request/response models
class Message(BaseModel):
    role: str
    content: str

class ChatRequest(BaseModel):
    model: str
    messages: List[Message]
    streaming: bool = True

class ChatResponse(BaseModel):
    role: str
    content: str
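
# An example request body that ChatRequest accepts (the model name and
# message text here are purely illustrative):
#
#   {
#       "model": "gpt-4o-mini",
#       "messages": [{"role": "user", "content": "Hello"}],
#       "streaming": true
#   }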
# Dependency to check the API key supplied in the x-api-key header
async def verify_api_key(x_api_key: str = Header(...)):
    if x_api_key != "fb207532285886a5568298b4b4e61124":
        raise HTTPException(status_code=403, detail="Invalid API key")
@app.get("/v1/models", tags=["Models"])
async def get_models():
    """Endpoint to get the list of available models."""
    return {"models": models}
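
# Example call (a sketch; host and port taken from the uvicorn.run() call at
# the bottom of this file):
#
#   curl -H "x-api-key: fb207532285886a5568298b4b4e61124" \
#        http://localhost:7860/v1/models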
@app.post("/v1/chat/completions", tags=["Chat Completion"])
async def chat_completion(
    chat_request: ChatRequest,
    api_key: str = Depends(verify_api_key)
):
    """Handle chat completion requests, with optional SSE streaming.

    No rate limiting is applied.
    """
    # Validate the requested model
    if chat_request.model not in models:
        raise HTTPException(status_code=400, detail="Invalid model selected.")
    # Reject empty conversations
    if not chat_request.messages:
        raise HTTPException(status_code=400, detail="Messages cannot be empty.")
    # Convert messages to the plain-dict format expected by ChatCompletion
    formatted_messages = [{"role": msg.role, "content": msg.content} for msg in chat_request.messages]
    try:
        if chat_request.streaming:
            # Stream the response back as Server-Sent Events
            def event_stream():
                response = ChatCompletion.create(
                    model=chat_request.model,
                    messages=formatted_messages,
                    stream=True
                )
                for chunk in response:
                    # g4f may yield OpenAI-style dicts or plain text chunks
                    if isinstance(chunk, dict) and 'choices' in chunk:
                        for choice in chunk['choices']:
                            if 'delta' in choice and 'content' in choice['delta']:
                                yield f"data: {json.dumps({'content': choice['delta']['content']})}\n\n"
                            elif 'message' in choice:
                                yield f"data: {json.dumps({'content': choice['message']['content']})}\n\n"
                    else:
                        yield f"data: {json.dumps({'content': str(chunk)})}\n\n"

            return StreamingResponse(event_stream(), media_type="text/event-stream")
        else:
            # Non-streaming: g4f may return a plain string or an OpenAI-style dict
            response = ChatCompletion.create(
                model=chat_request.model,
                messages=formatted_messages,
                stream=False
            )
            if isinstance(response, str):
                return ChatResponse(role="assistant", content=response)
            elif isinstance(response, dict) and 'choices' in response:
                return ChatResponse(
                    role="assistant",
                    content=response['choices'][0]['message']['content']
                )
            else:
                raise HTTPException(status_code=500, detail="Unexpected response structure.")
    except HTTPException:
        # Re-raise our own HTTP errors instead of wrapping them in a 500
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
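
# With streaming enabled, the endpoint emits Server-Sent Events framed as
#
#   data: {"content": "<text chunk>"}\n\n
#
# which a client reassembles into the full reply.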
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)
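
# --- Example streaming client ---------------------------------------------
# A minimal sketch, not part of the app: it assumes the `requests` library is
# installed and reuses the key and port defined above; the model and prompt
# are illustrative.
#
#   import json
#   import requests
#
#   resp = requests.post(
#       "http://localhost:7860/v1/chat/completions",
#       headers={"x-api-key": "fb207532285886a5568298b4b4e61124"},
#       json={
#           "model": "gpt-4o-mini",
#           "messages": [{"role": "user", "content": "Hello"}],
#           "streaming": True,
#       },
#       stream=True,
#   )
#   for line in resp.iter_lines():
#       if line.startswith(b"data: "):
#           print(json.loads(line[6:])["content"], end="", flush=True)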