# inferencing-llm / api.py
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from litellm.proxy.proxy_server import app as proxy_app

# Create FastAPI app
app = FastAPI(
    title="LiteLLM API",
    version="1.0.0",
    docs_url="/",  # Serve Swagger UI at root
)

# Add CORS middleware
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Mount the LiteLLM Proxy server
app.mount("/proxy", proxy_app)


@app.get("/health")
def health_check():
    return {"status": "healthy"}


if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)
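
# Usage sketch (not part of the original file): with the server running, the
# health check and the mounted LiteLLM proxy can be exercised roughly as below.
# The model name "gpt-3.5-turbo" is an assumption; it depends on which models
# the proxy has actually been configured with.
#
#   import requests
#
#   print(requests.get("http://localhost:7860/health").json())
#   resp = requests.post(
#       "http://localhost:7860/proxy/chat/completions",
#       json={
#           "model": "gpt-3.5-turbo",
#           "messages": [{"role": "user", "content": "Hello"}],
#       },
#   )
#   print(resp.json())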