inferencing-llm / api.py
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
from fastapi.middleware.cors import CORSMiddleware
from litellm.proxy.proxy_server import app as proxy_app
app = FastAPI(
    title="LiteLLM API",
    version="1.0.0",
    docs_url="/",  # Serve Swagger UI at root
)
# Add CORS middleware
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Mount the bundled Swagger UI static files
app.mount("/swagger", StaticFiles(directory="swagger"), name="swagger")
# Mount the LiteLLM Proxy server at /proxy path
app.mount("/proxy", proxy_app)
@app.get("/health")
def health_check():
    """Simple liveness probe for the wrapper app."""
    return {"status": "healthy"}
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)
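
# Example requests (a minimal sketch: the host/port match the uvicorn settings
# above, while the /proxy/v1/chat/completions path and the model name are
# assumptions based on typical LiteLLM proxy routes and config):
#
#   curl http://localhost:7860/health
#   # -> {"status": "healthy"}
#
#   curl http://localhost:7860/proxy/v1/chat/completions \
#     -H "Content-Type: application/json" \
#     -d '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Hello"}]}'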