# Inference-API/main/main.py
"""
LLM Inference Server main application using LitServe framework.
"""
import litserve as ls
import yaml
import logging
from pathlib import Path
from fastapi.middleware.cors import CORSMiddleware
from .routes import router, init_router
from .api import InferenceApi
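
# Third-party dependencies implied by the imports above (a sketch; exact
# package names and version pins depend on your environment):
#   pip install litserve pyyaml fastapi uvicorn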


def setup_logging():
    """Set up basic logging configuration."""
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    )
    return logging.getLogger(__name__)


def load_config():
    """Load configuration from config.yaml (expected next to this file)."""
    config_path = Path(__file__).parent / "config.yaml"
    with open(config_path) as f:
        return yaml.safe_load(f)
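
# For reference, a minimal config.yaml that satisfies the keys read in
# create_app() below. The values shown are the same defaults the code falls
# back to; any extra fields your InferenceApi expects are not covered here:
#
#   server:
#     timeout: 60
#     max_batch_size: 1
#   llm_server:
#     api_prefix: /api/v1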


def create_app():
    """Create and configure the application instance."""
    logger = setup_logging()
    config = load_config()
    server_config = config.get('server', {})

    # Initialize API with config
    api = InferenceApi(config)

    # Create LitServer instance
    server = ls.LitServer(
        api,
        timeout=server_config.get('timeout', 60),
        max_batch_size=server_config.get('max_batch_size', 1),
        track_requests=True
    )

    # Get the underlying FastAPI app from the LitServer instance
    app = server.app

    # Add CORS middleware
    app.add_middleware(
        CORSMiddleware,
        allow_origins=["*"],
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )

    # Add routes with the configured prefix
    api_prefix = config.get('llm_server', {}).get('api_prefix', '/api/v1')
    app.include_router(router, prefix=api_prefix)

    return app
# Create the app instance for uvicorn
app = create_app()
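
# Example launch command (a sketch: the module path assumes this file lives at
# main/main.py in a package-style layout, as the relative imports above
# require; adjust it to your project structure):
#
#   uvicorn main.main:app --host 0.0.0.0 --port 7860
#
# Port 7860 is the conventional Hugging Face Spaces port; any free port works
# locally.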