# LLMServer — main/app.py
# History note (from repo viewer): "Added double init, for embedding and chat
# models at the same time." (commit 8083005)
import yaml
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from .routes import router, init_router
from .utils.logging import setup_logger
from .utils.validation import validate_hf
def load_config(path: str = "main/config.yaml"):
    """Load the application configuration from a YAML file.

    Args:
        path: Path to the YAML config file. Defaults to the project's
            ``main/config.yaml`` (relative to the process working
            directory), preserving the original behavior.

    Returns:
        The parsed YAML document (typically a dict).
    """
    # Explicit UTF-8 avoids locale-dependent decoding of the config file;
    # safe_load prevents arbitrary object construction from the YAML.
    with open(path, "r", encoding="utf-8") as f:
        return yaml.safe_load(f)
def create_app():
    """Build and return the configured FastAPI application.

    Loads the YAML config, sets up logging, runs the Hugging Face
    validation step, attaches CORS middleware, and mounts the versioned
    API router.
    """
    config = load_config()
    logger = setup_logger(config, "main")

    # NOTE(review): this passes the setup_logger *function* itself, not the
    # `logger` instance created above — confirm validate_hf's expected first
    # argument; it looks like it may have been meant to receive `logger`.
    validate_hf(setup_logger, config)

    logger.info("Starting LLM API server")

    api_cfg = config["api"]
    app = FastAPI(
        title="LLM API",
        description="API for Large Language Model operations",
        version=api_cfg["version"],
    )

    # Allow browser clients from the configured origins.
    cors_cfg = api_cfg["cors"]
    app.add_middleware(
        CORSMiddleware,
        allow_origins=cors_cfg["origins"],
        allow_credentials=cors_cfg["credentials"],
        allow_methods=["*"],
        allow_headers=["*"],
    )

    # The router must be given the config before it is mounted.
    init_router(config)
    app.include_router(
        router,
        prefix=f"{api_cfg['prefix']}/{api_cfg['version']}",
    )

    logger.info("FastAPI application created successfully")
    return app
app = create_app()