# ggml-mllm / main.py
from fastapi import FastAPI
from fastapi.responses import HTMLResponse
from llama_cpp.server.app import create_app, Settings
import tomli  # TOML parser (available as tomllib in the standard library on Python 3.11+)

# Load the configuration from the config.toml file
with open("config.toml", "rb") as f:
    settings = tomli.load(f)
settings = Settings(**settings)
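
# A minimal config.toml might look like the sketch below. The accepted keys come
# from llama_cpp.server's Settings model; the model path and context size here
# are illustrative assumptions, not values shipped with this Space:
#
#   model = "models/llama-2-7b.Q4_K_M.gguf"
#   n_ctx = 2048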
app = create_app(settings=settings)

# Extend the app with your custom route
@app.get('/', response_class=HTMLResponse)
def custom_index_route():
    html_content = """
    <html>
      <body>
        Hello world
      </body>
    </html>
    """
    return HTMLResponse(content=html_content)
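
# Besides this index page, the app returned by create_app serves the
# llama-cpp-python server's OpenAI-compatible endpoints (e.g. /v1/models,
# /v1/chat/completions). A sketch of a request against the running server,
# assuming it listens on localhost:8000:
#
#   curl http://localhost:8000/v1/chat/completions \
#     -H "Content-Type: application/json" \
#     -d '{"messages": [{"role": "user", "content": "Hello"}]}'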

if __name__ == '__main__':
    import uvicorn

    uvicorn.run(app, host='0.0.0.0', port=8000)
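
# Alternatively, the uvicorn CLI can serve the app directly (assuming this file
# is named main.py, as in the repository):
#   uvicorn main:app --host 0.0.0.0 --port 8000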