# ggml-mllm / main.py
from fastapi import FastAPI
from fastapi.responses import HTMLResponse
from llama_cpp.server.app import create_app, Settings
import tomli
# Load the configuration from the config.toml file
with open("config.toml", "rb") as f:
settings = tomli.load(f)
settings = Settings(**settings)
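
# For reference, a minimal config.toml sketch (an assumption, not shipped
# with this file): keys should match llama_cpp.server's Settings fields,
# e.g. `model` for the GGML model path and `n_ctx` for the context window:
#
#   model = "./models/ggml-model.bin"   # path to the GGML model file
#   n_ctx = 2048                        # context window size
#
# create_app builds the llama-cpp-python server app (an OpenAI-compatible
# API) configured from these settings.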
app = create_app(settings=settings)
# Extend the app with your custom route
@app.get('/', response_class=HTMLResponse)
def custom_index_route():
    html_content = """
    <html>
        <body>
            <iframe src="https://matthoffner-chatbot.hf.space" frameborder="0" width="100%" height="100%"></iframe>
        </body>
    </html>
    """
    return HTMLResponse(content=html_content)
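
# Routes registered by create_app (e.g. the OpenAI-style /v1 endpoints)
# remain available alongside this custom index page.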
if __name__ == '__main__':
    import uvicorn
    uvicorn.run(app, host='0.0.0.0', port=8000)