#!/bin/bash
# Container entrypoint: start the Ollama server in the background, wait
# until it accepts connections on its default port (11434), pull the
# default embedding model, then exec the reverse proxy as PID 1.
set -euo pipefail

# Fail fast with a clear message if the readiness probe tool is missing.
command -v nc >/dev/null || { echo "❌ 'nc' (netcat) is required but not installed" >&2; exit 1; }

# Start Ollama server in background
echo "🚀 Starting Ollama server..."
ollama serve &

# Wait for Ollama to be ready — bounded, so a broken server fails the
# container instead of hanging forever.
echo "⏳ Waiting for Ollama to start..."
readonly max_wait=60
waited=0
while ! nc -z localhost 11434; do
  if (( waited >= max_wait )); then
    echo "❌ Ollama did not become ready within ${max_wait}s" >&2
    exit 1
  fi
  sleep 1
  waited=$((waited + 1))
done

# Optional: Pull default model — best-effort; a failed pull must not
# abort startup now that 'set -e' is active.
echo "📥 Pulling default model..."
ollama pull all-minilm || echo "⚠️  model pull failed; continuing without it" >&2

# Start FastAPI reverse proxy
# TODO(review): message says FastAPI but the script execs a Node proxy —
# confirm which is intended and update the stale one.
echo "🔀 Starting FastAPI reverse proxy on port 7860..."
#exec python -m uvicorn ollama:app --host 0.0.0.0 --port 7860 --proxy-headers
exec node ollama.js