Ollama / ollama.sh
PyxiLabs's picture
Update ollama.sh
f5f67a3 verified
raw
history blame
505 Bytes
#!/bin/bash
# Launch an Ollama server in the background, wait (bounded) until its API
# port accepts connections, pull a default embedding model, then exec the
# Node reverse proxy on port 7860 (it replaces this shell as the foreground
# process; Ollama keeps running in the background).
set -euo pipefail

echo "🚀 Starting Ollama server..."
ollama serve &
ollama_pid=$!

# Wait for the Ollama API (localhost:11434) to accept TCP connections.
# Bounded: bail out if the server process dies or readiness takes too long,
# instead of looping forever.
echo "⏳ Waiting for Ollama to start..."
readonly max_wait=60
waited=0
while ! nc -z localhost 11434; do
  if ! kill -0 "$ollama_pid" 2>/dev/null; then
    echo "❌ Ollama server exited before becoming ready" >&2
    exit 1
  fi
  if (( waited >= max_wait )); then
    echo "❌ Timed out after ${max_wait}s waiting for Ollama on port 11434" >&2
    exit 1
  fi
  sleep 1
  waited=$((waited + 1))
done

# Optional: pull default model. Non-fatal by design (the proxy can still
# serve requests; models can be pulled later), so don't let set -e abort here.
echo "📥 Pulling default model..."
ollama pull all-minilm || echo "⚠️ Model pull failed; continuing anyway" >&2

# Start reverse proxy; exec hands PID over so signals reach the proxy directly.
echo "🔀 Starting FastAPI reverse proxy on port 7860..."
#exec python -m uvicorn ollama:app --host 0.0.0.0 --port 7860 --proxy-headers
exec node ollama.js