```bash
# Start Ollama in the background
ollama serve &

# Wait for Ollama to be ready
until curl -s http://localhost:11434/api/tags >/dev/null; do
    sleep 1
done

# Pull the model if not already present
if ! ollama list | grep -q "llama3.2"; then
    ollama pull llama3.2
fi

# Start the FastAPI server
uvicorn app:app --host 0.0.0.0 --port 7860
```
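The last line assumes an `app.py` exposing a FastAPI instance named `app`. That file is not shown here, so the following is only a minimal sketch of what it might look like: it forwards a prompt to the local Ollama server started above via Ollama's `/api/generate` endpoint and returns the completion. The route name `/generate` and the request shape are assumptions for illustration.

```python
# Hypothetical app.py sketch -- not the original application code.
import httpx
from fastapi import FastAPI
from pydantic import BaseModel

app = FastAPI()

OLLAMA_URL = "http://localhost:11434/api/generate"  # local Ollama started by the script
MODEL = "llama3.2"                                  # model pulled by the startup script


class Prompt(BaseModel):
    prompt: str


@app.post("/generate")  # assumed route name
async def generate(req: Prompt):
    async with httpx.AsyncClient(timeout=120) as client:
        resp = await client.post(
            OLLAMA_URL,
            json={"model": MODEL, "prompt": req.prompt, "stream": False},
        )
        resp.raise_for_status()
        # With stream=False, Ollama returns the full completion in the "response" field
        return {"response": resp.json()["response"]}
```

With the server up on port 7860 (the port uvicorn binds above), such an endpoint could be exercised with, for example, `curl -X POST http://localhost:7860/generate -H 'Content-Type: application/json' -d '{"prompt": "Hello"}'`.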