File size: 354 Bytes
04fea66
 
 
 
 
 
 
 
 
 
 
14e0b19
 
04fea66
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
#!/bin/bash
# Entrypoint: launch Ollama, ensure the llama3.2 model is present, then
# serve the FastAPI app on port 7860 (typical container / HF Spaces setup).
set -euo pipefail

# Start Ollama in the background; keep its PID so we can detect an early
# crash while polling for readiness.
ollama serve &
ollama_pid=$!

# Wait for Ollama's HTTP API to come up, with a bounded retry budget so a
# broken install fails fast instead of hanging the container forever.
# -f makes curl treat HTTP error statuses (4xx/5xx) as failure, not success.
max_wait=60
waited=0
until curl -fs http://localhost:11434/api/tags >/dev/null; do
    if ! kill -0 "$ollama_pid" 2>/dev/null; then
        echo "error: ollama serve exited before becoming ready" >&2
        exit 1
    fi
    if (( waited >= max_wait )); then
        echo "error: timed out after ${max_wait}s waiting for Ollama" >&2
        exit 1
    fi
    sleep 1
    waited=$((waited + 1))
done

# Pull the model only if it is not already present locally.
if ! ollama list | grep -q "llama3.2"; then
    ollama pull llama3.2
fi

# exec replaces the shell so uvicorn becomes PID-visible to the supervisor
# and receives SIGTERM/SIGINT directly for clean shutdown.
exec uvicorn app:app --host 0.0.0.0 --port 7860