# ollama/start.sh — container entrypoint for the Ollama service
# Author: Germano Cavalcante
# Commit: 7d178fd ("Use lightweight models")
#!/bin/bash
# Container entrypoint: start the Ollama server, provision the models,
# then block in the foreground so the container stays alive.
set -euo pipefail

# Bind to all interfaces and allow cross-origin requests so clients
# outside the container can reach the API.
export OLLAMA_HOST=0.0.0.0
export OLLAMA_ORIGINS="*"

# Start the Ollama service in the background and remember its PID so we
# can tie the container's lifetime to the server's.
ollama serve &
server_pid=$!

# Poll for readiness instead of a fixed `sleep 10`: a fixed delay races
# against slow startups and wastes time on fast ones. Give up after ~60s.
ready=0
for (( i = 0; i < 60; i++ )); do
  if ollama list >/dev/null 2>&1; then
    ready=1
    break
  fi
  sleep 1
done
if (( ! ready )); then
  printf 'ollama server did not become ready in time\n' >&2
  exit 1
fi

# Pull the lightweight base models BEFORE building the custom model, so
# that a Modelfile `FROM` referencing one of them resolves locally.
# NOTE(review): assumes Modelfile builds FROM one of these — confirm.
ollama pull llama3.2:1b
ollama pull qwen2.5:0.5b

# Build the tuned model from the local Modelfile.
# (Model name "llama3.2-tunned" kept as-is: clients reference it.)
ollama create llama3.2-tunned -f Modelfile

# Keep the container in the foreground. Unlike `tail -f /dev/null`, this
# exits with the server's status if it dies, so orchestrators can restart
# the container instead of keeping a dead service "running".
wait "$server_pid"