#!/bin/bash
# Container entrypoint: start the Ollama server, build the tuned model from
# the local Modelfile, pre-load it, and stay alive as long as the server runs.
#
# Requires: ./Modelfile in the working directory (used by `ollama create`).
set -euo pipefail

# Listen on all interfaces and allow cross-origin requests so the API is
# reachable from outside the container.
export OLLAMA_HOST=0.0.0.0
export OLLAMA_ORIGINS="*"

# Start the Ollama service in the background and remember its PID so we can
# wait on it later (and propagate its exit status) instead of tailing /dev/null.
ollama serve &
serve_pid=$!

# Poll the API instead of a fixed `sleep 10` — the server may need more or
# less time. Give up after ~30 seconds.
ready=0
for (( i = 0; i < 30; i++ )); do
  if ollama list >/dev/null 2>&1; then
    ready=1
    break
  fi
  sleep 1
done
if (( ! ready )); then
  echo "error: ollama server did not become ready within 30s" >&2
  exit 1
fi

# Build the custom model from the local Modelfile; under `set -e` a failure
# (e.g. missing Modelfile) aborts the container instead of being ignored.
ollama create llama3.2-tunned -f Modelfile

# Pre-load the model so the first real request is fast. An empty prompt
# avoids dropping into the interactive REPL, which would block (or exit on
# closed stdin) in a non-TTY container. Best-effort: a warm-up failure
# should not kill the server.
ollama run llama3.2-tunned "" || echo "warning: model warm-up failed" >&2

# Keep the container running for as long as the server is up; exit with the
# server's status if it dies (unlike `tail -f /dev/null`, which would keep a
# dead container "alive").
wait "$serve_pid"