OpenOllama / run-ollama-docker.sh
wzebrowski's picture
Update run-ollama-docker.sh
a4f2f70
raw
history blame contribute delete
784 Bytes
#!/bin/bash
# Pull the latest Ollama image and (re)start an Open WebUI container that
# talks to a locally reachable Ollama instance on port $host_port.
# Prompts whether the container should be given GPU access.
set -euo pipefail

host_port=11434
container_port=11434   # kept for symmetry; --network=host makes the mapping implicit

read -r -p "Do you want ollama in Docker with GPU support? (y/n): " use_gpu

# Remove stale containers so a re-run does not fail with a name conflict.
# (Previous versions only removed "ollama", so "open-webui" collided on rerun.)
docker rm -f ollama open-webui 2>/dev/null || true

docker pull ollama/ollama:latest

# Build the run arguments as an array to avoid word-splitting surprises.
docker_args=(
  -d
  --network=host
  -v open-webui:/app/backend/data
  -e OLLAMA_BASE_URL="http://127.0.0.1:${host_port}"
  --name open-webui
  --restart always
)

# Honor the GPU prompt (previously read but ignored).
if [[ "$use_gpu" == "y" ]]; then
  docker_args=(--gpus=all "${docker_args[@]}")
fi

docker run "${docker_args[@]}" ghcr.io/open-webui/open-webui:main

# Reclaim space from superseded image layers.
docker image prune -f