#!/bin/bash

# Configure the Ollama server: listen on all interfaces so it is reachable
# from outside the container, and allow cross-origin requests from any origin
export OLLAMA_HOST=0.0.0.0
export OLLAMA_ORIGINS="*"

# Start the Ollama service in the background
ollama serve &

# Wait for the service to initialize
sleep 10
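# (A fixed sleep is a simple heuristic; a more robust alternative would poll the
#  server until it responds, e.g.: until ollama list >/dev/null 2>&1; do sleep 1; done)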

# Pull the base models first so they are available locally
ollama pull llama3.2:1b
ollama pull qwen2.5:0.5b

# Build the custom model from the local Modelfile
ollama create llama3.2-tunned -f Modelfile
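# Note: the Modelfile is assumed to sit next to this script; a typical one
# starts with a FROM line referencing one of the pulled base models, e.g.:
#   FROM llama3.2:1b
#   SYSTEM "You are a helpful, concise assistant."
# (illustrative contents only; the actual Modelfile is not shown here)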

# Keep the container running indefinitely
tail -f /dev/null