Use llama3.2-3B-tunned-for-blender
start.sh CHANGED
@@ -10,11 +10,8 @@ ollama serve &
 # Wait for the service to initialize
 sleep 10
 
-# Download the required file
-curl -fsSL https://huggingface.co/hugging-quants/Llama-3.2-1B-Instruct-Q4_K_M-GGUF/resolve/main/llama-3.2-1b-instruct-q4_k_m.gguf?download=true -o llama.gguf
-
 # Create the model using Ollama
-ollama
+ollama run hf.co/mano-wii/llama3.2-3B-tunned-for-blender:Q5_K_M
 
 # Keep the container running indefinitely
 tail -f /dev/null
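For context, the updated start.sh reads roughly as follows after this commit. This is a sketch, not the full file: the shebang and anything above the hunk are assumptions (only the `ollama serve &` shown in the hunk header is confirmed), while the remaining lines come from the diff itself.

#!/bin/bash  # assumed shebang; not visible in the diff

# Start the Ollama server in the background (context line from the hunk header)
ollama serve &

# Wait for the service to initialize
sleep 10

# Create the model using Ollama
ollama run hf.co/mano-wii/llama3.2-3B-tunned-for-blender:Q5_K_M

# Keep the container running indefinitely
tail -f /dev/null

The change relies on Ollama's ability to pull GGUF models directly from a Hugging Face repository via `ollama run hf.co/<user>/<repo>:<quant>`, which is why the separate curl download of the 1B GGUF is removed. Once the model is loaded, it can be queried through Ollama's HTTP API on its default port 11434, for example (the prompt here is only an illustration):

curl http://localhost:11434/api/generate \
  -d '{"model": "hf.co/mano-wii/llama3.2-3B-tunned-for-blender:Q5_K_M", "prompt": "How do I add a cube in Blender?", "stream": false}'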