mobinln committed on
Commit
f75aab0
·
verified ·
1 Parent(s): f500c06

Update start.sh

Browse files
Files changed (1) hide show
  1. start.sh +1 -2
start.sh CHANGED
@@ -7,12 +7,11 @@ echo "✅ Downloading llamacpp..."
7
 
8
  wget -O llama_cpp.zip https://github.com/ggml-org/llama.cpp/releases/download/b6102/llama-b6102-bin-ubuntu-x64.zip > /dev/null 2>&1
9
  unzip llama_cpp.zip > /dev/null 2>&1
10
- ls
11
 
12
  echo "✅ Booting up llama server..."
13
 
14
  # wget -O model.gguf https://huggingface.co/lmstudio-community/Qwen3-4B-Instruct-2507-GGUF/resolve/main/Qwen3-4B-Instruct-2507-Q6_K.gguf?download=true > /dev/null 2>&1
15
  wget -O model.gguf https://huggingface.co/unsloth/gpt-oss-20b-GGUF/resolve/main/gpt-oss-20b-F16.gguf?download=true > /dev/null 2>&1
16
- ./buid/bin/llama-server -m model.gguf --port 8000 --host 0.0.0.0 --threads 2 --ctx-size 4096 --mlock --jinja
17
 
18
  echo "✅ llama server running on port 8000"
 
7
 
8
  wget -O llama_cpp.zip https://github.com/ggml-org/llama.cpp/releases/download/b6102/llama-b6102-bin-ubuntu-x64.zip > /dev/null 2>&1
9
  unzip llama_cpp.zip > /dev/null 2>&1
 
10
 
11
  echo "✅ Booting up llama server..."
12
 
13
  # wget -O model.gguf https://huggingface.co/lmstudio-community/Qwen3-4B-Instruct-2507-GGUF/resolve/main/Qwen3-4B-Instruct-2507-Q6_K.gguf?download=true > /dev/null 2>&1
14
  wget -O model.gguf https://huggingface.co/unsloth/gpt-oss-20b-GGUF/resolve/main/gpt-oss-20b-F16.gguf?download=true > /dev/null 2>&1
15
+ ./build/bin/llama-server -m model.gguf --port 8000 --host 0.0.0.0 --threads 2 --ctx-size 4096 --mlock --jinja
16
 
17
  echo "✅ llama server running on port 8000"