Fix: Disable LLAMA_CURL in Hugging Face space environment

#168
Files changed (1)
  1. start.sh +4 -1
start.sh CHANGED
@@ -6,13 +6,16 @@ if [ ! -d "llama.cpp" ]; then
 fi
 
 export GGML_CUDA=OFF
+export LLAMA_CURL=OFF
 if [[ -z "${RUN_LOCALLY}" ]]; then
     # enable CUDA if NOT running locally
     export GGML_CUDA=ON
+    # enable CURL if NOT running locally
+    export LLAMA_CURL=ON
 fi
 
 cd llama.cpp
-cmake -B build -DBUILD_SHARED_LIBS=OFF -DGGML_CUDA=${GGML_CUDA}
+cmake -B build -DBUILD_SHARED_LIBS=OFF -DGGML_CUDA=${GGML_CUDA} -DLLAMA_CURL=${LLAMA_CURL}
 cmake --build build --config Release -j --target llama-quantize llama-gguf-split llama-imatrix
 cp ./build/bin/llama-* .
 rm -rf build
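
With this change, both toggles follow the same pattern: `LLAMA_CURL` now mirrors `GGML_CUDA`, defaulting to OFF and switching to ON only when `RUN_LOCALLY` is unset. A minimal sketch of the two invocations (the `RUN_LOCALLY` variable comes from the script itself; the example commands below are assumptions for illustration):

#!/usr/bin/env bash
# Local build (assumed example): RUN_LOCALLY is set to a non-empty value,
# so [[ -z "${RUN_LOCALLY}" ]] is false and the script configures with
# -DGGML_CUDA=OFF -DLLAMA_CURL=OFF, avoiding the CUDA toolchain and libcurl.
RUN_LOCALLY=1 ./start.sh

# Space build (assumed example): RUN_LOCALLY is unset, so the test is true
# and the script configures with -DGGML_CUDA=ON -DLLAMA_CURL=ON.
./start.sh

Exporting the flag unconditionally before the `if` block means `${LLAMA_CURL}` is always defined when the `cmake -B build` line runs, so the configure step never receives an empty `-DLLAMA_CURL=` value.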