matthoffner committed on
Commit
2298350
·
1 Parent(s): 6d815dd

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +3 -5
Dockerfile CHANGED
@@ -4,7 +4,7 @@ ENV DEBIAN_FRONTEND=noninteractive
4
 
5
  # Install dependencies
6
  RUN apt update && \
7
- apt install --no-install-recommends -y build-essential python3 python3-pip wget curl git cmake zlib1g-dev && \
8
  apt clean && rm -rf /var/lib/apt/lists/*
9
 
10
  WORKDIR /app
@@ -13,12 +13,12 @@ WORKDIR /app
13
  RUN wget https://huggingface.co/mys/ggml_llava-v1.5-13b/raw/main/ggml-model-q4_k.gguf && \
14
  wget https://huggingface.co/mys/ggml_llava-v1.5-13b/raw/main/mmproj-model-f16.gguf
15
 
16
- # Clone and build llava-server
17
  RUN git clone https://github.com/matthoffner/llava-cpp-server.git && \
18
  cd llava-cpp-server && \
19
  git submodule init && \
20
  git submodule update && \
21
- make
22
 
23
  # Create a non-root user for security reasons
24
  RUN useradd -m -u 1000 user && \
@@ -38,5 +38,3 @@ RUN ls -al
38
 
39
  # Start the llava-server with models
40
  CMD ["/app/llava-cpp-server/bin/llava-server", "-m", "/home/user/app/ggml-model-q4_k.gguf", "--mmproj", "/home/user/app/mmproj-model-f16.gguf", "--host", "0.0.0.0", "--port", "8080"]
41
-
42
-
 
4
 
5
  # Install dependencies
6
  RUN apt update && \
7
+ apt install --no-install-recommends -y build-essential python3 python3-pip wget curl git cmake zlib1g-dev libblas-dev && \
8
  apt clean && rm -rf /var/lib/apt/lists/*
9
 
10
  WORKDIR /app
 
13
  RUN wget https://huggingface.co/mys/ggml_llava-v1.5-13b/raw/main/ggml-model-q4_k.gguf && \
14
  wget https://huggingface.co/mys/ggml_llava-v1.5-13b/raw/main/mmproj-model-f16.gguf
15
 
16
+ # Clone and build llava-server with CUDA support
17
  RUN git clone https://github.com/matthoffner/llava-cpp-server.git && \
18
  cd llava-cpp-server && \
19
  git submodule init && \
20
  git submodule update && \
21
+ LLAMA_CUBLAS=1 make
22
 
23
  # Create a non-root user for security reasons
24
  RUN useradd -m -u 1000 user && \
 
38
 
39
  # Start the llava-server with models
40
  CMD ["/app/llava-cpp-server/bin/llava-server", "-m", "/home/user/app/ggml-model-q4_k.gguf", "--mmproj", "/home/user/app/mmproj-model-f16.gguf", "--host", "0.0.0.0", "--port", "8080"]