matthoffner committed (verified)
Commit 477cb33 · Parent(s): 1283b41

Update Dockerfile

Files changed (1):
  Dockerfile (+3 −3)
Dockerfile CHANGED
@@ -26,8 +26,8 @@ ENV PATH="/usr/local/cuda/bin:$PATH" \
 WORKDIR /app
 
 # Download ggml and mmproj models from HuggingFace
-RUN wget https://huggingface.co/mys/ggml_llava-v1.5-13b/resolve/main/ggml-model-q5_k.gguf && \
-    wget https://huggingface.co/mys/ggml_llava-v1.5-13b/resolve/main/mmproj-model-f16.gguf
+RUN wget https://huggingface.co/mys/ggml_bakllava-1/resolve/main/ggml-model-q4_k.gguf && \
+    wget https://huggingface.co/mys/ggml_bakllava-1/resolve/main/mmproj-model-f16.gguf
 
 # Clone and build llava-server with CUDA support
 RUN git clone https://github.com/matthoffner/llava-cpp-server.git && \
@@ -54,4 +54,4 @@ WORKDIR $HOME/app
 EXPOSE 8080
 
 # Start the llava-server with models
-CMD ["/app/llava-cpp-server/bin/llava-server", "-m", "ggml-model-q5_k.gguf", "--mmproj", "mmproj-model-f16.gguf", "--host", "0.0.0.0", "-ngl", "30", "-c", "2048"]
+CMD ["/app/llava-cpp-server/bin/llava-server", "-m", "ggml-model-q4_k.gguf", "--mmproj", "mmproj-model-f16.gguf", "--host", "0.0.0.0", "-ngl", "30", "-c", "2048"]
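For context (not part of the commit): the Dockerfile exposes port 8080, so a typical build-and-run sketch could look like the lines below. The image tag and the --gpus flag are assumptions here, not taken from the repo, and GPU passthrough depends on a CUDA-capable host with the NVIDIA Container Toolkit installed.

# Hypothetical usage sketch — the "llava-server" tag is illustrative,
# and --gpus all assumes the NVIDIA Container Toolkit is set up on the host.
docker build -t llava-server .
docker run --gpus all -p 8080:8080 llava-server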