Spaces:
Paused
Paused
File size: 1,367 Bytes
9e8d33e 3e50743 7131f6c 922b09a 6cba908 922b09a 79f1838 636bd97 9e8d33e 6520f0e 7131f6c 6db6156 2298350 05c8990 6520f0e 2298350 6db6156 6520f0e 7131f6c 6db6156 6520f0e 6db6156 6520f0e 6db6156 6d815dd 6520f0e 573b906 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 |
FROM nvidia/cuda:12.2.0-devel-ubuntu22.04
# Build-time only: suppress interactive apt prompts during the image build.
# ARG (not ENV) so the setting does not leak into the runtime environment.
ARG DEBIAN_FRONTEND=noninteractive
# Install build toolchain + CUDA dev libraries in a single layer
# (update and install combined to avoid the stale apt-cache bug),
# then purge the apt lists so the layer stays as small as possible.
# apt-get (not apt) — apt's CLI is not script-stable; packages sorted
# alphabetically and --no-install-recommends applied to every install.
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        build-essential \
        cmake \
        curl \
        git \
        libblas-dev \
        python3 \
        python3-pip \
        wget \
        zlib1g-dev && \
    apt-get install -y --no-install-recommends cuda-libraries-dev-12-2 && \
    apt-get clean && rm -rf /var/lib/apt/lists/*
# Build/staging directory for downloaded models and the cloned source.
WORKDIR /app
# Make nvcc and friends from the CUDA toolkit visible to the build below.
ENV PATH=/usr/local/cuda/bin:${PATH}
# Download the quantized LLaVA model and the CLIP projector from HuggingFace.
# NOTE: must use /resolve/ — the /raw/ endpoint returns the ~130-byte Git-LFS
# pointer file for LFS-tracked assets, which would silently break the server
# at runtime instead of fetching the actual multi-GB weights.
RUN wget https://huggingface.co/mys/ggml_llava-v1.5-13b/resolve/main/ggml-model-q4_k.gguf && \
    wget https://huggingface.co/mys/ggml_llava-v1.5-13b/resolve/main/mmproj-model-f16.gguf
# Clone and build llava-server with CUDA (cuBLAS) support.
# --recurse-submodules replaces the separate `submodule init` + `update`
# steps, and `make -C` avoids the `cd` anti-pattern (hadolint DL3003).
# NOTE(review): clone is unpinned — consider pinning a commit for
# reproducible builds.
RUN git clone --recurse-submodules https://github.com/matthoffner/llava-cpp-server.git && \
    LLAMA_CUBLAS=1 make -C llava-cpp-server
# Create a non-root runtime user (fixed UID 1000 so orchestrators can
# verify runAsNonRoot) and stage the models into its home directory.
# chown so the unprivileged user owns what it reads at runtime.
# NOTE(review): cp duplicates the multi-GB models into a second layer;
# downloading them directly into /home/user/app would roughly halve the
# image size — TODO in a follow-up.
RUN useradd -m -u 1000 user && \
    mkdir -p /home/user/app && \
    cp /app/ggml-model-q4_k.gguf /app/mmproj-model-f16.gguf /home/user/app/ && \
    chown -R user:user /home/user/app
# Drop root privileges for runtime; all root-requiring steps are above.
USER user
ENV HOME=/home/user
WORKDIR $HOME/app
# Expose the port (documentation only — does not publish it; presumably
# llava-server's default listen port, TODO confirm against upstream).
EXPOSE 8080
# (debug-only `RUN ls -al` removed: it only added a no-op layer to the image)
# Start llava-server bound to all interfaces with the model and multimodal
# projector. Exec (JSON-array) form so the server runs as PID 1 and receives
# SIGTERM from `docker stop`. (A stray trailing ` |` from a copy/paste was
# removed — it corrupted the JSON array.)
CMD ["/app/llava-cpp-server/bin/llava-server", "-m", "/home/user/app/ggml-model-q4_k.gguf", "--mmproj", "/home/user/app/mmproj-model-f16.gguf", "--host", "0.0.0.0"]