# ggml-mllm / Dockerfile
FROM nvidia/cuda:12.2.0-devel-ubuntu22.04
ENV DEBIAN_FRONTEND=noninteractive
# Install build dependencies (CMake is pinned separately below, so it is not
# taken from apt here)
RUN apt-get update && \
    apt-get install --no-install-recommends -y build-essential python3 python3-pip wget curl git zlib1g-dev libblas-dev && \
    apt-get clean && rm -rf /var/lib/apt/lists/*
# Install a pinned CMake (3.18.0) over /usr/local
RUN wget -qO- "https://cmake.org/files/v3.18/cmake-3.18.0-Linux-x86_64.tar.gz" | \
    tar --strip-components=1 -xz -C /usr/local
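# Optional sanity check (a minimal sketch): /usr/local/bin precedes /usr/bin on
# PATH, so the pinned CMake should be the one reported here.
RUN cmake --version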
WORKDIR /app
ENV PATH=/usr/local/cuda/bin:${PATH}
ENV CUDA_HOME=/usr/local/cuda \
LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda/lib64
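# Note: the alias below only takes effect in interactive bash shells; the RUN
# steps and the make build further down use /bin/sh, so it does not alter the
# build itself.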
RUN echo 'alias nvcc="nvcc -Xcompiler \"-fPIC\" -std=c++17"' >> ~/.bashrc
# Download the ggml and mmproj models from HuggingFace; /resolve/ (not /raw/)
# is required so the actual LFS-backed weights are fetched rather than a small
# pointer file
RUN wget https://huggingface.co/mys/ggml_llava-v1.5-13b/resolve/main/ggml-model-q4_k.gguf && \
    wget https://huggingface.co/mys/ggml_llava-v1.5-13b/resolve/main/mmproj-model-f16.gguf
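# Fail fast if a download went wrong (a minimal sketch): an LFS pointer is only
# a few hundred bytes, while the real weights are hundreds of MB to several GB.
RUN [ "$(stat -c%s ggml-model-q4_k.gguf)" -gt 1000000 ] && \
    [ "$(stat -c%s mmproj-model-f16.gguf)" -gt 1000000 ]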
# Clone and build llava-server with CUDA support
RUN git clone https://github.com/matthoffner/llava-cpp-server.git && \
    cd llava-cpp-server && \
    git submodule update --init && \
    LLAMA_CUBLAS=1 make
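# Sanity check: fail the build early if the binary did not land where the CMD
# at the bottom of this file expects it.
RUN test -x /app/llava-cpp-server/bin/llava-server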
# Create a non-root user for security reasons and give it its own copies of
# the models
RUN useradd -m -u 1000 user && \
    mkdir -p /home/user/app && \
    cp /app/ggml-model-q4_k.gguf /app/mmproj-model-f16.gguf /home/user/app && \
    chown -R user:user /home/user/app
USER user
ENV HOME=/home/user
WORKDIR $HOME/app
# Expose the port
EXPOSE 8080
# Start the llava-server with models
CMD ["/app/llava-cpp-server/bin/llava-server", "-m", "/home/user/app/ggml-model-q4_k.gguf", "--mmproj", "/home/user/app/mmproj-model-f16.gguf", "--host", "0.0.0.0"]
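# Example usage (standard Docker flags; --gpus requires an NVIDIA host with
# the NVIDIA Container Toolkit installed):
#   docker build -t ggml-mllm .
#   docker run --gpus all -p 8080:8080 ggml-mllm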