# ggml-mllm Dockerfile — LLaVA server (llava-cpp-server) on CUDA 12 / Ubuntu 22.04
# Source: matthoffner/ggml-mllm (Hugging Face Space), revision 6520f0e
# CUDA devel image: includes nvcc + cuDNN headers needed to compile llama.cpp/llava
# with GPU support. Pinned to an explicit tag for reproducibility.
FROM nvidia/cuda:12.0.0-cudnn8-devel-ubuntu22.04

# Install build toolchain and fetch tools in a single layer.
# - apt-get (not apt): apt's CLI is not script-stable (hadolint DL3027)
# - DEBIAN_FRONTEND set inline so it does not leak into the runtime env
# - update+install combined, list cache removed in the same layer (DL3009/DL3015)
RUN apt-get update && \
    DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
        build-essential \
        cmake \
        curl \
        git \
        python3 \
        python3-pip \
        wget && \
    apt-get clean && rm -rf /var/lib/apt/lists/*
WORKDIR /app

# Download the LLaVA v1.5 13B quantized weights and the multimodal projector.
# IMPORTANT: use /resolve/ (follows the Git-LFS redirect to the real binary),
# not /raw/ — /raw/ returns the tiny LFS pointer text file, not the model,
# and the server would fail to load it at startup.
RUN wget -q https://huggingface.co/mys/ggml_llava-v1.5-13b/resolve/main/ggml-model-q4_k.gguf -O ggml-model-q4_k.gguf && \
    wget -q https://huggingface.co/mys/ggml_llava-v1.5-13b/resolve/main/mmproj-model-f16.gguf -O mmproj-model-f16.gguf
# Clone and build llava-cpp-server (pulls llama.cpp as a submodule).
# --recurse-submodules replaces the manual init/update dance in one step.
# NOTE(review): the clone is unpinned (tracks the default branch); pin a
# commit/tag here for reproducible builds once a known-good revision is chosen.
RUN git clone --recurse-submodules https://github.com/trzy/llava-cpp-server.git llava && \
    cd llava && \
    make
# Create a non-root user for security reasons and run from /app.
# BUG FIX: the original switched WORKDIR to $HOME/app (/home/user/app), an
# empty directory — but the models and the llava binary live in /app, so the
# relative paths in CMD could never resolve. Stay in /app and hand ownership
# of it to the unprivileged user instead.
RUN useradd -m -u 1000 user && \
    chown -R user:user /app
USER user
ENV HOME=/home/user
WORKDIR /app
# Document the listening port (EXPOSE does not publish it; run with -p 8080:8080).
# 8080 > 1024, so binding works for the non-root user.
EXPOSE 8080

# Start llava-server with the quantized model and multimodal projector.
# Exec (JSON-array) form: the server is PID 1 and receives SIGTERM on stop.
# Absolute /app paths so startup does not depend on the final WORKDIR
# (the original relative paths resolved against /home/user/app, where
# nothing exists — the container could not start).
CMD ["/app/llava/bin/llava-server", "-m", "/app/ggml-model-q4_k.gguf", "--mmproj", "/app/mmproj-model-f16.gguf", "--host", "0.0.0.0", "--port", "8080"]