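# CUDA 12.2 development image (nvcc + CUDA toolchain) on Ubuntu 22.04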
FROM nvidia/cuda:12.2.0-devel-ubuntu22.04

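# Suppress interactive prompts from apt during the build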
ENV DEBIAN_FRONTEND=noninteractive

# Install build dependencies (CMake comes from the pinned tarball below)
RUN apt-get update && \
    apt-get install --no-install-recommends -y build-essential python3 python3-pip wget curl git zlib1g-dev libblas-dev && \
    apt-get clean && rm -rf /var/lib/apt/lists/*

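# Install a pinned CMake 3.18 into /usr/local (its bin/ precedes /usr/bin on PATH)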
RUN wget -qO- "https://cmake.org/files/v3.18/cmake-3.18.0-Linux-x86_64.tar.gz" | tar --strip-components=1 -xz -C /usr/local

WORKDIR /app

# Download the quantized LLaVA v1.5 13B model and its multimodal projector (mmproj) from Hugging Face
RUN wget https://huggingface.co/mys/ggml_llava-v1.5-13b/resolve/main/ggml-model-q4_k.gguf && \
    wget https://huggingface.co/mys/ggml_llava-v1.5-13b/resolve/main/mmproj-model-f16.gguf

# Clone and build llava-server
RUN git clone https://github.com/matthoffner/llava-cpp-server.git && \
    cd llava-cpp-server && \
    git submodule update --init && \
    make
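# NOTE: whether a plain `make` enables GPU offload depends on the project's
# Makefile; llama.cpp-based builds often require an explicit flag such as
# LLAMA_CUBLAS=1, so check the upstream build instructions.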

# Create a non-root user for security and give it its own copy of the model files
RUN useradd -m -u 1000 user && \
    mkdir -p /home/user/app && \
    cp /app/ggml-model-q4_k.gguf /app/mmproj-model-f16.gguf /home/user/app/ && \
    chown -R user:user /home/user/app

USER user
ENV HOME=/home/user

WORKDIR $HOME/app
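# The CMD below references the model files by bare filename, so they are
# resolved relative to this working directory.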

# Expose the port
EXPOSE 8080

# Start llava-server with the model and projector, listening on all interfaces
CMD ["/app/llava-cpp-server/bin/llava-server", "-m", "ggml-model-q4_k.gguf", "--mmproj", "mmproj-model-f16.gguf", "--host", "0.0.0.0"]
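
# Example usage (hypothetical image tag; `--gpus all` requires the NVIDIA
# Container Toolkit on the host):
#   docker build -t llava-server .
#   docker run --gpus all -p 8080:8080 llava-server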