FROM nvidia/cuda:12.0.0-cudnn8-devel-ubuntu22.04
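# The -devel variant ships the CUDA 12 toolchain (nvcc) and cuDNN 8 headers,
# available if the build is configured for GPU offload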

# Install build and runtime dependencies
RUN apt-get update && \
    apt-get install --no-install-recommends -y build-essential python3 python3-pip wget curl git cmake && \
    apt-get clean && rm -rf /var/lib/apt/lists/*

WORKDIR /app

# Download ggml and mmproj models from HuggingFace
# Use /resolve/, not /raw/, so wget fetches the actual model weights rather than Git LFS pointer files
RUN wget https://huggingface.co/mys/ggml_llava-v1.5-13b/resolve/main/ggml-model-q4_k.gguf -O ggml-model-q4_k.gguf && \
    wget https://huggingface.co/mys/ggml_llava-v1.5-13b/resolve/main/mmproj-model-f16.gguf -O mmproj-model-f16.gguf

# Clone and build llava-server (the git submodule pulls in llama.cpp, which the build links against)
RUN git clone https://github.com/trzy/llava-cpp-server.git llava && \
    cd llava && \
    git submodule init && \
    git submodule update && \
    make
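# The build produces bin/llava-server inside the llava directory, which the CMD below invokes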

# Create a non-root user for security reasons
RUN useradd -m -u 1000 user

USER user
ENV HOME=/home/user

# Keep /app as the working directory: the models and the llava-server binary live there,
# and the CMD below uses paths relative to it
WORKDIR /app

# Expose the port
EXPOSE 8080

# Start the llava-server with models
CMD ["./llava/bin/llava-server", "-m", "./ggml-model-q4_k.gguf", "--mmproj", "./mmproj-model-f16.gguf", "--host", "0.0.0.0", "--port", "8080"]
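
# Build and run (a sketch; the image tag and host port below are illustrative):
#   docker build -t llava-server .
#   docker run --gpus all -p 8080:8080 llava-server
# GPU passthrough via --gpus all requires the NVIDIA Container Toolkit on the host;
# see the llava-cpp-server README for the server's HTTP API.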