# Use a base image suited for llama-cpp-python
# Define the image argument and provide a default value
ARG IMAGE=python:3-slim-bullseye
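# A CUDA-capable base can be substituted at build time, for example (the image
# tag is an assumption; any CUDA devel image with Python 3 should work):
#   docker build --build-arg IMAGE=nvidia/cuda:12.1.1-devel-ubuntu22.04 -t llava-server .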

# Use the image as specified
FROM ${IMAGE}

# Re-declare the ARG after FROM so it stays available to later instructions
ARG IMAGE

# Update and upgrade existing packages, then install build dependencies
RUN apt-get update && apt-get upgrade -y && apt-get install -y --no-install-recommends \
    python3 \
    python3-pip \
    ninja-build \
    build-essential

RUN python3 -m pip install --upgrade \
    pip pytest cmake scikit-build setuptools \
    fastapi uvicorn sse-starlette pydantic-settings starlette-context

# Perform the conditional installations based on the image
RUN echo "Image: ${IMAGE}" && \
    if [ "${IMAGE}" = "python:3-slim-bullseye" ] ; then \
    echo "OpenBLAS install:" && \
    apt-get install -y --no-install-recommends libopenblas-dev && \
    LLAMA_OPENBLAS=1 pip install llama-cpp-python --verbose; \
else \
    echo "CuBLAS install:" && \
    LLAMA_CUBLAS=1 pip install llama-cpp-python --verbose; \
fi
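
# Optional sanity check (a minimal sketch; uncomment to verify the install):
# RUN python3 -c "import llama_cpp; print(llama_cpp.__version__)"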

VOLUME ["/models"]

# Environment variables for model details
ENV MODEL_NAME="llava-1.6-mistral-7b-gguf"
ENV DEFAULT_MODEL_FILE="llava-v1.6-mistral-7b.Q3_K_XS.gguf"
ENV MODEL_USER="cjpais"
ENV DEFAULT_MODEL_BRANCH="main"
ENV DEFAULT_CLIP_MODEL_FILE="mmproj-model-f16.gguf"
ENV MODEL_URL="https://huggingface.co/${MODEL_USER}/${MODEL_NAME}/resolve/${DEFAULT_MODEL_BRANCH}/${DEFAULT_MODEL_FILE}"
ENV CLIP_MODEL_URL="https://huggingface.co/${MODEL_USER}/${MODEL_NAME}/resolve/${DEFAULT_MODEL_BRANCH}/${DEFAULT_CLIP_MODEL_FILE}"
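
# With the defaults above, these resolve to:
#   https://huggingface.co/cjpais/llava-1.6-mistral-7b-gguf/resolve/main/llava-v1.6-mistral-7b.Q3_K_XS.gguf
#   https://huggingface.co/cjpais/llava-1.6-mistral-7b-gguf/resolve/main/mmproj-model-f16.gguf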

# Set up the working directory
WORKDIR /app

# Ensure curl is available for downloading the models
RUN apt-get update && apt-get install -y curl && \
    apt-get clean && rm -rf /var/lib/apt/lists/*

# Create a directory for the models
RUN mkdir -p /models

# Download the models
RUN curl -L "${MODEL_URL}" -o "/models/${DEFAULT_MODEL_FILE}" && \
    curl -L "${CLIP_MODEL_URL}" -o "/models/${DEFAULT_CLIP_MODEL_FILE}"

ENV HOST=0.0.0.0
ENV PORT=8000
# Expose the port the server will run on
EXPOSE 8000
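
# Example run (image tag is an assumption):
#   docker run --rm -p 8000:8000 llava-server
# Note: bind-mounting an empty host directory over /models would hide the
# model files baked into the image; mount one only if it already holds them.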

# Command to run the server; shell form is used so the model-path environment
# variables defined above are expanded at container start
CMD python3 -m llama_cpp.server --model "/models/${DEFAULT_MODEL_FILE}" --clip_model_path "/models/${DEFAULT_CLIP_MODEL_FILE}" --chat_format llava-1-5