# Read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
# you will also find guides on how best to write your Dockerfile
FROM ghcr.io/ggml-org/llama.cpp:server
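# Build-time override for the GGUF model URL; defaults to a Q4_K_M quant of DeepSeek-R1-Distill-Qwen-1.5B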
ARG MODEL_DOWNLOAD_LINK
ENV MODEL_DOWNLOAD_LINK=${MODEL_DOWNLOAD_LINK:-https://huggingface.co/unsloth/DeepSeek-R1-Distill-Qwen-1.5B-GGUF/resolve/main/DeepSeek-R1-Distill-Qwen-1.5B-Q4_K_M.gguf?download=true}
ENV DEBIAN_FRONTEND=noninteractive
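# Switch to root to install wget and create a world-writable model directory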
USER root
RUN apt-get update && apt-get install -y wget && rm -rf /var/lib/apt/lists/*
RUN mkdir /models && chmod 777 /models
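# Run as a non-root user (UID 1000), as expected for Hugging Face Docker Spaces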
RUN useradd -m -u 1000 user
USER user
ENV PATH="/home/user/.local/bin:$PATH"
WORKDIR /app
COPY --chown=user . /app
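# Fetch the model at build time so it is baked into the image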
RUN wget -nv -O /models/local_model.gguf "${MODEL_DOWNLOAD_LINK}"
# CMD ["/app/llama-server", "--host", "0.0.0.0", "--port", "8080", "-c", "2048", "-m", "/models/local_model.gguf", "--cache-type-k", "q8_0", "--api-key", "1234", "--parallel", "2"]
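# The server image sets llama-server as its ENTRYPOINT, so CMD only needs to supply runtime flags;
# --port should match the Space's app_port setting in README.md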
CMD ["--host", "0.0.0.0", "--port", "8080", "-m", "/models/local_model.gguf", "--cache-type-k", "q8_0", "--api-key", "1234", "--parallel", "2"]