# ggml-mllm / Dockerfile
# Hugging Face Space by matthoffner — commit e2934c5 ("Update Dockerfile", 691 bytes)
# syntax=docker/dockerfile:1
# Serve a GGML quantized WizardLM-7B model via llama-cpp-python's
# OpenAI-compatible HTTP server on port 8080.

# Pin the base image; slim variant keeps the final image small.
FROM python:3.11-slim

# Stream Python logs straight to stdout/stderr (no buffering in containers).
ENV PYTHONUNBUFFERED=1

EXPOSE 8080
WORKDIR /app

# Fail the layer if any stage of a pipeline fails (curl|tar below).
SHELL ["/bin/bash", "-o", "pipefail", "-c"]

# Toolchain needed to compile llama-cpp-python with OpenBLAS support.
RUN apt-get update && apt-get install -y --no-install-recommends \
        build-essential \
        ca-certificates \
        libopenblas-dev \
        wget \
    && rm -rf /var/lib/apt/lists/*

# Install CMake from upstream — the pinned 3.17.0 release the build was
# validated against (Debian's packaged cmake may differ).
RUN wget -qO- "https://cmake.org/files/v3.17/cmake-3.17.0-Linux-x86_64.tar.gz" \
    | tar --strip-components=1 -xz -C /usr/local

# Build and install from PyPI with OpenBLAS acceleration. The [server]
# extra pulls in the llama_cpp.server dependencies (fastapi/uvicorn).
# NOTE: the original also git-cloned the repo and `cd`'d into it after
# install — both were no-ops and are dropped.
RUN CMAKE_ARGS="-DLLAMA_OPENBLAS=on" FORCE_CMAKE=1 \
    pip install --no-cache-dir "llama-cpp-python[server]"

# Fetch the quantized model directly from the Hugging Face CDN.
# (`git lfs pull <url>` does not work outside a git checkout, so the
# original download step could never succeed.)
RUN wget -q -O wizardLM-7B.ggml.q5_1.bin \
    "https://huggingface.co/TheBloke/wizardLM-7B-GGML/resolve/main/wizardLM-7B.ggml.q5_1.bin"

COPY . .

# Run the server as an unprivileged user.
RUN useradd --system --uid 10001 app
USER app

# Exec form so the server is PID 1 and receives SIGTERM from `docker stop`.
# `-m llama_cpp.server` is the correct module path (the original
# `llama-cpp-python/llama_cpp.server` is not a valid -m target).
# Bind 0.0.0.0:8080 so the EXPOSEd port is actually reachable.
CMD ["python3", "-m", "llama_cpp.server", \
     "--model", "wizardLM-7B.ggml.q5_1.bin", \
     "--host", "0.0.0.0", "--port", "8080"]