# ggml-mllm / Dockerfile
# Origin: Hugging Face Space by matthoffner, revision 080a566 (905 bytes).
# (Header above was web-UI scrape residue — "raw / history / blame" chrome —
# converted to comments so the file parses as a Dockerfile.)
# syntax=docker/dockerfile:1

# The cuBLAS build of llama-cpp-python below compiles CUDA kernels with nvcc;
# the "-base" CUDA image ships no compiler or headers, so "-devel" is required.
FROM nvidia/cuda:11.6.0-devel-ubuntu20.04

# `RUN export PATH=...` only affects that single layer's shell and is lost;
# ENV persists for all later build steps and at runtime.
ENV PATH="/usr/local/cuda/bin:${PATH}"

# Fail pipelines on the left-hand command too (wget|tar, curl|bash below).
SHELL ["/bin/bash", "-o", "pipefail", "-c"]

# OS toolchain + git-lfs in one layer: use apt-get (stable CLI), skip
# recommends, and clean the lists in the same layer so they never bloat the
# image. The packagecloud script runs its own apt-get update for the LFS repo.
RUN apt-get update && \
    apt-get install --no-install-recommends -y \
        build-essential \
        curl \
        python3 \
        python3-pip \
        wget && \
    curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh | bash && \
    apt-get install --no-install-recommends -y git-lfs && \
    apt-get clean && rm -rf /var/lib/apt/lists/*

WORKDIR /app

# Pinned CMake release, unpacked straight into /usr/local (needed to build
# llama-cpp-python from source).
RUN wget -qO- "https://cmake.org/files/v3.17/cmake-3.17.0-Linux-x86_64.tar.gz" | tar --strip-components=1 -xz -C /usr/local

# Force a from-source build with cuBLAS acceleration enabled.
RUN CMAKE_ARGS="-DLLAMA_CUBLAS=on" FORCE_CMAKE=1 pip install llama-cpp-python --no-cache-dir

# Copy only the manifest first so the dependency layer stays cached when
# application source changes but requirements.txt does not.
COPY requirements.txt ./
RUN pip install --no-cache-dir --upgrade pip && \
    pip install --no-cache-dir -r requirements.txt

# Fetch the GGML model weights into /app/Samantha-7B-GGML (git-lfs resolves
# the large files automatically after the hook install above).
RUN git clone https://huggingface.co/TheBloke/Samantha-7B-GGML

COPY . .

# NOTE(review): container still runs as root; adding a USER directive is
# recommended once it's confirmed main.py needs no root-owned writes.
EXPOSE 8000
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]