FROM python:3.11-slim-bullseye
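# Alternative base image for GPU builds, left commented out: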
#FROM nvidia/cuda:12.3.0-devel-ubuntu22.04
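# Build toolchain and system libraries needed to compile the native gpt4all backend from source.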
RUN apt-get update && apt-get upgrade -y && apt-get install -y --no-install-recommends \
python3 \
python3-dev \
python3-pip \
ninja-build \
build-essential \
pkg-config \
gnupg2 \
git \
liblzma-dev \
wget \
clang \
g++
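# Debian's packaged cmake is likely too old for the backend build, so swap it for the PyPI release.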
RUN apt-get remove -y cmake
RUN pip install cmake --upgrade
WORKDIR /code
RUN chmod 777 .
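# Copy the dependency list on its own so the pip install layer is cached independently of the application code.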
COPY ./requirements.txt /code/requirements.txt
RUN pip install --upgrade pip
RUN pip install --upgrade setuptools
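# Build the gpt4all C++ backend from source and install its Python bindings in editable mode.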
RUN cd /tmp && git clone --recurse-submodules https://github.com/nomic-ai/gpt4all && \
    cd gpt4all/gpt4all-backend/ && mkdir build && cd build && \
    cmake .. && cmake --build . --parallel && \
    cd ../../gpt4all-bindings/python && pip3 install -e .
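# Install llama-cpp-python from the project's prebuilt CPU wheel index to avoid compiling llama.cpp locally.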
RUN pip install llama-cpp-python \
--extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cpu
RUN pip install --no-cache-dir -r /code/requirements.txt
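# Run the app as an unprivileged user (uid 1000), as expected by hosts such as Hugging Face Spaces.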
RUN useradd -m -u 1000 user
USER user
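# Runtime defaults: user-local bin dir on PATH, oneDNN opts off, server host/port, allowed CORS origins, and the CUDA switch for the model backend turned off.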
ENV HOME=/home/user \
PATH=/home/user/.local/bin:$PATH \
TF_ENABLE_ONEDNN_OPTS=0 \
HOST=0.0.0.0 \
PORT=7860 \
ORIGINS=* \
DLLMODEL_CUDA=OFF
WORKDIR $HOME/app
COPY --chown=user . $HOME/app
RUN chmod 777 .
EXPOSE 7860
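# Launch the application via its main module.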
CMD ["python", "-m", "main"] |