File size: 1,032 Bytes
1dc2887
79f1838
 
d1f4c29
79f1838
636bd97
756ebd4
636bd97
 
 
5ac6a38
ecef3f5
d1f4c29
a170260
df3a501
 
 
c5da4da
b54cf69
 
 
 
 
b8923c9
636bd97
 
 
756ebd4
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
# syntax=docker/dockerfile:1
# GPU-enabled image: builds llama-cpp-python with cuBLAS, serves a FastAPI
# app ("main:app") with uvicorn on port 8000.
FROM nvidia/cuda:11.6.0-base-ubuntu20.04

# FIX: the original used `RUN export PATH=...`, which is a no-op — every RUN
# starts a fresh shell, so the export never persisted. ENV applies to all
# later layers and to the running container.
ENV PATH="/usr/local/cuda/bin:${PATH}"

# Fail pipelines on the upstream command (e.g. a failed wget), not just tar.
SHELL ["/bin/bash", "-o", "pipefail", "-c"]

# apt-get (not apt: apt's CLI is not script-stable, hadolint DL3027);
# update+install in one layer, list cleanup in the same layer.
RUN apt-get update && \
    apt-get install --no-install-recommends -y \
        build-essential \
        curl \
        git \
        python3 \
        python3-pip \
        wget && \
    apt-get clean && rm -rf /var/lib/apt/lists/*

# Documentation only — does not publish the port.
EXPOSE 8000

WORKDIR /app

# Ubuntu 20.04's cmake is too old for llama.cpp; install 3.17.0 into /usr/local.
RUN wget -qO- "https://cmake.org/files/v3.17/cmake-3.17.0-Linux-x86_64.tar.gz" | tar --strip-components=1 -xz -C /usr/local

# Build llama-cpp-python from source with cuBLAS GPU acceleration.
RUN CMAKE_ARGS="-DLLAMA_CUBLAS=on" FORCE_CMAKE=1 pip install git+https://github.com/abetlen/llama-cpp-python --no-cache-dir

# Copy only the manifest first so this layer stays cached until
# requirements.txt itself changes; --no-cache-dir keeps pip's wheel
# cache out of the image (hadolint DL3042).
COPY requirements.txt ./
RUN pip install --no-cache-dir --upgrade pip && \
    pip install --no-cache-dir -r requirements.txt

# Optional model download, intentionally disabled: weights are expected to be
# supplied via the build context (COPY . . below) or mounted at runtime.
#RUN curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh | bash
#RUN apt-get install git-lfs -y
#RUN git clone https://huggingface.co/TheBloke/robin-13B-v2-GGML
#RUN mv robin-13B-v2-GGML/robin-13b.ggmlv3.q3_K_M.bin .
#RUN rm -rf robin-13B-v2-GGML/

# Application source last — changes here don't invalidate the dependency layers.
COPY . .

# Exec form: uvicorn runs as PID 1 and receives SIGTERM from `docker stop`.
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]