matthoffner committed
Commit 4fee425 · Parent: 6d26a31

Update Dockerfile

Files changed (1)
  Dockerfile +2 -3
Dockerfile CHANGED
@@ -1,7 +1,7 @@
- FROM nvidia/cuda:11.6.0-base-ubuntu20.04
+ FROM nvidia/cuda:12.1.1-devel-ubuntu20.04
  RUN export PATH="/usr/local/cuda/bin:$PATH"
  RUN apt update && \
- apt install --no-install-recommends -y build-essential python3 python3-pip wget curl && \
+ apt install --no-install-recommends -y build-essential python3 python3-pip wget curl cmake && \
  apt clean && rm -rf /var/lib/apt/lists/*

  EXPOSE 8000
@@ -9,7 +9,6 @@ EXPOSE 8000
  WORKDIR /app

  RUN export PATH="/usr/local/cuda/bin:$PATH"
- RUN wget -qO- "https://cmake.org/files/v3.17/cmake-3.17.0-Linux-x86_64.tar.gz" | tar --strip-components=1 -xz -C /usr/local
  RUN CMAKE_ARGS="-DLLAMA_CUBLAS=on" FORCE_CMAKE=1 pip install llama-cpp-python --no-cache-dir
  COPY requirements.txt ./
  RUN pip install --upgrade pip && \
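
For reference, building and running the updated image might look like the sketch below; the image tag llama-cpp-gpu and the host port mapping are illustrative assumptions, not part of this commit.

  # build the CUDA-enabled image from the repository root (tag name is hypothetical)
  docker build -t llama-cpp-gpu .
  # run with GPU access; the Dockerfile exposes port 8000
  docker run --gpus all -p 8000:8000 llama-cpp-gpu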