coolmanx committed
Commit 5522189 · verified · 1 Parent(s): b13cb37

Update Dockerfile

Files changed (1)
  1. Dockerfile +5 -176
Dockerfile CHANGED
@@ -1,176 +1,5 @@
- # syntax=docker/dockerfile:1
- # Initialize device type args
- # use build args in the docker build command with --build-arg="BUILDARG=true"
- ARG USE_CUDA=false
- ARG USE_OLLAMA=false
- # Tested with cu117 for CUDA 11 and cu121 for CUDA 12 (default)
- ARG USE_CUDA_VER=cu121
- # any sentence transformer model; models to use can be found at https://huggingface.co/models?library=sentence-transformers
- # Leaderboard: https://huggingface.co/spaces/mteb/leaderboard
- # for better performance and multilingual support use "intfloat/multilingual-e5-large" (~2.5GB) or "intfloat/multilingual-e5-base" (~1.5GB)
- # IMPORTANT: If you change the embedding model (default: sentence-transformers/all-MiniLM-L6-v2), you can no longer use RAG Chat with documents previously loaded in the WebUI; you need to re-embed them.
- ARG USE_EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2
- ARG USE_RERANKING_MODEL=""
-
- # Tiktoken encoding name; models to use can be found at https://huggingface.co/models?library=tiktoken
- ARG USE_TIKTOKEN_ENCODING_NAME="cl100k_base"
-
- ARG BUILD_HASH=dev-build
- # Override at your own risk - non-root configurations are untested
- ARG UID=0
- ARG GID=0
-
- ######## WebUI frontend ########
- FROM --platform=$BUILDPLATFORM node:22-alpine3.20 AS build
- ARG BUILD_HASH
-
- WORKDIR /app
-
- COPY package.json package-lock.json ./
- RUN npm ci
-
- COPY . .
- ENV APP_BUILD_HASH=${BUILD_HASH}
- RUN npm run build
-
- ######## WebUI backend ########
- FROM python:3.11-slim-bookworm AS base
-
- # Use args
- ARG USE_CUDA
- ARG USE_OLLAMA
- ARG USE_CUDA_VER
- ARG USE_EMBEDDING_MODEL
- ARG USE_RERANKING_MODEL
- ARG UID
- ARG GID
-
- ## Basis ##
- ENV ENV=prod \
-     PORT=8080 \
-     # pass build args to the build
-     USE_OLLAMA_DOCKER=${USE_OLLAMA} \
-     USE_CUDA_DOCKER=${USE_CUDA} \
-     USE_CUDA_DOCKER_VER=${USE_CUDA_VER} \
-     USE_EMBEDDING_MODEL_DOCKER=${USE_EMBEDDING_MODEL} \
-     USE_RERANKING_MODEL_DOCKER=${USE_RERANKING_MODEL}
-
- ## Basis URL Config ##
- ENV OLLAMA_BASE_URL="/ollama" \
-     OPENAI_API_BASE_URL=""
-
- ## API Key and Security Config ##
- ENV OPENAI_API_KEY="" \
-     WEBUI_SECRET_KEY="" \
-     SCARF_NO_ANALYTICS=true \
-     DO_NOT_TRACK=true \
-     ANONYMIZED_TELEMETRY=false
-
- #### Other models #########################################################
- ## whisper speech-to-text model settings ##
- ENV WHISPER_MODEL="base" \
-     WHISPER_MODEL_DIR="/app/backend/data/cache/whisper/models"
-
- ## RAG Embedding model settings ##
- ENV RAG_EMBEDDING_MODEL="$USE_EMBEDDING_MODEL_DOCKER" \
-     RAG_RERANKING_MODEL="$USE_RERANKING_MODEL_DOCKER" \
-     SENTENCE_TRANSFORMERS_HOME="/app/backend/data/cache/embedding/models"
-
- ## Tiktoken model settings ##
- ENV TIKTOKEN_ENCODING_NAME="cl100k_base" \
-     TIKTOKEN_CACHE_DIR="/app/backend/data/cache/tiktoken"
-
- ## Hugging Face download cache ##
- ENV HF_HOME="/app/backend/data/cache/embedding/models"
-
- ## Torch Extensions ##
- # ENV TORCH_EXTENSIONS_DIR="/.cache/torch_extensions"
-
- #### Other models ##########################################################
-
- WORKDIR /app/backend
-
- ENV HOME=/root
- # Create user and group if not root
- RUN if [ $UID -ne 0 ]; then \
-       if [ $GID -ne 0 ]; then \
-         addgroup --gid $GID app; \
-       fi; \
-       adduser --uid $UID --gid $GID --home $HOME --disabled-password --no-create-home app; \
-     fi
-
- RUN mkdir -p $HOME/.cache/chroma
- RUN echo -n 00000000-0000-0000-0000-000000000000 > $HOME/.cache/chroma/telemetry_user_id
-
- # Make sure the user has access to the app and root directory
- RUN chown -R $UID:$GID /app $HOME
-
- RUN if [ "$USE_OLLAMA" = "true" ]; then \
-       apt-get update && \
-       # Install pandoc and netcat
-       apt-get install -y --no-install-recommends git build-essential pandoc netcat-openbsd curl && \
-       apt-get install -y --no-install-recommends gcc python3-dev && \
-       # for RAG OCR
-       apt-get install -y --no-install-recommends ffmpeg libsm6 libxext6 && \
-       # install helper tools
-       apt-get install -y --no-install-recommends curl jq && \
-       # install ollama
-       curl -fsSL https://ollama.com/install.sh | sh && \
-       # cleanup
-       rm -rf /var/lib/apt/lists/*; \
-     else \
-       apt-get update && \
-       # Install pandoc, netcat and gcc
-       apt-get install -y --no-install-recommends git build-essential pandoc gcc netcat-openbsd curl jq && \
-       apt-get install -y --no-install-recommends gcc python3-dev && \
-       # for RAG OCR
-       apt-get install -y --no-install-recommends ffmpeg libsm6 libxext6 && \
-       # cleanup
-       rm -rf /var/lib/apt/lists/*; \
-     fi
-
- # install python dependencies
- COPY --chown=$UID:$GID ./backend/requirements.txt ./requirements.txt
-
- RUN pip3 install uv && \
-     if [ "$USE_CUDA" = "true" ]; then \
-       # If you use CUDA, the whisper and embedding models will be downloaded on first use
-       pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/$USE_CUDA_DOCKER_VER --no-cache-dir && \
-       uv pip install --system -r requirements.txt --no-cache-dir && \
-       python -c "import os; from sentence_transformers import SentenceTransformer; SentenceTransformer(os.environ['RAG_EMBEDDING_MODEL'], device='cpu')" && \
-       python -c "import os; from faster_whisper import WhisperModel; WhisperModel(os.environ['WHISPER_MODEL'], device='cpu', compute_type='int8', download_root=os.environ['WHISPER_MODEL_DIR'])"; \
-       python -c "import os; import tiktoken; tiktoken.get_encoding(os.environ['TIKTOKEN_ENCODING_NAME'])"; \
-     else \
-       pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu --no-cache-dir && \
-       uv pip install --system -r requirements.txt --no-cache-dir && \
-       python -c "import os; from sentence_transformers import SentenceTransformer; SentenceTransformer(os.environ['RAG_EMBEDDING_MODEL'], device='cpu')" && \
-       python -c "import os; from faster_whisper import WhisperModel; WhisperModel(os.environ['WHISPER_MODEL'], device='cpu', compute_type='int8', download_root=os.environ['WHISPER_MODEL_DIR'])"; \
-       python -c "import os; import tiktoken; tiktoken.get_encoding(os.environ['TIKTOKEN_ENCODING_NAME'])"; \
-     fi; \
-     chown -R $UID:$GID /app/backend/data/
-
- # copy embedding weight from build
- # RUN mkdir -p /root/.cache/chroma/onnx_models/all-MiniLM-L6-v2
- # COPY --from=build /app/onnx /root/.cache/chroma/onnx_models/all-MiniLM-L6-v2/onnx
-
- # copy built frontend files
- COPY --chown=$UID:$GID --from=build /app/build /app/build
- COPY --chown=$UID:$GID --from=build /app/CHANGELOG.md /app/CHANGELOG.md
- COPY --chown=$UID:$GID --from=build /app/package.json /app/package.json
-
- # copy backend files
- COPY --chown=$UID:$GID ./backend .
-
- EXPOSE 8080
-
- HEALTHCHECK CMD curl --silent --fail http://localhost:${PORT:-8080}/health | jq -ne 'input.status == true' || exit 1
-
- USER $UID:$GID
-
- ARG BUILD_HASH
- ENV WEBUI_BUILD_VERSION=${BUILD_HASH}
- ENV DOCKER=true
-
- CMD ["bash", "start.sh"]
+ #Dockerfile
+
+ FROM ghcr.io/open-webui/open-webui:0.4.7
+
+ RUN chmod -R 777 ./data
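
In short, the commit replaces the entire multi-stage build above with the prebuilt ghcr.io/open-webui/open-webui:0.4.7 image, adding only a recursive chmod so that ./data (resolved against the upstream image's working directory) stays writable whatever UID the container runs as, which is typically non-root on Hugging Face Spaces. A minimal sketch of how the before and after might be built and run locally; the image tag webui-test and the port mapping are illustrative assumptions, not part of the commit:

# Removed Dockerfile: parameterized via build args, e.g. a CUDA build with bundled Ollama
docker build --build-arg USE_CUDA=true --build-arg USE_OLLAMA=true -t webui-test .

# New Dockerfile: no build args; it only layers one chmod onto the upstream image
docker build -t webui-test .

# Either image serves on port 8080 (see EXPOSE 8080 in the removed file)
docker run -p 8080:8080 webui-test

chmod -R 777 is the bluntest way to guarantee writability; a chown to a specific UID would be tighter, but it assumes control over the runtime user, which a hosted platform may not grant.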