Commit 29c06df · committed by github-actions[bot]
Parent(s): f82253f

GitHub deploy: dbe463a53d170c94514bf5454b362de89c31995b
Files changed:
- .github/workflows/deploy-to-hf-spaces.yml +1 -1
- Dockerfile +1 -1
- README.md +4 -6
- backend/data/litellm/config.yaml +1 -3
- run-ollama-docker.sh +4 -8
.github/workflows/deploy-to-hf-spaces.yml
CHANGED
@@ -44,7 +44,7 @@ jobs:
           echo "---" >> temp_readme.md
           cat README.md >> temp_readme.md
           mv temp_readme.md README.md
-
+
       - name: Configure git
         run: |
           git config --global user.email "41898282+github-actions[bot]@users.noreply.github.com"
Dockerfile
CHANGED
@@ -2,7 +2,7 @@
 # Initialize device type args
 # use build args in the docker build commmand with --build-arg="BUILDARG=true"
 ARG USE_CUDA=false
-ARG USE_OLLAMA=
+ARG USE_OLLAMA=false
 # Tested with cu117 for CUDA 11 and cu121 for CUDA 12 (default)
 ARG USE_CUDA_VER=cu121
 # any sentence transformer model; models to use can be found at https://huggingface.co/models?library=sentence-transformers
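The Dockerfile's own comment says these args are passed with --build-arg at build time. A minimal sketch of overriding the new USE_OLLAMA=false default (the image tags and build context below are assumptions, not part of this commit):

# Bake the Ollama runtime into the image, overriding the new default of false.
docker build --build-arg USE_OLLAMA=true -t open-webui:ollama .

# Or build with CUDA support, using the cu121 default noted in the Dockerfile.
docker build --build-arg USE_CUDA=true --build-arg USE_CUDA_VER=cu121 -t open-webui:cuda .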
README.md
CHANGED
@@ -1,14 +1,12 @@
 ---
-title:
+title: Open WebUI
 emoji: 🐳
 colorFrom: purple
 colorTo: gray
 sdk: docker
 app_port: 8080
-license: apache-2.0
 ---
-
-# OpenOllama 👋
+# Open WebUI (Formerly Ollama WebUI) 👋
 
 ![GitHub stars](https://img.shields.io/github/stars/open-webui/open-webui?style=social)
 ![GitHub forks](https://img.shields.io/github/forks/open-webui/open-webui?style=social)
@@ -25,7 +23,7 @@ Open WebUI is an [extensible](https://github.com/open-webui/pipelines), feature-
 
 ![Open WebUI Demo](https://raw.githubusercontent.com/open-webui/open-webui/main/demo.gif)
 
-## Key Features of
+## Key Features of Open WebUI ⭐
 
 - 🚀 **Effortless Setup**: Install seamlessly using Docker or Kubernetes (kubectl, kustomize or helm) for a hassle-free experience with support for both `:ollama` and `:cuda` tagged images.
 
@@ -210,4 +208,4 @@ If you have any questions, suggestions, or need assistance, please open an issue
 
 ---
 
-Created by [Timothy J. Baek](https://github.com/tjbck) - Let's make Open WebUI even more amazing together! 💪
+Created by [Timothy J. Baek](https://github.com/tjbck) - Let's make Open WebUI even more amazing together! 💪
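The sdk and app_port keys in the front matter above tell Hugging Face Spaces to build the repository's Dockerfile and serve the app on port 8080. A rough local equivalent, as a sketch only (the image name is an assumption, not part of the commit):

# Build the Space's image and run it the way the Space would, exposing the declared app_port.
docker build -t open-webui-space .
docker run -p 8080:8080 open-webui-space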
backend/data/litellm/config.yaml
CHANGED
@@ -1,6 +1,4 @@
 general_settings: {}
-litellm_settings:
-  success_callback: ["langfuse"]
-  failure_callback: ["langfuse"]
+litellm_settings: {}
 model_list: []
 router_settings: {}
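This drops the Langfuse logging callbacks from the bundled LiteLLM proxy config. A sketch of restoring the removed lines, should anyone want them back (the credential variables are the standard Langfuse ones and are an assumption, not part of this commit):

# Langfuse credentials read by LiteLLM's langfuse callback (assumed; supply real keys).
export LANGFUSE_PUBLIC_KEY="pk-..."
export LANGFUSE_SECRET_KEY="sk-..."

# Rewrite the config with the callbacks this commit removed.
cat > backend/data/litellm/config.yaml <<'EOF'
general_settings: {}
litellm_settings:
  success_callback: ["langfuse"]
  failure_callback: ["langfuse"]
model_list: []
router_settings: {}
EOF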
run-ollama-docker.sh
CHANGED
@@ -8,15 +8,11 @@ read -r -p "Do you want ollama in Docker with GPU support? (y/n): " use_gpu
 docker rm -f ollama || true
 docker pull ollama/ollama:latest
 
-
-docker_args="-d --network=host -v open-webui:/app/backend/data -e OLLAMA_BASE_URL=http://127.0.0.1:11434 --name open-webui --restart always ghcr.io/open-webui/open-webui:main"
-# docker_args="-d -p 3000:8080 -v ollama:/root/.ollama -v open-webui:/app/backend/data --name open-webui --restart always ghcr.io/open-webui/open-webui:ollama"
+docker_args="-d -v ollama:/root/.ollama -p $host_port:$container_port --name ollama ollama/ollama"
 
-
-
-
-
-docker_args="$docker_args"
+if [ "$use_gpu" = "y" ]; then
+  docker_args="--gpus=all $docker_args"
+fi
 
 docker run $docker_args
 
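With the change above, the script now starts an Ollama container rather than the Open WebUI container, optionally with GPU access. A rough expansion of what it would run when GPU support is chosen ($host_port and $container_port come from earlier in the script, not shown in this hunk; 11434 below is only an assumption based on Ollama's default port):

# Equivalent command assembled by the updated script for the "y" (GPU) answer.
docker run --gpus=all -d -v ollama:/root/.ollama -p 11434:11434 --name ollama ollama/ollama

# Answering "n" runs the same command without the --gpus=all flag.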