Commit 85a3972
Parent(s): cf14dc1

tweak 7

Files changed:
- Dockerfile (+23 -5)
- app.py (+1 -4)
Dockerfile CHANGED
@@ -1,13 +1,31 @@
 FROM python:3.11
 
-
-
-
+# Set up a new user named "user" with user ID 1000
+RUN useradd -m -u 1000 user
+
+# Switch to the "user" user
+USER user
+
+# Set home to the user's home directory
+ENV HOME=/home/user \
+    PATH=/home/user/.local/bin:$PATH
+
+WORKDIR $HOME/app
+
+# Copy the current directory contents into the container at $HOME/app setting the owner to the user
+COPY --chown=user . $HOME/app
+
+
 
 EXPOSE 7860
 ENV GRADIO_SERVER_NAME="0.0.0.0"
 
-
-
+
+# Download a checkpoint
+RUN mkdir content
+ADD --chown=user https://huggingface.co/NicholasJohn/OpenBioLLM-Llama3-8B-Q5_K_M.gguf/blob/main/OpenBioLLM-Llama3-8B-Q5_K_M.gguf content/OpenBioLLM-Llama3-8B-Q5_K_M.gguf
+
+
+RUN pip install -r requirements.txt
 
 CMD ["python", "app.py"]
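Note on the new ADD instruction: Hugging Face URLs under blob/main/ return the HTML file-viewer page, not the raw file, so this ADD would bake a web page into the image rather than the GGUF checkpoint; raw files are served under resolve/main/. A minimal sketch of the corrected instructions, with the same source repo and destination path and only the URL segment swapped:

# Fetch the raw checkpoint (resolve/, not blob/) so the layer holds the GGUF file itself
RUN mkdir content
ADD --chown=user https://huggingface.co/NicholasJohn/OpenBioLLM-Llama3-8B-Q5_K_M.gguf/resolve/main/OpenBioLLM-Llama3-8B-Q5_K_M.gguf content/OpenBioLLM-Llama3-8B-Q5_K_M.gguf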
app.py CHANGED
@@ -11,10 +11,7 @@ from huggingface_hub.file_download import http_get
 llm = Llama(
     # model_path="./Phi-3-mini-4k-instruct-q4.gguf",
     # model_path="./llama3-gguf/OpenBioLLM-Llama3-8B-Q5_K_M.gguf",
-    model_path =
-        repo_id=os.environ.get("REPO_ID", "LoneStriker/OpenBioLLM-Llama3-8B-GGUF"),
-        filename=os.environ.get("MODEL_FILE", "Llama3-8B-Q5_K_M.gguf"),
-    ),
+    model_path = "content/OpenBioLLM-Llama3-8B-Q5_K_M.gguf",
     n_ctx=2048,
     n_gpu_layers=50,  # change n_gpu_layers if you have more or less VRAM
 )
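For reference, a minimal sketch of how the updated constructor is exercised at runtime. The chat-completion call and prompt below are illustrative additions, not part of app.py; the sketch assumes llama-cpp-python from requirements.txt and the checkpoint baked into the image under $HOME/app/content/ by the Dockerfile above:

from llama_cpp import Llama

llm = Llama(
    # Relative path, resolved against WORKDIR $HOME/app where the Dockerfile placed the file
    model_path="content/OpenBioLLM-Llama3-8B-Q5_K_M.gguf",
    n_ctx=2048,
    n_gpu_layers=50,  # change n_gpu_layers if you have more or less VRAM
)

# Illustrative usage (hypothetical prompt, not from app.py)
out = llm.create_chat_completion(
    messages=[{"role": "user", "content": "What does hemoglobin do?"}],
    max_tokens=128,
)
print(out["choices"][0]["message"]["content"])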