Commit f0cd7b3 · tweak 7
Parent(s): 73ef89f

Files changed:
- Dockerfile +1 -1
- app.py +1 -1
Dockerfile CHANGED
@@ -25,7 +25,7 @@ ENV GRADIO_SERVER_NAME="0.0.0.0"
 RUN mkdir content
 ADD --chown=user https://huggingface.co/LoneStriker/OpenBioLLM-Llama3-8B-GGUF/blob/main/OpenBioLLM-Llama3-8B-Q5_K_M.gguf content/OpenBioLLM-Llama3-8B-Q5_K_M.gguf

-RUN ls -l $HOME/app
+RUN ls -l $HOME/app/content

 RUN pip install -r requirements.txt

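Side note on the ADD line above: huggingface.co serves raw files under /resolve/ paths, while /blob/ URLs return the HTML file viewer, so build-time downloads usually target /resolve/main/. As an alternative to baking the weights into the image, the file could also be fetched when the app starts. A minimal sketch using huggingface_hub's hf_hub_download; the repo id and filename come from the ADD line, while local_dir and the runtime-download approach itself are assumptions, not what this Space does:

# Sketch: fetch the GGUF at runtime instead of at image build time.
# hf_hub_download downloads (or reuses a cached copy of) the file
# and returns its local path.
from huggingface_hub import hf_hub_download

model_path = hf_hub_download(
    repo_id="LoneStriker/OpenBioLLM-Llama3-8B-GGUF",  # from the ADD URL above
    filename="OpenBioLLM-Llama3-8B-Q5_K_M.gguf",      # same file the Dockerfile fetches
    local_dir="content",                              # mirrors the content/ directory (assumed)
)
print(model_path)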
app.py CHANGED
@@ -11,7 +11,7 @@ from huggingface_hub.file_download import http_get
 llm = Llama(
     # model_path="./Phi-3-mini-4k-instruct-q4.gguf",
     # model_path="./llama3-gguf/OpenBioLLM-Llama3-8B-Q5_K_M.gguf",
-    model_path = "
+    model_path = "./content/OpenBioLLM-Llama3-8B-Q5_K_M.gguf",
     n_ctx=2048,
     n_gpu_layers=50,  # change n_gpu_layers if you have more or less VRAM
 )
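For completeness, a Llama object loaded this way is typically queried through llama-cpp-python's create_chat_completion (or called directly with a raw prompt string). A minimal usage sketch; the messages below are hypothetical examples, not code from app.py:

# Sketch: query the model constructed above (hypothetical prompt).
output = llm.create_chat_completion(
    messages=[
        {"role": "system", "content": "You are a helpful biomedical assistant."},
        {"role": "user", "content": "Briefly, what does hemoglobin do?"},
    ],
    max_tokens=256,  # cap the response length
)
print(output["choices"][0]["message"]["content"])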