Spaces:
Sleeping
Sleeping
NicholasGuerrero
committed on
Commit
·
73ef89f
1
Parent(s):
2f63131
tweak 7
Browse files- Dockerfile +2 -1
- app.py +1 -1
Dockerfile
CHANGED
@@ -23,8 +23,9 @@ ENV GRADIO_SERVER_NAME="0.0.0.0"
|
|
23 |
|
24 |
# Download a checkpoint
|
25 |
RUN mkdir content
|
26 |
-
ADD --chown=user https://huggingface.co/
|
27 |
|
|
|
28 |
|
29 |
RUN pip install -r requirements.txt
|
30 |
|
|
|
23 |
|
24 |
# Download a checkpoint
|
25 |
RUN mkdir content
|
26 |
+
ADD --chown=user https://huggingface.co/LoneStriker/OpenBioLLM-Llama3-8B-GGUF/blob/main/OpenBioLLM-Llama3-8B-Q5_K_M.gguf content/OpenBioLLM-Llama3-8B-Q5_K_M.gguf
|
27 |
|
28 |
+
RUN ls -l $HOME/app
|
29 |
|
30 |
RUN pip install -r requirements.txt
|
31 |
|
app.py
CHANGED
@@ -11,7 +11,7 @@ from huggingface_hub.file_download import http_get
|
|
11 |
llm = Llama(
|
12 |
# model_path="./Phi-3-mini-4k-instruct-q4.gguf",
|
13 |
# model_path="./llama3-gguf/OpenBioLLM-Llama3-8B-Q5_K_M.gguf",
|
14 |
-
model_path = "
|
15 |
n_ctx=2048,
|
16 |
n_gpu_layers=50, # change n_gpu_layers if you have more or less VRAM
|
17 |
)
|
|
|
11 |
llm = Llama(
|
12 |
# model_path="./Phi-3-mini-4k-instruct-q4.gguf",
|
13 |
# model_path="./llama3-gguf/OpenBioLLM-Llama3-8B-Q5_K_M.gguf",
|
14 |
+
model_path = "/content/OpenBioLLM-Llama3-8B-Q5_K_M.gguf",
|
15 |
n_ctx=2048,
|
16 |
n_gpu_layers=50, # change n_gpu_layers if you have more or less VRAM
|
17 |
)
|