NicholasGuerrero committed
Commit 6c3a696 · 1 Parent(s): 9fafb6e
Files changed (3)
  1. Dockerfile +17 -1
  2. app.py +1 -1
  3. entrypoint.sh +7 -0
Dockerfile CHANGED
@@ -1,9 +1,25 @@
+ # Use the Python 3.11 base image
  FROM python:3.11

+ # Set the working directory
  WORKDIR /usr/src/app
+
+ # Copy the application code into the container
  COPY . .
+
+ # Install the Python dependencies from requirements.txt
  RUN pip install -r requirements.txt
+
+ # Install the Hugging Face CLI
+ RUN pip install -U "huggingface_hub[cli]"
+
+ # Expose the port on which your Gradio app runs
  EXPOSE 7860
+
+ # Set an environment variable for the Gradio server name
  ENV GRADIO_SERVER_NAME="0.0.0.0"

- CMD ["python", "app.py"]
+ # Define an entrypoint script to run both commands
+ COPY entrypoint.sh /usr/src/app/entrypoint.sh
+ RUN chmod +x /usr/src/app/entrypoint.sh
+ ENTRYPOINT ["/usr/src/app/entrypoint.sh"]
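
A quick way to sanity-check the new ENTRYPOINT flow is to build and run the image locally; the image tag and the host port mapping below are illustrative assumptions, not part of this commit.

    # Build the image (tag name is illustrative)
    docker build -t openbiollm-gradio .

    # Run it; the entrypoint downloads the GGUF model, then launches the Gradio app on port 7860
    docker run --rm -p 7860:7860 openbiollm-gradio
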
app.py CHANGED
@@ -8,7 +8,7 @@ from huggingface_hub import hf_hub_download
  # huggingface-cli download LoneStriker/OpenBioLLM-Llama3-8B-GGUF --local-dir ./llama3-gguf
  llm = Llama(
      # model_path="./Phi-3-mini-4k-instruct-q4.gguf",
-     model_path="LoneStriker/OpenBioLLM-Llama3-8B-Q5_K_M.gguf",
+     model_path="./llama3-gguf/OpenBioLLM-Llama3-8B-Q5_K_M.gguf",
      n_ctx=2048,
      n_gpu_layers=50, # change n_gpu_layers if you have more or less VRAM
  )
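
With this change app.py no longer passes a repo id to Llama(); it expects the quantized file to already exist on disk under ./llama3-gguf. A minimal pre-flight check, assuming the container's working directory, could look like:

    # Check that the GGUF file is where app.py now expects it
    if [ -f ./llama3-gguf/OpenBioLLM-Llama3-8B-Q5_K_M.gguf ]; then
        echo "Model present; app.py can load it"
    else
        echo "Model missing; run the entrypoint download step first" >&2
    fi
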
entrypoint.sh ADDED
@@ -0,0 +1,7 @@
+ #!/bin/bash
+
+ # Download the Hugging Face model
+ huggingface-cli download LoneStriker/OpenBioLLM-Llama3-8B-GGUF OpenBioLLM-Llama3-8B-Q5_K_M.gguf --local-dir ./llama3-gguf
+
+ # Start your Python application (replace "app.py" with your actual script)
+ python app.py
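
If the model directory is ever mounted as a volume, re-downloading on every container start is wasteful. A variant of the entrypoint that skips the download when the file is already present might look like the sketch below; the existence guard is an assumption, not part of this commit.

    #!/bin/bash
    set -e

    MODEL_DIR=./llama3-gguf
    MODEL_FILE=OpenBioLLM-Llama3-8B-Q5_K_M.gguf

    # Download the model only if it is not already cached locally
    if [ ! -f "$MODEL_DIR/$MODEL_FILE" ]; then
        huggingface-cli download LoneStriker/OpenBioLLM-Llama3-8B-GGUF "$MODEL_FILE" --local-dir "$MODEL_DIR"
    fi

    # Start the Gradio app
    python app.py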