NicholasGuerrero committed
Commit 312337f · 1 Parent(s): 6c3a696
Files changed (2):
  1. Dockerfile +1 -16
  2. app.py +8 -0
Dockerfile CHANGED
@@ -1,25 +1,10 @@
-# Use the Python 3.11 base image
 FROM python:3.11
 
-# Set the working directory
 WORKDIR /usr/src/app
-
-# Copy the application code into the container
 COPY . .
-
-# Install the Python dependencies from requirements.txt
 RUN pip install -r requirements.txt
 
-# Install huggingface-cli
-RUN pip install huggingface-cli
-
-# Expose the port on which your Gradio app runs
 EXPOSE 7860
-
-# Set an environment variable for the Gradio server name
 ENV GRADIO_SERVER_NAME="0.0.0.0"
 
-# Define an entrypoint script to run both commands
-COPY entrypoint.sh /usr/src/app/entrypoint.sh
-RUN chmod +x /usr/src/app/entrypoint.sh
-ENTRYPOINT ["/usr/src/app/entrypoint.sh"]
+CMD ["python", "app.py"]
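With the entrypoint script removed, the image starts app.py directly via CMD and leaves it to the application to bind Gradio to the exposed port; GRADIO_SERVER_NAME="0.0.0.0" is picked up by Gradio at launch. A minimal sketch, assuming a plain Gradio interface — the respond handler and the explicit launch arguments are illustrative, not part of this commit:

# Minimal sketch, not from this commit: how a Gradio app is expected to bind to
# the address/port the Dockerfile configures. Gradio reads GRADIO_SERVER_NAME
# automatically; passing it explicitly here just makes the wiring visible.
import os
import gradio as gr

def respond(message: str) -> str:
    # Placeholder handler; the real app calls the Llama model.
    return message

demo = gr.Interface(fn=respond, inputs="text", outputs="text")
demo.launch(
    server_name=os.environ.get("GRADIO_SERVER_NAME", "0.0.0.0"),
    server_port=7860,  # matches EXPOSE 7860 in the Dockerfile
)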
app.py CHANGED
@@ -6,6 +6,14 @@ from huggingface_hub import hf_hub_download
 
 # huggingface-cli download microsoft/Phi-3-mini-4k-instruct-gguf Phi-3-mini-4k-instruct-q4.gguf --local-dir .
 # huggingface-cli download LoneStriker/OpenBioLLM-Llama3-8B-GGUF --local-dir ./llama3-gguf
+
+model_name = "aaditya/OpenBioLLM-Llama3-8B-GGUF"
+model_file = "openbiollm-llama3-8b.Q5_K_M.gguf"
+
+model_path = hf_hub_download(model_name,
+                             filename=model_file,
+                             local_dir='/llama3-gguf')
+
 llm = Llama(
     # model_path="./Phi-3-mini-4k-instruct-q4.gguf",
     model_path="./llama3-gguf/OpenBioLLM-Llama3-8B-Q5_K_M.gguf",