vkrishnan569 committed on
Commit b2c482e
1 Parent(s): fac3e1f

Server Deployment

Files changed (2)
  1. Dockerfile +0 -2
  2. model.py +1 -6
Dockerfile CHANGED
@@ -8,6 +8,4 @@ RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
 
 COPY . .
 
-RUN chmod u+rwx /path_to_cache_directory
-
 CMD ["uvicorn","main:model","--host", "0.0.0.0", "--port", "80"]
model.py CHANGED
@@ -6,14 +6,9 @@ repo_id = 'TheBloke/Llama-2-7B-Chat-GGUF'
 # Define the filename you want to download
 filename = 'llama-2-7b-chat.Q2_K.gguf'
 
-# Define the cache directory (optional)
-# If not provided, the default cache directory will be used
-cache_dir = './path_to_cache_directory'
-
 def model_download():
     # Download the file
     file_path = hf_hub_download(
         repo_id=repo_id,
-        filename=filename,
-        cache_dir=cache_dir) # The file_path variable now contains the local path to the downloaded file
+        filename=filename) # The file_path variable now contains the local path to the downloaded file
     print(f"File downloaded to: {file_path}")