palondomus committed
Commit a737444 · 1 Parent(s): 7135578

CaesarFrenchLLM first test

Files changed (2)
  1. Dockerfile +1 -1
  2. caesarfrenchllm.py +1 -1
Dockerfile CHANGED
@@ -10,5 +10,5 @@ COPY ./requirements.txt /code/requirements.txt
 RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
 
 COPY . .
-
+RUN mkdir ./.cache
 CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
caesarfrenchllm.py CHANGED
@@ -1,6 +1,6 @@
 import torch
 import os
-#os.environ['TRANSFORMERS_CACHE'] = "T:/CaesarLLModel/.cache"
+os.environ['TRANSFORMERS_CACHE'] = "./.cache"
 from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig, TextStreamer
 from vigogne.preprocess import generate_inference_chat_prompt
 class CaesarFrenchLLM:
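
Taken together, the two changes point the Transformers download cache at ./.cache, a directory the Dockerfile now creates so it is writable inside the container. A minimal sketch of the effect, assuming the environment variable is set before transformers is imported (the model name below is a placeholder for illustration, not part of this commit):

import os

# Must be set before transformers resolves its cache location.
os.environ['TRANSFORMERS_CACHE'] = "./.cache"

from transformers import AutoTokenizer

# Downloads now land under ./.cache instead of the default per-user cache.
# "gpt2" is purely illustrative; the repo loads its own model in CaesarFrenchLLM.
tokenizer = AutoTokenizer.from_pretrained("gpt2")
print(tokenizer.tokenize("Bonjour !"))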