yhavinga committed
Commit 6202886 · 1 Parent(s): 22ec763

Unpin streamlit, update cache function usage

Files changed (2):
  1. app.py +1 -1
  2. requirements.txt +5 -5
app.py CHANGED
@@ -20,7 +20,7 @@ device = torch.cuda.device_count() - 1
 TRANSLATION_NL_TO_EN = "translation_en_to_nl"
 
 
-@st.cache(suppress_st_warning=True, allow_output_mutation=True)
+@st.cache_resource()
 def load_model(model_name, task):
     os.environ["TOKENIZERS_PARALLELISM"] = "false"
     try:
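
For context: Streamlit deprecated the old @st.cache decorator, and st.cache_resource (available since Streamlit 1.18) is its recommended replacement for caching unserializable global resources such as models. A minimal sketch of the updated pattern, assuming load_model builds a transformers pipeline (the real body lives in app.py's try block):

import os

import streamlit as st
from transformers import pipeline  # assumption: app.py loads models via transformers

# st.cache_resource replaces @st.cache(suppress_st_warning=True,
# allow_output_mutation=True): the returned object is created once per
# process and shared across reruns and sessions, without hashing or
# copying its contents on each access.
@st.cache_resource()
def load_model(model_name, task):
    # Avoid tokenizer fork warnings, as in the original app.py.
    os.environ["TOKENIZERS_PARALLELISM"] = "false"
    # Hypothetical body; the real implementation wraps this in try/except.
    return pipeline(task, model=model_name)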
requirements.txt CHANGED
@@ -1,9 +1,9 @@
 #-f https://download.pytorch.org/whl/torch_stable.html
--f https://download.pytorch.org/whl/cu116
--f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html
-protobuf<3.20
-streamlit>=1.4.0,<=1.10.0
-torch
+#-f https://download.pytorch.org/whl/cu116
+#-f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html
+#protobuf<3.20
+streamlit
+#torch
 git+https://github.com/huggingface/transformers.git@1905384fd576acf4b645a8216907f980b4788d9b
 mtranslate
 psutil
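
With the pin removed, pip resolves the newest streamlit, which must expose the new caching API used in app.py. A quick sanity check, assuming st.cache_resource was introduced in Streamlit 1.18.0:

import streamlit as st

# Fail loudly if the resolved streamlit predates the cache_resource API,
# rather than crashing later at the decorator.
assert hasattr(st, "cache_resource"), (
    f"streamlit {st.__version__} lacks st.cache_resource; need >= 1.18.0"
)
print(st.__version__)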