cblock committed
Commit 2624dfd · verified · 1 Parent(s): cc1239c

Update requirements.txt


add sentencepiece

Runtime error
Traceback (most recent call last):
  File "/home/user/app/app.py", line 19, in <module>
    model_fr = SentenceTransformer('ECLASS-Standard/Sahajtomar-french_semantic', token=str(os.environ['hf_token']))
  File "/usr/local/lib/python3.10/site-packages/sentence_transformers/SentenceTransformer.py", line 190, in __init__
    modules = self._load_sbert_model(
  File "/usr/local/lib/python3.10/site-packages/sentence_transformers/SentenceTransformer.py", line 1175, in _load_sbert_model
    module = module_class.load(module_path)
  File "/usr/local/lib/python3.10/site-packages/sentence_transformers/models/Transformer.py", line 187, in load
    return Transformer(model_name_or_path=input_path, **config)
  File "/usr/local/lib/python3.10/site-packages/sentence_transformers/models/Transformer.py", line 38, in __init__
    self.tokenizer = AutoTokenizer.from_pretrained(
  File "/usr/local/lib/python3.10/site-packages/transformers/models/auto/tokenization_auto.py", line 843, in from_pretrained
    return tokenizer_class_fast.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
  File "/usr/local/lib/python3.10/site-packages/transformers/tokenization_utils_base.py", line 2048, in from_pretrained
    return cls._from_pretrained(
  File "/usr/local/lib/python3.10/site-packages/transformers/tokenization_utils_base.py", line 2287, in _from_pretrained
    tokenizer = cls(*init_inputs, **init_kwargs)
  File "/usr/local/lib/python3.10/site-packages/transformers/models/camembert/tokenization_camembert_fast.py", line 127, in __init__
    super().__init__(
  File "/usr/local/lib/python3.10/site-packages/transformers/tokenization_utils_fast.py", line 120, in __init__
    raise ValueError(
ValueError: Couldn't instantiate the backend tokenizer from one of:
(1) a `tokenizers` library serialization file,
(2) a slow tokenizer instance to convert or
(3) an equivalent slow tokenizer class to instantiate and convert.
You need to have sentencepiece installed to convert a slow tokenizer to a fast one.
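For context, the failing call is the SentenceTransformer load from app.py at the top of the traceback: the model's CamemBERT tokenizer is shipped only in its slow, SentencePiece-based form, so transformers has to convert it to a fast tokenizer at load time, and that conversion needs the sentencepiece package. A minimal sketch of that load path follows; the model name and token handling are copied from the traceback, everything else is illustrative and not part of the repo.

    # Minimal sketch: the load that fails without sentencepiece and should
    # succeed once it is listed in requirements.txt.
    import os

    from sentence_transformers import SentenceTransformer

    # Loading runs AutoTokenizer.from_pretrained for the CamemBERT tokenizer;
    # converting the slow (SentencePiece) tokenizer to a fast one requires the
    # `sentencepiece` package, which is what the ValueError above reports.
    model_fr = SentenceTransformer(
        "ECLASS-Standard/Sahajtomar-french_semantic",
        token=str(os.environ["hf_token"]),
    )
    print(model_fr)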

Files changed (1)
  1. requirements.txt +2 -1

requirements.txt CHANGED
@@ -3,4 +3,5 @@ transformers
 torch
 sentence_transformers
 openpyxl
-requests
+requests
+sentencepiece
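As a quick sanity check after this dependency change, the packages touched by the diff can be confirmed importable in the runtime before app.py rebuilds the models. This is a hypothetical snippet, not part of the repo.

    # Hypothetical post-install check: confirm the packages listed in this
    # commit's diff resolve in the environment before loading the models.
    import importlib.util

    for pkg in ("requests", "sentencepiece"):
        if importlib.util.find_spec(pkg) is None:
            raise RuntimeError(f"missing requirement: {pkg}")
    print("requests and sentencepiece are importable")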