NimaKL committed on
Commit
dc256c2
Β·
1 Parent(s): 25cba84

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -21,15 +21,15 @@ if st.button('Load Model', disabled=False):
21
  token_id = []
22
  attention_masks = []
23
  def preprocessing(input_text, tokenizer):
24
- '''
25
  Returns <class transformers.tokenization_utils_base.BatchEncoding> with the following fields:
26
  - input_ids: list of token ids
27
  - token_type_ids: list of token type ids
28
  - attention_mask: list of indices (0,1) specifying which tokens should considered by the model (return_attention_mask = True).
29
- '''
30
  return tokenizer.encode_plus(
31
- input_text,
32
- add_special_tokens = True,
33
  max_length = 32,
34
  pad_to_max_length = True,
35
  return_attention_mask = True,
 
21
  token_id = []
22
  attention_masks = []
23
  def preprocessing(input_text, tokenizer):
24
+ '''
25
  Returns <class transformers.tokenization_utils_base.BatchEncoding> with the following fields:
26
  - input_ids: list of token ids
27
  - token_type_ids: list of token type ids
28
  - attention_mask: list of indices (0,1) specifying which tokens should considered by the model (return_attention_mask = True).
29
+ '''
30
  return tokenizer.encode_plus(
31
+ input_text,
32
+ add_special_tokens = True,
33
  max_length = 32,
34
  pad_to_max_length = True,
35
  return_attention_mask = True,