new5558 committed on
Commit 643e5ff
1 Parent(s): d8e4832

docs: fix padding error

Files changed (1):
README.md +6 -4
README.md CHANGED
@@ -50,8 +50,9 @@ with torch.no_grad():
         all_sent.append(" ".join(tokenize(sent)).replace("_","[!und:]"))
 
     sentence = " _ ".join(all_sent)
-    token_ids = tokenizer(sentence, return_tensors = 'pt')['input_ids']
-    features = model(token_ids)
+    tokenized_text = tokenizer(sentence, return_tensors = 'pt')
+    token_ids = tokenized_text['input_ids']
+    features = model(**tokenized_text)
 ```
 
 For batch processing,
@@ -69,8 +70,9 @@ with torch.no_grad():
 
     sentence = " _ ".join(all_sent)
     inputList.append(sentence)
-    token_ids = tokenizer(inputList, padding = True, return_tensors = 'pt').input_ids
-    features = model(token_ids)
+    tokenized_text = tokenizer(inputList, padding = True, return_tensors = 'pt')
+    token_ids = tokenized_text['input_ids']
+    features = model(**tokenized_text)
 ```
 
 To use HoogBERTa as an embedding layer, use
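
For context on why this commit fixes the padding error: `tokenizer(..., padding=True, return_tensors='pt')` returns both `input_ids` and an `attention_mask`, and the old call `model(token_ids)` dropped the mask, so the model attended to pad tokens and the features of shorter sequences in a batch were corrupted. Passing the full encoding with `model(**tokenized_text)` forwards the mask as well. Below is a minimal sketch of the before/after behavior; the checkpoint ID `MODEL_NAME` and the sample inputs are placeholders for illustration, not taken from this repository.

```python
import torch
from transformers import AutoTokenizer, AutoModel

# Placeholder checkpoint ID for illustration only; substitute the
# actual HoogBERTa repository name when running this.
MODEL_NAME = "your-org/HoogBERTa"

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModel.from_pretrained(MODEL_NAME)

# Two inputs of different lengths, so padding=True actually pads.
inputList = ["first _ example _ sentence", "short input"]

with torch.no_grad():
    # padding=True pads the batch to the longest sequence and returns
    # an attention_mask marking which positions are real tokens.
    tokenized_text = tokenizer(inputList, padding=True, return_tensors='pt')

    # Old README call: only input_ids reach the model, the mask is
    # dropped, and pad tokens leak into the shorter sequence's features.
    features_old = model(tokenized_text['input_ids'])

    # Fixed call from this commit: ** unpacks input_ids and
    # attention_mask together, so padded positions are masked out.
    features_new = model(**tokenized_text)
```

In the single-sentence snippet no padding occurs, so the old and new calls produce the same features there; the change matters for the batch snippet, which genuinely depends on the attention mask.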