pedramyazdipoor committed on
Commit
cfb1c97
·
1 Parent(s): 3e1f3f8

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +4 -3
README.md CHANGED
@@ -75,7 +75,7 @@ def generate_indexes(start_logits, end_logits, N, min_index_list):
75
  final_start_idx.append(start_idx)
76
  final_end_idx.append(end_idx)
77
 
78
- return final_start_idx, final_end_idx
79
  ```
80
 
81
  ```python
@@ -91,9 +91,10 @@ encoding = tokenizer(text,question,add_special_tokens = True,
91
  truncation = 'only_first',
92
  max_length = 32)
93
  out = model(encoding['input_ids'].to(device),encoding['attention_mask'].to(device), encoding['token_type_ids'].to(device))
94
- print(generate_indexes(out['start_logits'][0][1:], out['end_logits'][0][1:], 5, 0))
 
95
  print(tokenizer.tokenize(text + question))
96
- >>> ([5], [5])
97
  >>> ['▁سلام', '▁من', '▁پدر', 'ام', 'م', '▁26', '▁سالم', 'ه', 'نام', 'م', '▁چیست', '؟']
98
  ```
99
 
 
75
  final_start_idx.append(start_idx)
76
  final_end_idx.append(end_idx)
77
 
78
+ return final_start_idx[0], final_end_idx[0]
79
  ```
80
 
81
  ```python
 
91
  truncation = 'only_first',
92
  max_length = 32)
93
  out = model(encoding['input_ids'].to(device),encoding['attention_mask'].to(device), encoding['token_type_ids'].to(device))
94
+ start_index, end_index = generate_indexes(out['start_logits'][0][1:], out['end_logits'][0][1:], 5, 0)
95
+ print(tokenizer.tokenize(text + question)[start_index:end_index+1])
96
  print(tokenizer.tokenize(text + question))
97
+ >>> ['▁26']
98
  >>> ['▁سلام', '▁من', '▁پدر', 'ام', 'م', '▁26', '▁سالم', 'ه', 'نام', 'م', '▁چیست', '؟']
99
  ```
100