Commit f5d5532 committed by Uri · 1 parent: 53c563c

Update fs.py

Files changed (1)
fs.py  +7 -5
fs.py CHANGED
@@ -170,18 +170,20 @@ class Fs(datasets.GeneratorBasedBuilder):
                 row = json.loads(line)
 
                 prefix, addition = row["source"].strip(), "Summary:"
-                input_length = len(tokenizer.encode(prefix + "\n" + addition))
-
+                encoded_input = tokenizer.encode(prefix + "\n" + addition)
+                input_length = len(encoded_input)
+
                 n_truncations = 0
                 while input_length > max_source_length:
-                    overflow = len(tokenizer.encode(prefix + "\n" + addition)) - max_source_length
+                    overflow = input_length - max_source_length
                     tokenized_prefix = tokenizer.encode(prefix, add_special_tokens=False)
                     if overflow > 0:
                         tokenized_prefix = tokenized_prefix[:-overflow]
                         n_truncations += 1
                     prefix = tokenizer.decode(tokenized_prefix, skip_special_tokens=False).strip()
-                    input_length = len(tokenizer.encode(prefix + "\n" + addition))
-
+                    encoded_input = tokenizer.encode(prefix + "\n" + addition)
+                    input_length = len(encoded_input)
+
                 valid_input = prefix + "\n" + addition
 
                 row["input"] = valid_input