Update fs.py
fs.py CHANGED
@@ -170,18 +170,20 @@ class Fs(datasets.GeneratorBasedBuilder):
             row = json.loads(line)

             prefix, addition = row["source"].strip(), "Summary:"
-
-
+            encoded_input = tokenizer.encode(prefix + "\n" + addition)
+            input_length = len(encoded_input)
+
             n_truncations = 0
             while input_length > max_source_length:
-                overflow =
+                overflow = input_length - max_source_length
                 tokenized_prefix = tokenizer.encode(prefix, add_special_tokens=False)
                 if overflow > 0:
                     tokenized_prefix = tokenized_prefix[:-overflow]
                     n_truncations += 1
                 prefix = tokenizer.decode(tokenized_prefix, skip_special_tokens=False).strip()
-
-
+                encoded_input = tokenizer.encode(prefix + "\n" + addition)
+                input_length = len(encoded_input)
+
             valid_input = prefix + "\n" + addition

             row["input"] = valid_input
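The added lines measure the encoded prompt (`prefix + "\n" + addition`) before the loop starts and re-measure it after every truncation pass, with `overflow` set to the number of tokens over budget on each iteration. Below is a minimal, self-contained sketch of the same truncation loop, assuming a Hugging Face `transformers` tokenizer; the function name `truncate_source` and the `t5-small` checkpoint are illustrative and not part of fs.py.

```python
# A minimal sketch of the truncation loop in this diff, assuming a Hugging Face
# transformers tokenizer. `truncate_source` and "t5-small" are illustrative
# names, not identifiers from fs.py.
from transformers import AutoTokenizer


def truncate_source(source: str, tokenizer, max_source_length: int):
    """Trim the source so the full prompt fits within max_source_length tokens."""
    prefix, addition = source.strip(), "Summary:"
    # Measure the full prompt once before entering the loop.
    input_length = len(tokenizer.encode(prefix + "\n" + addition))

    n_truncations = 0
    while input_length > max_source_length:
        overflow = input_length - max_source_length
        tokenized_prefix = tokenizer.encode(prefix, add_special_tokens=False)
        if overflow > 0:
            # Drop exactly the overflowing tokens from the end of the prefix.
            tokenized_prefix = tokenized_prefix[:-overflow]
            n_truncations += 1
        prefix = tokenizer.decode(tokenized_prefix, skip_special_tokens=False).strip()
        # Re-measure: decoding and re-encoding can shift the token count, so the
        # loop condition must see a fresh length on every pass.
        input_length = len(tokenizer.encode(prefix + "\n" + addition))

    return prefix + "\n" + addition, n_truncations


if __name__ == "__main__":
    tokenizer = AutoTokenizer.from_pretrained("t5-small")
    text, n = truncate_source("a long document " * 300, tokenizer, max_source_length=64)
    print(n, len(tokenizer.encode(text)))  # final length fits the budget
```

Recomputing `input_length` from the re-encoded text, rather than just subtracting `overflow`, matters because decode followed by encode does not always round-trip to the same token count with subword tokenizers.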