yuvalkirstain
committed on
Commit
•
8be7166
1
Parent(s):
2eacbde
first attempt to prompt and truncate the suffix of the input
Browse files
fs.py
CHANGED
@@ -168,6 +168,25 @@ class Fs(datasets.GeneratorBasedBuilder):
|
|
168 |
with open(data_file, encoding="utf-8") as f:
|
169 |
for line in f:
|
170 |
row = json.loads(line)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
171 |
yield row["pid"], row
|
172 |
|
173 |
|
|
|
168 |
with open(data_file, encoding="utf-8") as f:
|
169 |
for line in f:
|
170 |
row = json.loads(line)
|
171 |
+
|
172 |
+
prefix, addition = row["source"].strip(), "Summary:"
|
173 |
+
input_length = len(tokenizer.encode(prefix + "\n" + addition))
|
174 |
+
|
175 |
+
n_truncations = 0
|
176 |
+
while input_length > max_source_length:
|
177 |
+
overflow = len(tokenizer.encode(prefix + "\n" + addition)) - max_source_length
|
178 |
+
tokenized_prefix = tokenizer.encode(prefix, add_special_tokens=False)
|
179 |
+
if overflow > 0:
|
180 |
+
tokenized_prefix = tokenized_prefix[:-overflow]
|
181 |
+
n_truncations += 1
|
182 |
+
prefix = tokenizer.decode(tokenized_prefix, skip_special_tokens=False).strip()
|
183 |
+
input_length = len(tokenizer.encode(prefix + "\n" + addition))
|
184 |
+
|
185 |
+
valid_input = prefix + "\n" + addition
|
186 |
+
|
187 |
+
row["input"] = valid_input
|
188 |
+
row["output"] = row["target"]
|
189 |
+
|
190 |
yield row["pid"], row
|
191 |
|
192 |
|