yuvalkirstain committed
Commit: 4316980
Parent(s): 1ade361

inserting prefix truncation to the process input func
debug.py ADDED
@@ -0,0 +1,14 @@
+from transformers import AutoTokenizer
+from datasets import load_dataset
+
+def main():
+    tokenizer = AutoTokenizer.from_pretrained("t5-base")
+    # dataset = load_dataset("tau/fs",name="summ_screen_fd", max_source_length=512, tokenizer=tokenizer, prompt="Summary:")
+    ssfd_debug = load_dataset("/Users/yuvalkirstain/repos/fs", name="summ_screen_fd_debug", max_source_length=512,
+                              tokenizer=tokenizer)
+    arxiv_debug = load_dataset("/Users/yuvalkirstain/repos/fs", name="arxiv_debug", max_source_length=512,
+                               tokenizer=tokenizer, prompt="Summarize the above:")
+
+
+if __name__ == '__main__':
+    main()
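For context on the keyword arguments above: `load_dataset` forwards extra kwargs such as `max_source_length`, `tokenizer`, and `prompt` to the dataset script's builder config, which is how the `FSConfig` subclasses in fs.py receive them. A minimal sketch of that pattern, assuming standard `datasets.BuilderConfig` behavior (the `MyConfig` name is hypothetical):

import datasets

class MyConfig(datasets.BuilderConfig):
    # Hypothetical config illustrating how extra load_dataset kwargs arrive.
    def __init__(self, max_source_length=None, tokenizer=None, prompt=None, **kwargs):
        super().__init__(**kwargs)
        self.max_source_length = max_source_length  # e.g. 512
        self.tokenizer = tokenizer                  # e.g. AutoTokenizer.from_pretrained("t5-base")
        self.prompt = prompt                        # e.g. "Summarize the above:"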
fs.py CHANGED
@@ -4,6 +4,8 @@
 
 import json
 import os
+from abc import abstractmethod
+
 import datasets
 from datasets import load_dataset
 from transformers import AutoTokenizer  # TODO comment out when getting rid of __main__:
@@ -120,11 +122,17 @@ class ScrollsConfig(FSConfig):
         self.validation_file = "validation.jsonl"
         self.test_file = "test.jsonl"
 
-        self.input_key = "input"
+        self.input_key = "input"  # TODO I think that we should keep the original fields
         self.output_key = "output"
         self.id_key = "pid"
         self.redundant_fields = [self.input_key, self.output_key, "id"]
 
+    def process_input(self, s):
+        prefix = s.strip()
+        suffix = "\nSummarize the above:"
+        prefix = _truncate_prefix(prefix, suffix, self.max_source_length, self.tokenizer)
+        return prefix + suffix
+
 
 class ArxivConfig(FSConfig):
     def __init__(self, **kwargs):
@@ -141,11 +149,28 @@ class ArxivConfig(FSConfig):
         self.redundant_fields = [self.input_key, self.output_key, self.id_key, 'labels', 'section_names', 'sections']
 
     def process_input(self, s):
-        return ' '.join(s)
+        prefix = ' '.join(s)
+        suffix = "\nSummarize the above:"
+        prefix = _truncate_prefix(prefix, suffix, self.max_source_length, self.tokenizer)
+        return prefix + suffix
 
     def process_output(self, s):
         # TODO remove "<S>" and "</S>" ?
-        return ' '.join(s)
+        return ' '.join(s).replace("<S>", "").replace("</S>", "")
+
+
+def _truncate_prefix(prefix, suffix, max_source_length, tokenizer):
+    encoded_input = tokenizer.encode(prefix + suffix)
+
+    while len(encoded_input) > max_source_length:
+        overflow = len(encoded_input) - max_source_length
+        tokenized_prefix = tokenizer.encode(prefix, add_special_tokens=False)
+        if overflow > 0:
+            tokenized_prefix = tokenized_prefix[:-overflow]
+        prefix = tokenizer.decode(tokenized_prefix, skip_special_tokens=False).strip()
+        encoded_input = tokenizer.encode(prefix + suffix)
+
+    return prefix
 
 
 class Fs(datasets.GeneratorBasedBuilder):
@@ -240,24 +265,9 @@ class Fs(datasets.GeneratorBasedBuilder):
             for line in f:
                 row = json.loads(line)
 
-                prefix = self.config.process_input(row[self.config.input_key])
-                suffix = "\n" + self.config.prompt
-                encoded_input = tokenizer.encode(prefix + suffix)
-
-                n_truncations = 0
-                while len(encoded_input) > max_source_length:
-                    overflow = len(encoded_input) - max_source_length
-                    tokenized_prefix = tokenizer.encode(prefix, add_special_tokens=False)
-                    if overflow > 0:
-                        tokenized_prefix = tokenized_prefix[:-overflow]
-                        n_truncations += 1
-                    prefix = tokenizer.decode(tokenized_prefix, skip_special_tokens=False).strip()
-                    encoded_input = tokenizer.encode(prefix + suffix)
-
                 row["pid"] = row[self.config.id_key]
-                row["source"] = prefix + suffix
-                target = row[self.config.output_key]
-                row["target"] = self.config.process_output(target)
+                row["source"] = self.config.process_input(row[self.config.input_key])
+                row["target"] = self.config.process_output(row[self.config.output_key])
 
                 self.config.remove_redundant_fields(row)
                 yield row["pid"], row
@@ -265,17 +275,3 @@
 
 def _get_task_name_from_data_url(data_url):
     return data_url.split("/")[-1].split(".")[0]
-
-
-if __name__ == '__main__':
-    tokenizer = AutoTokenizer.from_pretrained("t5-base")
-    # dataset = load_dataset("tau/fs",name="summ_screen_fd", max_source_length=512, tokenizer=tokenizer, prompt="Summary:")
-    ssfd_debug = load_dataset("/Users/uri/Projects/fs/fs.py", name="summ_screen_fd_debug", max_source_length=512,
-                              tokenizer=tokenizer)
-    arxiv_debug = load_dataset("/Users/uri/Projects/fs/fs.py", name="arxiv_debug", max_source_length=512,
-                               tokenizer=tokenizer, prompt="Summarize the above:")
-    x = 5
-    # builder = Scrolls("scrolls", "summ_screen_fd")
-    # builder.download_and_prepare()
-    # dataset = builder.as_dataset("validation")
-    # x = 5
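To see the change in isolation: below is a self-contained sketch of the `_truncate_prefix` helper added in this commit, plus a small demo. The sample text and the 20-token budget are made up for illustration; `t5-base` matches the tokenizer used in debug.py.

from transformers import AutoTokenizer

def _truncate_prefix(prefix, suffix, max_source_length, tokenizer):
    # Re-encode prefix + suffix until the result fits the token budget,
    # trimming the overflow off the end of the prefix each round.
    encoded_input = tokenizer.encode(prefix + suffix)
    while len(encoded_input) > max_source_length:
        overflow = len(encoded_input) - max_source_length
        tokenized_prefix = tokenizer.encode(prefix, add_special_tokens=False)
        if overflow > 0:
            tokenized_prefix = tokenized_prefix[:-overflow]
        prefix = tokenizer.decode(tokenized_prefix, skip_special_tokens=False).strip()
        encoded_input = tokenizer.encode(prefix + suffix)
    return prefix

if __name__ == '__main__':
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    prefix = "a long document " * 50  # made-up input for the demo
    suffix = "\nSummarize the above:"
    truncated = _truncate_prefix(prefix, suffix, 20, tokenizer)
    # The loop exits only once prefix + suffix fits, so this prints <= 20.
    print(len(tokenizer.encode(truncated + suffix)))

The suffix is kept intact and only the prefix is trimmed, so the prompt always survives truncation; this mirrors what the new `process_input` implementations do before the builder assigns `row["source"]`.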