yuvalkirstain committed
Commit 127e3fd • Parent(s): b847b51
building to use prompt
fs.py
CHANGED
@@ -4,19 +4,21 @@
 
 import json
 import os
+from abc import abstractmethod
+
 import datasets
 from citations_and_descriptions import (
     _SUMM_SCREEN_DESCRIPTION, _SUMM_SCREEN_CITATION,
     _GOV_REPORT_CITATION, _GOV_REPORT_DESCRIPTION,
     _ARXIV_CITATION, _ARXIV_DESCRIPTION,
-    _FS_DESCRIPTION, _FS_CITATION
+    _FS_DESCRIPTION, _FS_CITATION,
 )
 
 
 class FSConfig(datasets.BuilderConfig):
     """BuilderConfig for FS."""
 
-    def __init__(self, features, data_url, citation, url, max_source_length, tokenizer, **kwargs):
+    def __init__(self, data_url, citation, url, max_source_length, tokenizer, **kwargs):
         """BuilderConfig for FS.
         Args:
             features: `list[string]`, list of the features that will appear in the
@@ -30,43 +32,110 @@ class FSConfig(datasets.BuilderConfig):
             **kwargs: keyword arguments forwarded to super.
         """
         super(FSConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
-        self.features = features
+        self.features = ["pid", self.source_key, self.source_key]  # note: second entry likely meant to be self.target_key
         self.data_url = data_url
         self.citation = citation
         self.url = url
         self.max_source_length = max_source_length
         self.tokenizer = tokenizer
-        self.prompt = None
-        self.input_key = None
-        self.output_key = None
-        self.redundant_fields = []
-
-        self.train_file = "train.jsonl"
-        self.validation_file = "validation.jsonl"
-        self.test_file = "test.jsonl"
 
     def remove_redundant_fields(self, example):
         for field in self.redundant_fields:
             del example[field]
 
-
-
+    @abstractmethod
+    def postprocess(self, s):
+        pass
 
-
-
+    @property
+    @abstractmethod
+    def original_source_key(self):
+        pass
+
+    @property
+    @abstractmethod
+    def original_target_key(self):
+        pass
+
+    @property
+    @abstractmethod
+    def train_file(self):
+        pass
+
+    @property
+    @abstractmethod
+    def validation_file(self):
+        pass
+
+    @property
+    @abstractmethod
+    def test_file(self):
+        pass
+
+    @property
+    def source_key(self):
+        return "source"
+
+    @property
+    def target_key(self):
+        return "target"
+
+    @property
+    @abstractmethod
+    def id_key(self):
+        pass
+
+    @property
+    def redundant_fields(self):
+        return []
+
+    def preprocess(self, example):  # TODO perhaps we can use this for base
+        example[self.source_key] = example[self.original_source_key].strip()
+        example[self.target_key] = example[self.original_target_key].strip()
+
+    def prompt(self, example):
+        pass  # TODO
+        # prompt = get_prompt(self.dataset_name,
+        #                     self.template_name)
+        # row = prompt.apply(row)
+
+    def postprocess(self, example):  # TODO truncate source; note: overrides the abstract postprocess above
+        pass
 
 
 class ScrollsConfig(FSConfig):
     def __init__(self, **kwargs):
         super().__init__(**kwargs)
-        self.train_file = "train.jsonl"
-        self.validation_file = "validation.jsonl"
-        self.test_file = "test.jsonl"
 
-
-
-
-
+    @property
+    def original_source_key(self):
+        return "input"
+
+    @property
+    def original_target_key(self):
+        return "output"
+
+    @property
+    def train_file(self):
+        return "train.jsonl"
+
+    @property
+    def validation_file(self):
+        return "validation.jsonl"
+
+    @property
+    def test_file(self):
+        return "test.jsonl"
+
+    @property
+    def id_key(self):
+        return "pid"
+
+    @property
+    def redundant_fields(self):
+        return [self.original_source_key, self.original_target_key, "id"]
+
+
 
     def process_input(self, s):
         prefix = s.strip()
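
Note on the hunk above: `@abstractmethod` only blocks instantiation when the class's metaclass is `ABCMeta`, which `datasets.BuilderConfig` does not use, so `FSConfig`'s abstract members act as documentation rather than being enforced. A minimal sketch of the stricter variant (an assumption, not part of this commit):

    # Hypothetical: enforce FSConfig's abstract members via abc.ABC.
    # ABCMeta is a subclass of type, so mixing it with BuilderConfig works.
    import abc
    import datasets

    class StrictFSConfig(datasets.BuilderConfig, abc.ABC):
        @property
        @abc.abstractmethod
        def id_key(self):
            ...

    # StrictFSConfig(name="x")  # would raise TypeError: abstract class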
@@ -76,6 +145,7 @@ class ScrollsConfig(FSConfig):
 
 
 class ArxivConfig(FSConfig):
+    # TODO properties etc...
     def __init__(self, **kwargs):
         super().__init__(**kwargs)
         self.train_file = "train.txt"
@@ -115,13 +185,11 @@ def _truncate_prefix(prefix, suffix, max_source_length, tokenizer):
 class Fs(datasets.GeneratorBasedBuilder):
     """The SCROLLS benchmark."""
 
-    features = ["pid", "source", "target"]
     DEFAULT_WRITER_BATCH_SIZE = 1000  # because Narrative QA is a rather large dataset
     BUILDER_CONFIGS = [
         ScrollsConfig(
             name="summ_screen_fd_debug",
             description=_SUMM_SCREEN_DESCRIPTION,
-            features=features,
             data_url="https://huggingface.co/datasets/tau/fs/resolve/main/data/summ_screen_fd_debug.zip",
             citation=_SUMM_SCREEN_CITATION,
             url="https://github.com/mingdachen/SummScreen",
@@ -131,23 +199,21 @@ class Fs(datasets.GeneratorBasedBuilder):
         ScrollsConfig(
             name="gov_report",
             description=_GOV_REPORT_CITATION,
-            features=features,
             data_url="https://huggingface.co/datasets/tau/fs/resolve/main/data/gov_report.zip",
             citation=_GOV_REPORT_DESCRIPTION,
             url="https://gov-report-data.github.io/",
             max_source_length=None,
             tokenizer=None,
         ),
-        ArxivConfig(
-            name="arxiv_debug",
-            description=_ARXIV_CITATION,
-            features=features,
-            data_url="https://huggingface.co/datasets/tau/fs/resolve/main/data/arxiv_debug.zip",
-            citation=_ARXIV_DESCRIPTION,
-            url="https://github.com/armancohan/long-summarization",
-            max_source_length=None,
-            tokenizer=None,
-        ),
+        # ArxivConfig(
+        #     name="arxiv_debug",
+        #     description=_ARXIV_CITATION,
+        #     data_url="https://huggingface.co/datasets/tau/fs/resolve/main/data/arxiv_debug.zip",
+        #     citation=_ARXIV_DESCRIPTION,
+        #     url="https://github.com/armancohan/long-summarization",
+        #     max_source_length=None,
+        #     tokenizer=None,
+        # ),
     ]
 
     def _info(self):
@@ -173,18 +239,12 @@
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
                     "data_file": os.path.join(dl_dir, self.config.train_file),
-                    "split": datasets.Split.TRAIN,
-                    "max_source_length": self.config.max_source_length,
-                    "tokenizer": self.config.tokenizer,
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 gen_kwargs={
                     "data_file": os.path.join(dl_dir, self.config.validation_file),
-                    "split": datasets.Split.VALIDATION,
-                    "max_source_length": self.config.max_source_length,
-                    "tokenizer": self.config.tokenizer,
                 },
             ),
             datasets.SplitGenerator(
@@ -192,22 +252,19 @@
                 gen_kwargs={
                     "data_file": os.path.join(dl_dir, self.config.test_file) if data_files is None else data_files[
                         "test"],
-                    "split": datasets.Split.TEST,
-                    "max_source_length": self.config.max_source_length,
-                    "tokenizer": self.config.tokenizer,
                 },
             ),
         ]
 
-    def _generate_examples(self, data_file, split, max_source_length, tokenizer):
+    def _generate_examples(self, data_file):
         with open(data_file, encoding="utf-8") as f:
             for line in f:
                 row = json.loads(line)
 
                 row["pid"] = row[self.config.id_key]
-
-
-
+                self.config.preprocess(row)
+                self.config.prompt(row)
+                self.config.postprocess(row)
                 self.config.remove_redundant_fields(row)
                 yield row["pid"], row
 
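
The `prompt` hook that `_generate_examples` now calls is still a TODO; the commented-out `get_prompt(self.dataset_name, self.template_name)` suggests a promptsource-style template lookup. A sketch of how the hook might be filled in with the promptsource library, assuming hypothetical `dataset_name` and `template_name` config attributes that this commit does not define:

    # Hypothetical implementation of FSConfig.prompt using promptsource.
    # `dataset_name` and `template_name` are assumed attributes.
    from promptsource.templates import DatasetTemplates

    def prompt(self, example):
        templates = DatasetTemplates(self.dataset_name)
        template = templates[self.template_name]
        # apply() renders the Jinja template into [input_text, target_text]
        source, target = template.apply(example)
        example[self.source_key] = source
        example[self.target_key] = target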
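
For reference, a usage sketch, assuming this script is served as the `tau/fs` dataset repo (config names come from BUILDER_CONFIGS above; the feature columns follow `self.features`):

    import datasets

    # "gov_report" is one of the configs registered in BUILDER_CONFIGS.
    ds = datasets.load_dataset("tau/fs", "gov_report")
    example = ds["train"][0]
    print(example["pid"], example["source"][:80])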