yuvalkirstain committed
Commit • 2eacbde
Parent(s): e7bb7b0

pass tokenizer
fs.py CHANGED
@@ -6,6 +6,7 @@ import json
 import os
 import datasets
 from datasets import load_dataset
+from transformers import AutoTokenizer
 
 _FS_CITATION = """
 TBD
@@ -65,7 +66,7 @@ _GOV_REPORT_CITATION = r"""
 class FSConfig(datasets.BuilderConfig):
     """BuilderConfig for FS."""
 
-    def __init__(self, features, data_url, citation, url, max_source_length, **kwargs):
+    def __init__(self, features, data_url, citation, url, max_source_length, tokenizer, **kwargs):
         """BuilderConfig for FS.
         Args:
           features: `list[string]`, list of the features that will appear in the
@@ -84,6 +85,7 @@ class FSConfig(datasets.BuilderConfig):
         self.citation = citation
         self.url = url
         self.max_source_length = max_source_length
+        self.tokenizer = tokenizer
 
 
 class Fs(datasets.GeneratorBasedBuilder):
@@ -100,6 +102,7 @@ class Fs(datasets.GeneratorBasedBuilder):
             citation=_SUMM_SCREEN_CITATION,
             url="https://github.com/mingdachen/SummScreen",
             max_source_length=None,
+            tokenizer=None,
         ),
         FSConfig(
             name="gov_report",
@@ -109,6 +112,7 @@ class Fs(datasets.GeneratorBasedBuilder):
             citation=_GOV_REPORT_DESCRIPTION,
             url="https://gov-report-data.github.io/",
             max_source_length=None,
+            tokenizer=None,
         ),
     ]
 
@@ -137,6 +141,7 @@ class Fs(datasets.GeneratorBasedBuilder):
                     "data_file": os.path.join(dl_dir, "train.jsonl"),
                     "split": datasets.Split.TRAIN,
                     "max_source_length": self.config.max_source_length,
+                    "tokenizer": self.config.tokenizer,
                 },
             ),
             datasets.SplitGenerator(
@@ -145,6 +150,7 @@ class Fs(datasets.GeneratorBasedBuilder):
                     "data_file": os.path.join(dl_dir, "validation.jsonl"),
                     "split": datasets.Split.VALIDATION,
                     "max_source_length": self.config.max_source_length,
+                    "tokenizer": self.config.tokenizer,
                 },
             ),
             datasets.SplitGenerator(
@@ -153,11 +159,12 @@ class Fs(datasets.GeneratorBasedBuilder):
                     "data_file": os.path.join(dl_dir, "test.jsonl") if data_files is None else data_files["test"],
                     "split": datasets.Split.TEST,
                     "max_source_length": self.config.max_source_length,
+                    "tokenizer": self.config.tokenizer,
                 },
             ),
         ]
 
-    def _generate_examples(self, data_file, split, max_source_length):
+    def _generate_examples(self, data_file, split, max_source_length, tokenizer):
         with open(data_file, encoding="utf-8") as f:
             for line in f:
                 row = json.loads(line)
@@ -169,7 +176,8 @@ def _get_task_name_from_data_url(data_url):
 
 
 if __name__ == '__main__':
-
+    tokenizer = AutoTokenizer.from_pretrained("t5-base")
+    dataset = load_dataset("tau/fs", "summ_screen_fd", max_source_length=512, tokenizer=tokenizer)
     x = 5
     # builder = Scrolls("scrolls", "summ_screen_fd")
     # builder.download_and_prepare()
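
The hunks thread the tokenizer from FSConfig through the split generators' gen_kwargs into _generate_examples, but the visible body of _generate_examples never touches it, so the intended use is not shown in this commit. A plausible reading is tokenizer-side truncation of long inputs to max_source_length; the following is a minimal sketch of that idea only, assuming a JSONL field named "source" (hypothetical, since the diff does not show the row schema):

import json

# Sketch only: the commit does not show how the tokenizer is used.
# The "source" field name and the truncate-then-decode strategy are assumptions.
def _generate_examples(self, data_file, split, max_source_length, tokenizer):
    with open(data_file, encoding="utf-8") as f:
        for i, line in enumerate(f):
            row = json.loads(line)
            if tokenizer is not None and max_source_length is not None:
                # Keep only the first max_source_length tokens of the source text.
                ids = tokenizer(
                    row["source"],
                    truncation=True,
                    max_length=max_source_length,
                )["input_ids"]
                row["source"] = tokenizer.decode(ids, skip_special_tokens=True)
            yield i, row

The __main__ example works because load_dataset forwards unrecognized keyword arguments (max_source_length, tokenizer) to the builder's config. One caveat: datasets derives cache fingerprints from config values, so passing a live tokenizer object may defeat caching between runs; passing a tokenizer name string and loading it inside the builder is a common alternative.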
|