Commit e76320d (1 parent: f5d5532), committed by Uri

Update fs.py

Files changed (1): fs.py (+18, -17)
fs.py CHANGED
@@ -6,7 +6,7 @@ import json
 import os
 import datasets
 from datasets import load_dataset
-from transformers import AutoTokenizer
+from transformers import AutoTokenizer  # TODO comment out when getting rid of __main__:
 
 _FS_CITATION = """
 TBD
@@ -66,7 +66,7 @@ _GOV_REPORT_CITATION = r"""
 class FSConfig(datasets.BuilderConfig):
     """BuilderConfig for FS."""
 
-    def __init__(self, features, data_url, citation, url, max_source_length, tokenizer, **kwargs):
+    def __init__(self, features, data_url, citation, url, max_source_length, tokenizer, prompt, **kwargs):
         """BuilderConfig for FS.
         Args:
           features: `list[string]`, list of the features that will appear in the
@@ -86,12 +86,13 @@ class FSConfig(datasets.BuilderConfig):
         self.url = url
         self.max_source_length = max_source_length
         self.tokenizer = tokenizer
+        self.prompt = prompt
 
 
 class Fs(datasets.GeneratorBasedBuilder):
     """The SCROLLS benchmark."""
 
-    features = ["id", "pid", "input", "output", "source", "target"]
+    features = ["id", "pid", "input", "output"]
     DEFAULT_WRITER_BATCH_SIZE = 1000  # because Narrative QA is a rather large dataset
     BUILDER_CONFIGS = [
         FSConfig(
@@ -103,6 +104,7 @@ class Fs(datasets.GeneratorBasedBuilder):
             url="https://github.com/mingdachen/SummScreen",
             max_source_length=None,
             tokenizer=None,
+            prompt=None
         ),
         FSConfig(
             name="gov_report",
@@ -113,6 +115,7 @@ class Fs(datasets.GeneratorBasedBuilder):
             url="https://gov-report-data.github.io/",
             max_source_length=None,
             tokenizer=None,
+            prompt=None
         ),
     ]
 
@@ -141,6 +144,7 @@ class Fs(datasets.GeneratorBasedBuilder):
                     "data_file": os.path.join(dl_dir, "train.jsonl"),
                     "split": datasets.Split.TRAIN,
                     "max_source_length": self.config.max_source_length,
+                    "prompt": self.config.prompt,
                     "tokenizer": self.config.tokenizer,
                 },
             ),
@@ -150,6 +154,7 @@ class Fs(datasets.GeneratorBasedBuilder):
                     "data_file": os.path.join(dl_dir, "validation.jsonl"),
                     "split": datasets.Split.VALIDATION,
                     "max_source_length": self.config.max_source_length,
+                    "prompt": self.config.prompt,
                     "tokenizer": self.config.tokenizer,
                 },
             ),
@@ -159,6 +164,7 @@ class Fs(datasets.GeneratorBasedBuilder):
                     "data_file": os.path.join(dl_dir, "test.jsonl") if data_files is None else data_files["test"],
                     "split": datasets.Split.TEST,
                     "max_source_length": self.config.max_source_length,
+                    "prompt": self.config.prompt,
                     "tokenizer": self.config.tokenizer,
                 },
             ),
@@ -168,25 +174,21 @@ class Fs(datasets.GeneratorBasedBuilder):
         with open(data_file, encoding="utf-8") as f:
             for line in f:
                 row = json.loads(line)
-
-                prefix, addition = row["source"].strip(), "Summary:"
-                encoded_input = tokenizer.encode(prefix + "\n" + addition)
-                input_length = len(encoded_input)
-
+                prefix = row["source"].strip()
+                suffix = "\n" + self.config.prompt
+                encoded_input = tokenizer.encode(prefix + suffix)
+
                 n_truncations = 0
-                while input_length > max_source_length:
-                    overflow = input_length - max_source_length
+                while len(encoded_input) > max_source_length:
+                    overflow = len(encoded_input) - max_source_length
                     tokenized_prefix = tokenizer.encode(prefix, add_special_tokens=False)
                     if overflow > 0:
                         tokenized_prefix = tokenized_prefix[:-overflow]
                         n_truncations += 1
                     prefix = tokenizer.decode(tokenized_prefix, skip_special_tokens=False).strip()
-                    encoded_input = tokenizer.encode(prefix + "\n" + addition)
-                    input_length = len(encoded_input)
-
-                valid_input = prefix + "\n" + addition
+                    encoded_input = tokenizer.encode(prefix + suffix)
 
-                row["input"] = valid_input
+                row["input"] = prefix + suffix
                 row["output"] = row["target"]
 
                 yield row["pid"], row
@@ -198,10 +200,9 @@ def _get_task_name_from_data_url(data_url):
 
 if __name__ == '__main__':
     tokenizer = AutoTokenizer.from_pretrained("t5-base")
-    dataset = load_dataset("tau/fs", "summ_screen_fd", max_source_length=512, tokenizer=tokenizer)
+    dataset = load_dataset("tau/fs", name="summ_screen_fd", max_source_length=512, tokenizer=tokenizer, prompt="Summary:")
     x = 5
     # builder = Scrolls("scrolls", "summ_screen_fd")
     # builder.download_and_prepare()
     # dataset = builder.as_dataset("validation")
-    # x = 5
-
+    # x = 5
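For reference, a usage sketch that mirrors the updated __main__ block above. It assumes the standard datasets behavior of forwarding extra keyword arguments from load_dataset to the builder config (here FSConfig); depending on your datasets version you may also need trust_remote_code=True to run a script-based dataset. Split names follow the train/validation/test splits generated by the builder.

from datasets import load_dataset
from transformers import AutoTokenizer

# Tokenizer used by _generate_examples to measure and truncate each source.
tokenizer = AutoTokenizer.from_pretrained("t5-base")

dataset = load_dataset(
    "tau/fs",
    name="summ_screen_fd",
    max_source_length=512,   # token budget for the source, prompt included
    tokenizer=tokenizer,
    prompt="Summary:",       # appended to the (possibly truncated) source as "\nSummary:"
)

# Each example's "input" now ends with the configured prompt instead of a hard-coded "Summary:".
print(dataset["train"][0]["input"][-50:])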
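The truncation loop added to _generate_examples can also be read as a standalone helper; the sketch below pulls it out as a plain function (the name truncate_source is illustrative, not part of the script). The loop re-encodes after every trim because dropping overflow tokens from the prefix and decoding it back does not always shrink the combined prefix-plus-prompt encoding by exactly overflow tokens.

from transformers import AutoTokenizer

def truncate_source(source, prompt, tokenizer, max_source_length):
    # Trim the source so that tokenizer.encode(source + newline + prompt) fits the budget.
    prefix = source.strip()
    suffix = "\n" + prompt
    encoded_input = tokenizer.encode(prefix + suffix)
    n_truncations = 0
    while len(encoded_input) > max_source_length:
        overflow = len(encoded_input) - max_source_length
        tokenized_prefix = tokenizer.encode(prefix, add_special_tokens=False)
        if overflow > 0:
            tokenized_prefix = tokenized_prefix[:-overflow]
            n_truncations += 1
        prefix = tokenizer.decode(tokenized_prefix, skip_special_tokens=False).strip()
        encoded_input = tokenizer.encode(prefix + suffix)
    return prefix + suffix, n_truncations

tokenizer = AutoTokenizer.from_pretrained("t5-base")
text, n = truncate_source("a long transcript " * 1000, "Summary:", tokenizer, max_source_length=512)
assert len(tokenizer.encode(text)) <= 512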