# coding=utf-8
# Lint as: python3
"""The SCROLLS benchmark."""
import json
import os
import datasets
from datasets import load_dataset
from transformers import AutoTokenizer  # TODO: comment out when removing the __main__ smoke test below

_FS_CITATION = """
TBD
"""
_FS_DESCRIPTION = """
TBD
"""
_SUMM_SCREEN_DESCRIPTION = """
SummScreenFD (Chen et al., 2021) is a summarization dataset in the domain of TV shows (e.g. Friends, Game of Thrones).
Given a transcript of a specific episode, the goal is to produce the episode's recap.
The original dataset is divided into two complementary subsets, based on the source of its community contributed transcripts.
For SCROLLS, we use the ForeverDreaming (FD) subset, as it incorporates 88 different shows,
making it a more diverse alternative to the TV MegaSite (TMS) subset, which has only 10 shows.
Community-authored recaps for the ForeverDreaming transcripts were collected from English Wikipedia and TVMaze."""
_GOV_REPORT_DESCRIPTION = """
GovReport (Huang et al., 2021) is a summarization dataset of reports addressing various national policy issues published by the
Congressional Research Service and the U.S. Government Accountability Office, where each document is paired with a hand-written executive summary.
The reports and their summaries are longer than their equivalents in other popular long-document summarization datasets;
for example, GovReport's documents are approximately 1.5 and 2.5 times longer than the documents in Arxiv and PubMed, respectively."""
_SUMM_SCREEN_CITATION = r"""
@misc{chen2021summscreen,
title={SummScreen: A Dataset for Abstractive Screenplay Summarization},
author={Mingda Chen and Zewei Chu and Sam Wiseman and Kevin Gimpel},
year={2021},
eprint={2104.07091},
archivePrefix={arXiv},
primaryClass={cs.CL}
}"""
_GOV_REPORT_CITATION = r"""
@inproceedings{huang-etal-2021-efficient,
title = "Efficient Attentions for Long Document Summarization",
author = "Huang, Luyang and
Cao, Shuyang and
Parulian, Nikolaus and
Ji, Heng and
Wang, Lu",
booktitle = "Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
month = jun,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.naacl-main.112",
doi = "10.18653/v1/2021.naacl-main.112",
pages = "1419--1436",
abstract = "The quadratic computational and memory complexities of large Transformers have limited their scalability for long document summarization. In this paper, we propose Hepos, a novel efficient encoder-decoder attention with head-wise positional strides to effectively pinpoint salient information from the source. We further conduct a systematic study of existing efficient self-attentions. Combined with Hepos, we are able to process ten times more tokens than existing models that use full attentions. For evaluation, we present a new dataset, GovReport, with significantly longer documents and summaries. Results show that our models produce significantly higher ROUGE scores than competitive comparisons, including new state-of-the-art results on PubMed. Human evaluation also shows that our models generate more informative summaries with fewer unfaithful errors.",
}"""
class FSConfig(datasets.BuilderConfig):
"""BuilderConfig for FS."""
    def __init__(self, features, data_url, citation, url, max_source_length, tokenizer, prompt, **kwargs):
        """BuilderConfig for FS.
        Args:
          features: `list[string]`, list of the features that will appear in the
            feature dict. Should not include "label".
          data_url: `string`, url to download the zip file from.
          citation: `string`, citation for the data set.
          url: `string`, url for information about the data set.
          max_source_length: `int`, maximum number of tokens in the tokenized input;
            longer documents are truncated to fit.
          tokenizer: tokenizer used to measure and truncate the input.
          prompt: `string`, task prompt appended to each source document.
          **kwargs: keyword arguments forwarded to super.
        """
super(FSConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
self.features = features
self.data_url = data_url
self.citation = citation
self.url = url
self.max_source_length = max_source_length
self.tokenizer = tokenizer
self.prompt = prompt
class Fs(datasets.GeneratorBasedBuilder):
"""The SCROLLS benchmark."""
features = ["id", "pid", "input", "output"]
    DEFAULT_WRITER_BATCH_SIZE = 1000  # long-document examples are large, so write to Arrow in smaller batches
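    # One FSConfig per task. max_source_length, tokenizer, and prompt default to
    # None here and are expected to be supplied by the caller via load_dataset(...).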
BUILDER_CONFIGS = [
FSConfig(
name="summ_screen_fd",
description=_SUMM_SCREEN_DESCRIPTION,
features=features,
data_url="https://huggingface.co/datasets/tau/fs/resolve/main/data/summ_screen_fd.zip",
citation=_SUMM_SCREEN_CITATION,
url="https://github.com/mingdachen/SummScreen",
max_source_length=None,
tokenizer=None,
prompt=None
),
FSConfig(
name="gov_report",
            description=_GOV_REPORT_DESCRIPTION,
features=features,
data_url="https://huggingface.co/datasets/tau/fs/resolve/main/data/gov_report.zip",
            citation=_GOV_REPORT_CITATION,
url="https://gov-report-data.github.io/",
max_source_length=None,
tokenizer=None,
prompt=None
),
]
def _info(self):
features = {feature: datasets.Value("string") for feature in self.config.features}
return datasets.DatasetInfo(
description=_FS_DESCRIPTION + self.config.description,
features=datasets.Features(features),
homepage=self.config.url,
citation=self.config.citation + "\n" + _FS_CITATION,
)
def _split_generators(self, dl_manager):
dl_dir = dl_manager.download_and_extract(self.config.data_url)
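        # If the caller supplied data_files, keep only the first path per split so
        # a custom file can override the bundled one (used for the test split below).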
        data_files = None
        if self.config.data_files is not None:
            data_files = {split: paths[0] for split, paths in self.config.data_files.items()}
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"data_file": os.path.join(dl_dir, "train.jsonl"),
"split": datasets.Split.TRAIN,
"max_source_length": self.config.max_source_length,
"prompt": self.config.prompt,
"tokenizer": self.config.tokenizer,
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"data_file": os.path.join(dl_dir, "validation.jsonl"),
"split": datasets.Split.VALIDATION,
"max_source_length": self.config.max_source_length,
"prompt": self.config.prompt,
"tokenizer": self.config.tokenizer,
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"data_file": os.path.join(dl_dir, "test.jsonl") if data_files is None else data_files["test"],
"split": datasets.Split.TEST,
"max_source_length": self.config.max_source_length,
"prompt": self.config.prompt,
"tokenizer": self.config.tokenizer,
},
),
]
    def _generate_examples(self, data_file, split, max_source_length, prompt, tokenizer):
with open(data_file, encoding="utf-8") as f:
for line in f:
row = json.loads(line)
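                # The source document is the truncatable prefix; the task prompt is a
                # fixed suffix that must always survive truncation.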
prefix = row["source"].strip()
                suffix = "\n" + prompt
encoded_input = tokenizer.encode(prefix + suffix)
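                # Trim whole tokens off the end of the document until the prompt-suffixed
                # input fits in max_source_length; re-encode after each trim because
                # tokens can merge differently at the new boundary.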
                n_truncations = 0
                while len(encoded_input) > max_source_length:
                    overflow = len(encoded_input) - max_source_length
                    tokenized_prefix = tokenizer.encode(prefix, add_special_tokens=False)
                    tokenized_prefix = tokenized_prefix[:-overflow]
                    n_truncations += 1
                    prefix = tokenizer.decode(tokenized_prefix, skip_special_tokens=False).strip()
                    encoded_input = tokenizer.encode(prefix + suffix)
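                # Emit the final (possibly truncated) input and the gold target under
                # the flat schema declared in _info.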
row["input"] = prefix + suffix
row["output"] = row["target"]
yield row["pid"], row
def _get_task_name_from_data_url(data_url):
return data_url.split("/")[-1].split(".")[0]
if __name__ == '__main__':
    # Minimal smoke test: load one config end-to-end with a real tokenizer.
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    dataset = load_dataset("tau/fs", name="summ_screen_fd", max_source_length=512, tokenizer=tokenizer, prompt="Summary:")
    print(dataset)