Commit 3e38ab8
Parent(s): 2d36b82
starting reader from scrolls
fs.py (ADDED)
@@ -0,0 +1,171 @@
# coding=utf-8
# Lint as: python3
"""The SCROLLS benchmark."""

import json
import os

import datasets

_FS_CITATION = """
TBD
"""

_FS_DESCRIPTION = """
TBD
"""

_SUMM_SCREEN_DESCRIPTION = """
SummScreenFD (Chen et al., 2021) is a summarization dataset in the domain of TV shows (e.g. Friends, Game of Thrones).
Given a transcript of a specific episode, the goal is to produce the episode's recap.
The original dataset is divided into two complementary subsets, based on the source of its community-contributed transcripts.
For SCROLLS, we use the ForeverDreaming (FD) subset, as it incorporates 88 different shows,
making it a more diverse alternative to the TV MegaSite (TMS) subset, which has only 10 shows.
Community-authored recaps for the ForeverDreaming transcripts were collected from English Wikipedia and TVMaze."""

_GOV_REPORT_DESCRIPTION = """
GovReport (Huang et al., 2021) is a summarization dataset of reports addressing various national policy issues published by the
Congressional Research Service and the U.S. Government Accountability Office, where each document is paired with a hand-written executive summary.
The reports and their summaries are longer than their equivalents in other popular long-document summarization datasets;
for example, GovReport's documents are approximately 1.5 and 2.5 times longer than the documents in Arxiv and PubMed, respectively."""

_SUMM_SCREEN_CITATION = r"""
@misc{chen2021summscreen,
    title={SummScreen: A Dataset for Abstractive Screenplay Summarization},
    author={Mingda Chen and Zewei Chu and Sam Wiseman and Kevin Gimpel},
    year={2021},
    eprint={2104.07091},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}"""

_GOV_REPORT_CITATION = r"""
@inproceedings{huang-etal-2021-efficient,
    title = "Efficient Attentions for Long Document Summarization",
    author = "Huang, Luyang and
      Cao, Shuyang and
      Parulian, Nikolaus and
      Ji, Heng and
      Wang, Lu",
    booktitle = "Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
    month = jun,
    year = "2021",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.naacl-main.112",
    doi = "10.18653/v1/2021.naacl-main.112",
    pages = "1419--1436",
    abstract = "The quadratic computational and memory complexities of large Transformers have limited their scalability for long document summarization. In this paper, we propose Hepos, a novel efficient encoder-decoder attention with head-wise positional strides to effectively pinpoint salient information from the source. We further conduct a systematic study of existing efficient self-attentions. Combined with Hepos, we are able to process ten times more tokens than existing models that use full attentions. For evaluation, we present a new dataset, GovReport, with significantly longer documents and summaries. Results show that our models produce significantly higher ROUGE scores than competitive comparisons, including new state-of-the-art results on PubMed. Human evaluation also shows that our models generate more informative summaries with fewer unfaithful errors.",
}"""

class FSConfig(datasets.BuilderConfig):
    """BuilderConfig for SCROLLS."""

    def __init__(self, features, data_url, citation, url, **kwargs):
        """BuilderConfig for SCROLLS.

        Args:
            features: `list[string]`, list of the features that will appear in the
                feature dict. Should not include "label".
            data_url: `string`, url to download the zip file from.
            citation: `string`, citation for the data set.
            url: `string`, url for information about the data set.
            **kwargs: keyword arguments forwarded to super.
        """
        super(FSConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.features = features
        self.data_url = data_url
        self.citation = citation
        self.url = url


class Scrolls(datasets.GeneratorBasedBuilder):
    """The SCROLLS benchmark."""

    features = ["id", "pid", "input", "output"]
    DEFAULT_WRITER_BATCH_SIZE = 1000  # because Narrative QA is a rather large dataset
    BUILDER_CONFIGS = [
        FSConfig(
            name="summ_screen_fd",
            description=_SUMM_SCREEN_DESCRIPTION,
            features=features,
            data_url="https://scrolls-tau.s3.us-east-2.amazonaws.com/summ_screen_fd.zip",
            citation=_SUMM_SCREEN_CITATION,
            url="https://github.com/mingdachen/SummScreen",
        ),
        FSConfig(
            name="gov_report",
            description=_GOV_REPORT_DESCRIPTION,
            features=features,
            data_url="https://scrolls-tau.s3.us-east-2.amazonaws.com/gov_report.zip",
            citation=_GOV_REPORT_CITATION,
            url="https://gov-report-data.github.io/",
        ),
    ]

    def _info(self):
        features = {feature: datasets.Value("string") for feature in self.config.features}

        return datasets.DatasetInfo(
            description=_FS_DESCRIPTION + self.config.description,
            features=datasets.Features(features),
            homepage=self.config.url,
            citation=self.config.citation + "\n" + _FS_CITATION,
        )

    def _split_generators(self, dl_manager):
        dl_dir = dl_manager.download_and_extract(self.config.data_url)
        task_name = _get_task_name_from_data_url(self.config.data_url)
        dl_dir = os.path.join(dl_dir, task_name)

        # Allow user-supplied data files (currently only the test split) to
        # override the files extracted from the downloaded archive.
        data_files = {} if self.config.data_files is not None else None
        if data_files is not None:
            for split, paths in self.config.data_files.items():
                data_files[split] = paths[0]

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_file": os.path.join(dl_dir, "train.jsonl"),
                    "split": datasets.Split.TRAIN,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "data_file": os.path.join(dl_dir, "validation.jsonl"),
                    "split": datasets.Split.VALIDATION,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "data_file": os.path.join(dl_dir, "test.jsonl") if data_files is None else data_files["test"],
                    "split": datasets.Split.TEST,
                },
            ),
        ]

    def _generate_examples(self, data_file, split):
        with open(data_file, encoding="utf-8") as f:
            for line in f:
                row = json.loads(line)
                yield row["pid"], row
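    # Note: each line of {train,validation,test}.jsonl is expected to hold one JSON
    # object keyed by the declared features. An illustrative (hypothetical) record:
    #   {"id": "...", "pid": "...", "input": "<long source document>", "output": "<target summary>"}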


def _get_task_name_from_data_url(data_url):
    return data_url.split("/")[-1].split(".")[0]

if __name__ == "__main__":
    # Quick smoke test: build one config and load its validation split.
    builder = Scrolls("scrolls", "summ_screen_fd")
    builder.download_and_prepare()
    dataset = builder.as_dataset("validation")
    print(dataset)
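
For downstream use, the typical entry point would be the standard `datasets.load_dataset` API pointed at this script rather than instantiating the builder directly. A minimal sketch, assuming the file is saved locally as `fs.py` and the S3 data URLs above are reachable:

import datasets

# Load the gov_report config through the generic datasets API;
# "fs.py" is a local path here, which is an assumption of this sketch.
dataset = datasets.load_dataset("fs.py", "gov_report")

# Every feature is a flat string, per _info() above.
example = dataset["validation"][0]
print(example["id"], len(example["input"]), example["output"][:100])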