"""The SCROLLS benchmark.""" |
|
|
|
import json |
|
import os |
|
import datasets |
|
from datasets import load_dataset |
|
from transformers import AutoTokenizer |
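
# This loading script exposes two configurations, "summ_screen_fd" and "gov_report".
# For every example, the source document is tokenized, truncated so that the source
# plus the prompt fit within max_source_length tokens, and emitted as "input"
# alongside the reference "output".
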
_FS_CITATION = """
TBD
"""

_FS_DESCRIPTION = """
TBD
"""

_SUMM_SCREEN_DESCRIPTION = """
SummScreenFD (Chen et al., 2021) is a summarization dataset in the domain of TV shows (e.g. Friends, Game of Thrones).
Given a transcript of a specific episode, the goal is to produce the episode's recap.
The original dataset is divided into two complementary subsets, based on the source of its community-contributed transcripts.
For SCROLLS, we use the ForeverDreaming (FD) subset, as it incorporates 88 different shows,
making it a more diverse alternative to the TV MegaSite (TMS) subset, which has only 10 shows.
Community-authored recaps for the ForeverDreaming transcripts were collected from English Wikipedia and TVMaze."""

_GOV_REPORT_DESCRIPTION = """
GovReport (Huang et al., 2021) is a summarization dataset of reports addressing various national policy issues published by the
Congressional Research Service and the U.S. Government Accountability Office, where each document is paired with a hand-written executive summary.
The reports and their summaries are longer than their equivalents in other popular long-document summarization datasets;
for example, GovReport's documents are approximately 1.5 and 2.5 times longer than the documents in Arxiv and PubMed, respectively."""

_SUMM_SCREEN_CITATION = r"""
@misc{chen2021summscreen,
      title={SummScreen: A Dataset for Abstractive Screenplay Summarization},
      author={Mingda Chen and Zewei Chu and Sam Wiseman and Kevin Gimpel},
      year={2021},
      eprint={2104.07091},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}"""

_GOV_REPORT_CITATION = r"""
@inproceedings{huang-etal-2021-efficient,
    title = "Efficient Attentions for Long Document Summarization",
    author = "Huang, Luyang  and
      Cao, Shuyang  and
      Parulian, Nikolaus  and
      Ji, Heng  and
      Wang, Lu",
    booktitle = "Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
    month = jun,
    year = "2021",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.naacl-main.112",
    doi = "10.18653/v1/2021.naacl-main.112",
    pages = "1419--1436",
    abstract = "The quadratic computational and memory complexities of large Transformers have limited their scalability for long document summarization. In this paper, we propose Hepos, a novel efficient encoder-decoder attention with head-wise positional strides to effectively pinpoint salient information from the source. We further conduct a systematic study of existing efficient self-attentions. Combined with Hepos, we are able to process ten times more tokens than existing models that use full attentions. For evaluation, we present a new dataset, GovReport, with significantly longer documents and summaries. Results show that our models produce significantly higher ROUGE scores than competitive comparisons, including new state-of-the-art results on PubMed. Human evaluation also shows that our models generate more informative summaries with fewer unfaithful errors.",
}"""


class FSConfig(datasets.BuilderConfig):
    """BuilderConfig for FS."""

    def __init__(self, features, data_url, citation, url, max_source_length, tokenizer, prompt, **kwargs):
        """BuilderConfig for FS.

        Args:
          features: `list[string]`, list of the features that will appear in the
            feature dict. Should not include "label".
          data_url: `string`, url to download the zip file from.
          citation: `string`, citation for the data set.
          url: `string`, url for information about the data set.
          max_source_length: `int`, maximum length, in tokens, of the tokenized
            input (source plus prompt); longer sources are truncated to fit.
          tokenizer: tokenizer used to measure the input length and truncate the source.
          prompt: `string`, prompt appended to the (possibly truncated) source,
            separated by a newline.
          **kwargs: keyword arguments forwarded to super.
        """
        super(FSConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.features = features
        self.data_url = data_url
        self.citation = citation
        self.url = url
        self.max_source_length = max_source_length
        self.tokenizer = tokenizer
        self.prompt = prompt


class Fs(datasets.GeneratorBasedBuilder):
    """The SCROLLS benchmark."""

    features = ["id", "pid", "input", "output"]
    DEFAULT_WRITER_BATCH_SIZE = 1000
    BUILDER_CONFIGS = [
        FSConfig(
            name="summ_screen_fd",
            description=_SUMM_SCREEN_DESCRIPTION,
            features=features,
            data_url="https://huggingface.co/datasets/tau/fs/resolve/main/data/summ_screen_fd.zip",
            citation=_SUMM_SCREEN_CITATION,
            url="https://github.com/mingdachen/SummScreen",
            max_source_length=None,
            tokenizer=None,
            prompt=None,
        ),
        FSConfig(
            name="gov_report",
            description=_GOV_REPORT_DESCRIPTION,
            features=features,
            data_url="https://huggingface.co/datasets/tau/fs/resolve/main/data/gov_report.zip",
            citation=_GOV_REPORT_CITATION,
            url="https://gov-report-data.github.io/",
            max_source_length=None,
            tokenizer=None,
            prompt=None,
        ),
    ]
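
    # Both configs share the same feature schema. max_source_length, tokenizer, and
    # prompt are left as None above; they are expected to be supplied by the caller
    # at load time (see the load_dataset(...) example at the bottom of this file).
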
    def _info(self):
        features = {feature: datasets.Value("string") for feature in self.config.features}

        return datasets.DatasetInfo(
            description=_FS_DESCRIPTION + self.config.description,
            features=datasets.Features(features),
            homepage=self.config.url,
            citation=self.config.citation + "\n" + _FS_CITATION,
        )

    def _split_generators(self, dl_manager):
        dl_dir = dl_manager.download_and_extract(self.config.data_url)

        # Optionally allow split files to be overridden via `data_files`
        # (only the test file is actually overridden below).
        data_files = None
        if self.config.data_files is not None:
            data_files = {split: paths[0] for split, paths in self.config.data_files.items()}

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_file": os.path.join(dl_dir, "train.jsonl"),
                    "split": datasets.Split.TRAIN,
                    "max_source_length": self.config.max_source_length,
                    "prompt": self.config.prompt,
                    "tokenizer": self.config.tokenizer,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "data_file": os.path.join(dl_dir, "validation.jsonl"),
                    "split": datasets.Split.VALIDATION,
                    "max_source_length": self.config.max_source_length,
                    "prompt": self.config.prompt,
                    "tokenizer": self.config.tokenizer,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "data_file": os.path.join(dl_dir, "test.jsonl") if data_files is None else data_files["test"],
                    "split": datasets.Split.TEST,
                    "max_source_length": self.config.max_source_length,
                    "prompt": self.config.prompt,
                    "tokenizer": self.config.tokenizer,
                },
            ),
        ]

    def _generate_examples(self, data_file, split, max_source_length, tokenizer, prompt):
        with open(data_file, encoding="utf-8") as f:
            for line in f:
                row = json.loads(line)
                prefix = row["source"].strip()
                suffix = "\n" + prompt
                encoded_input = tokenizer.encode(prefix + suffix)

                # If the tokenized (source + prompt) exceeds max_source_length,
                # repeatedly chop the overflow off the end of the source and
                # re-encode until the full input fits.
                n_truncations = 0
                while len(encoded_input) > max_source_length:
                    overflow = len(encoded_input) - max_source_length
                    tokenized_prefix = tokenizer.encode(prefix, add_special_tokens=False)
                    if overflow > 0:
                        tokenized_prefix = tokenized_prefix[:-overflow]
                        n_truncations += 1
                    prefix = tokenizer.decode(tokenized_prefix, skip_special_tokens=False).strip()
                    encoded_input = tokenizer.encode(prefix + suffix)

                row["input"] = prefix + suffix
                row["output"] = row["target"]

                yield row["pid"], row


def _get_task_name_from_data_url(data_url):
    return data_url.split("/")[-1].split(".")[0]
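
# For example, _get_task_name_from_data_url("https://huggingface.co/datasets/tau/fs/resolve/main/data/gov_report.zip")
# returns "gov_report".
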
if __name__ == "__main__":
    # Minimal example: build the summ_screen_fd config with a T5 tokenizer,
    # truncating each input to 512 tokens (prompt included).
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    dataset = load_dataset("tau/fs", name="summ_screen_fd", max_source_length=512, tokenizer=tokenizer, prompt="Summary:")
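    # Quick sanity check (illustrative; assumes the downloaded data provides a
    # "train" split): inspect one processed example to confirm the input ends with
    # the "\nSummary:" prompt and that a reference summary is attached.
    example = dataset["train"][0]
    print(example["input"][-200:])
    print(example["output"][:200])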