# coding=utf-8
# Lint as: python3
"""The SCROLLS benchmark."""
import json
import os
import datasets
from citations_and_descriptions import (
_SUMM_SCREEN_DESCRIPTION, _SUMM_SCREEN_CITATION,
_GOV_REPORT_CITATION, _GOV_REPORT_DESCRIPTION,
_ARXIV_CITATION, _ARXIV_DESCRIPTION,
_FS_DESCRIPTION, _FS_CITATION
)
class FSConfig(datasets.BuilderConfig):
"""BuilderConfig for FS."""
    def __init__(self, features, data_url, citation, url, max_source_length, tokenizer, **kwargs):
        """BuilderConfig for FS.
        Args:
          features: `list[string]`, list of the features that will appear in the
            feature dict. Should not include "label".
          data_url: `string`, url to download the zip file from.
          citation: `string`, citation for the data set.
          url: `string`, url for information about the data set.
          max_source_length: `int`, maximum number of tokens allowed in the tokenized
            source (prompt suffix included); longer inputs are truncated.
          tokenizer: tokenizer used to measure and truncate the tokenized source.
          **kwargs: keyword arguments forwarded to super.
        """
super(FSConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
self.features = features
self.data_url = data_url
self.citation = citation
self.url = url
self.max_source_length = max_source_length
self.tokenizer = tokenizer
self.prompt = None
self.input_key = None
self.output_key = None
self.redundant_fields = []
self.train_file = "train.jsonl"
self.validation_file = "validation.jsonl"
self.test_file = "test.jsonl"
def remove_redundant_fields(self, example):
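        # Drop raw fields that have already been folded into the canonical
        # "pid"/"source"/"target" columns.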
for field in self.redundant_fields:
del example[field]
def process_input(self, s):
return s.strip()
def process_output(self, s):
return s
class ScrollsConfig(FSConfig):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.train_file = "train.jsonl"
self.validation_file = "validation.jsonl"
self.test_file = "test.jsonl"
self.input_key = "input" # TODO I think that we should keep the original fields
self.output_key = "output"
self.id_key = "pid"
self.redundant_fields = [self.input_key, self.output_key, "id"]
def process_input(self, s):
prefix = s.strip()
suffix = "\nSummarize the above:"
prefix = _truncate_prefix(prefix, suffix, self.max_source_length, self.tokenizer)
return prefix + suffix
class ArxivConfig(FSConfig):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.train_file = "train.txt"
self.validation_file = "val.txt"
self.test_file = "test.txt"
self.input_key = "article_text"
self.output_key = "abstract_text"
self.id_key = "article_id"
self.redundant_fields = [self.input_key, self.output_key, self.id_key, 'labels', 'section_names', 'sections']
def process_input(self, s):
prefix = ' '.join(s)
suffix = "\nSummarize the above:"
prefix = _truncate_prefix(prefix, suffix, self.max_source_length, self.tokenizer)
return prefix + suffix
    def process_output(self, s):
        # TODO: should the "<S>" and "</S>" sentence tags be removed here?
        return ' '.join(s).replace("<S>", "").replace("</S>", "")
def _truncate_prefix(prefix, suffix, max_source_length, tokenizer):
    # Trim tokens off the end of the prefix until prefix + suffix fits within max_source_length.
    encoded_input = tokenizer.encode(prefix + suffix)
    while len(encoded_input) > max_source_length:
        overflow = len(encoded_input) - max_source_length
        tokenized_prefix = tokenizer.encode(prefix, add_special_tokens=False)
        if not tokenized_prefix:
            # The suffix alone already exceeds max_source_length; nothing left to trim.
            break
        tokenized_prefix = tokenized_prefix[:-overflow]
        prefix = tokenizer.decode(tokenized_prefix, skip_special_tokens=False).strip()
        encoded_input = tokenizer.encode(prefix + suffix)
    return prefix
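# Example (a sketch, not executed by the loading script): with a Hugging Face
# tokenizer, e.g. `transformers.AutoTokenizer.from_pretrained("t5-small")`,
#   _truncate_prefix(document, "\nSummarize the above:", 512, tokenizer)
# returns `document` cut down so that it plus the suffix encodes to at most
# 512 tokens; the suffix itself is never shortened.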
class Fs(datasets.GeneratorBasedBuilder):
"""The SCROLLS benchmark."""
features = ["pid", "source", "target"]
    DEFAULT_WRITER_BATCH_SIZE = 1000  # examples are long documents, so keep the Arrow writer's batches small
BUILDER_CONFIGS = [
ScrollsConfig(
name="summ_screen_fd_debug",
description=_SUMM_SCREEN_DESCRIPTION,
features=features,
data_url="https://huggingface.co/datasets/tau/fs/resolve/main/data/summ_screen_fd_debug.zip",
citation=_SUMM_SCREEN_CITATION,
url="https://github.com/mingdachen/SummScreen",
max_source_length=None,
tokenizer=None,
),
ScrollsConfig(
name="gov_report",
            description=_GOV_REPORT_DESCRIPTION,
features=features,
data_url="https://huggingface.co/datasets/tau/fs/resolve/main/data/gov_report.zip",
            citation=_GOV_REPORT_CITATION,
url="https://gov-report-data.github.io/",
max_source_length=None,
tokenizer=None,
),
ArxivConfig(
name="arxiv_debug",
            description=_ARXIV_DESCRIPTION,
features=features,
data_url="https://huggingface.co/datasets/tau/fs/resolve/main/data/arxiv_debug.zip",
            citation=_ARXIV_CITATION,
url="https://github.com/armancohan/long-summarization",
max_source_length=None,
tokenizer=None,
),
]
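    # max_source_length and tokenizer are left as None in the configs above; they are
    # meant to be supplied at load time (e.g. as `load_dataset(...)` config kwargs,
    # assuming the installed `datasets` version forwards them onto the selected config;
    # see the usage sketch at the end of this file).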
def _info(self):
features = {feature: datasets.Value("string") for feature in self.config.features}
return datasets.DatasetInfo(
description=_FS_DESCRIPTION + self.config.description,
features=datasets.Features(features),
homepage=self.config.url,
citation=self.config.citation + "\n" + _FS_CITATION,
)
def _split_generators(self, dl_manager):
dl_dir = dl_manager.download_and_extract(self.config.data_url)
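        # If explicit data_files were passed to the config, keep the first path per split;
        # below, only the test split falls back to these overrides.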
data_files = {} if self.config.data_files is not None else None
if data_files is not None:
for split, paths in self.config.data_files.items():
data_files[split] = paths[0]
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"data_file": os.path.join(dl_dir, self.config.train_file),
"split": datasets.Split.TRAIN,
"max_source_length": self.config.max_source_length,
"tokenizer": self.config.tokenizer,
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"data_file": os.path.join(dl_dir, self.config.validation_file),
"split": datasets.Split.VALIDATION,
"max_source_length": self.config.max_source_length,
"tokenizer": self.config.tokenizer,
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"data_file": os.path.join(dl_dir, self.config.test_file) if data_files is None else data_files[
"test"],
"split": datasets.Split.TEST,
"max_source_length": self.config.max_source_length,
"tokenizer": self.config.tokenizer,
},
),
]
def _generate_examples(self, data_file, split, max_source_length, tokenizer):
with open(data_file, encoding="utf-8") as f:
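            # Every file is read as JSON lines; the arXiv splits use a .txt extension,
            # but each line is still a JSON object.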
for line in f:
row = json.loads(line)
row["pid"] = row[self.config.id_key]
row["source"] = self.config.process_input(row[self.config.input_key])
row["target"] = self.config.process_output(row[self.config.output_key])
self.config.remove_redundant_fields(row)
yield row["pid"], row
def _get_task_name_from_data_url(data_url):
return data_url.split("/")[-1].split(".")[0]
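# Usage sketch (assumes this file serves as a Hugging Face `datasets` loading script;
# the tokenizer checkpoint and local path below are only examples):
#
#   from transformers import AutoTokenizer
#   import datasets
#
#   tokenizer = AutoTokenizer.from_pretrained("t5-small")
#   dataset = datasets.load_dataset(
#       "path/to/fs.py",              # this loading script
#       "summ_screen_fd_debug",       # one of the BUILDER_CONFIGS above
#       max_source_length=512,
#       tokenizer=tokenizer,
#   )
#   print(dataset["train"][0]["source"][:200])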