# fs.py: dataset loading script for the tau/fs dataset on the Hugging Face Hub
import json
import os
import datasets
from citations_and_descriptions import (
_SUMM_SCREEN_DESCRIPTION, _SUMM_SCREEN_CITATION,
_GOV_REPORT_CITATION, _GOV_REPORT_DESCRIPTION,
_ARXIV_CITATION, _ARXIV_DESCRIPTION,
_FS_DESCRIPTION, _FS_CITATION,
)
from configs.arxiv import ArxivConfig
from configs.scrolls import ScrollsConfig
from configs.super_glue import BoolQConfig


class FS(datasets.GeneratorBasedBuilder):
    """The FS benchmark."""

    DEFAULT_WRITER_BATCH_SIZE = 1000  # because NarrativeQA is a rather large dataset

    BUILDER_CONFIGS = [
        ScrollsConfig(
            additional_features=["id"],
            name="summ_screen_fd_debug",
            description=_SUMM_SCREEN_DESCRIPTION,
            data_url="https://huggingface.co/datasets/tau/fs/resolve/main/data/summ_screen_fd_debug.zip",
            citation=_SUMM_SCREEN_CITATION,
            url="https://github.com/mingdachen/SummScreen",
        ),
        ScrollsConfig(
            additional_features=["id"],
            name="gov_report",
            description=_GOV_REPORT_DESCRIPTION,
            data_url="https://huggingface.co/datasets/tau/fs/resolve/main/data/gov_report.zip",
            citation=_GOV_REPORT_CITATION,
            url="https://gov-report-data.github.io/",
        ),
        ArxivConfig(
            additional_features=["section_names", "sections"],
            name="arxiv_debug",
            description=_ARXIV_DESCRIPTION,
            data_url="https://huggingface.co/datasets/tau/fs/resolve/main/data/arxiv_debug.zip",
            citation=_ARXIV_CITATION,
            url="https://github.com/armancohan/long-summarization",
        ),
        BoolQConfig(
            additional_features=[],
            name="boolq",
            description="",  # TODO
            data_url="https://dl.fbaipublicfiles.com/glue/superglue/data/v2/BoolQ.zip",
            citation="",  # TODO: add the BoolQ citation
            url="",  # TODO
        ),
    ]

    def _info(self):
        features = {feature: datasets.Value("string") for feature in self.config.features}
        return datasets.DatasetInfo(
            description=_FS_DESCRIPTION + self.config.description,
            features=datasets.Features(features),
            homepage=self.config.url,
            citation=self.config.citation + "\n" + _FS_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download and extract the task archive, then point at the extracted task directory.
        dl_dir = dl_manager.download_and_extract(self.config.data_url)
        task_name = _get_task_name_from_data_url(self.config.data_url)
        dl_dir = os.path.join(dl_dir, task_name)

        # If explicit data_files were passed, keep only the first path per split.
        data_files = {} if self.config.data_files is not None else None
        if data_files is not None:
            for split, paths in self.config.data_files.items():
                data_files[split] = paths[0]

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_file": os.path.join(dl_dir, self.config.train_file),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "data_file": os.path.join(dl_dir, self.config.validation_file),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "data_file": os.path.join(dl_dir, self.config.test_file),
                },
            ),
        ]

    def _generate_examples(self, data_file):
        # Each data file is a JSON-lines file with one example per line.
        with open(data_file, encoding="utf-8") as f:
            for line in f:
                row = json.loads(line)
                # Apply config-specific post-processing in place.
                self.config.process(row)
                # Some splits may lack the target field; default it to None so the schema stays uniform.
                if self.config.target_key not in row:
                    row[self.config.target_key] = None
                yield row[self.config.id_key], row

def _get_task_name_from_data_url(data_url):
    """Return the archive basename without its extension, e.g. '.../gov_report.zip' -> 'gov_report'."""
    return data_url.split("/")[-1].split(".")[0]
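

# A minimal usage sketch, not part of the original script: it assumes this file lives in the
# tau/fs dataset repository on the Hub next to its `configs` and `citations_and_descriptions`
# modules, and that the "gov_report" config defined above is the one you want. Recent versions
# of the `datasets` library may additionally require `trust_remote_code=True` for script-based
# datasets like this one.
if __name__ == "__main__":
    dataset = datasets.load_dataset("tau/fs", "gov_report")
    print(dataset)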