Datasets:
tau
/

Modalities:
Text
Libraries:
Datasets
fs / fs.py
Uri's picture
Update fs.py
dc77d04
# coding=utf-8
# Lint as: python3
"""The SCROLLS benchmark."""
import json
import os
from abc import abstractmethod
from typing import Union, NoReturn
import datasets
class FewsionConfig(datasets.BuilderConfig):
    """Base BuilderConfig for Fewsion datasets.

    Subclasses declare which columns of the raw JSONL rows serve as the
    source text, target text, optional question, and example id.
    """

    def __init__(self, data_url, **kwargs):
        """Initialize a Fewsion dataset config.

        Args:
            data_url: `string`, url to download the zip file from.
            **kwargs: keyword arguments forwarded to super
                (e.g. `name`, `description`).
        """
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.data_url = data_url
        # Columns exposed as string features; the question column is only
        # included for QA-style tasks (subclasses may return None for it).
        self.features = [self.source_column_name, self.target_column_name, self.id_column_name]
        if self.question_column_name:
            self.features.append(self.question_column_name)

    @property
    @abstractmethod
    def source_column_name(self) -> str:
        """Name of the column holding the input document."""
        pass

    @property
    @abstractmethod
    def target_column_name(self) -> str:
        """Name of the column holding the reference output."""
        pass

    @property
    @abstractmethod
    def question_column_name(self) -> Union[str, None]:
        """Name of the question column, or None for non-QA tasks."""
        pass

    @property
    @abstractmethod
    def id_column_name(self) -> str:
        """Name of the column holding the unique example id."""
        pass
class ArxivConfig(FewsionConfig):
    """Config for the arXiv summarization task (article -> abstract)."""

    @property
    def source_column_name(self) -> str:
        return "article"

    @property
    def target_column_name(self) -> str:
        return "abstract"

    @property
    def question_column_name(self) -> Union[str, None]:
        # Summarization task: no question column.
        return None

    @property
    def id_column_name(self) -> str:
        return "article_id"
class RedditTIFUConfig(FewsionConfig):
    """Config for the Reddit TIFU summarization task (document -> tldr)."""

    @property
    def source_column_name(self) -> str:
        return "document"

    @property
    def target_column_name(self) -> str:
        return "tldr"

    @property
    def question_column_name(self) -> Union[str, None]:
        # Summarization task: no question column.
        return None

    @property
    def id_column_name(self) -> str:
        return "id"
class Fewsion(datasets.GeneratorBasedBuilder):
    """Builder for the Fewsion summarization datasets (arxiv, reddit_tifu)."""

    # Documents can be long, so keep writer batches modest to bound memory.
    DEFAULT_WRITER_BATCH_SIZE = 1000

    BUILDER_CONFIGS = [
        ArxivConfig(
            name="arxiv",
            data_url="https://fewsion.s3.us-east-2.amazonaws.com/arxiv.zip",
        ),
        RedditTIFUConfig(
            name="reddit_tifu",
            data_url="https://fewsion.s3.us-east-2.amazonaws.com/reddit_tifu.zip",
        ),
    ]

    def _info(self):
        # Every configured column is a plain string feature.
        features = {feature: datasets.Value("string") for feature in self.config.features}
        return datasets.DatasetInfo(
            description="",
            features=datasets.Features(features),
            homepage="",
            citation="",
        )

    def _split_generators(self, dl_manager):
        """Download the task archive and yield train/val/test split generators.

        User-supplied `data_files` (first path per split) may override the
        bundled test split; train and validation always come from the archive.
        """
        dl_dir = dl_manager.download_and_extract(self.config.data_url)
        task_name = _get_task_name_from_data_url(self.config.data_url)
        dl_dir = os.path.join(dl_dir, task_name)

        data_files = None
        if self.config.data_files is not None:
            # Keep only the first path per split, mirroring the original layout.
            data_files = {split: paths[0] for split, paths in self.config.data_files.items()}

        # Fall back to the bundled test file when no override is provided
        # (avoids a KeyError when data_files exists but lacks a "test" entry).
        test_file = os.path.join(dl_dir, "test.jsonl")
        if data_files is not None and "test" in data_files:
            test_file = data_files["test"]

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_file": os.path.join(dl_dir, "train.jsonl"),
                    "split": datasets.Split.TRAIN,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "data_file": os.path.join(dl_dir, "val.jsonl"),
                    "split": datasets.Split.VALIDATION,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "data_file": test_file,
                    "split": datasets.Split.TEST,
                },
            ),
        ]

    def _generate_examples(self, data_file, split):
        """Yield (id, row) pairs from a JSONL file, one JSON object per line."""
        with open(data_file, encoding="utf-8") as f:
            for line in f:
                row = json.loads(line)
                # The configured id column provides the unique example key.
                yield row[self.config.id_column_name], row
def _get_task_name_from_data_url(data_url):
return data_url.split("/")[-1].split(".")[0]