# coding=utf-8
# Lint as: python3
"""The SCROLLS benchmark."""
import json
import os
from abc import abstractmethod

import datasets


class FewsionConfig(datasets.BuilderConfig):
    """BuilderConfig for Fewsion."""

    def __init__(self, data_url, **kwargs):
        """BuilderConfig for Fewsion.

        Args:
          data_url: `string`, url to download the zip file from.
          **kwargs: keyword arguments forwarded to super.
        """
        super(FewsionConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.data_url = data_url
        # The feature list is built from the column names declared by the concrete config;
        # a question column is only included for tasks that define one.
        self.features = [self.source_column_name, self.target_column_name, self.id_column_name]
        if self.question_column_name:
            self.features.append(self.question_column_name)

    @property
    @abstractmethod
    def source_column_name(self) -> str:
        pass

    @property
    @abstractmethod
    def target_column_name(self) -> str:
        pass

    @property
    @abstractmethod
    def question_column_name(self) -> str:
        pass

    @property
    @abstractmethod
    def id_column_name(self) -> str:
        pass

class ArxivConfig(FewsionConfig):
    @property
    def source_column_name(self) -> str:
        return "article"

    @property
    def target_column_name(self) -> str:
        return "abstract"

    @property
    def question_column_name(self) -> str:
        # Summarization task: arXiv has no question column.
        return None

    @property
    def id_column_name(self) -> str:
        return "article_id"

class Fewsion(datasets.GeneratorBasedBuilder):
    DEFAULT_WRITER_BATCH_SIZE = 1000  # because Narrative QA is a rather large dataset

    BUILDER_CONFIGS = [
        ArxivConfig(
            name="arxiv",
            data_url="https://fewsion.s3.us-east-2.amazonaws.com/arxiv.zip",
        )
    ]

    def _info(self):
        features = {feature: datasets.Value("string") for feature in self.config.features}
        return datasets.DatasetInfo(
            description="",
            features=datasets.Features(features),
            homepage="",
            citation="",
        )

    def _split_generators(self, dl_manager):
        dl_dir = dl_manager.download_and_extract(self.config.data_url)
        task_name = _get_task_name_from_data_url(self.config.data_url)
        dl_dir = os.path.join(dl_dir, task_name)

        # User-supplied data_files take precedence over the downloaded archive;
        # only the test split is actually looked up from this mapping below.
        data_files = {} if self.config.data_files is not None else None
        if data_files is not None:
            for split, paths in self.config.data_files.items():
                data_files[split] = paths[0]

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_file": os.path.join(dl_dir, "train.jsonl"),
                    "split": datasets.Split.TRAIN,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "data_file": os.path.join(dl_dir, "val.jsonl"),
                    "split": datasets.Split.VALIDATION,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "data_file": os.path.join(dl_dir, "test.jsonl") if data_files is None else data_files["test"],
                    "split": datasets.Split.TEST,
                },
            ),
        ]

    def _generate_examples(self, data_file, split):
        # Each line of the jsonl file is a single example, keyed by its id column.
        with open(data_file, encoding="utf-8") as f:
            for line in f:
                row = json.loads(line)
                yield row[self.config.id_column_name], row

def _get_task_name_from_data_url(data_url):
    # e.g. "https://fewsion.s3.us-east-2.amazonaws.com/arxiv.zip" -> "arxiv"
    return data_url.split("/")[-1].split(".")[0]
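
# Example usage (a minimal sketch, not part of the loading script): assuming this
# file is saved locally as "fewsion.py", the dataset can be loaded through the
# standard `datasets` API. Recent versions of `datasets` may additionally require
# trust_remote_code=True for script-based datasets.
#
#   from datasets import load_dataset
#
#   fewsion = load_dataset("./fewsion.py", "arxiv")
#   example = fewsion["train"][0]
#   print(example["article_id"], example["abstract"][:100])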