Tasks: Multiple Choice
Modalities: Text
Sub-tasks: multiple-choice-qa
Languages: English
Size: 1K - 10K
ArXiv: 1704.04683
License:
File size: 2,242 Bytes
import json
import datasets
_CITATION = """\
@article{lai2017large,
title={RACE: Large-scale ReAding Comprehension Dataset From Examinations},
author={Lai, Guokun and Xie, Qizhe and Liu, Hanxiao and Yang, Yiming and Hovy, Eduard},
journal={arXiv preprint arXiv:1704.04683},
year={2017}
}
"""
_DESCRIPTION = """\
RACE is a large-scale reading comprehension dataset with more than 28,000 passages and nearly 100,000 questions. The
dataset is collected from English examinations in China, which are designed for middle school and high school students.
The dataset can serve as training and test sets for machine reading comprehension.
"""
_BASE_URL = "https://huggingface.co/datasets/bfattori/race/raw/main"
_URLS = {
"high": f"{_BASE_URL}/race_high_test.jsonl",
}
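# Only the high-school test set is hosted at the URL above. Each line of the
# JSONL file is expected to be one record carrying the two fields read in
# _generate_examples below, e.g. (shape shown for illustration, not verified):
#   {"article": "<passage text>", "problems": "<questions/options/answers>"}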
class Race(datasets.GeneratorBasedBuilder):
"""ReAding Comprehension Dataset From Examination dataset from CMU"""
VERSION = datasets.Version("0.1.0")
BUILDER_CONFIGS = [
datasets.BuilderConfig(name="high", description="Exams designed for high school students", version=VERSION),
]
def _info(self):
features = datasets.Features(
{
"article": datasets.Value("string"),
"problems": datasets.Value("string"),
}
)
return datasets.DatasetInfo(
description=f"{_DESCRIPTION}\n{self.config.description}",
features=features,
citation=_CITATION,
)
    def _split_generators(self, dl_manager):
        urls = _URLS[self.config.name]
        # For a single URL, download_and_extract returns the local file path.
        data_dir = dl_manager.download_and_extract(urls)
return [
datasets.SplitGenerator(
name=datasets.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": data_dir,
"split": datasets.Split.TEST,
},
),
]
    def _generate_examples(self, filepath, split):
        # `split` is passed through gen_kwargs but unused here: each hosted
        # file corresponds to exactly one split.
        with open(filepath, encoding="utf-8") as f:
            for key, row in enumerate(f):
                data = json.loads(row)
                yield key, {"article": data["article"], "problems": data["problems"]}
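
# A minimal usage sketch (an assumption: this file is saved locally as race.py
# and the installed `datasets` version still supports script-based loading).
# The guard keeps this from running when `datasets` imports the script itself.
if __name__ == "__main__":
    ds = datasets.load_dataset("race.py", "high", split="test")
    print(ds[0]["article"][:200])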