Dataset: bfattori/race_grouped
Modalities: Text · Languages: English · Libraries: Datasets
Commit 5dadd0b by bfattori · Parent: e30efe6

Create race.py

Files changed (1):
  1. race.py +67 -0
race.py ADDED
@@ -0,0 +1,67 @@
+import json
+
+import datasets
+
+
+_CITATION = """\
+@article{lai2017large,
+    title={RACE: Large-scale ReAding Comprehension Dataset From Examinations},
+    author={Lai, Guokun and Xie, Qizhe and Liu, Hanxiao and Yang, Yiming and Hovy, Eduard},
+    journal={arXiv preprint arXiv:1704.04683},
+    year={2017}
+}
+"""
+
+_DESCRIPTION = """\
+RACE is a large-scale reading comprehension dataset with more than 28,000 passages and nearly 100,000 questions. The
+dataset is collected from English examinations in China, which are designed for middle school and high school students.
+The dataset can serve as the training and test sets for machine comprehension.
+"""
+
+_BASE_URL = "https://huggingface.co/datasets/bfattori/race_grouped"
+# TODO: CHANGE
+_URLS = {
+    "high": f"{_BASE_URL}/race_high_test.jsonl",
+}
+
+class Race(datasets.GeneratorBasedBuilder):
+    """RACE: ReAding Comprehension Dataset From Examinations, collected by CMU."""
+
+    VERSION = datasets.Version("0.1.0")
+
+    BUILDER_CONFIGS = [
+        datasets.BuilderConfig(name="high", description="Exams designed for high school students", version=VERSION),
+    ]
+
+    def _info(self):
+        features = datasets.Features(
+            {
+                "article": datasets.Value("string"),
+                "problems": datasets.features.Sequence(datasets.Value("string")),
+            }
+        )
+        return datasets.DatasetInfo(
+            description=f"{_DESCRIPTION}\n{self.config.description}",
+            features=features,
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        urls = _URLS[self.config.name]
+        data_dir = dl_manager.download_and_extract(urls)
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                # These kwargs will be passed to _generate_examples
+                gen_kwargs={
+                    "filepath": data_dir,
+                    "split": datasets.Split.TEST,
+                },
+            ),
+        ]
+
+    def _generate_examples(self, filepath, split):
+        with open(filepath, encoding="utf-8") as f:
+            for key, row in enumerate(f):
+                data = json.loads(row)
+                yield key, {"article": data["article"], "problems": data["problems"]}
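
For reference, a minimal usage sketch follows. It assumes the script is saved locally as race.py, that the TODO above has been resolved so race_high_test.jsonl is actually downloadable, and that each line of that file is a JSON object with an "article" string and a "problems" list of strings, which is what _generate_examples reads. Depending on your datasets version, loading a local script may also require passing trust_remote_code=True.

from datasets import load_dataset

# Hypothetical shape of one line in race_high_test.jsonl:
# {"article": "Last week our class visited ...", "problems": ["What did ...?", "Why was ...?"]}

# "high" is the only config declared in BUILDER_CONFIGS, and TEST is the only split.
race = load_dataset("./race.py", "high", split="test")

example = race[0]
print(example["article"][:100])  # start of the passage
print(example["problems"])       # all questions grouped under that passage

Each example groups a passage together with all of its questions, consistent with the race_grouped repository name.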