luozhouyang commited on
Commit
43c57e2
2 Parent(s): 2ab81d1 e250e6c

Merge branch 'master' into main

Browse files
Files changed (3) hide show
  1. .gitignore +115 -0
  2. README.md +0 -0
  3. dureader.py +198 -0
.gitignore ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ .idea/
2
+ .vscode/
3
+ log/
4
+ logs/
5
+ testdata
6
+
7
+ models/
8
+ *.imp
9
+ *.pyc
10
+ .DS_Store
11
+ __pycache__
12
+ # Byte-compiled / optimized / DLL files
13
+ __pycache__/
14
+ *.py[cod]
15
+ *$py.class
16
+ *.ipynb
17
+ # C extensions
18
+ *.so
19
+
20
+ # Distribution / packaging
21
+ .Python
22
+ build/
23
+ develop-eggs/
24
+ dist/
25
+ downloads/
26
+ eggs/
27
+ .eggs/
28
+ lib/
29
+ lib64/
30
+ parts/
31
+ sdist/
32
+ var/
33
+ wheels/
34
+ *.egg-info/
35
+ .installed.cfg
36
+ *.egg
37
+ MANIFEST
38
+ ifchange
39
+ # PyInstaller
40
+ # Usually these files are written by a python script from a template
41
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
42
+ *.manifest
43
+ *.spec
44
+
45
+ # Installer logs
46
+ pip-log.txt
47
+ pip-delete-this-directory.txt
48
+
49
+ # Unit test / coverage reports
50
+ htmlcov/
51
+ .tox/
52
+ .coverage
53
+ .coverage.*
54
+ .cache
55
+ nosetests.xml
56
+ coverage.xml
57
+ *.cover
58
+ .hypothesis/
59
+ .pytest_cache/
60
+
61
+ # Translations
62
+ *.mo
63
+ *.pot
64
+
65
+ # Django stuff:
66
+ *.log
67
+ local_settings.py
68
+ db.sqlite3
69
+
70
+ # Flask stuff:
71
+ instance/
72
+ .webassets-cache
73
+
74
+ # Scrapy stuff:
75
+ .scrapy
76
+
77
+ # Sphinx documentation
78
+ docs/_build/
79
+
80
+ # PyBuilder
81
+ target/
82
+
83
+ # Jupyter Notebook
84
+ .ipynb_checkpoints
85
+
86
+ # pyenv
87
+ .python-version
88
+
89
+ # celery beat schedule file
90
+ celerybeat-schedule
91
+
92
+ # SageMath parsed files
93
+ *.sage.py
94
+
95
+ # Environments
96
+ .env
97
+ .venv
98
+ env/
99
+ venv/
100
+ ENV/
101
+ env.bak/
102
+ venv.bak/
103
+
104
+ # Spyder project settings
105
+ .spyderproject
106
+ .spyproject
107
+
108
+ # Rope project settings
109
+ .ropeproject
110
+
111
+ # mkdocs documentation
112
+ /site
113
+
114
+ # mypy
115
+ .mypy_cache/
README.md ADDED
File without changes
dureader.py ADDED
@@ -0,0 +1,198 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os
3
+
4
+ import datasets
5
+ from datasets import DatasetInfo, DownloadManager
6
+
7
+
8
class DuReaderConfig(datasets.BuilderConfig):
    """BuilderConfig for one DuReader sub-dataset.

    Args:
        name: Sub-dataset name, e.g. ``"robust"`` or ``"checklist"``.
        data_url: URL of the ``.tar.gz`` archive holding the data files.
        **kwargs: Extra keyword arguments forwarded to
            ``datasets.BuilderConfig`` (e.g. ``description``).
    """

    def __init__(self, name, data_url, **kwargs):
        # Bug fix: forward **kwargs to the parent constructor instead of
        # silently discarding them as the original did.
        super().__init__(name=name, version=datasets.Version("1.0.0", ""), **kwargs)
        # Remember where this config's archive lives; used by _split_generators.
        self.data_url = data_url
14
+
15
+
16
class DuReader(datasets.GeneratorBasedBuilder):
    """Dataset builder for the Baidu DuReader QA corpora.

    Supports two SQuAD-style extractive-QA sub-datasets, ``robust`` and
    ``checklist``. Each archive extracts to ``train.json`` / ``dev.json`` /
    ``test.json``; train/dev carry gold answers, test does not.
    """

    BUILDER_CONFIGS = [
        DuReaderConfig(
            name="robust",
            data_url="https://dataset-bj.cdn.bcebos.com/qianyan/dureader_robust-data.tar.gz",
        ),
        DuReaderConfig(
            name="checklist",
            data_url="https://dataset-bj.cdn.bcebos.com/qianyan/dureader_checklist-data.tar.gz",
        ),
        # DuReaderConfig(
        #     name="yesno",
        #     data_url="https://dataset-bj.cdn.bcebos.com/qianyan/dureader_yesno-data.tar.gz",
        # ),
    ]

    def _info(self) -> DatasetInfo:
        """Return the feature schema for the active config (None if unknown)."""
        # SQuAD-style answers: parallel lists of answer texts and char offsets.
        answers_feature = datasets.Sequence(
            {
                "text": datasets.Value("string"),
                "answer_start": datasets.Value("int32"),
            }
        )
        if self.config.name == "robust":
            features = {
                "id": datasets.Value("string"),
                "context": datasets.Value("string"),
                "question": datasets.Value("string"),
                "answers": answers_feature,
            }
        elif self.config.name == "checklist":
            features = {
                "id": datasets.Value("string"),
                "title": datasets.Value("string"),
                "context": datasets.Value("string"),
                "question": datasets.Value("string"),
                "is_impossible": datasets.Value("bool"),
                "answers": answers_feature,
                # Bug fix: the checklist generator emits a "type" key, but it
                # was missing from the declared features, which makes example
                # encoding fail during generation.
                "type": datasets.Value("string"),
            }
        else:
            return None
        return datasets.DatasetInfo(
            description="",
            citation="",
            homepage="",
            # Wrap the plain dict so DatasetInfo always receives a Features object.
            features=datasets.Features(features),
        )

    def _split_generators(self, dl_manager: DownloadManager):
        """Download and extract the archive, then build train/dev/test splits.

        The robust and checklist branches of the original were identical
        line-for-line, so they are merged into one path.
        """
        if self.config.name not in ("robust", "checklist"):
            return []
        dl_dir = dl_manager.download_and_extract(self.config.data_url)
        # Both archives extract to train.json / dev.json / test.json.
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "data_file": os.path.join(dl_dir, f"{tag}.json"),
                    "split": tag,
                },
            )
            for split_name, tag in (
                (datasets.Split.TRAIN, "train"),
                (datasets.Split.VALIDATION, "dev"),
                (datasets.Split.TEST, "test"),
            )
        ]

    def _generate_examples(self, data_file, split):
        """Dispatch to the generator matching the active config and split."""
        labeled = split in ("train", "dev")
        if self.config.name == "robust":
            if labeled:
                return self._generate_robust_examples(data_file)
            return self._generate_robust_test_examples(data_file)
        if self.config.name == "checklist":
            if labeled:
                return self._generate_checklist_examples(data_file)
            return self._generate_checklist_test_examples(data_file)
        # Bug fix: the original returned the tuple (None, None), which is not
        # iterable as (key, example) pairs; yield nothing instead.
        return iter([])

    def _generate_robust_examples(self, data_file):
        """Yield (id, example) pairs from a labeled robust train/dev file."""
        with open(data_file, mode="rt", encoding="utf-8") as fin:
            data = json.load(fin)["data"]
        for article in data:
            for paragraph in article["paragraphs"]:
                context = paragraph["context"]
                for qa in paragraph["qas"]:
                    yield qa["id"], {
                        "id": qa["id"],
                        "context": context,
                        "question": qa["question"],
                        "answers": qa["answers"],
                    }

    def _generate_robust_test_examples(self, data_file):
        """Yield (id, example) pairs from the unlabeled robust test file.

        Bug fix: emit an empty "answers" column so test examples carry every
        feature declared in _info(); missing columns fail encoding.
        """
        with open(data_file, mode="rt", encoding="utf-8") as fin:
            data = json.load(fin)["data"]
        for article in data:
            for paragraph in article["paragraphs"]:
                context = paragraph["context"]
                for qa in paragraph["qas"]:
                    yield qa["id"], {
                        "id": qa["id"],
                        "context": context,
                        "question": qa["question"],
                        "answers": {"text": [], "answer_start": []},
                    }

    def _generate_checklist_examples(self, data_file):
        """Yield (id, example) pairs from a labeled checklist train/dev file."""
        with open(data_file, mode="rt", encoding="utf-8") as fin:
            data = json.load(fin)["data"]
        for article in data:
            for paragraph in article["paragraphs"]:
                title = paragraph["title"]
                context = paragraph["context"]
                for qa in paragraph["qas"]:
                    yield qa["id"], {
                        "id": qa["id"],
                        "title": title,
                        "context": context,
                        "question": qa["question"],
                        "is_impossible": qa["is_impossible"],
                        "answers": qa["answers"],
                        "type": qa["type"],
                    }

    def _generate_checklist_test_examples(self, data_file):
        """Yield (id, example) pairs from the unlabeled checklist test file.

        Bug fix: fill the label columns ("is_impossible", "answers", "type")
        with defaults so test examples match the declared features. Real
        values are used when present in the file — NOTE(review): confirm
        which of these keys the released test qas actually carry.
        """
        with open(data_file, mode="rt", encoding="utf-8") as fin:
            data = json.load(fin)["data"]
        for article in data:
            for paragraph in article["paragraphs"]:
                title = paragraph["title"]
                context = paragraph["context"]
                for qa in paragraph["qas"]:
                    yield qa["id"], {
                        "id": qa["id"],
                        "title": title,
                        "context": context,
                        "question": qa["question"],
                        "is_impossible": qa.get("is_impossible", False),
                        "answers": qa.get("answers", {"text": [], "answer_start": []}),
                        "type": qa.get("type", ""),
                    }