import json
import os
import datasets
from datasets import DatasetInfo, DownloadManager
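
# Hugging Face `datasets` loading script for the DuReader "robust" and "checklist"
# subsets. Each subset is distributed as a .tar.gz archive of SQuAD-style JSON
# files (train.json / dev.json / test.json) that the generators below parse.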


class DuReaderConfig(datasets.BuilderConfig):
    """Config for the DuReader dataset."""

    def __init__(self, name, data_url, **kwargs):
        super().__init__(name=name, version=datasets.Version("1.0.0", ""), **kwargs)
        # URL of the .tar.gz archive holding this subset's train/dev/test JSON files.
        self.data_url = data_url


class DuReader(datasets.GeneratorBasedBuilder):
    """DuReader machine reading comprehension dataset (robust and checklist subsets)."""

    BUILDER_CONFIGS = [
        DuReaderConfig(
            name="robust",
            data_url="https://dataset-bj.cdn.bcebos.com/qianyan/dureader_robust-data.tar.gz",
        ),
        DuReaderConfig(
            name="checklist",
            data_url="https://dataset-bj.cdn.bcebos.com/qianyan/dureader_checklist-data.tar.gz",
        ),
        # DuReaderConfig(
        #     name="yesno",
        #     data_url="https://dataset-bj.cdn.bcebos.com/qianyan/dureader_yesno-data.tar.gz",
        # ),
    ]
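
    # Usage sketch (illustrative addition, not part of the original script): either
    # subset can be loaded by pointing `datasets.load_dataset` at a local copy of
    # this file and naming the config, e.g.
    #
    #     ds = datasets.load_dataset("path/to/dureader.py", "robust")
    #
    # where "path/to/dureader.py" is a placeholder for wherever the script lives.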

    def _info(self) -> DatasetInfo:
        if self.config.name == "robust":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "context": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "answers": datasets.Sequence(
                        {
                            "text": datasets.Value("string"),
                            "answer_start": datasets.Value("int32"),
                        }
                    ),
                }
            )
            return datasets.DatasetInfo(
                description="",
                citation="",
                homepage="",
                features=features,
            )
        if self.config.name == "checklist":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "context": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "is_impossible": datasets.Value("bool"),
                    "answers": datasets.Sequence(
                        {
                            "text": datasets.Value("string"),
                            "answer_start": datasets.Value("int32"),
                        }
                    ),
                }
            )
            return datasets.DatasetInfo(
                description="",
                citation="",
                homepage="",
                features=features,
            )
        raise ValueError(f"Unsupported config name: {self.config.name}")
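
    # The raw JSON files follow the SQuAD-style layout that the generators below
    # walk (field names taken from the generator code; only the checklist
    # generators read "title" and "is_impossible"):
    #
    #     {"data": [{"paragraphs": [{"title": ..., "context": ...,
    #                                "qas": [{"id": ..., "question": ...,
    #                                         "is_impossible": ...,
    #                                         "answers": [{"text": ...,
    #                                                      "answer_start": ...}]}]}]}]}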

    def _split_generators(self, dl_manager: DownloadManager):
        """Split generators"""

        def _build(train_files, valid_files, test_files):
            train_split = datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_file": train_files,
                    "split": "train",
                },
            )
            valid_split = datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "data_file": valid_files,
                    "split": "dev",
                },
            )
            test_split = datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "data_file": test_files,
                    "split": "test",
                },
            )
            return [train_split, valid_split, test_split]

        if self.config.name == "robust":
            dl_dir = dl_manager.download_and_extract(self.config.data_url)
            splits = _build(
                train_files=os.path.join(dl_dir, "train.json"),
                valid_files=os.path.join(dl_dir, "dev.json"),
                test_files=os.path.join(dl_dir, "test.json"),
            )
            return splits
        if self.config.name == "checklist":
            dl_dir = dl_manager.download_and_extract(self.config.data_url)
            splits = _build(
                train_files=os.path.join(dl_dir, "train.json"),
                valid_files=os.path.join(dl_dir, "dev.json"),
                test_files=os.path.join(dl_dir, "test.json"),
            )
            return splits
        raise ValueError(f"Unsupported config name: {self.config.name}")
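
    # `dl_manager.download_and_extract` downloads each archive (with caching) and
    # returns the local path it was extracted to, so the generators receive plain
    # filesystem paths to the JSON files joined above.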

    def _generate_examples(self, data_file, split):
        if self.config.name == "robust":
            if split == "train" or split == "dev":
                return self._generate_robust_examples(data_file)
            return self._generate_robust_test_examples(data_file)
        if self.config.name == "checklist":
            if split == "train" or split == "dev":
                return self._generate_checklist_examples(data_file)
            return self._generate_checklist_test_examples(data_file)
        raise ValueError(f"Unsupported config name: {self.config.name}")
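
    # Each generator below yields (key, example) pairs, as GeneratorBasedBuilder
    # expects; the question id doubles as the unique key.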

    def _generate_robust_examples(self, data_file):
        with open(data_file, mode="rt", encoding="utf-8") as fin:
            data = json.load(fin)["data"]
            for d in data:
                for p in d["paragraphs"]:
                    context = p["context"]
                    for qa in p["qas"]:
                        example = {
                            "id": qa["id"],
                            "context": context,
                            "question": qa["question"],
                            "answers": qa["answers"],
                        }
                        yield example["id"], example

    def _generate_robust_test_examples(self, data_file):
        with open(data_file, mode="rt", encoding="utf-8") as fin:
            data = json.load(fin)["data"]
            for d in data:
                for p in d["paragraphs"]:
                    context = p["context"]
                    for qa in p["qas"]:
                        example = {
                            "id": qa["id"],
                            "context": context,
                            "question": qa["question"],
                        }
                        yield example["id"], example

    def _generate_checklist_examples(self, data_file):
        with open(data_file, mode="rt", encoding="utf-8") as fin:
            data = json.load(fin)["data"]
            for d in data:
                for p in d["paragraphs"]:
                    title = p["title"]
                    context = p["context"]
                    for qa in p["qas"]:
                        example = {
                            "id": qa["id"],
                            "title": title,
                            "context": context,
                            "question": qa["question"],
                            "is_impossible": qa["is_impossible"],
                            "answers": qa["answers"],
                            # Note: qa also carries a "type" field, but it is not part of
                            # the declared checklist features, so it is not emitted here.
                        }
                        yield example["id"], example

    def _generate_checklist_test_examples(self, data_file):
        with open(data_file, mode="rt", encoding="utf-8") as fin:
            data = json.load(fin)["data"]
            for d in data:
                for p in d["paragraphs"]:
                    title = p["title"]
                    context = p["context"]
                    for qa in p["qas"]:
                        example = {
                            "id": qa["id"],
                            "title": title,
                            "context": context,
                            "question": qa["question"],
                        }
                        yield example["id"], example
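

# Minimal smoke test (an illustrative addition, not part of the original script).
# Running this module directly builds the "robust" subset from this very file and
# prints one training example; it assumes network access to the Baidu CDN URLs above
# and a `datasets` version that still supports script-based loading.
if __name__ == "__main__":
    dataset = datasets.load_dataset(__file__, "robust")
    print(dataset["train"][0])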