Dataset: JaQuAD (SkelterLabsInc/JaQuAD)
Tasks: Question Answering (sub-task: extractive-qa)
Modalities: Text
Languages: Japanese
Size: 10K - 100K question-answer pairs
License: CC BY-SA 3.0
File size: 4,483 bytes
'''Dataset loading script for JaQuAD.
We refer to https://huggingface.co/datasets/squad_v2/blob/main/squad_v2.py
'''
import json
import os

import datasets

_CITATION = '''
@article{SkelterLabsInc:JaQuAD,
    title  = {{JaQuAD}: Japanese Question Answering Dataset for Machine
              Reading Comprehension},
    author = {Byunghoon So and
              Kyuhong Byun and
              Kyungwon Kang and
              Seongjin Cho},
    year   = {2022},
}
'''

_DESCRIPTION = '''Japanese Question Answering Dataset (JaQuAD), released in
2022, is a human-annotated dataset created for Japanese Machine Reading
Comprehension. JaQuAD is developed to provide a SQuAD-like QA dataset in
Japanese. JaQuAD contains 39,696 question-answer pairs. Questions and answers
are manually curated by human annotators. Contexts are collected from Japanese
Wikipedia articles.
'''

_LICENSE = 'CC BY-SA 3.0'
_HOMEPAGE = 'https://skelterlabs.com/en/'
_URL = 'https://huggingface.co/datasets/SkelterLabsInc/JaQuAD/raw/main/data/'

class JaQuAD(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version('0.1.0')

    def _info(self):
        # Schema of a single example. 'answers' is a Sequence so that each
        # question can carry several annotated answer spans.
        features = datasets.Features({
            'id': datasets.Value('string'),
            'title': datasets.Value('string'),
            'context': datasets.Value('string'),
            'question': datasets.Value('string'),
            'question_type': datasets.Value('string'),
            'answers':
                datasets.features.Sequence({
                    'text': datasets.Value('string'),
                    'answer_start': datasets.Value('int32'),
                    'answer_type': datasets.Value('string'),
                }),
        })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        # The corpus is sharded into 30 training files and 4 dev files
        # hosted under _URL.
        urls_to_download = {
            'train': [
                os.path.join(_URL, f'train/jaquad_train_{i:04d}.json')
                for i in range(30)
            ],
            'dev': [
                os.path.join(_URL, f'dev/jaquad_dev_{i:04d}.json')
                for i in range(4)
            ],
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={'filepaths': downloaded_files['train']},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={'filepaths': downloaded_files['dev']},
            ),
        ]
    def _generate_examples(self, filepaths):
        for filename in filepaths:
            with open(filename, encoding='utf-8') as ifile:
                jaquad = json.load(ifile)
                # Walk the SQuAD-style nesting:
                # article -> paragraph -> question-answer pair.
                for article in jaquad['data']:
                    title = article.get('title', '').strip()
                    for paragraph in article['paragraphs']:
                        context = paragraph['context'].strip()
                        for qa in paragraph['qas']:
                            qa_id = qa['id']
                            question = qa['question'].strip()
                            question_type = qa['question_type']
                            answer_starts = [
                                answer['answer_start']
                                for answer in qa['answers']
                            ]
                            answer_texts = [
                                answer['text'].strip()
                                for answer in qa['answers']
                            ]
                            answer_types = [
                                answer['answer_type']
                                for answer in qa['answers']
                            ]
                            yield qa_id, {
                                'title': title,
                                'context': context,
                                'question': question,
                                'question_type': question_type,
                                'id': qa_id,
                                'answers': {
                                    'text': answer_texts,
                                    'answer_start': answer_starts,
                                    'answer_type': answer_types,
                                },
                            }
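
For completeness, a minimal usage sketch follows. It assumes the script above is published as the SkelterLabsInc/JaQuAD dataset on the Hugging Face Hub (as the _URL constant suggests) and that the datasets library is installed; depending on the installed version, script-based datasets may additionally require trust_remote_code=True.

from datasets import load_dataset

# Load both splits defined in _split_generators (train and validation).
jaquad = load_dataset('SkelterLabsInc/JaQuAD')

# Each record follows the Features schema declared in _info:
# id, title, context, question, question_type, and a dict of answer lists.
example = jaquad['train'][0]
print(example['question'])
print(example['answers']['text'], example['answers']['answer_start'])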