import json

import datasets

_CITATION = """\\ |
|
@misc{PersianQA, |
|
author = {Sajjad Ayoubi, Mohammad Yasin Davoodeh}, |
|
title = {PersianQA: a dataset for Persian Question Answering}, |
|
year = 2021, |
|
publisher = {GitHub}, |
|
journal = {GitHub repository}, |
|
howpublished = {url{https://github.com/SajjjadAyobi/PersianQA}}, |
|
} |
|
""" |
|
_DESCRIPTION = """\\\\\\\\ |
|
Persian Question Answering (PersianQA) Dataset is a reading comprehension dataset on Persian Wikipedia. |
|
The crowd-sourced dataset consists of more than 9,000 entries. Each entry can be either an impossible to answer or a question with one or more answers spanning in the passage (the context) from which the questioner proposed the question. Much like the SQuAD2.0 dataset, the impossible or unanswerable questions can be utilized to create a system which "knows that it doesn't know the answer". |
|
""" |
|
_URL = "https://raw.githubusercontent.com/sajjjadayobi/PersianQA/main/dataset/" |
|
_URLS = { |
|
"train": _URL + "pqa_train.json", |
|
"test": _URL + "pqa_test.json", |
|
} |
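
# A rough sketch of the SQuAD-style JSON layout the files above are assumed to
# follow, matching the fields read in _generate_examples below. The concrete
# values are illustrative placeholders, not taken from the real dataset:
#
# {
#   "data": [
#     {
#       "title": "...",
#       "paragraphs": [
#         {
#           "context": "...",
#           "qas": [
#             {"id": 1, "question": "...", "answers": [{"text": "...", "answer_start": 0}]},
#             {"id": 2, "question": "...", "answers": []}
#           ]
#         }
#       ]
#     }
#   ]
# }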


class PersianQAConfig(datasets.BuilderConfig):
    """BuilderConfig for PersianQA."""

    def __init__(self, **kwargs):
        """BuilderConfig for PersianQA.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(PersianQAConfig, self).__init__(**kwargs)


class PersianQA(datasets.GeneratorBasedBuilder):
    """PersianQA: a crowd-sourced reading comprehension dataset on Persian Wikipedia."""

    BUILDER_CONFIGS = [
        PersianQAConfig(name="persian_qa", version=datasets.Version("1.0.0"), description="PersianQA plain text version 1"),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            # The feature schema follows the SQuAD format: "answers" is a sequence,
            # so unanswerable questions end up with empty text/answer_start lists.
            features=datasets.Features(
                {
                    "id": datasets.Value("int32"),
                    "title": datasets.Value("string"),
                    "context": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "answers": datasets.features.Sequence(
                        {
                            "text": datasets.Value("string"),
                            "answer_start": datasets.Value("int32"),
                        }
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/sajjjadayobi/PersianQA/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        urls_to_download = _URLS
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            # The released test file is exposed as the validation split.
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        """Yields examples."""
        with open(filepath, encoding="utf-8") as f:
            squad = json.load(f)
            for example in squad["data"]:
                title = example.get("title", "").strip()
                for paragraph in example["paragraphs"]:
                    context = paragraph["context"].strip()
                    for qa in paragraph["qas"]:
                        question = qa["question"].strip()
                        id_ = qa["id"]
                        # Unanswerable questions have an empty "answers" list,
                        # which yields empty answer_start/text sequences.
                        answer_starts = [answer["answer_start"] for answer in qa["answers"]]
                        answers = [answer["text"].strip() for answer in qa["answers"]]
                        yield id_, {
                            "title": title,
                            "context": context,
                            "question": question,
                            "id": id_,
                            "answers": {
                                "answer_start": answer_starts,
                                "text": answers,
                            },
                        }
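

if __name__ == "__main__":
    # Minimal usage sketch, assuming this file is saved as "persian_qa.py" and a
    # `datasets` version that still supports loading from a local script is installed.
    loaded = datasets.load_dataset(__file__)
    print(loaded)
    print(loaded["train"][0]["question"])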