Datasets:

Modalities:
Text
ArXiv:
Libraries:
Datasets
License:
afriqa / afriqa.py
ToluClassics's picture
Update afriqa.py
e457931
"""AfriQA dataset."""
import json
import os
from textwrap import dedent
import datasets
# Upstream project repository; also serves as the dataset homepage.
_HOMEPAGE = "https://github.com/masakhane-io/afriqa"
# Human-readable summary surfaced in DatasetInfo.description.
_DESCRIPTION = """\
AfriQA: Cross-lingual Open-Retrieval Question Answering for African Languages
AfriQA is the first cross-lingual question answering (QA) dataset with a focus on African languages.
The dataset includes over 12,000 XOR QA examples across 10 African languages, making it an invaluable resource for developing more equitable QA technology.
"""
# BibTeX citation block — currently empty; TODO: fill in once available.
_CITATION = """\
"""
# Base URL of the per-language query files in the upstream repo.
_URL = "https://github.com/masakhane-io/afriqa/raw/main/data/queries/"
# Maps each African language code (= builder config name) to its pivot
# language ("en" or "fr"); both codes appear in the query file names.
_LANG_2_PIVOT = {
"bem": "en",
"fon": "fr",
"hau": "en",
"ibo": "en",
"kin": "en",
"swa": "en",
"twi": "en",
"wol": "fr",
"yor": "en",
"zul": "en",
}
class AfriQAConfig(datasets.BuilderConfig):
    """BuilderConfig for the AfriQA dataset.

    A thin wrapper around ``datasets.BuilderConfig``: every keyword
    argument is forwarded to the parent class unchanged.
    """

    def __init__(self, **kwargs):
        """Initialize the config.

        Args:
            **kwargs: keyword arguments forwarded to ``datasets.BuilderConfig``.
        """
        super().__init__(**kwargs)
class AfriQA(datasets.GeneratorBasedBuilder):
    """AfriQA: cross-lingual open-retrieval QA over 10 African languages.

    One builder config per language code; each config downloads the
    train/dev/test query files for that language from the upstream repo.
    """

    VERSION = datasets.Version("1.0.0")

    # One config per supported language; config name is the language code
    # and must be a key of _LANG_2_PIVOT. Descriptions match the original
    # hand-written list exactly ("AfriQA <Language> dataset").
    BUILDER_CONFIGS = [
        AfriQAConfig(
            name=code,
            version=datasets.Version("1.0.0"),
            description=f"AfriQA {language} dataset",
        )
        for code, language in [
            ("bem", "Bemba"),
            ("fon", "Fon"),
            ("hau", "Hausa"),
            ("ibo", "Igbo"),
            ("kin", "Kinyarwanda"),
            ("swa", "Swahili"),
            ("twi", "Twi"),
            ("wol", "Wolof"),
            ("yor", "Yoruba"),
            ("zul", "Zulu"),
        ]
    ]

    def _info(self):
        """Return dataset metadata: the flat string-feature schema,
        homepage, and citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "question": datasets.Value("string"),
                    "answers": datasets.Value("string"),
                    "lang": datasets.Value("string"),
                    "split": datasets.Value("string"),
                    "translated_question": datasets.Value("string"),
                    "translated_answer": datasets.Value("string"),
                    "translation_type": datasets.Value("string"),
                }
            ),
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the per-split query files and declare the three splits.

        Upstream layout: <lang>/queries.afriqa.<lang>.<pivot>.<split>.json
        where <pivot> is the translation pivot language from _LANG_2_PIVOT.
        """
        lang = self.config.name
        pivot = _LANG_2_PIVOT[lang]
        urls_to_download = {
            split: f"{_URL}{lang}/queries.afriqa.{lang}.{pivot}.{split}.json"
            for split in ("train", "dev", "test")
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloaded_files["dev"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files["test"]},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(id, example)`` pairs from a JSON-lines query file.

        Args:
            filepath: local path to one downloaded split file; each
                non-blank line is a JSON object with the fields below.

        ``utf-8-sig`` strips a possible BOM from the first line.
        """
        with open(filepath, encoding="utf-8-sig") as f:
            for row in f:  # fixed: original used enumerate() and discarded the index
                # Guard against blank lines (e.g. a trailing newline at EOF),
                # which would otherwise raise json.JSONDecodeError.
                if not row.strip():
                    continue
                example = json.loads(row)
                yield example["id"], {
                    "question": example["question"],
                    "answers": example["answers"],
                    "lang": example["lang"],
                    "split": example["split"],
                    "translated_question": example["translated_question"],
                    "translated_answer": example["translated_answer"],
                    "translation_type": example["translation_type"],
                }