Datasets:

Modalities:
Text
ArXiv:
Libraries:
Datasets
License:
ToluClassics committed on
Commit
e27efe5
1 Parent(s): 333259d

add config

Browse files
Files changed (1) hide show
  1. afriqa.py +128 -0
afriqa.py ADDED
@@ -0,0 +1,128 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """AfriQA dataset."""
2
+
3
+
4
+ import json
5
+ import os
6
+ from textwrap import dedent
7
+
8
+ import datasets
9
+
10
+
11
+ _HOMEPAGE = "https://github.com/masakhane-io/afriqa"
12
+
13
+ _DESCRIPTION = """\
14
+ AfriQA: Cross-lingual Open-Retrieval Question Answering for African Languages
15
+
16
+ AfriQA is the first cross-lingual question answering (QA) dataset with a focus on African languages.
17
+ The dataset includes over 12,000 XOR QA examples across 10 African languages, making it an invaluable resource for developing more equitable QA technology.
18
+ """
19
+
20
+ _CITATION = """\
21
+
22
+ """
23
+
24
+ _URL = "https://github.com/masakhane-io/afriqa/raw/main/data/queries"
25
+
26
+ _LANG_2_PIVOT = {
27
+ "bem": "en",
28
+ "fon": "fr",
29
+ "hau": "en",
30
+ "ibo": "en",
31
+ "kin": "en",
32
+ "swa": "en",
33
+ "twi": "en",
34
+ "wol": "fr",
35
+ "yor": "en",
36
+ "zul": "en",
37
+ }
38
+
39
+ class AfriQAConfig(datasets.BuilderConfig):
40
+ """BuilderConfig for AfriQA"""
41
+
42
+ def __init__(self, **kwargs):
43
+ """BuilderConfig for AfriQA.
44
+ Args:
45
+ **kwargs: keyword arguments forwarded to super.
46
+ """
47
+ super(AfriQAConfig, self).__init__(**kwargs)
48
+
49
+
50
+ class AfriQA(datasets.GeneratorBasedBuilder):
51
+ """AfriQA dataset."""
52
+
53
+ VERSION = datasets.Version("1.0.0")
54
+
55
+ BUILDER_CONFIGS = [
56
+ AfriQAConfig(name="bem", version=datasets.Version("1.0.0"), description="AfriQA Bemba dataset"),
57
+ AfriQAConfig(name="fon", version=datasets.Version("1.0.0"), description="AfriQA Fon dataset"),
58
+ AfriQAConfig(name="hau", version=datasets.Version("1.0.0"), description="AfriQA Hausa dataset"),
59
+ AfriQAConfig(name="ibo", version=datasets.Version("1.0.0"), description="AfriQA Igbo dataset"),
60
+ AfriQAConfig(name="kin", version=datasets.Version("1.0.0"), description="AfriQA Kinyarwanda dataset"),
61
+ AfriQAConfig(name="swa", version=datasets.Version("1.0.0"), description="AfriQA Swahili dataset"),
62
+ AfriQAConfig(name="twi", version=datasets.Version("1.0.0"), description="AfriQA Twi dataset"),
63
+ AfriQAConfig(name="wol", version=datasets.Version("1.0.0"), description="AfriQA Wolof dataset"),
64
+ AfriQAConfig(name="yor", version=datasets.Version("1.0.0"), description="AfriQA Yoruba dataset"),
65
+ AfriQAConfig(name="zul", version=datasets.Version("1.0.0"), description="AfriQA Zulu dataset"),
66
+ ]
67
+
68
+ @property
69
+ def manual_download_instructions(self):
70
+ return dedent("""\
71
+ To access the data for this dataset, you need to download it at:
72
+ https://github.com/masakhane-io/afriqa/tree/main/data/queries
73
+
74
+ the directory contains a folder for each language, each folder contains a json file for each split (train, valid, test)
75
+
76
+ To load the dataset, pass the full path to the destination directory
77
+ in your call to the loading function: `datasets.load_dataset("afriqa", data_dir="<path/to/directory>")`
78
+ """)
79
+
80
+ def _info(self):
81
+ return datasets.DatasetInfo(
82
+ description=_DESCRIPTION,
83
+ features=datasets.Features(
84
+ {
85
+ "question": datasets.Value("string"),
86
+ "answers": datasets.Value("string"),
87
+ "lang": datasets.Value("string"),
88
+ "split": datasets.Value("string"),
89
+ "translated_question": datasets.Value("string"),
90
+ "translated_answer": datasets.Value("string"),
91
+ "translation_type": datasets.Value("string"),
92
+ }
93
+ ),
94
+ homepage=_HOMEPAGE,
95
+ citation=_CITATION,
96
+ )
97
+
98
+ def _split_generators(self, dl_manager):
99
+ """Returns SplitGenerators."""
100
+ urls_to_download = {
101
+ "train": f"{_URL}{self.config.name}/queries.afriqa.{self.config.name}.{_LANG_2_PIVOT[self.config.name]}.train.json",
102
+ "dev": f"{_URL}{self.config.name}/queries.afriqa.{self.config.name}.{_LANG_2_PIVOT[self.config.name]}.dev.json",
103
+ "test": f"{_URL}{self.config.name}/queries.afriqa.{self.config.name}.{_LANG_2_PIVOT[self.config.name]}.test.json",
104
+ }
105
+ downloaded_files = dl_manager.download_and_extract(urls_to_download)
106
+
107
+ return [
108
+ datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
109
+ datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
110
+ datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
111
+ ]
112
+
113
+ def _generate_examples(self, filepath):
114
+ """Yields examples."""
115
+ with open(filepath, encoding="utf-8") as f:
116
+ data = json.load(f)
117
+ for _, example in enumerate(data):
118
+ _id = example["id"]
119
+
120
+ yield _id, {
121
+ "question": example["question"],
122
+ "answers": example["answers"],
123
+ "lang": example["lang"],
124
+ "split": example["split"],
125
+ "translated_question": example["translated_question"],
126
+ "translated_answer": example["translated_answer"],
127
+ "translation_type": example["translation_type"],
128
+ }