Modalities: Text
Sub-tasks: extractive-qa
Libraries: Datasets
License: CC BY 4.0
saattrupdan committed
Commit 3c432c7 · Parent: df59ccd

feat: Add loading script

Files changed (1)
  1. scandiqa.py +166 -0
scandiqa.py ADDED
@@ -0,0 +1,166 @@
+ # Copyright 2022 The HuggingFace Datasets Authors and Dan Saattrup Nielsen.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Python build script for the ScandiQA dataset."""
+
+
+ from typing import List
+
+ import pandas as pd  # used to read the Parquet data files
+
+ from datasets.splits import SplitGenerator, Split
+ from datasets.info import DatasetInfo
+ from datasets.builder import GeneratorBasedBuilder, BuilderConfig
+ from datasets.features import Features, Value
+ from datasets import Version
+ from datasets.download import DownloadManager
+
+
+ _DESCRIPTION = """
+ ScandiQA is a dataset of questions and answers in the Danish, Norwegian, and Swedish
+ languages. All samples come from the Natural Questions (NQ) dataset, which is a large
+ question answering dataset from Google searches. The Scandinavian questions and answers
+ come from the MKQA dataset, where 10,000 NQ samples were manually translated into,
+ among others, Danish, Norwegian, and Swedish. However, this did not include a
+ translated context, hindering the training of extractive question answering models.
+
+ We merged the NQ dataset with the MKQA dataset and extracted contexts either as the
+ "long answer" from the NQ dataset, being the paragraph in which the answer was found,
+ or otherwise as the paragraph with the largest cosine similarity to the question,
+ among the paragraphs which contain the desired answer.
+
+ Further, many answers in the MKQA dataset were "language normalised": for instance,
+ all date answers were converted to the format "YYYY-MM-DD", meaning that in most
+ cases these answers do not appear in any paragraph. We solve this by extending the
+ MKQA answers with plausible "answer candidates", being slight perturbations or
+ translations of the answer.
+
+ With the contexts extracted, we translated them into Danish, Swedish, and Norwegian,
+ using the DeepL translation service for Danish and Swedish and the Google Translation
+ service for Norwegian. After translation we ensured that the Scandinavian answers do
+ indeed occur in the translated contexts.
+
+ As we filter the MKQA samples at both the merging stage and the translation stage, we
+ are not able to fully convert the 10,000 samples to the Scandinavian languages, and
+ instead get roughly 8,000 samples per language. These have further been split into
+ training, validation and test splits, with the latter two containing roughly 750
+ samples each. The splits have been created in such a way that the proportion of
+ samples without an answer is roughly the same in each split.
+ """
+
+ _HOMEPAGE = "https://huggingface.co/datasets/alexandrainst/scandiqa"
+ _LICENSE = "CC BY 4.0"
+
+ # Note that the raw files live under the `resolve` endpoint of the dataset
+ # repository; the `blob` endpoint returns an HTML page rather than the file.
+ _URLS = {
+     "da": [
+         "https://huggingface.co/datasets/alexandrainst/scandiqa/resolve/main/data/da/train-00000-of-00001-e7fccfe6ae54e16a.parquet",
+         "https://huggingface.co/datasets/alexandrainst/scandiqa/resolve/main/data/da/val-00000-of-00001-e7fccfe6ae54e16a.parquet",
+         "https://huggingface.co/datasets/alexandrainst/scandiqa/resolve/main/data/da/test-00000-of-00001-e7fccfe6ae54e16a.parquet",
+     ],
+     "sv": [
+         "https://huggingface.co/datasets/alexandrainst/scandiqa/resolve/main/data/sv/train-00000-of-00001-e7fccfe6ae54e16a.parquet",
+         "https://huggingface.co/datasets/alexandrainst/scandiqa/resolve/main/data/sv/val-00000-of-00001-e7fccfe6ae54e16a.parquet",
+         "https://huggingface.co/datasets/alexandrainst/scandiqa/resolve/main/data/sv/test-00000-of-00001-e7fccfe6ae54e16a.parquet",
+     ],
+     "no": [
+         "https://huggingface.co/datasets/alexandrainst/scandiqa/resolve/main/data/no/train-00000-of-00001-e7fccfe6ae54e16a.parquet",
+         "https://huggingface.co/datasets/alexandrainst/scandiqa/resolve/main/data/no/val-00000-of-00001-e7fccfe6ae54e16a.parquet",
+         "https://huggingface.co/datasets/alexandrainst/scandiqa/resolve/main/data/no/test-00000-of-00001-e7fccfe6ae54e16a.parquet",
+     ],
+ }
+
+ # _CITATION = """
+ # @InProceedings{huggingface:dataset,
+ # title = {ScandiQA: A Scandinavian Question Answering Dataset},
+ # author={Dan Saattrup Nielsen},
+ # year={2022}
+ # }
+ # """
+
+
+ class ScandiQA(GeneratorBasedBuilder):
+     """Scandinavian question answering dataset."""
+
+     VERSION = Version("1.0.0")
+
+     BUILDER_CONFIGS = [
+         BuilderConfig(
+             name="da",
+             version=VERSION,
+             description="The Danish part of the ScandiQA dataset.",
+         ),
+         BuilderConfig(
+             name="sv",
+             version=VERSION,
+             description="The Swedish part of the ScandiQA dataset.",
+         ),
+         BuilderConfig(
+             name="no",
+             version=VERSION,
+             description="The Norwegian part of the ScandiQA dataset.",
+         ),
+     ]
+
+     def _info(self) -> DatasetInfo:
+         features = Features(
+             {
+                 "example_id": Value("int64"),
+                 "question": Value("string"),
+                 "answer": Value("string"),
+                 "answer_start": Value("int64"),
+                 "context": Value("string"),
+                 "answer_en": Value("string"),
+                 "answer_start_en": Value("int64"),
+                 "context_en": Value("string"),
+                 "title_en": Value("string"),
+             }
+         )
+         return DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             # citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: DownloadManager) -> List[SplitGenerator]:
+         # The Parquet files are plain files rather than archives, so
+         # `download` returns the local path of each file directly.
+         urls = _URLS[self.config.name]
+         downloaded_files = dl_manager.download(urls)
+         return [
+             SplitGenerator(
+                 name=str(Split.TRAIN),
+                 gen_kwargs=dict(filepath=downloaded_files[0], split="train"),
+             ),
+             SplitGenerator(
+                 name=str(Split.VALIDATION),
+                 gen_kwargs=dict(filepath=downloaded_files[1], split="val"),
+             ),
+             SplitGenerator(
+                 name=str(Split.TEST),
+                 gen_kwargs=dict(filepath=downloaded_files[2], split="test"),
+             ),
+         ]
+
+     def _generate_examples(self, filepath: str, split: str):
+         # Each split is a single Parquet file; read it with pandas and
+         # yield one example per row.
+         df = pd.read_parquet(filepath)
+         for key, row in enumerate(df.to_dict(orient="records")):
+             yield key, row
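
A note on the context-extraction step from the description: among the paragraphs that contain the answer, the one with the highest cosine similarity to the question is selected. The loading script above only reads the finished data, but the selection logic can be sketched as follows. This is a minimal sketch, not the project's actual implementation; the TF-IDF representation and the select_context helper are illustrative assumptions.

from typing import List, Optional

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity


def select_context(question: str, paragraphs: List[str], answer: str) -> Optional[str]:
    """Illustrative only: pick the paragraph most similar to the question
    (by TF-IDF cosine similarity) among those containing the answer."""
    # Only paragraphs that actually contain the answer are candidates.
    candidates = [p for p in paragraphs if answer in p]
    if not candidates:
        return None
    # Embed the question and the candidates in the same TF-IDF space.
    matrix = TfidfVectorizer().fit_transform([question] + candidates)
    # Row 0 is the question; the remaining rows are the candidates.
    scores = cosine_similarity(matrix[0], matrix[1:])[0]
    return candidates[scores.argmax()]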
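Similarly, the "answer candidate" extension for language-normalised answers amounts to generating surface forms that could plausibly occur verbatim in a paragraph. A minimal sketch for date answers, assuming a normalised "YYYY-MM-DD" input; the candidate set below (and the Scandinavian translations it would need) is an assumption for illustration:

from datetime import date
from typing import List


def date_candidates(normalised: str) -> List[str]:
    """Illustrative only: expand a normalised "YYYY-MM-DD" answer into
    surface forms that might occur in a context paragraph."""
    d = date.fromisoformat(normalised)
    return [
        normalised,               # e.g. "1969-07-20"
        d.strftime("%d %B %Y"),   # e.g. "20 July 1969"
        d.strftime("%B %d, %Y"),  # e.g. "July 20, 1969"
        str(d.year),              # e.g. "1969"
    ]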
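Finally, with this script in the repository, each language config should be loadable in the usual way. A sketch; note that recent versions of the datasets library may additionally require trust_remote_code=True for script-based datasets:

from datasets import load_dataset

# Load the Danish config; "sv" and "no" work the same way.
scandiqa_da = load_dataset("alexandrainst/scandiqa", "da")
print(scandiqa_da["train"][0]["question"])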