Crystina committed on
Commit c6a4d24 · 1 Parent(s): c2202cb
Files changed (1)
  1. mmarco.py +203 -0
mmarco.py ADDED
@@ -0,0 +1,203 @@
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""mMARCO dataset."""

import datasets


_CITATION = """
@misc{bonifacio2021mmarco,
  title={mMARCO: A Multilingual Version of the MS MARCO Passage Ranking Dataset},
  author={Luiz Henrique Bonifacio and Israel Campiotti and Vitor Jeronymo and Hugo Queiroz Abonizio and Roberto Lotufo and Rodrigo Nogueira},
  year={2021},
  eprint={2108.13897},
  archivePrefix={arXiv},
  primaryClass={cs.CL}
}
"""

_URL = "https://github.com/unicamp-dl/mMARCO"
_DESCRIPTION = """
mMARCO: machine-translated versions of the MS MARCO passage ranking dataset.
"""

_BASE_URLS = {
    "collections": "https://huggingface.co/datasets/unicamp-dl/mmarco/resolve/main/data/google/collections/",
    "queries-train": "https://huggingface.co/datasets/unicamp-dl/mmarco/resolve/main/data/google/queries/train/",
    "queries-dev": "https://huggingface.co/datasets/unicamp-dl/mmarco/resolve/main/data/google/queries/dev/",
    "runs": "https://huggingface.co/datasets/unicamp-dl/mmarco/resolve/main/data/google/runs/",
    "train": "https://huggingface.co/datasets/unicamp-dl/mmarco/resolve/main/data/triples.train.ids.small.tsv",
}
LANGUAGES = [
    "arabic",
    "chinese",
    "dutch",
    "english",
    "french",
    "german",
    "hindi",
    "indonesian",
    "italian",
    "japanese",
    "portuguese",
    "russian",
    "spanish",
    "vietnamese",
]

class MMarco(datasets.GeneratorBasedBuilder):
    """mMARCO: a multilingual version of the MS MARCO passage ranking dataset."""

    BUILDER_CONFIGS = (
        [
            datasets.BuilderConfig(
                name=language,
                description=f"{language.capitalize()} triples",
                version=datasets.Version("2.0.0"),
            )
            for language in LANGUAGES
        ]
        + [
            datasets.BuilderConfig(
                name=f"collection-{language}",
                description=f"{language.capitalize()} collection version v2",
                version=datasets.Version("2.0.0"),
            )
            for language in LANGUAGES
        ]
        + [
            datasets.BuilderConfig(
                name=f"queries-{language}",
                description=f"{language.capitalize()} queries version v2",
                version=datasets.Version("2.0.0"),
            )
            for language in LANGUAGES
        ]
        + [
            datasets.BuilderConfig(
                name=f"runs-{language}",
                description=f"{language.capitalize()} runs version v2",
                version=datasets.Version("2.0.0"),
            )
            for language in LANGUAGES
        ]
        + [
            datasets.BuilderConfig(
                name="all",
                description="All training data version v2",
                version=datasets.Version("2.0.0"),
            )
        ]
    )
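
    # The generated config names are the bare language names ("english", ...),
    # plus "collection-<lang>", "queries-<lang>", "runs-<lang>", and "all".
    # Note that _info and _split_generators below currently handle only the
    # bare language names and "all".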

    DEFAULT_CONFIG_NAME = "english"

    def _info(self):
        name = self.config.name
        assert name in LANGUAGES + ["all"], f"Unsupported language {name}. Must be one of {LANGUAGES} or 'all'."

        features = {
            "query_id": datasets.Value("string"),
            "query": datasets.Value("string"),
            "positive_passages": [
                {"docid": datasets.Value("string"), "text": datasets.Value("string")}
            ],
            "negative_passages": [
                {"docid": datasets.Value("string"), "text": datasets.Value("string")}
            ],
        }
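        # A yielded record follows this shape (placeholder values only):
        # {
        #     "query_id": "...",
        #     "query": "...",
        #     "positive_passages": [{"docid": "...", "text": "..."}],
        #     "negative_passages": [{"docid": "...", "text": "..."}],
        # }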

        return datasets.DatasetInfo(
            description=f"{_DESCRIPTION}\n{self.config.description}",
            features=datasets.Features(features),
            supervised_keys=None,
            homepage=_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        languages = [self.config.name] if self.config.name in LANGUAGES else LANGUAGES
        urls = {
            "collection": {lang: _BASE_URLS["collections"] + lang + "_collection.tsv" for lang in languages},
            "queries": {lang: _BASE_URLS["queries-train"] + lang + "_queries.train.tsv" for lang in languages},
            "train": _BASE_URLS["train"],
        }
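        # download_and_extract returns the same nested structure as `urls`,
        # with each URL replaced by a local file path, so the generator can
        # index it as dl_path["collection"][lang] / dl_path["queries"][lang].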
        dl_path = dl_manager.download_and_extract(urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "files": dl_path["train"],
                    "args": {
                        "collection": dl_path["collection"],
                        "queries": dl_path["queries"],
                    },
                },
            )
        ]

    def _generate_examples(self, files, args=None):
        """Yields examples."""

        languages = [self.config.name] if self.config.name in LANGUAGES else LANGUAGES

        # Load the per-language collection (doc_id -> passage text) and
        # query set (query_id -> query text) for every requested language.
        lang2collection = {}
        lang2query = {}
        for lang in languages:
            collection_path, queries_path = args["collection"][lang], args["queries"][lang]

            collection = {}
            with open(collection_path, encoding="utf-8") as f:
                for line in f:
                    doc_id, doc = line.rstrip().split("\t")
                    collection[doc_id] = doc

            queries = {}
            with open(queries_path, encoding="utf-8") as f:
                for line in f:
                    query_id, query = line.rstrip().split("\t")
                    queries[query_id] = query

            lang2collection[lang] = collection
            lang2query[lang] = queries

        with open(files, encoding="utf-8") as f:
            # todo: group the queries

            for idx, line in enumerate(f):
                query_id, pos_id, neg_id = line.rstrip().split("\t")
                for lang in languages:
                    features = {
                        "query_id": query_id,
                        "query": lang2query[lang][query_id],
                        "positive_passages": [{
                            "docid": pos_id,
                            "text": lang2collection[lang][pos_id],
                        }],
                        "negative_passages": [{
                            "docid": neg_id,
                            "text": lang2collection[lang][neg_id],
                        }],
                    }
                    # Key on the line index rather than query_id: a query can
                    # appear in several triples, and example keys must be unique.
                    yield f"{lang}-{idx}", features
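
A minimal usage sketch for the script above, assuming it is served from the unicamp-dl/mmarco repository referenced in _BASE_URLS (recent versions of datasets may also require trust_remote_code=True for script-based datasets):

    from datasets import load_dataset

    # "english" is one of the per-language configs defined in BUILDER_CONFIGS.
    dataset = load_dataset("unicamp-dl/mmarco", "english", split="train")

    # Each example follows the features schema from _info(): a query paired
    # with one positive and one negative passage.
    example = dataset[0]
    print(example["query_id"], example["query"])
    print(example["positive_passages"][0]["docid"])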