holylovenia committed on
Commit
2d14abb
1 Parent(s): e21fdf4

Upload ccmatrix.py with huggingface_hub
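
The commit message names huggingface_hub as the upload mechanism. For context, a hedged sketch of what such an upload typically looks like is below; the repo_id and paths are illustrative assumptions, not read from this commit.

from huggingface_hub import HfApi

# Illustrative only: the target repo_id is an assumption, not part of the commit metadata.
api = HfApi()
api.upload_file(
    path_or_fileobj="ccmatrix.py",
    path_in_repo="ccmatrix.py",
    repo_id="<namespace>/ccmatrix",  # hypothetical dataset repo
    repo_type="dataset",
    commit_message="Upload ccmatrix.py with huggingface_hub",
)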

Files changed (1)
  1. ccmatrix.py +282 -0
ccmatrix.py ADDED
@@ -0,0 +1,282 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ The CCMatrix dataset was collected from web crawls and released by Meta. It is constructed using margin-based bitext mining, which can be applied to monolingual corpora of billions of sentences to produce high-quality aligned translation data.
+ """
+ import os
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+
+ from seacrowd.utils import schemas
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import TASK_TO_SCHEMA, Licenses, Tasks
+
+ _CITATION = """\
+ @inproceedings{schwenk-etal-2021-ccmatrix,
+     title = "{CCM}atrix: Mining Billions of High-Quality Parallel Sentences on the Web",
+     author = "Schwenk, Holger  and
+       Wenzek, Guillaume  and
+       Edunov, Sergey  and
+       Grave, Edouard  and
+       Joulin, Armand  and
+       Fan, Angela",
+     editor = "Zong, Chengqing  and
+       Xia, Fei  and
+       Li, Wenjie  and
+       Navigli, Roberto",
+     booktitle = "Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers)",
+     month = aug,
+     year = "2021",
+     address = "Online",
+     publisher = "Association for Computational Linguistics",
+     url = "https://aclanthology.org/2021.acl-long.507",
+     doi = "10.18653/v1/2021.acl-long.507",
+     pages = "6490--6500",
+     abstract = "We show that margin-based bitext mining in a multilingual sentence space can be successfully scaled to operate on monolingual corpora of billions of sentences. We use 32 snapshots of a curated common crawl corpus (Wenzel et al, 2019) totaling 71 billion unique sentences. Using one unified approach for 90 languages, we were able to mine 10.8 billion parallel sentences, out of which only 2.9 billions are aligned with English. We illustrate the capability of our scalable mining system to create high quality training sets from one language to any other by training hundreds of different machine translation models and evaluating them on the many-to-many TED benchmark. Further, we evaluate on competitive translation benchmarks such as WMT and WAT. Using only mined bitext, we set a new state of the art for a single system on the WMT{'}19 test set for English-German/Russian/Chinese. In particular, our English/German and English/Russian systems outperform the best single ones by over 4 BLEU points and are on par with best WMT{'}19 systems, which train on the WMT training data and augment it with backtranslation. We also achieve excellent results for distant languages pairs like Russian/Japanese, outperforming the best submission at the 2020 WAT workshop. All of the mined bitext will be freely available.",
+ }
+ """
+
+ _DATASETNAME = "ccmatrix"
+
+ _DESCRIPTION = """\
+ The CCMatrix dataset was collected from web crawls and released by Meta. It is constructed using margin-based bitext mining, which can be applied to monolingual corpora of billions of sentences to produce high-quality aligned translation data.
+ """
+
+ _HOMEPAGE = "https://opus.nlpl.eu/CCMatrix/corpus/version/CCMatrix"
+
+ _LANGUAGES = ["jav", "eng", "vie", "ind", "tgl", "mya", "zlm"]
+
+ _LICENSE = Licenses.BSD.value
+
+ _LOCAL = False
+
+ _FILE = "CCMatrix.{}.{}"  # e.g. CCMatrix.en-nl.nl
+
+ _URLS = "https://object.pouta.csc.fi/OPUS-CCMatrix/v1/moses/{}.txt.zip"
+
+ _SUPPORTED_TASKS = [Tasks.MACHINE_TRANSLATION]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _SEACROWD_VERSION = "2024.06.20"
+
+
+ class CCMatrixDataset(datasets.GeneratorBasedBuilder):
+     """The CCMatrix dataset was collected from web crawls and released by Meta. It is constructed using margin-based bitext mining, which can be applied to monolingual corpora of billions of sentences to produce high-quality aligned translation data."""
+
+     SEACROWD_SCHEMA = TASK_TO_SCHEMA[Tasks.MACHINE_TRANSLATION].lower()
+
+     LANG_PAIRS = [
+         ("eng", "jav"), ("ind", "jav"),
+         ("jav", "tgl"), ("jav", "zlm"),
+         ("eng", "vie"), ("eng", "ind"),
+         ("eng", "tgl"), ("eng", "zlm"),
+         ("ind", "vie"), ("tgl", "vie"),
+         ("zlm", "vie"), ("ind", "tgl"),
+         ("ind", "zlm"), ("zlm", "tgl")
+     ]
+
+     ISO_MAPPER = {
+         "eng": "en",
+         "ind": "id",
+         "jav": "jv",
+         "vie": "vi",
+         "tgl": "tl",
+         "zlm": "ms",
+     }
+
+     BUILDER_CONFIGS = (
+         [
+             SEACrowdConfig(
+                 name=f"{_DATASETNAME}_{lang1}-{lang2}_source",
+                 version=datasets.Version(_SOURCE_VERSION),
+                 description=f"{_DATASETNAME} source schema for translation from {lang1} to {lang2}",
+                 schema="source",
+                 subset_id=f"{_DATASETNAME}_{lang1}-{lang2}",
+             )
+             for lang1, lang2 in LANG_PAIRS
+         ]
+         + [
+             SEACrowdConfig(
+                 name=f"{_DATASETNAME}_{lang1}-{lang2}_{lang1}_source",
+                 version=datasets.Version(_SEACROWD_VERSION),
+                 description=f"{_DATASETNAME} source schema {lang1} for translation from {lang1} to {lang2}",
+                 schema="source",
+                 subset_id=f"{_DATASETNAME}_{lang1}-{lang2}_{lang1}",
+             )
+             for lang1, lang2 in LANG_PAIRS
+         ]
+         + [
+             SEACrowdConfig(
+                 name=f"{_DATASETNAME}_{lang1}-{lang2}_{lang2}_source",
+                 version=datasets.Version(_SEACROWD_VERSION),
+                 description=f"{_DATASETNAME} source schema {lang2} for translation from {lang1} to {lang2}",
+                 schema="source",
+                 subset_id=f"{_DATASETNAME}_{lang1}-{lang2}_{lang2}",
+             )
+             for lang1, lang2 in LANG_PAIRS
+         ]
+         + [
+             SEACrowdConfig(
+                 name=f"{_DATASETNAME}_{lang1}-{lang2}_seacrowd_t2t",
+                 version=datasets.Version(_SEACROWD_VERSION),
+                 description=f"{_DATASETNAME} SEACrowd schema",
+                 schema="seacrowd_t2t",
+                 subset_id=f"{_DATASETNAME}_{lang1}-{lang2}",
+             )
+             for lang1, lang2 in LANG_PAIRS
+         ]
+         + [
+             SEACrowdConfig(
+                 name=f"{_DATASETNAME}_{lang1}-{lang2}_{lang1}_seacrowd_ssp",
+                 version=datasets.Version(_SEACROWD_VERSION),
+                 description=f"{_DATASETNAME} SEACrowd schema {lang1} for translation from {lang1} to {lang2} for Self-supervised Pretraining task",
+                 schema="seacrowd_ssp",
+                 subset_id=f"{_DATASETNAME}_{lang1}-{lang2}_{lang1}",
+             )
+             for lang1, lang2 in LANG_PAIRS
+         ]
+         + [
+             SEACrowdConfig(
+                 name=f"{_DATASETNAME}_{lang1}-{lang2}_{lang2}_seacrowd_ssp",
+                 version=datasets.Version(_SEACROWD_VERSION),
+                 description=f"{_DATASETNAME} SEACrowd schema {lang2} for translation from {lang1} to {lang2} for Self-supervised Pretraining task",
+                 schema="seacrowd_ssp",
+                 subset_id=f"{_DATASETNAME}_{lang1}-{lang2}_{lang2}",
+             )
+             for lang1, lang2 in LANG_PAIRS
+         ]
+     )
+
+     DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_eng-jav_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+         if self.config.schema == "source":
+             if len(self.config.subset_id.split("_")) == 2:  # MT task
+                 lang1, lang2 = self._map_lang_pair_iso(self.config.subset_id.split("_")[-1]).split("-")
+                 features = datasets.Features(
+                     {
+                         "id": datasets.Value("int32"),
+                         "score": datasets.Value("float32"),
+                         "translation": datasets.Translation(languages=(lang1, lang2)),
+                     }
+                 )
+             elif len(self.config.subset_id.split("_")) == 3:  # SSP task
+                 features = datasets.Features(
+                     {
+                         "id": datasets.Value("int32"),
+                         "text": datasets.Value("string"),
+                     }
+                 )
+
+         elif self.config.schema == "seacrowd_t2t":
+             features = schemas.text2text_features
+
+         elif self.config.schema == "seacrowd_ssp":
+             features = schemas.ssp_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _map_lang_pair_iso(self, lang_pair: str) -> str:
+         lang1, lang2 = [self.ISO_MAPPER[lang] for lang in lang_pair.split("-")]
+         return f"{lang1}-{lang2}"
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+
+         if len(self.config.subset_id.split("_")) == 2:
+             lang_pair = self._map_lang_pair_iso(self.config.subset_id.split("_")[-1])
+         elif len(self.config.subset_id.split("_")) == 3:
+             lang_pair = self._map_lang_pair_iso(self.config.subset_id.split("_")[-2])
+
+         url = _URLS.format(lang_pair)
+         data_dir = dl_manager.download_and_extract(url)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": data_dir,
+                 },
+             )
+         ]
+
+     def _generate_examples(self, filepath: Path) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+
+         if len(self.config.subset_id.split("_")) == 2:  # MT task
+
+             lang_pair = self._map_lang_pair_iso(self.config.subset_id.split("_")[-1])
+             lang1, lang2 = lang_pair.split("-")
+             lang1_name, lang2_name = self.config.subset_id.split("_")[-1].split("-")
+
+             l1_path = os.path.join(filepath, _FILE.format(lang_pair, lang1))
+             l2_path = os.path.join(filepath, _FILE.format(lang_pair, lang2))
+             scores_path = os.path.join(filepath, _FILE.format(lang_pair, "scores"))
+
+             if self.config.schema == "source":
+                 with open(l1_path, encoding="utf-8") as f1, open(l2_path, encoding="utf-8") as f2, open(scores_path, encoding="utf-8") as f3:
+                     for i, (x, y, score) in enumerate(zip(f1, f2, f3)):
+                         yield i, {
+                             "id": i,
+                             "score": score,
+                             "translation": {
+                                 lang1: x.strip(),
+                                 lang2: y.strip(),
+                             },
+                         }
+
+             elif self.config.schema == "seacrowd_t2t":
+                 with open(l1_path, encoding="utf-8") as f1, open(l2_path, encoding="utf-8") as f2:
+                     for i, (x, y) in enumerate(zip(f1, f2)):
+                         yield i, {
+                             "id": str(i),
+                             "text_1": x.strip(),
+                             "text_2": y.strip(),
+                             "text_1_name": lang1_name,
+                             "text_2_name": lang2_name,
+                         }
+
+         elif len(self.config.subset_id.split("_")) == 3:  # SSP task
+
+             lang_pair = self._map_lang_pair_iso(self.config.subset_id.split("_")[-2])
+             lang = self.ISO_MAPPER[self.config.subset_id.split("_")[-1]]
+
+             l_path = os.path.join(filepath, _FILE.format(lang_pair, lang))
+
+             if self.config.schema == "source":
+                 with open(l_path, encoding="utf-8") as f:
+                     for i, x in enumerate(f.readlines()):
+                         yield i, {
+                             "id": i,
+                             "text": x.strip(),
+                         }
+
+             elif self.config.schema == "seacrowd_ssp":
+                 with open(l_path, encoding="utf-8") as f:
+                     for i, x in enumerate(f.readlines()):
+                         yield i, {
+                             "id": str(i),
+                             "text": x.strip(),
+                         }
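
For reference, a minimal usage sketch of the uploaded script (not part of the committed file). It assumes the seacrowd package with its seacrowd.utils helpers is installed, that ccmatrix.py sits in the working directory, and that a datasets release supporting script loading via trust_remote_code is used; the config names follow the BUILDER_CONFIGS naming scheme above.

import datasets

# Hypothetical local usage; config names are derived from BUILDER_CONFIGS,
# e.g. "ccmatrix_eng-jav_source" for the eng-jav pair in the source (MT) schema.
mt = datasets.load_dataset(
    "ccmatrix.py",
    name="ccmatrix_eng-jav_source",
    split="train",
    trust_remote_code=True,
)
print(mt[0]["translation"])  # {"en": "...", "jv": "..."}; the mining score is in mt[0]["score"]

# Monolingual (self-supervised pretraining) view of the Javanese side of the same pair.
ssp = datasets.load_dataset(
    "ccmatrix.py",
    name="ccmatrix_eng-jav_jav_seacrowd_ssp",
    split="train",
    trust_remote_code=True,
)
print(ssp[0]["text"])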