from pathlib import Path
from typing import Dict, Iterator, List, Tuple

import datasets
from datasets.download.download_manager import DownloadManager

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_CITATION = r"""
@inproceedings{chaudhary-etal-2019-low,
    title = "Low-Resource Corpus Filtering Using Multilingual Sentence Embeddings",
    author = "Chaudhary, Vishrav  and
      Tang, Yuqing  and
      Guzm{\'a}n, Francisco  and
      Schwenk, Holger  and
      Koehn, Philipp",
    editor = "Bojar, Ond{\v{r}}ej  and
      Chatterjee, Rajen  and
      Federmann, Christian  and
      Fishel, Mark  and
      Graham, Yvette  and
      Haddow, Barry  and
      Huck, Matthias  and
      Yepes, Antonio Jimeno  and
      Koehn, Philipp  and
      Martins, Andr{\'e}  and
      Monz, Christof  and
      Negri, Matteo  and
      N{\'e}v{\'e}ol, Aur{\'e}lie  and
      Neves, Mariana  and
      Post, Matt  and
      Turchi, Marco  and
      Verspoor, Karin",
    booktitle = "Proceedings of the Fourth Conference on Machine Translation (Volume 3: Shared Task Papers, Day 2)",
    month = aug,
    year = "2019",
    address = "Florence, Italy",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W19-5435",
    doi = "10.18653/v1/W19-5435",
    pages = "261--266",
}
"""

_LOCAL = False
_LANGUAGES = ["ind", "jav", "sun", "tha", "vie", "zlm", "lao", "khm", "mya", "ceb"]
_DATASETNAME = "cc_aligned_sent"
_DESCRIPTION = """\
This dataset contains sentence pairs extracted from CC-Aligned document pairs
using LASER embedding similarity scores (minimum similarity 1.04, sorted in
decreasing order of similarity). Languages not covered by LASER are not
included.
"""

_HOMEPAGE = "https://www2.statmt.org/cc-aligned/"
_LICENSE = Licenses.UNKNOWN.value
_URL = "https://data.statmt.org/cc-aligned/sentence-aligned/"

_SUPPORTED_TASKS = [Tasks.MACHINE_TRANSLATION]
_SOURCE_VERSION = "1.0.0"
_SEACROWD_VERSION = "2024.06.20"

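# CC-Aligned locale codes for the languages above (listed in the same order as
# _LANGUAGES); each subset is paired with English (en_XX) in the source TSVs.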
_SUBSETS = ["id_ID", "jv_ID", "su_ID", "th_TH", "vi_VN", "ms_MY", "lo_LA", "km_KH", "my_MM", "cx_PH"]


class CCAlignedSentencesDataset(datasets.GeneratorBasedBuilder):
    """CC Aligned Sentences dataset by Chaudhary et al., (2019)"""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    SEACROWD_SCHEMA_NAME = "t2t"

    # Add configurations for loading a dataset per language.
    dataset_names = sorted([f"{_DATASETNAME}_{subset}" for subset in _SUBSETS])
    BUILDER_CONFIGS = []
    for name in dataset_names:
        source_config = SEACrowdConfig(
            name=f"{name}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=name,
        )
        BUILDER_CONFIGS.append(source_config)
        seacrowd_config = SEACrowdConfig(
            name=f"{name}_seacrowd_{SEACROWD_SCHEMA_NAME}",
            version=SEACROWD_VERSION,
            description=f"{_DATASETNAME} SEACrowd schema",
            schema=f"seacrowd_{SEACROWD_SCHEMA_NAME}",
            subset_id=name,
        )
        BUILDER_CONFIGS.append(seacrowd_config)

    # Use the alphabetically first subset as the default configuration.
    first_subset = sorted(_SUBSETS)[0]
    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_{first_subset}_source"

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "Source_Sentence": datasets.Value("string"),
                    "Target_Sentence": datasets.Value("string"),
                    "LASER_similarity": datasets.Value("float64"),
                }
            )

        if self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
            features = schemas.text_to_text.features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: DownloadManager) -> List[datasets.SplitGenerator]:
        """Return SplitGenerators."""
        # Define some functions for parsing config and URL names
        def _split_at_n(text: str, n: int) -> Tuple[str, str]:
            """Split `text` at the n-th underscore, returning the parts before and after it."""
            return ("_".join(text.split("_")[:n]), "_".join(text.split("_")[n:]))
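            # For example, the config name "cc_aligned_sent_id_ID_source" is first
            # cut at n=5 to drop the schema suffix ("cc_aligned_sent_id_ID"), then
            # at n=3 to recover the subset code: ("cc_aligned_sent", "id_ID").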

        # Get URL. For cx_PH, the source and target languages are reversed
        _, subset = _split_at_n(_split_at_n(self.config.name, 5)[0], 3)
        (source_lang, target_lang) = (subset, "en_XX") if subset == "cx_PH" else ("en_XX", subset)
        url = _URL + f"{source_lang}-{target_lang}.tsv.xz"
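        # e.g. https://data.statmt.org/cc-aligned/sentence-aligned/en_XX-id_ID.tsv.xz
        # for the id_ID subset (URL pattern assumed from _URL and the subset codes).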
        filepath = dl_manager.download_and_extract(url)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": filepath,
                    "source_lang": source_lang,
                    "target_lang": target_lang,
                },
            )
        ]

    def _generate_examples(self, filepath: Path, source_lang: str, target_lang: str) -> Iterator[Tuple[int, Dict]]:
        """Yield examples as (key, example) tuples"""
        with open(filepath, encoding="utf-8") as file:
            for idx, row in enumerate(file):
                text_1, text_2, score = row.strip().split("\t")
                if self.config.schema == "source":
                    example = {
                        "id": idx,
                        "Source_Sentence": text_1,
                        "Target_Sentence": text_2,
                        "LASER_similarity": float(score),
                    }
                if self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
                    example = {
                        "id": idx,
                        "text_1": text_1,
                        "text_2": text_2,
                        "text_1_name": source_lang,
                        "text_2_name": target_lang,
                    }
                yield idx, example
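

# Illustrative local smoke test (a sketch, not part of the SEACrowd loader API);
# assumes a `datasets` version that still supports loading local dataset scripts
# (newer releases may require `trust_remote_code=True` or drop script support).
if __name__ == "__main__":
    dataset = datasets.load_dataset(__file__, name="cc_aligned_sent_id_ID_source")
    print(dataset["train"][0])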