File size: 5,227 Bytes
548923f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
"""OPUS liv4ever dataset."""

from pathlib import Path

import datasets


_DESCRIPTION = """\
This is the Livonian 4-lingual parallel corpus. Livonian is a Uralic / Finnic language with just about 20 fluent
speakers and no native speakers (as of 2021). The texts and translations in this corpus were collected from all the
digital text resources that could be found by the authors; scanned and printed materials are left for future work.
"""

_HOMEPAGE = "https://opus.nlpl.eu/liv4ever.php"

_LICENSE = "CC BY-SA"

_CITATION = r"""
@inproceedings{rikters-etal-2022-machine,
    title = "Machine Translation for {L}ivonian: Catering to 20 Speakers",
    author = "Rikters, Mat{\=\i}ss  and
      Tomingas, Marili  and
      Tuisk, Tuuli  and
      Ern{\v{s}}treits, Valts  and
      Fishel, Mark",
    booktitle = "Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)",
    month = may,
    year = "2022",
    address = "Dublin, Ireland",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.acl-short.55",
    doi = "10.18653/v1/2022.acl-short.55",
    pages = "508--514",
}
@inproceedings{tiedemann-2012-parallel,
    title = "Parallel Data, Tools and Interfaces in {OPUS}",
    author = {Tiedemann, J{\"o}rg},
    booktitle = "Proceedings of the Eighth International Conference on Language Resources and Evaluation ({LREC}'12)",
    month = may,
    year = "2012",
    address = "Istanbul, Turkey",
    publisher = "European Language Resources Association (ELRA)",
    url = "http://www.lrec-conf.org/proceedings/lrec2012/pdf/463_Paper.pdf",
    pages = "2214--2218",
}
"""

_URLS = {
    "parallel": "https://opus.nlpl.eu/download.php?f=liv4ever/v1/moses/{}-{}.txt.zip",
    "monolingual": "https://opus.nlpl.eu/download.php?f=liv4ever/v1/mono/{}.txt.gz",
}
_LANGUAGES = ["en", "et", "fr", "liv", "lv"]
_LANGUAGE_PAIRS = [("en", "liv"), ("et", "liv"), ("fr", "liv"), ("liv", "lv")]


class Liv4EverConfig(datasets.BuilderConfig):
    """BuilderConfig for liv4ever.

    Exactly one of ``language_pair`` (a parallel bitext config) or ``language``
    (a monolingual config) must be given; the config name is derived from it.
    """

    def __init__(self, language_pair=None, language=None, version=datasets.Version("1.0.0"), **kwargs):
        """
        Args:
            language_pair: two language codes, as a tuple/list or a "xx-yy" string.
                The pair is sorted so e.g. "liv-en" and "en-liv" name the same config.
            language: single language code for a monolingual config.
            version: dataset version.
            **kwargs: forwarded to ``datasets.BuilderConfig``.

        Raises:
            ValueError: if both or neither of ``language_pair`` / ``language`` are given.
        """
        if (language_pair and language) or (not language_pair and not language):
            raise ValueError("Pass either 'language_pair' or 'language'")
        if language_pair:
            if isinstance(language_pair, str):
                language_pair = language_pair.split("-")
            # Canonical order, matching _LANGUAGE_PAIRS and the OPUS file naming.
            language_pair = tuple(sorted(language_pair))
            name = "-".join(language_pair)
        else:
            name = language
        super().__init__(name=name, version=version, **kwargs)
        self.language_pair = language_pair
        self.language = language


class Liv4Ever(datasets.GeneratorBasedBuilder):
    """Builder for the OPUS liv4ever corpus: four parallel pairs plus five monolingual sets."""

    BUILDER_CONFIG_CLASS = Liv4EverConfig

    BUILDER_CONFIGS = [Liv4EverConfig(language_pair=pair) for pair in _LANGUAGE_PAIRS] + [
        Liv4EverConfig(language=lang) for lang in _LANGUAGES
    ]

    def _info(self):
        # Parallel configs expose a Translation feature; monolingual ones plain text.
        if self.config.language_pair:
            features = datasets.Features(
                {"translation": datasets.Translation(languages=self.config.language_pair)}
            )
        else:
            features = datasets.Features({"text": datasets.Value("string")})
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Pick the OPUS download URL that matches this config's flavor.
        if self.config.language_pair:
            url = _URLS["parallel"].format(*self.config.language_pair)
        else:
            url = _URLS["monolingual"].format(self.config.language)
        data_path = dl_manager.download_and_extract(url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"path": data_path},
            ),
        ]

    def _generate_examples(self, path):
        if self.config.language_pair:
            for idx, translation in enumerate(_parse_txt_pair(path, self.config.language_pair)):
                if idx == 0:
                    # The very first line only names the language; drop it.
                    continue
                yield idx, {"translation": translation}
        else:
            for idx, text in enumerate(_parse_txt(path)):
                if idx == 0:
                    # The very first line only names the language; drop it.
                    continue
                yield idx, {"text": text}


def _parse_txt(path):
    with open(path, encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if line:
                yield line


def _parse_txt_pair(path, language_pair):
    """Yield aligned ``{language: line}`` dicts from a pair of extracted moses files.

    For each language code, picks the first ``*.<code>`` file (alphabetically)
    under ``path`` and pairs the two files line by line via :func:`_parse_txt`.

    Args:
        path: directory containing the extracted ``xx-yy.txt.zip`` archive.
        language_pair: two language codes naming the file extensions to look up.

    Raises:
        FileNotFoundError: if no ``*.<code>`` file exists for one of the codes
            (previously this surfaced as an opaque ``IndexError``).
    """
    paths = []
    for language in language_pair:
        matches = sorted(Path(path).glob(f"*.{language}"))
        if not matches:
            raise FileNotFoundError(f"No '*.{language}' file found under {path!r}")
        paths.append(matches[0])
    # zip truncates at the shorter file; OPUS moses exports are line-aligned,
    # so both sides are expected to have the same number of lines.
    for line_pair in zip(_parse_txt(paths[0]), _parse_txt(paths[1])):
        yield dict(zip(language_pair, line_pair))