albertvillanova HF staff commited on
Commit
548923f
·
verified ·
1 Parent(s): 9eac5d0

Add loading script

Browse files
Files changed (1) hide show
  1. liv4ever.py +145 -0
liv4ever.py ADDED
@@ -0,0 +1,145 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """OPUS liv4ever dataset."""
2
+
3
+ from pathlib import Path
4
+
5
+ import datasets
6
+
7
+
8
+ _DESCRIPTION = """\
9
+ This is the Livonian 4-lingual parallel corpus. Livonian is a Uralic / Finnic language with just about 20 fluent
10
+ speakers and no native speakers (as of 2021). The texts and translations in this corpus were collected from all the
11
+ digital text resources that could be found by the authors; scanned and printed materials are left for future work.
12
+ """
13
+
14
+ _HOMEPAGE = "https://opus.nlpl.eu/liv4ever.php"
15
+
16
+ _LICENSE = "CC BY-SA"
17
+
18
+ _CITATION = r"""
19
+ @inproceedings{rikters-etal-2022-machine,
20
+ title = "Machine Translation for {L}ivonian: Catering to 20 Speakers",
21
+ author = "Rikters, Mat{\=\i}ss and
22
+ Tomingas, Marili and
23
+ Tuisk, Tuuli and
24
+ Ern{\v{s}}treits, Valts and
25
+ Fishel, Mark",
26
+ booktitle = "Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)",
27
+ month = may,
28
+ year = "2022",
29
+ address = "Dublin, Ireland",
30
+ publisher = "Association for Computational Linguistics",
31
+ url = "https://aclanthology.org/2022.acl-short.55",
32
+ doi = "10.18653/v1/2022.acl-short.55",
33
+ pages = "508--514",
34
+ }
35
+ @inproceedings{tiedemann-2012-parallel,
36
+ title = "Parallel Data, Tools and Interfaces in {OPUS}",
37
+ author = {Tiedemann, J{\"o}rg},
38
+ booktitle = "Proceedings of the Eighth International Conference on Language Resources and Evaluation ({LREC}'12)",
39
+ month = may,
40
+ year = "2012",
41
+ address = "Istanbul, Turkey",
42
+ publisher = "European Language Resources Association (ELRA)",
43
+ url = "http://www.lrec-conf.org/proceedings/lrec2012/pdf/463_Paper.pdf",
44
+ pages = "2214--2218",
45
+ }
46
+ """
47
+
48
# Download URL templates on the OPUS server. "parallel" is formatted with the
# two codes of a sorted language pair; "monolingual" with a single language code.
_URLS = {
    "parallel": "https://opus.nlpl.eu/download.php?f=liv4ever/v1/moses/{}-{}.txt.zip",
    "monolingual": "https://opus.nlpl.eu/download.php?f=liv4ever/v1/mono/{}.txt.gz",
}
# Language codes with monolingual data in this corpus.
_LANGUAGES = ["en", "et", "fr", "liv", "lv"]
# Alphabetically ordered pairs with parallel data; Livonian appears in every pair.
_LANGUAGE_PAIRS = [("en", "liv"), ("et", "liv"), ("fr", "liv"), ("liv", "lv")]
54
+
55
+
56
class Liv4EverConfig(datasets.BuilderConfig):
    """BuilderConfig for liv4ever.

    Exactly one of ``language_pair`` (parallel config) or ``language``
    (monolingual config) must be given; the config name is derived from it.
    """

    def __init__(self, language_pair=None, language=None, version=datasets.Version("1.0.0"), **kwargs):
        """
        Args:
            language_pair: two language codes, as an iterable or an "xx-yy" string;
                normalized to a sorted tuple (the OPUS archives are named with
                the pair in sorted order).
            language: a single language code for a monolingual config.
            version: dataset version.
            **kwargs: forwarded to ``datasets.BuilderConfig``.

        Raises:
            ValueError: if both or neither of ``language_pair`` and ``language``
                are given.
        """
        if (language_pair and language) or (not language_pair and not language):
            raise ValueError("Pass either 'language_pair' or 'language'")
        if language_pair:
            if isinstance(language_pair, str):
                language_pair = language_pair.split("-")
            language_pair = tuple(sorted(language_pair))
            # Fix: the original used `'-'.join(language_pair) if language_pair
            # else language` here, but inside this branch `language_pair` is
            # always truthy, so the `else` arm was dead code.
            name = "-".join(language_pair)
        else:
            name = str(language)
        super().__init__(name=name, version=version, **kwargs)
        self.language_pair = language_pair
        self.language = language
70
+
71
+
72
class Liv4Ever(datasets.GeneratorBasedBuilder):
    """Builder for the OPUS liv4ever corpus (parallel and monolingual configs)."""

    BUILDER_CONFIG_CLASS = Liv4EverConfig

    BUILDER_CONFIGS = [Liv4EverConfig(language_pair=pair) for pair in _LANGUAGE_PAIRS] + [
        Liv4EverConfig(language=lang) for lang in _LANGUAGES
    ]

    def _info(self):
        """Return dataset metadata; the feature schema depends on the config kind."""
        if self.config.language_pair:
            schema = {"translation": datasets.Translation(languages=self.config.language_pair)}
        else:
            schema = {"text": datasets.Value("string")}
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(schema),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download and extract the single archive; all data goes into TRAIN."""
        if self.config.language_pair:
            url = _URLS["parallel"].format(*self.config.language_pair)
        else:
            url = _URLS["monolingual"].format(self.config.language)
        data_path = dl_manager.download_and_extract(url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"path": data_path},
            ),
        ]

    def _generate_examples(self, path):
        """Yield (key, example) pairs, skipping the header line."""
        if self.config.language_pair:
            for idx, pair in enumerate(_parse_txt_pair(path, self.config.language_pair)):
                if idx == 0:
                    # The first line holds just the language name, not data.
                    continue
                yield idx, {"translation": pair}
        else:
            for idx, text in enumerate(_parse_txt(path)):
                if idx == 0:
                    # The first line holds just the language name, not data.
                    continue
                yield idx, {"text": text}
132
+
133
+
134
+ def _parse_txt(path):
135
+ with open(path, encoding="utf-8") as f:
136
+ for line in f:
137
+ line = line.strip()
138
+ if line:
139
+ yield line
140
+
141
+
142
def _parse_txt_pair(path, language_pair):
    """Pair up corresponding lines of the two extracted Moses text files.

    For each aligned line pair, yield a ``{language: line}`` dict keyed by the
    two codes in ``language_pair``; iteration stops at the shorter file.
    """
    source_file, target_file = (
        sorted(Path(path).glob(f"*.{lang}"))[0] for lang in language_pair
    )
    lang_a, lang_b = language_pair
    for line_a, line_b in zip(_parse_txt(source_file), _parse_txt(target_file)):
        yield {lang_a: line_a, lang_b: line_b}