File size: 2,401 Bytes
2839b87
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
import os

import datasets

# Human-readable summary used as DatasetInfo.description in _info().
_DESCRIPTION = """\
General Corpora for the Maltese language.
"""

# Project homepage, surfaced via DatasetInfo.homepage.
_HOMEPAGE = "https://mlrs.research.um.edu.mt/"

# Root of the data files, relative to the dataset repository.
_URL = "data/"
# Per-split paths to the pre-shuffled text files; passed to
# dl_manager.download_and_extract() in _split_generators().
_SHUFFLED_URL = {
    "train": os.path.join(_URL, "shuffled/train.txt"),
    "validation": os.path.join(_URL, "shuffled/validation.txt"),
    "test": os.path.join(_URL, "shuffled/test.txt"),
}


class KorpusMalti(datasets.GeneratorBasedBuilder):
    """Korpus Malti: General Corpora for the Maltese Language.

    Exposes the pre-shuffled train/validation/test text files as examples
    with a single string feature, ``"text"`` (one example per non-empty line).
    """

    VERSION = datasets.Version("4.0.0")

    DEFAULT_CONFIG_NAME = "shuffled"

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=DEFAULT_CONFIG_NAME,
                               version=VERSION,
                               description="The shuffled data from all subsets.",
                               ),
    ]

    def _info(self):
        """Return the DatasetInfo (description, features, homepage) for this config."""
        if self.config.name == self.DEFAULT_CONFIG_NAME:
            features = {
                "text": datasets.Value("string"),
            }
        else:
            # Fail with a clear message instead of the UnboundLocalError the
            # original would raise when `features` was never assigned.
            raise ValueError(f"Unsupported configuration: {self.config.name}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
            homepage=_HOMEPAGE,
        )

    def _split_generators(self, dl_manager):
        """Download the split files and declare the train/validation/test splits.

        Raises:
            ValueError: if the builder config is not the "shuffled" config
                (previously this path raised UnboundLocalError on `data_split`).
        """
        if self.config.name != self.DEFAULT_CONFIG_NAME:
            raise ValueError(f"Unsupported configuration: {self.config.name}")

        data_files = dl_manager.download_and_extract(_SHUFFLED_URL)
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "filepath": data_files[file_key],
                },
            )
            for split_name, file_key in (
                (datasets.Split.TRAIN, "train"),
                (datasets.Split.VALIDATION, "validation"),
                (datasets.Split.TEST, "test"),
            )
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, {"text": line})`` for each non-empty line of *filepath*.

        Bug fix: lines read from a text file keep their trailing newline, so
        the original ``len(line) > 0`` check was always true (blank lines are
        a lone newline) and every example's text ended with a newline
        character. Strip the newline first, then skip lines that are empty
        after stripping.
        """
        if self.config.name == self.DEFAULT_CONFIG_NAME:
            with open(filepath, encoding="utf-8") as file:
                for key, line in enumerate(file):
                    line = line.rstrip("\n")
                    if line:
                        yield key, {
                            "text": line,
                        }