import datasets

_DESCRIPTION = """\
UTS_Text: a plain-text corpus from the undertheseanlp project, distributed in
small, base and large subsets, each with train/validation/test splits.
"""

_CITATION = """\
"""

_BASE_URL = "https://huggingface.co/datasets/undertheseanlp/UTS_Text/resolve/main/data/"
TRAIN_FILE = "train.txt"
VALIDATION_FILE = "validation.txt"
TEST_FILE = "test.txt"


class UTSTextConfig(datasets.BuilderConfig):
    """BuilderConfig for UTS_Text."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)


class UTSText(datasets.GeneratorBasedBuilder):
    """UTS Word Tokenize datasets"""
    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIGS = [
        UTSTextConfig(
            name="small", version=VERSION, description="UTS_Text Small"),
        UTSTextConfig(
            name="base", version=VERSION, description="UTS_Text Base"),
        UTSTextConfig(
            name="large", version=VERSION, description="UTS_Text Large")
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=None,
            citation=_CITATION
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        subset_folder = self.config.name
        # Join URL parts with "/" explicitly: os.path.join is meant for
        # filesystem paths and would emit backslashes on Windows.
        train_file = dl_manager.download(f"{_BASE_URL}{subset_folder}/{TRAIN_FILE}")
        validation_file = dl_manager.download(f"{_BASE_URL}{subset_folder}/{VALIDATION_FILE}")
        test_file = dl_manager.download(f"{_BASE_URL}{subset_folder}/{TEST_FILE}")

        splits = [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": train_file}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": validation_file}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": test_file}
            ),
        ]
        return splits

    def _generate_examples(self, filepath):
        """Yields one example per non-empty line of the split file."""
        guid = 0
        with open(filepath, encoding="utf-8") as f:
            for line in f:
                text = line.strip()
                if text:
                    yield guid, {"text": text}
                    guid += 1
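
# A minimal usage sketch, assuming this script is consumed through the
# standard `datasets.load_dataset` API; the repo id mirrors _BASE_URL and the
# config names come from BUILDER_CONFIGS above.
if __name__ == "__main__":
    from datasets import load_dataset

    # Pick any of the three configs: "small", "base" or "large".
    dataset = load_dataset("undertheseanlp/UTS_Text", name="small")
    print(dataset)                      # DatasetDict with train/validation/test
    print(dataset["train"][0]["text"])  # first raw line of the train split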