Datasets:

Modalities:
Text
Formats:
parquet
Languages:
Thai
Libraries:
Datasets
pandas
License:
File size: 5,619 Bytes
f5baa18
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
from __future__ import absolute_import, division, print_function

import os
from functools import reduce
from pathlib import Path

import datasets


_CITATION = """\
@inproceedings{kosawat2009best,
  title={BEST 2009: Thai word segmentation software contest},
  author={Kosawat, Krit and Boriboon, Monthika and Chootrakool, Patcharika and Chotimongkol, Ananlada and Klaithin, Supon and Kongyoung, Sarawoot and Kriengket, Kanyanut and Phaholphinyo, Sitthaa and Purodakananda, Sumonmas and Thanakulwarapas, Tipraporn and others},
  booktitle={2009 Eighth International Symposium on Natural Language Processing},
  pages={83--88},
  year={2009},
  organization={IEEE}
}
@inproceedings{boriboon2009best,
  title={Best corpus development and analysis},
  author={Boriboon, Monthika and Kriengket, Kanyanut and Chootrakool, Patcharika and Phaholphinyo, Sitthaa and Purodakananda, Sumonmas and Thanakulwarapas, Tipraporn and Kosawat, Krit},
  booktitle={2009 International Conference on Asian Language Processing},
  pages={322--327},
  year={2009},
  organization={IEEE}
}
"""

_LICENSE = "CC-BY-NC-SA 3.0"

_DESCRIPTION = """\
`best2009` is a Thai word-tokenization dataset from encyclopedia, novels, news and articles by
[NECTEC](https://www.nectec.or.th/) (148,995/2,252 lines of train/test). It was created for
[BEST 2010: Word Tokenization Competition](https://thailang.nectec.or.th/archive/indexa290.html?q=node/10).
The test set answers are not provided publicly.
"""


class Best2009Config(datasets.BuilderConfig):
    """Builder configuration for the `best2009` dataset.

    This config adds no options of its own; every keyword argument is
    forwarded unchanged to ``datasets.BuilderConfig``.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)


class Best2009(datasets.GeneratorBasedBuilder):
    """Builder for `best2009`, a Thai word-tokenization dataset.

    Each generated example is one line of a corpus file, expanded into
    parallel per-character sequences: the raw character, a coarse
    character-type class, and a binary ``is_beginning``(-of-word) tag derived
    from the pipe-separated gold segmentation. Test-set lines carry dummy
    all-zero tags because the gold answers were not released publicly.
    """

    _DOWNLOAD_URL = "https://archive.org/download/best_dataset/data.zip"
    _TRAIN_FOLDER = "train"
    _TEST_FOLDER = "test"

    # Corpus markup that carries no segmentation signal; stripped before tagging.
    _USELESS_TAGS = {"<NE>": "", "</NE>": "", "<AB>": "", "</AB>": ""}
    # character type mapping from https://github.com/rkcosmos/deepcut/blob/master/deepcut/utils.py
    _CHAR_TYPES_DICT = {
        "กขฃคฆงจชซญฎฏฐฑฒณดตถทธนบปพฟภมยรลวศษสฬอ": "c",
        "ฅฉผฟฌหฮ": "n",
        "ะาำิีืึุู": "v",  # า ะ ำ ิ ี ึ ื ั ู ุ
        "เแโใไ": "w",
        "่้๊๋": "t",  # วรรณยุกต์ ่ ้ ๊ ๋
        "์ๆฯ.": "s",  # ์  ๆ ฯ .
        "0123456789๑๒๓๔๕๖๗๘๙": "d",
        '"': "q",
        "‘": "q",
        "’": "q",
        "'": "q",
        " ": "p",
        "abcdefghijklmnopqrstuvwxyz": "s_e",
        "ABCDEFGHIJKLMNOPQRSTUVWXYZ": "b_e",
    }
    # Invert the group mapping above into a per-character lookup table. A
    # character listed in more than one group keeps the LAST group encountered
    # (insertion order of the dict), matching the upstream deepcut behavior.
    _CHAR_TYPE_FLATTEN = {}
    for ks, v in _CHAR_TYPES_DICT.items():
        for k in ks:
            _CHAR_TYPE_FLATTEN[k] = v
    # Full label set; "o" (other) is the fallback for unmapped characters.
    _CHAR_TYPES = ["b_e", "c", "d", "n", "o", "p", "q", "s", "s_e", "t", "v", "w"]

    BUILDER_CONFIGS = [
        Best2009Config(
            name="best2009",
            version=datasets.Version("1.0.0"),
            description=_DESCRIPTION,
        ),
    ]

    def _info(self):
        """Return dataset metadata: per-character feature schema, citation, license."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "fname": datasets.Value("string"),
                    "char": datasets.Sequence(datasets.Value("string")),
                    "char_type": datasets.Sequence(datasets.features.ClassLabel(names=self._CHAR_TYPES)),
                    "is_beginning": datasets.Sequence(datasets.features.ClassLabel(names=["neg", "pos"])),
                }
            ),
            supervised_keys=None,
            homepage="https://aiforthai.in.th/",
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive and declare the train and test splits."""
        arch_path = dl_manager.download_and_extract(self._DOWNLOAD_URL)
        data_dir = os.path.join(arch_path, "data")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": os.path.join(data_dir, self._TRAIN_FOLDER), "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # Bug fix: this previously passed split="train", which made
                # _generate_examples derive bogus word-boundary labels from the
                # unsegmented test files instead of the intended dummy zeros.
                gen_kwargs={"filepath": os.path.join(data_dir, self._TEST_FOLDER), "split": "test"},
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yield ``(key, example)`` pairs, one example per corpus line.

        Args:
          filepath: directory searched recursively for ``*.txt`` corpus files.
          split: ``"train"`` or ``"test"``; for anything other than
            ``"train"`` the ``is_beginning`` labels are zeroed out, since the
            public test files have no gold segmentation.
        """
        for fname in sorted(Path(filepath).rglob("*.txt")):
            with open(fname, encoding="utf-8") as f:
                for line_no, line in enumerate(f):
                    chars = []
                    char_types = []
                    is_beginnings = []
                    # Strip markup tags before reading off characters.
                    line = reduce(lambda a, kv: a.replace(*kv), self._USELESS_TAGS.items(), line)
                    # Gold word boundaries are pipe separated.
                    for token in line.split("|"):
                        for i, ch in enumerate(token):
                            chars.append(ch)
                            char_types.append(self._CHAR_TYPE_FLATTEN.get(ch, "o"))
                            is_beginnings.append(1 if i == 0 else 0)
                    # Bug fix: the bare line counter resets for every file, so
                    # keys collided across files (`datasets` requires globally
                    # unique keys per split); prefix with the file name.
                    yield f"{fname.name}_{line_no}", {
                        "fname": fname.name,
                        "char": chars,
                        "char_type": char_types,
                        "is_beginning": is_beginnings if split == "train" else [0] * len(chars),
                    }