Languages: Burmese
holylovenia committed 3d2a35d · verified · 1 parent: 78826ca

Upload mypos.py with huggingface_hub

Files changed (1): mypos.py (+129, -0)
mypos.py ADDED
@@ -0,0 +1,129 @@
from pathlib import Path
from typing import Dict, Iterator, List, Tuple

import datasets
from datasets.download.download_manager import DownloadManager

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_CITATION = """
@inproceedings{hlaing-2020-myanmar,
  author={Hlaing, Zar Zar and Thu, Ye Kyaw and Wai, Myat Myo Nwe and Supnithi, Thepchai and Netisopakul, Ponrudee},
  booktitle={2020 15th International Joint Symposium on Artificial Intelligence and Natural Language Processing (iSAI-NLP)},
  title={Myanmar POS Resource Extension Effects on Automatic Tagging Methods},
  year={2020},
  pages={1-6},
  doi={10.1109/iSAI-NLP51646.2020.9376835}}
@inproceedings{htike2017comparison,
  title={Comparison of six POS tagging methods on 10K sentences Myanmar language (Burmese) POS tagged corpus},
  author={Htike, Khin War War and Thu, Ye Kyaw and Zhang, Zuping and Pa, Win Pa and Sagisaka, Yoshinori and Iwahashi, Naoto},
  booktitle={Proceedings of CICLing},
  year={2017}
}
"""

_LOCAL = False
_LANGUAGES = ["mya"]
_DATASETNAME = "mypos"
_DESCRIPTION = """\
This version of the myPOS corpus extends the original myPOS corpus from
11,000 to 43,196 Burmese sentences by adding data from the ASEAN MT NECTEC
corpus and two developed parallel corpora (Myanmar-Chinese and
Myanmar-Korean). The original 11,000 sentences were collected from Wikipedia
and include various topics such as economics, history, news, politics and
philosophy. The format used in the corpus is word/POS-tag, and the pipe
delimiter "|" marks segmentation breakpoints within compound words.
"""

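# Illustration of the line format described above (hypothetical placeholder
# tokens, not actual corpus content): each whitespace-separated unit is
# word/POS-tag, and '|' joins the parts of a compound word, e.g.
#   wordA/n wordB/n|wordC/n wordD/v ./punc
# See _tokenize below for how spaces and pipes are handled.
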
_HOMEPAGE = "https://github.com/ye-kyaw-thu/myPOS"
_LICENSE = Licenses.CC_BY_NC_SA_4_0.value
_URL = "https://raw.githubusercontent.com/ye-kyaw-thu/myPOS/master/corpus-ver-3.0/corpus/mypos-ver.3.0.txt"

_SUPPORTED_TASKS = [Tasks.POS_TAGGING]
_SOURCE_VERSION = "3.0.0"
_SEACROWD_VERSION = "2024.06.20"


class MyPOSDataset(datasets.GeneratorBasedBuilder):
    """MyPOS dataset from https://github.com/ye-kyaw-thu/myPOS"""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    SEACROWD_SCHEMA_NAME = "seq_label"
    # Reference: https://github.com/ye-kyaw-thu/myPOS/tree/master#pos-tags
    LABEL_CLASSES = ["abb", "adj", "adv", "conj", "fw", "int", "n", "num", "part", "ppm", "pron", "punc", "sb", "tn", "v"]
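    # Per the myPOS README (see reference above), these abbreviations stand for:
    # abbreviation, adjective, adverb, conjunction, foreign word, interjection,
    # noun, number, particle, post-positional marker, pronoun, punctuation,
    # symbol, text number, and verb, respectively.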

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=_DATASETNAME,
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_{SEACROWD_SCHEMA_NAME}",
            version=SEACROWD_VERSION,
            description=f"{_DATASETNAME} SEACrowd schema",
            schema=f"seacrowd_{SEACROWD_SCHEMA_NAME}",
            subset_id=_DATASETNAME,
        ),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:
        # No schema specific to the source, so we just reuse the SEACrowd schema
        features = schemas.seq_label_features(self.LABEL_CLASSES)
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        data_file = Path(dl_manager.download_and_extract(_URL))
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_file})]

    def _generate_examples(self, filepath: Path) -> Iterator[Tuple[int, Dict]]:
        """Yield examples as (key, example) tuples."""

        with open(filepath, encoding="utf-8") as f:
            lines = f.readlines()

        for idx, line in enumerate(lines):
            line = line.rstrip("\n")
            tags = self._tokenize(line)

            split_token = [tag.split("/") for tag in tags if tag]
            tokens = [split[0] for split in split_token]
            labels = [split[1] for split in split_token]
            example = {"id": str(idx), "tokens": tokens, "labels": labels}

            yield idx, example

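    # Worked example of the parsing above (hypothetical placeholder line, not
    # actual corpus content): given the line "wordA/n wordB/n|wordC/n wordD/v",
    # _tokenize returns ["wordA/n", "wordB/n", "wordC/n", "wordD/v"], so the
    # example becomes tokens=["wordA", "wordB", "wordC", "wordD"] and
    # labels=["n", "n", "n", "v"]. Note that each pipe-separated part is
    # assumed to carry its own /tag; a part without one would raise an
    # IndexError in the labels list comprehension.
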
    def _tokenize(self, sentence: str) -> List[str]:
        """Tokenize Myanmar text.

        From the README: https://github.com/ye-kyaw-thu/myPOS/tree/master#word-segmentation
        Important things to point out:
        - Words composed of single or multiple syllables are usually not separated by white space.
        - There are no clear rules for using spaces in the Myanmar language.
        - The authors used six rules for word segmentation.
        """
        final_tokens = []

        # Segment on spaces (cf. "Spaces are used for easier reading and generally put between phrases")
        init_tokens = sentence.split(" ")
        # Then split on breakpoints ('|' pipe character) used inside compound words
        for token in init_tokens:
            final_tokens.extend(token.split("|"))

        return final_tokens
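
A minimal usage sketch, assuming the script above is saved locally as mypos.py and the seacrowd utilities it imports are installed (trust_remote_code is required by recent datasets releases to run a script-based dataset):

import datasets

# "mypos_source" is the default config; "mypos_seacrowd_seq_label" selects
# the SEACrowd seq_label schema instead.
dset = datasets.load_dataset("mypos.py", name="mypos_source", trust_remote_code=True)

# Everything lands in a single train split (see _split_generators above).
sample = dset["train"][0]
print(sample["id"], sample["tokens"][:5], sample["labels"][:5])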