Languages: Indonesian
License: cc-by-nc-sa-4.0
holylovenia committed (verified)
Commit 4e0feca · 1 Parent(s): 6b2c6e0

Upload icon.py with huggingface_hub
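For context, pushing a loader script like this is typically a single call with the huggingface_hub client. A minimal sketch; the repo_id below is a placeholder assumption, not taken from this commit:

    from huggingface_hub import HfApi

    api = HfApi()  # picks up the token from `huggingface-cli login`
    api.upload_file(
        path_or_fileobj="icon.py",   # local file to push
        path_in_repo="icon.py",      # destination path inside the repo
        repo_id="SEACrowd/icon",     # placeholder repo id (assumption)
        repo_type="dataset",
        commit_message="Upload icon.py with huggingface_hub",
    )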

Files changed (1)
  1. icon.py +216 -0
icon.py ADDED
@@ -0,0 +1,216 @@
import dataclasses
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Tuple

import datasets
import nltk
from nltk import Tree
from nltk.corpus.reader.bracket_parse import BracketParseCorpusReader

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import (DEFAULT_SEACROWD_VIEW_NAME,
                                      DEFAULT_SOURCE_VIEW_NAME, Licenses,
                                      Tasks)

_DATASETNAME = "icon"
_SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME
_UNIFIED_VIEW_NAME = DEFAULT_SEACROWD_VIEW_NAME
_CITATION = """\
@inproceedings{lim2023icon,
    title={ICON: Building a Large-Scale Benchmark Constituency Treebank for the Indonesian Language},
    author={Lim, Ee Suan and Leong, Wei Qi and Nguyen, Ngan Thanh and Adhista, Dea and Kng, Wei Ming and Tjh, William Chandra and Purwarianti, Ayu},
    booktitle={Proceedings of the 21st International Workshop on Treebanks and Linguistic Theories (TLT, GURT/SyntaxFest 2023)},
    pages={37--53},
    year={2023}
}
"""

_DESCRIPTION = """\
ICON (Indonesian CONstituency treebank) is a large-scale high-quality constituency treebank (10000 sentences)
for the Indonesian language, sourced from Wikipedia and news data from Tempo, spanning the period from 1971 to 2016.
The annotation guidelines were formulated with the Penn Treebank POS tagging and bracketing guidelines as a reference,
with additional adaptations to account for the characteristics of the Indonesian language.
"""

_HOMEPAGE = "https://github.com/aisingapore/seacorenlp-data/tree/main/id/constituency"

_LICENSE = Licenses.CC_BY_NC_SA_4_0.value
_LANGUAGES = ["ind"]
_LOCAL = False
_URLS = {
    "train": "https://raw.githubusercontent.com/aisingapore/seacorenlp-data/main/id/constituency/train.txt",
    "validation": "https://raw.githubusercontent.com/aisingapore/seacorenlp-data/main/id/constituency/dev.txt",
    "test": "https://raw.githubusercontent.com/aisingapore/seacorenlp-data/main/id/constituency/test.txt",
}

_SUPPORTED_TASKS = [Tasks.CONSTITUENCY_PARSING]
_SOURCE_VERSION = "1.0.0"
_SEACROWD_VERSION = "2024.06.20"


class ICONDataset(datasets.GeneratorBasedBuilder):

    BUILDER_CONFIGS = [
        SEACrowdConfig(name=f"{_DATASETNAME}_source", version=datasets.Version(_SOURCE_VERSION), description=_DESCRIPTION, schema="source", subset_id=f"{_DATASETNAME}"),
        SEACrowdConfig(name=f"{_DATASETNAME}_seacrowd_tree", version=datasets.Version(_SEACROWD_VERSION), description=_DESCRIPTION, schema="seacrowd_tree", subset_id=f"{_DATASETNAME}"),
    ]

    DEFAULT_CONFIG_NAME = "icon_source"

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "index": datasets.Value("string"),  # index
                    "tree": datasets.Value("string"),  # nltk.tree
                    "sentence": datasets.Value("string"),  # bracketed sentence tree
                    "words": datasets.Sequence(datasets.Value("string")),  # words
                    "POS": datasets.Sequence(datasets.Value("string")),  # pos-tags
                }
            )
        elif self.config.schema == "seacrowd_tree":
            features = schemas.tree_features

        else:
            raise ValueError(f"Invalid config: {self.config.name}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:

        train_txt = Path(dl_manager.download_and_extract(_URLS["train"]))
        dev_txt = Path(dl_manager.download_and_extract(_URLS["validation"]))
        test_txt = Path(dl_manager.download_and_extract(_URLS["test"]))

        data_dir = {
            "train": train_txt,
            "validation": dev_txt,
            "test": test_txt,
        }

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_dir["train"],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": data_dir["test"],
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": data_dir["validation"],
                    "split": "dev",
                },
            ),
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples."""
        trees = nltk_load_trees(filepath)
        if self.config.schema == "source":
            for idx, tree in enumerate(trees):
                ex = {"index": str(idx), "tree": tree.tree, "words": tree.words, "sentence": tree.bra_sent, "POS": [itm[1] for itm in tree.pos()]}
                yield idx, ex
        if self.config.schema == "seacrowd_tree":
            for idx, tree in enumerate(trees):
                ex = get_node_char_indices_with_ids(tree.tree, str(idx))
                yield idx, ex


class BaseInputExample(ABC):
    """Parser input for a single sentence (abstract interface)."""

    words: List[str]
    space_after: List[bool]
    tree: Optional[nltk.Tree]

    @abstractmethod
    def leaves(self) -> Optional[List[str]]:
        """Returns leaves to use in the parse tree."""
        pass

    @abstractmethod
    def pos(self) -> Optional[List[Tuple[str, str]]]:
        """Returns a list of (leaf, part-of-speech tag) tuples."""
        pass


@dataclasses.dataclass
class ParsingExample(BaseInputExample):
    """A single parse tree and sentence."""

    words: List[str]
    bra_sent: str
    tree: Optional[nltk.Tree] = None
    _pos: Optional[List[Tuple[str, str]]] = None

    def leaves(self) -> Optional[List[str]]:
        return self.tree.leaves() if self.tree else None

    def pos(self) -> Optional[List[Tuple[str, str]]]:
        return self.tree.pos() if self.tree else self._pos

    def without_gold_annotations(self) -> "ParsingExample":
        return dataclasses.replace(self, tree=None, _pos=self.pos())


def nltk_load_trees(const_path: str) -> List[ParsingExample]:
    reader = BracketParseCorpusReader("", [const_path])
    trees = reader.parsed_sents()
    with open(const_path, "r") as filein:
        bracketed_sentences = [itm.strip() for itm in filein.readlines()]
    sents = [tree.leaves() for tree in trees]
    assert len(trees) == len(sents) == len(bracketed_sentences), f"Number Mismatched: {len(trees)} vs {len(bracketed_sentences)}"
    treebank = [ParsingExample(tree=tree, words=words, bra_sent=bra_sent) for tree, bra_sent, words in zip(trees, bracketed_sentences, sents)]
    for example in treebank:
        assert len(example.words) == len(example.leaves()), "Token count mismatch."
    return treebank


def get_node_char_indices_with_ids(tree, sent_id):
    def traverse_tree(subtree, start_index):
        nonlocal node_id
        current_id = node_id
        node_id += 1
        node_text = " ".join(subtree.leaves())
        end_index = start_index + len(node_text)

        # Record the current node
        node_data = {
            "id": f"{sent_id}_{current_id}",
            "type": subtree.label(),
            "text": node_text,
            "offsets": [start_index, end_index],
            "subnodes": [],
        }
        node_indices.append(node_data)

        for child in subtree:
            if isinstance(child, Tree):
                child_id = traverse_tree(child, start_index)
                node_data["subnodes"].append(child_id)
                start_index += len(" ".join(child.leaves())) + 1
        return f"{sent_id}_{current_id}"

    node_indices = []
    node_id = 0
    traverse_tree(tree, 0)
    sentence = " ".join(tree.leaves())
    passage = {"id": "p" + sent_id, "type": None, "text": tree.leaves(), "offsets": [0, len(sentence)]}
    return {"id": "s" + sent_id, "passage": passage, "nodes": node_indices}
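For a quick local check, the loader can be exercised with the datasets library. A minimal sketch, assuming icon.py sits in the working directory, the seacrowd utilities it imports are installed, and the installed datasets version still supports script-based loading via trust_remote_code; config names are taken from BUILDER_CONFIGS above:

    import datasets

    # Source schema: bracketed parse, token list, and POS tags per sentence.
    source = datasets.load_dataset("icon.py", name="icon_source", split="train", trust_remote_code=True)
    print(source[0]["sentence"])   # bracketed constituency tree as a string
    print(source[0]["POS"][:10])   # first ten POS tags

    # SEACrowd tree schema: nodes with labels, character offsets, and subnode ids.
    tree = datasets.load_dataset("icon.py", name="icon_seacrowd_tree", split="validation", trust_remote_code=True)
    print(tree[0]["nodes"][0])     # root node of the first validation sentence

The two configs mirror BUILDER_CONFIGS: icon_source exposes the raw bracketed trees, words, and POS tags, while icon_seacrowd_tree converts each parse into SEACrowd's node/offset representation via get_node_char_indices_with_ids.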