Datasets:

Languages:
Indonesian
ArXiv:
holylovenia committed on
Commit
9f2a498
·
1 Parent(s): b5379a5

Upload term_a.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. term_a.py +135 -0
term_a.py ADDED
@@ -0,0 +1,135 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pathlib import Path
2
+ from typing import List
3
+
4
+ import datasets
5
+
6
+ from nusacrowd.utils import schemas
7
+ from nusacrowd.utils.common_parser import load_conll_data
8
+ from nusacrowd.utils.configs import NusantaraConfig
9
+ from nusacrowd.utils.constants import (DEFAULT_NUSANTARA_VIEW_NAME,
10
+ DEFAULT_SOURCE_VIEW_NAME, Tasks)
11
+
12
+ _DATASETNAME = "term_a"
13
+ _SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME
14
+ _UNIFIED_VIEW_NAME = DEFAULT_NUSANTARA_VIEW_NAME
15
+
16
+ _LANGUAGES = ["ind"]
17
+ _LOCAL = False
18
+ _CITATION = """\
19
+ @article{winatmoko2019aspect,
20
+ title={Aspect and opinion term extraction for hotel reviews using transfer learning and auxiliary labels},
21
+ author={Winatmoko, Yosef Ardhito and Septiandri, Ali Akbar and Sutiono, Arie Pratama},
22
+ journal={arXiv preprint arXiv:1909.11879},
23
+ year={2019}
24
+ }
25
+ @inproceedings{fernando2019aspect,
26
+ title={Aspect and opinion terms extraction using double embeddings and attention mechanism for indonesian hotel reviews},
27
+ author={Fernando, Jordhy and Khodra, Masayu Leylia and Septiandri, Ali Akbar},
28
+ booktitle={2019 International Conference of Advanced Informatics: Concepts, Theory and Applications (ICAICTA)},
29
+ pages={1--6},
30
+ year={2019},
31
+ organization={IEEE}
32
+ }
33
+ """
34
+
35
+ _DESCRIPTION = """\
36
+ TermA is a span-extraction dataset collected from the hotel aggregator platform, AiryRooms
37
+ (Septiandri and Sutiono, 2019; Fernando et al.,
38
+ 2019) consisting of thousands of hotel reviews,each containing a span label for aspect
39
+ and sentiment words representing the opinion of the reviewer on the corresponding aspect.
40
+ The labels use Inside-Outside-Beginning tagging (IOB) with two kinds of tags, aspect and
41
+ sentiment.
42
+ """
43
+
44
+ _HOMEPAGE = "https://github.com/IndoNLP/indonlu"
45
+
46
+ _LICENSE = "Creative Common Attribution Share-Alike 4.0 International"
47
+
48
+ _URLs = {
49
+ "train": "https://raw.githubusercontent.com/IndoNLP/indonlu/master/dataset/terma_term-extraction-airy/train_preprocess.txt",
50
+ "validation": "https://raw.githubusercontent.com/IndoNLP/indonlu/master/dataset/terma_term-extraction-airy/valid_preprocess.txt",
51
+ "test": "https://raw.githubusercontent.com/IndoNLP/indonlu/master/dataset/terma_term-extraction-airy/test_preprocess_masked_label.txt",
52
+ }
53
+
54
+ _SUPPORTED_TASKS = [Tasks.KEYWORD_TAGGING]
55
+
56
+ _SOURCE_VERSION = "1.0.0"
57
+ _NUSANTARA_VERSION = "1.0.0"
58
+
59
+
60
+ class BaPOSDataset(datasets.GeneratorBasedBuilder):
61
+ """TermA is a span-extraction dataset containing 3k, 1k, 1k colloquial sentences in train, valid & test respectively of hotel domain with a total of 5 tags."""
62
+
63
+ label_classes = ["B-ASPECT", "I-ASPECT", "B-SENTIMENT", "I-SENTIMENT", "O"]
64
+
65
+ BUILDER_CONFIGS = [
66
+ NusantaraConfig(
67
+ name="term_a_source",
68
+ version=datasets.Version(_SOURCE_VERSION),
69
+ description="TermA source schema",
70
+ schema="source",
71
+ subset_id="term_a",
72
+ ),
73
+ NusantaraConfig(
74
+ name="term_a_nusantara_seq_label",
75
+ version=datasets.Version(_NUSANTARA_VERSION),
76
+ description="TermA Nusantara schema",
77
+ schema="nusantara_seq_label",
78
+ subset_id="term_a",
79
+ ),
80
+ ]
81
+
82
+ DEFAULT_CONFIG_NAME = "term_a_source"
83
+
84
+ def _info(self):
85
+ if self.config.schema == "source":
86
+ features = datasets.Features({"index": datasets.Value("string"), "tokens": [datasets.Value("string")], "token_tag": [datasets.Value("string")]})
87
+ elif self.config.schema == "nusantara_seq_label":
88
+ features = schemas.seq_label_features(self.label_classes)
89
+
90
+ return datasets.DatasetInfo(
91
+ description=_DESCRIPTION,
92
+ features=features,
93
+ homepage=_HOMEPAGE,
94
+ license=_LICENSE,
95
+ citation=_CITATION,
96
+ )
97
+
98
+ def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
99
+ train_tsv_path = Path(dl_manager.download_and_extract(_URLs["train"]))
100
+ validation_tsv_path = Path(dl_manager.download_and_extract(_URLs["validation"]))
101
+ test_tsv_path = Path(dl_manager.download_and_extract(_URLs["test"]))
102
+ data_files = {
103
+ "train": train_tsv_path,
104
+ "validation": validation_tsv_path,
105
+ "test": test_tsv_path,
106
+ }
107
+
108
+ return [
109
+ datasets.SplitGenerator(
110
+ name=datasets.Split.TRAIN,
111
+ gen_kwargs={"filepath": data_files["train"]},
112
+ ),
113
+ datasets.SplitGenerator(
114
+ name=datasets.Split.VALIDATION,
115
+ gen_kwargs={"filepath": data_files["validation"]},
116
+ ),
117
+ datasets.SplitGenerator(
118
+ name=datasets.Split.TEST,
119
+ gen_kwargs={"filepath": data_files["test"]},
120
+ ),
121
+ ]
122
+
123
+ def _generate_examples(self, filepath: Path):
124
+ conll_dataset = load_conll_data(filepath)
125
+
126
+ if self.config.schema == "source":
127
+ for i, row in enumerate(conll_dataset):
128
+ ex = {"index": str(i), "tokens": row["sentence"], "token_tag": row["label"]}
129
+ yield i, ex
130
+ elif self.config.schema == "nusantara_seq_label":
131
+ for i, row in enumerate(conll_dataset):
132
+ ex = {"id": str(i), "tokens": row["sentence"], "labels": row["label"]}
133
+ yield i, ex
134
+ else:
135
+ raise ValueError(f"Invalid config: {self.config.name}")