from pathlib import Path
from typing import List
import datasets
from seacrowd.utils import schemas
from seacrowd.utils.common_parser import load_conll_data
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import (DEFAULT_SEACROWD_VIEW_NAME,
                                      DEFAULT_SOURCE_VIEW_NAME, Tasks)
_DATASETNAME = "term_a"
_SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME
_UNIFIED_VIEW_NAME = DEFAULT_SEACROWD_VIEW_NAME
_LANGUAGES = ["ind"]
_LOCAL = False
_CITATION = """\
@article{winatmoko2019aspect,
  title={Aspect and opinion term extraction for hotel reviews using transfer learning and auxiliary labels},
  author={Winatmoko, Yosef Ardhito and Septiandri, Ali Akbar and Sutiono, Arie Pratama},
  journal={arXiv preprint arXiv:1909.11879},
  year={2019}
}
@inproceedings{fernando2019aspect,
  title={Aspect and opinion terms extraction using double embeddings and attention mechanism for indonesian hotel reviews},
  author={Fernando, Jordhy and Khodra, Masayu Leylia and Septiandri, Ali Akbar},
  booktitle={2019 International Conference of Advanced Informatics: Concepts, Theory and Applications (ICAICTA)},
  pages={1--6},
  year={2019},
  organization={IEEE}
}
"""
_DESCRIPTION = """\
TermA is a span-extraction dataset collected from the hotel aggregator platform AiryRooms
(Septiandri and Sutiono, 2019; Fernando et al., 2019). It consists of thousands of hotel
reviews, each containing span labels for the aspect and sentiment words that express the
reviewer's opinion on the corresponding aspect. The labels use Inside-Outside-Beginning
(IOB) tagging with two kinds of tags: aspect and sentiment.
"""
_HOMEPAGE = "https://github.com/IndoNLP/indonlu"
_LICENSE = "Creative Common Attribution Share-Alike 4.0 International"
_URLs = {
    "train": "https://raw.githubusercontent.com/IndoNLP/indonlu/master/dataset/terma_term-extraction-airy/train_preprocess.txt",
    "validation": "https://raw.githubusercontent.com/IndoNLP/indonlu/master/dataset/terma_term-extraction-airy/valid_preprocess.txt",
    "test": "https://raw.githubusercontent.com/IndoNLP/indonlu/master/dataset/terma_term-extraction-airy/test_preprocess_masked_label.txt",
}
_SUPPORTED_TASKS = [Tasks.KEYWORD_TAGGING]
_SOURCE_VERSION = "1.0.0"
_SEACROWD_VERSION = "2024.06.20"
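# The builder below exposes two configurations: "source", which keeps the raw
# CoNLL-style fields (index, tokens, token_tag), and "seacrowd_seq_label", which
# maps the same data onto the unified SEACrowd sequence-labeling schema.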
class TermADataset(datasets.GeneratorBasedBuilder):
    """TermA is a span-extraction dataset of colloquial hotel-domain sentences, with 3k/1k/1k sentences in the train/validation/test splits and a total of 5 tags."""

    label_classes = ["B-ASPECT", "I-ASPECT", "B-SENTIMENT", "I-SENTIMENT", "O"]
    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name="term_a_source",
            version=datasets.Version(_SOURCE_VERSION),
            description="TermA source schema",
            schema="source",
            subset_id="term_a",
        ),
        SEACrowdConfig(
            name="term_a_seacrowd_seq_label",
            version=datasets.Version(_SEACROWD_VERSION),
            description="TermA SEACrowd sequence-labeling schema",
            schema="seacrowd_seq_label",
            subset_id="term_a",
        ),
    ]

    DEFAULT_CONFIG_NAME = "term_a_source"
    def _info(self):
        if self.config.schema == "source":
            features = datasets.Features({"index": datasets.Value("string"), "tokens": [datasets.Value("string")], "token_tag": [datasets.Value("string")]})
        elif self.config.schema == "seacrowd_seq_label":
            features = schemas.seq_label_features(self.label_classes)

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        train_tsv_path = Path(dl_manager.download_and_extract(_URLs["train"]))
        validation_tsv_path = Path(dl_manager.download_and_extract(_URLs["validation"]))
        test_tsv_path = Path(dl_manager.download_and_extract(_URLs["test"]))
        data_files = {
            "train": train_tsv_path,
            "validation": validation_tsv_path,
            "test": test_tsv_path,
        }

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": data_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": data_files["validation"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": data_files["test"]},
            ),
        ]
    def _generate_examples(self, filepath: Path):
        conll_dataset = load_conll_data(filepath)

        if self.config.schema == "source":
            for i, row in enumerate(conll_dataset):
                ex = {"index": str(i), "tokens": row["sentence"], "token_tag": row["label"]}
                yield i, ex
        elif self.config.schema == "seacrowd_seq_label":
            for i, row in enumerate(conll_dataset):
                ex = {"id": str(i), "tokens": row["sentence"], "labels": row["label"]}
                yield i, ex
        else:
            raise ValueError(f"Invalid config: {self.config.name}")
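# A minimal usage sketch (hypothetical local path; assumes a `datasets` version
# that still supports loading dataset scripts directly):
#
#   import datasets
#
#   term_a = datasets.load_dataset("path/to/term_a.py", name="term_a_seacrowd_seq_label")
#   print(term_a["train"][0]["tokens"], term_a["train"][0]["labels"])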