holylovenia committed
Commit fba7b0d · 1 Parent(s): 5e0b03e

Upload keps.py with huggingface_hub

Files changed (1)
  1. keps.py +131 -0
keps.py ADDED
@@ -0,0 +1,131 @@
+ from pathlib import Path
+ from typing import List
+
+ import datasets
+
+ from nusacrowd.utils import schemas
+ from nusacrowd.utils.common_parser import load_conll_data
+ from nusacrowd.utils.configs import NusantaraConfig
+ from nusacrowd.utils.constants import (DEFAULT_NUSANTARA_VIEW_NAME,
+                                        DEFAULT_SOURCE_VIEW_NAME, Tasks)
+
+ _DATASETNAME = "keps"
+ _SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME
+ _UNIFIED_VIEW_NAME = DEFAULT_NUSANTARA_VIEW_NAME
+
+ _LANGUAGES = ["ind"]
+ _LOCAL = False
+ _CITATION = """\
+ @inproceedings{mahfuzh2019improving,
+   title={Improving Joint Layer RNN based Keyphrase Extraction by Using Syntactical Features},
+   author={Mahfuzh, Miftahul and Soleman, Sidik and Purwarianti, Ayu},
+   booktitle={Proceedings of the 2019 International Conference of Advanced Informatics: Concepts, Theory and Applications (ICAICTA)},
+   pages={1--6},
+   year={2019},
+   organization={IEEE}
+ }
+ """
+
+ _DESCRIPTION = """\
+ The KEPS dataset (Mahfuzh, Soleman, and Purwarianti, 2019) consists of Indonesian-language
+ text from Twitter discussing banking products and services. A phrase containing important
+ information is considered a keyphrase, and a text may contain more than one keyphrase,
+ since important phrases can occur at different positions.
+ - tokens: a list of string features.
+ - seq_label: a list of classification labels, with possible values including O, B, I.
+ The labels use Inside-Outside-Beginning (IOB) tagging.
+ """
+
+ _HOMEPAGE = "https://github.com/IndoNLP/indonlu"
+
+ _LICENSE = "Creative Commons Attribution Share-Alike 4.0 International"
+
+ _URLs = {
+     "train": "https://raw.githubusercontent.com/IndoNLP/indonlu/master/dataset/keps_keyword-extraction-prosa/train_preprocess.txt",
+     "validation": "https://raw.githubusercontent.com/IndoNLP/indonlu/master/dataset/keps_keyword-extraction-prosa/valid_preprocess.txt",
+     "test": "https://raw.githubusercontent.com/IndoNLP/indonlu/master/dataset/keps_keyword-extraction-prosa/test_preprocess.txt",
+ }
+
+ _SUPPORTED_TASKS = [Tasks.KEYWORD_EXTRACTION]
+
+ _SOURCE_VERSION = "1.0.0"
+ _NUSANTARA_VERSION = "1.0.0"
+
+
+ class KepsDataset(datasets.GeneratorBasedBuilder):
+     """KEPS is a keyphrase extraction dataset containing about 1,247 sentences (train=800, valid=200, test=247), with 3 classes."""
+
+     label_classes = ["B", "I", "O"]
+
+     BUILDER_CONFIGS = [
+         NusantaraConfig(
+             name="keps_source",
+             version=datasets.Version(_SOURCE_VERSION),
+             description="KEPS source schema",
+             schema="source",
+             subset_id="keps",
+         ),
+         NusantaraConfig(
+             name="keps_nusantara_seq_label",
+             version=datasets.Version(_NUSANTARA_VERSION),
+             description="KEPS Nusantara schema",
+             schema="nusantara_seq_label",
+             subset_id="keps",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "keps_source"
+
+     def _info(self):
+         if self.config.schema == "source":
+             features = datasets.Features({"index": datasets.Value("string"), "tokens": [datasets.Value("string")], "ke_tag": [datasets.Value("string")]})
+         elif self.config.schema == "nusantara_seq_label":
+             features = schemas.seq_label_features(self.label_classes)
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         train_tsv_path = Path(dl_manager.download_and_extract(_URLs["train"]))
+         validation_tsv_path = Path(dl_manager.download_and_extract(_URLs["validation"]))
+         test_tsv_path = Path(dl_manager.download_and_extract(_URLs["test"]))
+         data_files = {
+             "train": train_tsv_path,
+             "validation": validation_tsv_path,
+             "test": test_tsv_path,
+         }
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"filepath": data_files["train"]},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"filepath": data_files["validation"]},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"filepath": data_files["test"]},
+             ),
+         ]
+
+     def _generate_examples(self, filepath: Path):
+         conll_dataset = load_conll_data(filepath)
+
+         if self.config.schema == "source":
+             for i, row in enumerate(conll_dataset):
+                 ex = {"index": str(i), "tokens": row["sentence"], "ke_tag": row["label"]}
+                 yield i, ex
+         elif self.config.schema == "nusantara_seq_label":
+             for i, row in enumerate(conll_dataset):
+                 ex = {"id": str(i), "tokens": row["sentence"], "labels": row["label"]}
+                 yield i, ex
+         else:
+             raise ValueError(f"Invalid config: {self.config.name}")
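
For readers unfamiliar with the IOB scheme mentioned in _DESCRIPTION, here is a hypothetical illustration (the sentence below is invented, not taken from the dataset) of how B/I/O labels align one-to-one with tokens, matching the "tokens"/"labels" pairs the loader yields:

# Illustration only: an invented Indonesian sentence with IOB keyphrase tags.
tokens = ["saldo", "kartu", "kredit", "saya", "belum", "diperbarui"]
labels = ["B",     "I",     "I",      "O",    "O",     "O"]
# "saldo kartu kredit" ("credit card balance") forms one keyphrase:
# B opens the span, I continues it, and O marks tokens outside any keyphrase.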
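
A minimal usage sketch for the script above (not part of the commit), assuming keps.py and its nusacrowd dependencies are available locally and a datasets version that accepts a local script path:

import datasets

# Source schema: "index", "tokens", and IOB "ke_tag" labels per example.
keps_source = datasets.load_dataset("keps.py", name="keps_source")

# Unified Nusantara schema: "id", "tokens", "labels".
keps_seq_label = datasets.load_dataset("keps.py", name="keps_nusantara_seq_label")

print(keps_source["train"][0]["tokens"][:5])
print(keps_seq_label["train"][0]["labels"][:5])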