holylovenia committed
Commit cc2da11 · 1 Parent(s): 746da61

Upload sampiran.py with huggingface_hub
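For reference, an upload like this is typically done with huggingface_hub's HfApi.upload_file. The sketch below is illustrative only; the repo_id is a placeholder and is not taken from this commit.

from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj="sampiran.py",   # local dataloader script
    path_in_repo="sampiran.py",      # destination path inside the repo
    repo_id="<namespace>/sampiran",  # placeholder repo id, not from this commit
    repo_type="dataset",
    commit_message="Upload sampiran.py with huggingface_hub",
)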

Files changed (1)
  1. sampiran.py +142 -0
sampiran.py ADDED
@@ -0,0 +1,142 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from pathlib import Path
from typing import Dict, List, Tuple

import datasets

from nusacrowd.utils import schemas
from nusacrowd.utils.configs import NusantaraConfig
from nusacrowd.utils.constants import Tasks

_CITATION = """\
@inproceedings{siallagan2022sampiran,
  title={Poetry Generation for Indonesian Pantun: Comparison Between SeqGAN and GPT-2},
  author={Emmanuella Anggi Siallagan and Ika Alfina},
  booktitle={Jurnal Ilmu Komputer dan Informasi (Journal of Computer Science and Information) Vol 1x No x February 2023 (Minor Revision)},
  year={2023},
}
"""

_DATASETNAME = "sampiran"

_DESCRIPTION = """\
Sampiran is a dataset for pantun generation. It consists of 7.8K Indonesian pantun collected from various online sources.
A pantun is a traditional Malay poem of four lines: two opening lines (the sampiran) and two lines that carry the message.
The collected pantun were filtered to follow the general rules of the form: four lines with an ABAB rhyme and eight to twelve syllables per line.
"""

_LANGUAGES = ["ind"]
_LOCAL = False
_HOMEPAGE = "https://github.com/ir-nlp-csui/sampiran"
_LICENSE = "AGPL-3.0"

_URLS = "https://raw.githubusercontent.com/ir-nlp-csui/sampiran/main/sampiran.txt"

_SUPPORTED_TASKS = [Tasks.SELF_SUPERVISED_PRETRAINING]

_SOURCE_VERSION = "1.0.0"

_NUSANTARA_VERSION = "1.0.0"


class SampiranDataset(datasets.GeneratorBasedBuilder):
    """Sampiran is a dataset for pantun generation. It consists of 7.8K Indonesian pantun,
    collected from various online sources."""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    NUSANTARA_VERSION = datasets.Version(_NUSANTARA_VERSION)

    BUILDER_CONFIGS = [
        NusantaraConfig(
            name="sampiran_source",
            version=SOURCE_VERSION,
            description="sampiran source schema",
            schema="source",
            subset_id="sampiran",
        ),
        NusantaraConfig(
            name="sampiran_nusantara_ssp",
            version=NUSANTARA_VERSION,
            description="sampiran Nusantara schema",
            schema="nusantara_ssp",
            subset_id="sampiran",
        ),
    ]

    DEFAULT_CONFIG_NAME = "sampiran_source"

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "pantun": datasets.Value("string"),
                }
            )
        elif self.config.schema == "nusantara_ssp":
            features = schemas.self_supervised_pretraining.features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        filepath = Path(dl_manager.download(_URLS))

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": filepath},
            ),
        ]

    def _read_data(self, filepath: Path) -> List[Dict]:
        """Reads the data from the source file and returns a list of dicts (one per pantun)."""
        with open(filepath, encoding="utf-8") as f:
            return [{"id": str(id_), "pantun": line.rstrip()} for id_, line in enumerate(f)]

    def _generate_examples(self, filepath: Path, split: str = None) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples."""
        if self.config.schema not in ("source", "nusantara_ssp"):
            raise ValueError(f"Invalid config schema: {self.config.schema}")

        # Read the file line by line; each line holds one pantun.
        if self.config.name == "sampiran_source":
            with open(filepath, encoding="utf-8") as f:
                for id_, row in enumerate(f):
                    ex = {
                        "id": str(id_),
                        "pantun": row.rstrip(),
                    }
                    yield id_, ex

        elif self.config.name == "sampiran_nusantara_ssp":
            with open(filepath, encoding="utf-8") as f:
                for id_, row in enumerate(f):
                    ex = {"id": str(id_), "text": row.rstrip()}
                    yield id_, ex
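For context (not part of the commit), a minimal usage sketch of the two configs defined above, assuming the script sits in the working directory and a datasets version that still supports script-based loaders (newer releases require trust_remote_code=True or no longer run loading scripts):

from datasets import load_dataset

# Source schema: each example has "id" and "pantun".
sampiran_source = load_dataset("sampiran.py", name="sampiran_source", split="train")

# Nusantara self-supervised pretraining schema: each example has "id" and "text".
sampiran_ssp = load_dataset("sampiran.py", name="sampiran_nusantara_ssp", split="train")

print(sampiran_source[0]["pantun"])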