holylovenia committed · verified
Commit 20cdc6b · 1 Parent(s): 4956e59

Upload identifikasi_bahasa.py with huggingface_hub
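
For reference, an upload like the one in this commit can be scripted with the huggingface_hub client. A minimal sketch, assuming the script sits in the working directory; the repo_id shown is illustrative, not taken from this commit:

    from huggingface_hub import HfApi

    api = HfApi()  # picks up the token from `huggingface-cli login` by default
    api.upload_file(
        path_or_fileobj="identifikasi_bahasa.py",  # local file to push
        path_in_repo="identifikasi_bahasa.py",     # destination path inside the repo
        repo_id="SEACrowd/identifikasi_bahasa",    # illustrative repo id (assumption)
        repo_type="dataset",
        commit_message="Upload identifikasi_bahasa.py with huggingface_hub",
    )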

Files changed (1)
  1. identifikasi_bahasa.py +136 -0
identifikasi_bahasa.py ADDED
@@ -0,0 +1,136 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from pathlib import Path
+from typing import Dict, List, Tuple
+
+import datasets
+import pandas as pd
+
+from seacrowd.utils import schemas
+from seacrowd.utils.configs import SEACrowdConfig
+from seacrowd.utils.constants import Licenses, Tasks
+
+_CITATION = """\
+@article{Tuhenay2021,
+  title = {Perbandingan Klasifikasi Bahasa Menggunakan Metode Naïve Bayes Classifier (NBC) Dan Support Vector Machine (SVM)},
+  volume = {4},
+  ISSN = {2656-1948},
+  url = {http://dx.doi.org/10.33387/jiko.v4i2.2958},
+  DOI = {10.33387/jiko.v4i2.2958},
+  number = {2},
+  journal = {JIKO (Jurnal Informatika dan Komputer)},
+  publisher = {LPPM Universitas Khairun},
+  author = {Tuhenay, Deglorians},
+  year = {2021},
+  month = aug,
+  pages = {105-111}
+}
+"""
+
+_DATASETNAME = "identifikasi_bahasa"
+
+_DESCRIPTION = """\
+The identifikasi-bahasa dataset includes text samples in Indonesian, Ambonese, and Javanese. \
+Each entry is comprised of cleantext, representing the sentence content, and a label identifying the language. \
+The manual input process involved grouping the data by language categories, \
+with labels for language identification and cleantext representing sentence content. The dataset, excluding punctuation and numbers, \
+consists of a minimum of 3,000 Ambonese, 10,000 Javanese, \
+and 3,500 Indonesian language entries, meeting the research's minimum standard for effective language identification.
+"""
+
+_HOMEPAGE = "https://github.com/joanitolopo/identifikasi-bahasa"
+_LANGUAGES = ["ind", "jav", "abs"]
+
+_LICENSE = Licenses.APACHE_2_0.value
+_LOCAL = False
+
+_URLS = {
+    _DATASETNAME: "https://github.com/joanitolopo/identifikasi-bahasa/raw/main/DataKlasifikasi.xlsx",
+}
+
+_SUPPORTED_TASKS = [Tasks.LANGUAGE_IDENTIFICATION]
+_SOURCE_VERSION = "1.0.0"
+_SEACROWD_VERSION = "2024.06.20"
+_TAGS = ["Ambon", "Indo", "Jawa"]
+
+
+class IdentifikasiBahasaDataset(datasets.GeneratorBasedBuilder):
+    """The "identifikasi-bahasa" dataset, manually grouped by language, \
+    contains labeled Indonesian, Ambonese, and Javanese text entries, excluding \
+    punctuation and numbers, with a minimum of 3,000 Ambonese, 10,000 Javanese, \
+    and 3,500 Indonesian entries for effective language identification."""
+
+    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
+    SEACROWD_SCHEMA_NAME = "text"
+
+    BUILDER_CONFIGS = [
+        SEACrowdConfig(
+            name=f"{_DATASETNAME}_source",
+            version=SOURCE_VERSION,
+            description=f"{_DATASETNAME} source schema",
+            schema="source",
+            subset_id=_DATASETNAME,
+        ),
+        SEACrowdConfig(
+            name=f"{_DATASETNAME}_seacrowd_{SEACROWD_SCHEMA_NAME}",
+            version=SEACROWD_VERSION,
+            description=f"{_DATASETNAME} SEACrowd schema",
+            schema=f"seacrowd_{SEACROWD_SCHEMA_NAME}",
+            subset_id=_DATASETNAME,
+        ),
+    ]
+
+    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"
+
+    def _info(self) -> datasets.DatasetInfo:
+        if self.config.schema == "source":
+            features = datasets.Features({"cleanText": datasets.Value("string"), "label": datasets.Value("string")})
+        elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
+            features = schemas.text_features(_TAGS)
+
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,
+            homepage=_HOMEPAGE,
+            license=_LICENSE,
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+        """Returns SplitGenerators."""
+        urls = _URLS[_DATASETNAME]
+        data_dir = dl_manager.download(urls)
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "filepath": data_dir,
+                    "split": "train",
+                },
+            )
+        ]
+
+    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
+        """Yields examples as (key, example) tuples."""
+        dataset = pd.read_excel(filepath)
+
+        if self.config.schema == "source":
+            for i, row in dataset.iterrows():
+                yield i, {"cleanText": row["cleanText"], "label": row["label"]}
+
+        elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
+            for i, row in dataset.iterrows():
+                yield i, {"id": i, "text": row["cleanText"], "label": row["label"]}