File size: 4,409 Bytes
a3ad0b2
 
 
 
 
 
9c5d078
 
 
a3ad0b2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9c5d078
a3ad0b2
 
 
 
 
 
 
9c5d078
a3ad0b2
 
9c5d078
a3ad0b2
 
 
 
 
 
9c5d078
 
 
a3ad0b2
9c5d078
a3ad0b2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9c5d078
a3ad0b2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9c5d078
a3ad0b2
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
from pathlib import Path
from typing import Dict, List, Tuple

import datasets
import pandas as pd

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Tasks

# BibTeX citation for the paper introducing the dataset.
_CITATION = """\
@article{hidayatullah2020attention,
  title={Attention-based cnn-bilstm for dialect identification on javanese text},
  author={Hidayatullah, Ahmad Fathan and Cahyaningtyas, Siwi and Pamungkas, Rheza Daffa},
  journal={Kinetik: Game Technology, Information System, Computer Network, Computing, Electronics, and Control},
  pages={317--324},
  year={2020}
}
"""

# NOTE(review): the description and citation say this is *Javanese* dialect data,
# but the code below is "ind" (Indonesian) — confirm whether "jav" was intended.
_LANGUAGES = ["ind"]  # We follow ISO639-3 language code (https://iso639-3.sil.org/code_tables/639/data)
# False: the data is downloaded from a public URL rather than supplied locally.
_LOCAL = False

_DATASETNAME = "jadi_ide"

_DESCRIPTION = """\
The JaDi-Ide dataset is a Twitter dataset for Javanese dialect identification, containing 16,498 
data samples. The dialect is classified into `Standard Javanese`, `Ngapak Javanese`, and `East 
Javanese` dialects.
"""

_HOMEPAGE = "https://github.com/fathanick/Javanese-Dialect-Identification-from-Twitter-Data"
_LICENSE = "Unknown"
# NOTE(review): this URL contains an unencoded space ("Update 16K_Dataset.xlsx");
# verify that dl_manager.download handles it, or percent-encode it as %20.
_URLS = {
    _DATASETNAME: "https://github.com/fathanick/Javanese-Dialect-Identification-from-Twitter-Data/raw/main/Update 16K_Dataset.xlsx",
}
# TODO check supported tasks
# NOTE(review): EMOTION_CLASSIFICATION looks wrong for a dialect-identification
# dataset — check seacrowd.utils.constants.Tasks for the dialect/text-classification member.
_SUPPORTED_TASKS = [Tasks.EMOTION_CLASSIFICATION]
# Version of the upstream data release.
_SOURCE_VERSION = "1.0.0"
# Version of the SeaCrowd schema mapping.
_SEACROWD_VERSION = "2024.06.20"


class JaDi_Ide(datasets.GeneratorBasedBuilder):
    """SeaCrowd dataloader for JaDi-Ide, a Twitter dataset for Javanese dialect
    identification containing 16,498 data samples labeled `Jawa Standar`,
    `Jawa Ngapak`, or `Jawa Timur`.

    Two configs are exposed: ``jadi_ide_source`` (raw id/text/label columns)
    and ``jadi_ide_seacrowd_text`` (SeaCrowd text-classification schema).
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name="jadi_ide_source",
            version=SOURCE_VERSION,
            description="JaDi-Ide source schema",
            schema="source",
            subset_id="jadi_ide",
        ),
        SEACrowdConfig(
            name="jadi_ide_seacrowd_text",
            version=SEACROWD_VERSION,
            description="JaDi-Ide Nusantara schema",
            schema="seacrowd_text",
            subset_id="jadi_ide",
        ),
    ]

    DEFAULT_CONFIG_NAME = "jadi_ide_source"

    def _info(self) -> datasets.DatasetInfo:
        """Build the DatasetInfo for the active config.

        Raises:
            ValueError: if the config declares an unknown schema. (Previously
                an unknown schema left ``features`` unbound and surfaced later
                as an UnboundLocalError.)
        """
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "label": datasets.Value("string"),
                }
            )
        elif self.config.schema == "seacrowd_text":
            features = schemas.text_features(["Jawa Timur", "Jawa Standar", "Jawa Ngapak"])
        else:
            raise ValueError(f"Invalid config: {self.config.name}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        # Dataset does not have predetermined split, putting all as TRAIN
        urls = _URLS[_DATASETNAME]
        base_dir = Path(dl_manager.download(urls))
        data_files = {"train": base_dir}

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_files["train"],
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples.

        Args:
            filepath: path to the downloaded Excel file.
            split: split name (always "train"; kept for the builder contract).

        Raises:
            ValueError: if the config declares an unknown schema, checked
                *before* the (potentially slow) Excel read.
        """
        # Both supported schemas emit the identical {id, text, label} record,
        # so a single loop serves them; validate the schema up front.
        if self.config.schema not in ("source", "seacrowd_text"):
            raise ValueError(f"Invalid config: {self.config.name}")

        df = pd.read_excel(filepath)
        # Normalize the spreadsheet's header row to stable column names.
        df.columns = ["id", "text", "label"]

        for idx, row in enumerate(df.itertuples()):
            # NOTE(review): the row's own "id" column is ignored in favor of the
            # positional index — this preserves the original behavior.
            yield idx, {
                "id": str(idx),
                "text": row.text,
                "label": row.label,
            }