Upload id_hsd_nofaaulia.py with huggingface_hub
id_hsd_nofaaulia.py CHANGED (+13 -13)
@@ -21,9 +21,9 @@ import datasets
 import pandas as pd
 from sklearn.model_selection import train_test_split
 
-from nusacrowd.utils import schemas
-from nusacrowd.utils.configs import NusantaraConfig
-from nusacrowd.utils.constants import (DEFAULT_NUSANTARA_VIEW_NAME,
+from seacrowd.utils import schemas
+from seacrowd.utils.configs import SEACrowdConfig
+from seacrowd.utils.constants import (DEFAULT_SEACROWD_VIEW_NAME,
                                       DEFAULT_SOURCE_VIEW_NAME, Tasks)
 
 _CITATION = """\
@@ -64,27 +64,27 @@ _URLS = {
 _SUPPORTED_TASKS = [Tasks.SENTIMENT_ANALYSIS]
 
 _SOURCE_VERSION = "1.0.0"
-_NUSANTARA_VERSION = "1.0.0"
+_SEACROWD_VERSION = "2024.06.20"
 
 class IdHSDNofaaulia(datasets.GeneratorBasedBuilder):
     """Indonesian hate speech detection for long article."""
 
     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
-    NUSANTARA_VERSION = datasets.Version(_NUSANTARA_VERSION)
+    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
 
     BUILDER_CONFIGS = [
-        NusantaraConfig(
+        SEACrowdConfig(
             name="id_hsd_nofaaulia_source",
             version=SOURCE_VERSION,
             description="id_hsd_nofaaulia source schema",
             schema="source",
             subset_id="id_hsd_nofaaulia",
         ),
-        NusantaraConfig(
-            name="id_hsd_nofaaulia_nusantara_text",
-            version=NUSANTARA_VERSION,
+        SEACrowdConfig(
+            name="id_hsd_nofaaulia_seacrowd_text",
+            version=SEACROWD_VERSION,
             description="id_hsd_nofaaulia Nusantara schema",
-            schema="nusantara_text",
+            schema="seacrowd_text",
             subset_id="id_hsd_nofaaulia",
         ),
     ]
@@ -95,7 +95,7 @@ class IdHSDNofaaulia(datasets.GeneratorBasedBuilder):
 
         if self.config.schema == "source":
             features = datasets.Features({"index": datasets.Value("string"), "text": datasets.Value("string"), "label": datasets.Value("string")})
-        elif self.config.schema == "nusantara_text":
+        elif self.config.schema == "seacrowd_text":
             features = schemas.text_features(["0", "1"])
 
         return datasets.DatasetInfo(
@@ -121,7 +121,7 @@ class IdHSDNofaaulia(datasets.GeneratorBasedBuilder):
         target = "label"
 
         # The split follows the implementation below
-        # https://github.com/IndoNLP/nusa-crowd/blob/master/
+        # https://github.com/IndoNLP/nusa-crowd/blob/master/seacrowd/utils/schemas/pairs.py
         # test_size=0.1, random_state=42
         # tested locally using :
         # scikit-learn 1.1.2
@@ -163,7 +163,7 @@ class IdHSDNofaaulia(datasets.GeneratorBasedBuilder):
                 }
                 yield row.Index, ex
 
-        elif self.config.schema == "nusantara_text":
+        elif self.config.schema == "seacrowd_text":
             for row in df.itertuples():
                 ex = {
                     "id": str(row.Index),
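
The seacrowd_text branch above hands the feature spec to schemas.text_features. For reference, here is a minimal sketch of what that helper conventionally returns in the NusaCrowd/SEACrowd codebases; this is an assumption based on their shared text schema, not part of this commit:

import datasets

def text_features(label_names):
    # Assumed shape of the seacrowd_text schema: a string id, the raw text,
    # and a ClassLabel over the supplied names (["0", "1"] in this loader).
    return datasets.Features(
        {
            "id": datasets.Value("string"),
            "text": datasets.Value("string"),
            "label": datasets.ClassLabel(names=label_names),
        }
    )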
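After the rename, loading goes through the new config name. A hedged usage sketch, assuming a local copy of this loader script and an environment where the seacrowd imports resolve (recent datasets releases also require trust_remote_code for script-based loaders):

import datasets

dset = datasets.load_dataset(
    "id_hsd_nofaaulia.py",  # hypothetical local path to this loader script
    name="id_hsd_nofaaulia_seacrowd_text",  # renamed config from BUILDER_CONFIGS
    trust_remote_code=True,
)
print(dset["train"][0])  # seacrowd_text rows carry id, text, and label

Per the comments in the diff, the train/test split is materialized inside the loader with train_test_split(test_size=0.1, random_state=42), so the same split should come back on every load given the pinned scikit-learn version.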