holylovenia
committed on
Upload multilexnorm.py with huggingface_hub
multilexnorm.py  CHANGED  (+12 -12)
@@ -3,9 +3,9 @@ from typing import Dict, List, Tuple
 
 import datasets
 
-from
-from
-from
+from seacrowd.utils import schemas
+from seacrowd.utils.configs import SEACrowdConfig
+from seacrowd.utils.constants import Tasks
 
 _CITATION = """\
 @inproceedings{multilexnorm,
@@ -42,7 +42,7 @@ _SUPPORTED_TASKS = [Tasks.MULTILEXNORM]
 
 _SOURCE_VERSION = "1.0.0"
 
-
+_SEACROWD_VERSION = "2024.06.20"
 
 
 class MultiLexNorm(datasets.GeneratorBasedBuilder):
@@ -53,21 +53,21 @@ class MultiLexNorm(datasets.GeneratorBasedBuilder):
     """
 
     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
-
+    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
 
     BUILDER_CONFIGS = [
-
+        SEACrowdConfig(
             name="multilexnorm_source",
             version=_SOURCE_VERSION,
             description="multilexnorm source schema",
             schema="source",
             subset_id="multilexnorm",
         ),
-
-            name="
-            version=
+        SEACrowdConfig(
+            name="multilexnorm_seacrowd_t2t",
+            version=_SEACROWD_VERSION,
             description="multilexnorm Nusantara schema",
-            schema="
+            schema="seacrowd_t2t",
             subset_id="multilexnorm",
         ),
     ]
@@ -86,7 +86,7 @@ class MultiLexNorm(datasets.GeneratorBasedBuilder):
                 }
             )
 
-        elif self.config.schema == "
+        elif self.config.schema == "seacrowd_t2t":
             features = schemas.text2text_features
 
         return datasets.DatasetInfo(
@@ -156,7 +156,7 @@ class MultiLexNorm(datasets.GeneratorBasedBuilder):
                     tok.append("")
                 curSent.append(tok)
 
-        elif self.config.schema == "
+        elif self.config.schema == "seacrowd_t2t":
             i = 0
             for line in open(filepath):
                 tok = line.strip("\n").split("\t")
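
After this change, the text-to-text view is exposed under the config name multilexnorm_seacrowd_t2t. A minimal loading sketch follows; the hub repository id SEACrowd/multilexnorm is an assumption, only the config name comes from the diff above.

# Minimal sketch, assuming the loader above is published as SEACrowd/multilexnorm.
# Only the config name "multilexnorm_seacrowd_t2t" is taken from the diff itself.
import datasets

dset = datasets.load_dataset(
    "SEACrowd/multilexnorm",           # assumed hub repository id
    name="multilexnorm_seacrowd_t2t",  # config introduced by this commit
    trust_remote_code=True,            # the dataset is defined by a loader script
)
print(dset)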
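The unchanged generator lines (line.strip("\n").split("\t"), curSent.append(tok)) read the usual MultiLexNorm file layout: one raw<TAB>normalized token pair per line, with a blank line separating sentences. Below is a self-contained sketch of that parsing mapped onto text-to-text pairs; the field names id, text_1 and text_2 follow the typical SEACrowd t2t schema and are assumptions here, not copied from the diff.

# Sketch: parse a MultiLexNorm-style file (raw<TAB>normalized per line,
# blank line between sentences) into text-to-text examples.
# Field names assume the SEACrowd t2t schema; they are not shown in the diff.
from typing import Dict, Iterator, List, Tuple


def read_norm_file(filepath: str) -> Iterator[Tuple[List[str], List[str]]]:
    raw, norm = [], []
    for line in open(filepath, encoding="utf-8"):
        line = line.strip("\n")
        if not line:                      # blank line ends the current sentence
            if raw:
                yield raw, norm
                raw, norm = [], []
            continue
        tok = line.split("\t")            # [raw_token, normalized_token]
        raw.append(tok[0])
        norm.append(tok[1] if len(tok) > 1 else tok[0])
    if raw:                               # flush a trailing sentence
        yield raw, norm


def to_t2t_examples(filepath: str) -> Iterator[Dict[str, str]]:
    for i, (raw, norm) in enumerate(read_norm_file(filepath)):
        yield {
            "id": str(i),
            "text_1": " ".join(raw),      # noisy input sentence
            "text_2": " ".join(norm),     # normalized sentence
        }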