update
UTS_Text.py  CHANGED  (+43 -13)
@@ -12,9 +12,29 @@ _CITATION = """\
 """
 
 _BASE_URL = "https://huggingface.co/datasets/undertheseanlp/UTS_Text_v1/resolve/main/data/"
-TRAIN_FILE = "
+TRAIN_FILE = "train.txt"
+VALIDATION_FILE = "validation.txt"
+TEST_FILE = "test.txt"
+
+
+class UTSTextConfig(datasets.BuilderConfig):
+    """BuilderConfig"""
+
+    def __init__(self, **kwargs):
+        super(UTSTextConfig, self).__init__(**kwargs)
 
 class UTSText(datasets.GeneratorBasedBuilder):
+    """UTS Word Tokenize datasets"""
+    VERSION = datasets.Version("1.0.0")
+    BUILDER_CONFIGS = [
+        UTSTextConfig(
+            name="small", version=VERSION, description="UTS_Text Small"),
+        UTSTextConfig(
+            name="base", version=VERSION, description="UTS_Text Base"),
+        UTSTextConfig(
+            name="large", version=VERSION, description="UTS_Text Large")
+    ]
+
     def _info(self):
         return datasets.DatasetInfo(
             description=_DESCRIPTION,
@@ -30,18 +50,28 @@ class UTSText(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
-
-        train_file = dl_manager.download(os.path.join(
-
-
-
-            "train": train_file,
-            # "dev": dev_file,
-            # "test": test_file,
-        }
+        subset_folder = self.config.name
+        train_file = dl_manager.download(os.path.join(_BASE_URL, subset_folder, TRAIN_FILE))
+        validation_file = dl_manager.download(os.path.join(_BASE_URL, subset_folder, VALIDATION_FILE))
+        test_file = dl_manager.download(os.path.join(_BASE_URL, subset_folder, TEST_FILE))
+
         splits = [
-            datasets.SplitGenerator(
-
-
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={"filepath": train_file}
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={"filepath": validation_file}
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={"filepath": test_file}
+            ),
+        ]
         ]
         return splits
+
+    def _generate_examples(self, filepath):
+        with open(filepath, encoding="utf-8") as f:
+            for line in f:
+                yield line.strip()