Datasets:
Tasks:
Token Classification
Modalities:
Text
Formats:
parquet
Languages:
Thai
Size:
100K - 1M
Tags:
word-tokenization
License:
Update files from the datasets library (from 1.7.0)
Browse files
Release notes: https://github.com/huggingface/datasets/releases/tag/1.7.0
- README.md +2 -1
- best2009.py +3 -3
README.md
CHANGED
@@ -17,6 +17,7 @@ task_categories:
|
|
17 |
- structure-prediction
|
18 |
task_ids:
|
19 |
- structure-prediction-other-word-tokenization
|
|
|
20 |
---
|
21 |
|
22 |
# Dataset Card for `best2009`
|
@@ -24,7 +25,7 @@ task_ids:
|
|
24 |
## Table of Contents
|
25 |
- [Dataset Description](#dataset-description)
|
26 |
- [Dataset Summary](#dataset-summary)
|
27 |
-
- [Supported Tasks](#supported-tasks-and-leaderboards)
|
28 |
- [Languages](#languages)
|
29 |
- [Dataset Structure](#dataset-structure)
|
30 |
- [Data Instances](#data-instances)
|
|
|
17 |
- structure-prediction
|
18 |
task_ids:
|
19 |
- structure-prediction-other-word-tokenization
|
20 |
+
paperswithcode_id: null
|
21 |
---
|
22 |
|
23 |
# Dataset Card for `best2009`
|
|
|
25 |
## Table of Contents
|
26 |
- [Dataset Description](#dataset-description)
|
27 |
- [Dataset Summary](#dataset-summary)
|
28 |
+
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
|
29 |
- [Languages](#languages)
|
30 |
- [Dataset Structure](#dataset-structure)
|
31 |
- [Data Instances](#data-instances)
|
best2009.py
CHANGED
@@ -114,9 +114,9 @@ class Best2009(datasets.GeneratorBasedBuilder):
|
|
114 |
]
|
115 |
|
116 |
def _generate_examples(self, filepath, split):
|
117 |
-
for fname in sorted(Path(filepath).rglob("*.txt")):
|
118 |
with open(fname, encoding="utf-8") as f:
|
119 |
-
for
|
120 |
chars = []
|
121 |
char_types = []
|
122 |
is_beginnings = []
|
@@ -130,7 +130,7 @@ class Best2009(datasets.GeneratorBasedBuilder):
|
|
130 |
char_types.append(self._CHAR_TYPE_FLATTEN.get(token[i], "o"))
|
131 |
is_beginning = 1 if i == 0 else 0
|
132 |
is_beginnings.append(is_beginning)
|
133 |
-
yield
|
134 |
"fname": fname.name,
|
135 |
"char": chars,
|
136 |
"char_type": char_types,
|
|
|
114 |
]
|
115 |
|
116 |
def _generate_examples(self, filepath, split):
|
117 |
+
for file_idx, fname in enumerate(sorted(Path(filepath).rglob("*.txt"))):
|
118 |
with open(fname, encoding="utf-8") as f:
|
119 |
+
for line_idx, line in enumerate(f):
|
120 |
chars = []
|
121 |
char_types = []
|
122 |
is_beginnings = []
|
|
|
130 |
char_types.append(self._CHAR_TYPE_FLATTEN.get(token[i], "o"))
|
131 |
is_beginning = 1 if i == 0 else 0
|
132 |
is_beginnings.append(is_beginning)
|
133 |
+
yield f"{file_idx}_{line_idx}", {
|
134 |
"fname": fname.name,
|
135 |
"char": chars,
|
136 |
"char_type": char_types,
|