Datasets: lst20
Update files from the datasets library (from 1.3.0)
Release notes: https://github.com/huggingface/datasets/releases/tag/1.3.0
Files changed:
- README.md +5 -0
- dataset_infos.json +1 -1
- lst20.py +9 -8
README.md
CHANGED
@@ -47,6 +47,7 @@ task_ids:
 - [Dataset Curators](#dataset-curators)
 - [Licensing Information](#licensing-information)
 - [Citation Information](#citation-information)
+- [Contributions](#contributions)
 
 ## Dataset Description
 
@@ -208,3 +209,7 @@ In both options, please contact Dr. Thepchai Supnithi via [email protected]
   year={2020}
 }
 ```
+
+### Contributions
+
+Thanks to [@cstorm125](https://github.com/cstorm125) for adding this dataset.

dataset_infos.json
CHANGED
@@ -1 +1 @@
-{"lst20": {"description": "LST20 Corpus is a dataset for Thai language processing developed by National Electronics and Computer Technology Center (NECTEC), Thailand.\nIt offers five layers of linguistic annotation: word boundaries, POS tagging, named entities, clause boundaries, and sentence boundaries.\nAt a large scale, it consists of 3,164,002 words, 288,020 named entities, 248,181 clauses, and 74,180 sentences, while it is annotated with\n16 distinct POS tags. All 3,745 documents are also annotated with one of 15 news genres. Regarding its sheer size, this dataset is\nconsidered large enough for developing joint neural models for NLP.\nManually download at https://aiforthai.in.th/corpus.php\n", "citation": "@article{boonkwan2020annotation,\n title={The Annotation Guideline of LST20 Corpus},\n author={Boonkwan, Prachya and Luantangsrisuk, Vorapon and Phaholphinyo, Sitthaa and Kriengket, Kanyanat and Leenoi, Dhanon and Phrombut, Charun and Boriboon, Monthika and Kosawat, Krit and Supnithi, Thepchai},\n journal={arXiv preprint arXiv:2008.05055},\n year={2020}\n}\n", "homepage": "https://aiforthai.in.th/", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "fname": {"dtype": "string", "id": null, "_type": "Value"}, "tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos_tags": {"feature": {"num_classes": 16, "names": ["NN", "VV", "PU", "CC", "PS", "AX", "AV", "FX", "NU", "AJ", "CL", "PR", "NG", "PA", "XX", "IJ"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 31, "names": ["O", "B_BRN", "B_DES", "B_DTM", "B_LOC", "B_MEA", "B_NUM", "B_ORG", "B_PER", "B_TRM", "B_TTL", "I_BRN", "I_DES", "I_DTM", "I_LOC", "I_MEA", "I_NUM", "I_ORG", "I_PER", "I_TRM", "I_TTL", "E_BRN", "E_DES", "E_DTM", "E_LOC", "E_MEA", "E_NUM", "E_ORG", "E_PER", "E_TRM", "E_TTL"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "clause_tags": {"feature": {"num_classes": 4, "names": ["O", "B_CLS", "I_CLS", "E_CLS"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "lst20", "config_name": "lst20", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes":
+{"lst20": {"description": "LST20 Corpus is a dataset for Thai language processing developed by National Electronics and Computer Technology Center (NECTEC), Thailand.\nIt offers five layers of linguistic annotation: word boundaries, POS tagging, named entities, clause boundaries, and sentence boundaries.\nAt a large scale, it consists of 3,164,002 words, 288,020 named entities, 248,181 clauses, and 74,180 sentences, while it is annotated with\n16 distinct POS tags. All 3,745 documents are also annotated with one of 15 news genres. Regarding its sheer size, this dataset is\nconsidered large enough for developing joint neural models for NLP.\nManually download at https://aiforthai.in.th/corpus.php\n", "citation": "@article{boonkwan2020annotation,\n title={The Annotation Guideline of LST20 Corpus},\n author={Boonkwan, Prachya and Luantangsrisuk, Vorapon and Phaholphinyo, Sitthaa and Kriengket, Kanyanat and Leenoi, Dhanon and Phrombut, Charun and Boriboon, Monthika and Kosawat, Krit and Supnithi, Thepchai},\n journal={arXiv preprint arXiv:2008.05055},\n year={2020}\n}\n", "homepage": "https://aiforthai.in.th/", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "fname": {"dtype": "string", "id": null, "_type": "Value"}, "tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pos_tags": {"feature": {"num_classes": 16, "names": ["NN", "VV", "PU", "CC", "PS", "AX", "AV", "FX", "NU", "AJ", "CL", "PR", "NG", "PA", "XX", "IJ"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 31, "names": ["O", "B_BRN", "B_DES", "B_DTM", "B_LOC", "B_MEA", "B_NUM", "B_ORG", "B_PER", "B_TRM", "B_TTL", "I_BRN", "I_DES", "I_DTM", "I_LOC", "I_MEA", "I_NUM", "I_ORG", "I_PER", "I_TRM", "I_TTL", "E_BRN", "E_DES", "E_DTM", "E_LOC", "E_MEA", "E_NUM", "E_ORG", "E_PER", "E_TRM", "E_TTL"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "clause_tags": {"feature": {"num_classes": 4, "names": ["O", "B_CLS", "I_CLS", "E_CLS"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "lst20", "config_name": "lst20", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 107725145, "num_examples": 63310, "dataset_name": "lst20"}, "validation": {"name": "validation", "num_bytes": 9646167, "num_examples": 5620, "dataset_name": "lst20"}, "test": {"name": "test", "num_bytes": 8217425, "num_examples": 5250, "dataset_name": "lst20"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 125588737, "size_in_bytes": 125588737}}
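For reference, the feature metadata above (Sequence-of-ClassLabel columns for POS, NER, and clause tags) is what `load_dataset` exposes at runtime. Below is a minimal sketch of loading the corpus and decoding the integer labels back to tag names with the version of the datasets library referenced above; the corpus must be downloaded manually from https://aiforthai.in.th/corpus.php, and `path/to/LST20_Corpus` is a placeholder, not a path from this commit.

```python
from datasets import load_dataset

# Hypothetical local path to the manually downloaded corpus.
ds = load_dataset("lst20", data_dir="path/to/LST20_Corpus")

# The Sequence features wrap ClassLabel objects, whose .names attribute
# holds the tag vocabularies recorded in dataset_infos.json.
pos_labels = ds["train"].features["pos_tags"].feature.names   # 16 POS tags
ner_labels = ds["train"].features["ner_tags"].feature.names   # 31 BIE-style NE tags

example = ds["train"][0]
decoded_ner = [ner_labels[i] for i in example["ner_tags"]]
print(list(zip(example["tokens"], decoded_ner))[:10])
```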
lst20.py
CHANGED
@@ -188,11 +188,12 @@ class Lst20(datasets.GeneratorBasedBuilder):
                         ner_tags.append(ner_tag)
                         clause_tags.append(splits[3].rstrip())
                 # last example
-                yield guid, {
-                    "id": str(guid),
-                    "fname": Path(fname).name,
-                    "tokens": tokens,
-                    "pos_tags": pos_tags,
-                    "ner_tags": ner_tags,
-                    "clause_tags": clause_tags,
-                }
+                if tokens:
+                    yield guid, {
+                        "id": str(guid),
+                        "fname": Path(fname).name,
+                        "tokens": tokens,
+                        "pos_tags": pos_tags,
+                        "ner_tags": ner_tags,
+                        "clause_tags": clause_tags,
+                    }
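The added lines guard the final yield with `if tokens:`. The sketch below is not the dataset script itself; it is a self-contained illustration of the same accumulate-and-flush pattern, assuming a four-column, tab-separated input with blank lines between sentences, and it shows why the guard prevents an empty record from being emitted when a file ends on a blank line.

```python
from pathlib import Path


def generate_examples(filepath):
    """Yield (id, example) pairs from a tab-separated, blank-line-delimited file."""
    guid = 0
    tokens, pos_tags, ner_tags, clause_tags = [], [], [], []
    with open(filepath, encoding="utf-8") as f:
        for line in f:
            if line.strip() == "":
                # A blank line ends a sentence: flush the accumulated example.
                if tokens:
                    yield guid, {
                        "id": str(guid),
                        "fname": Path(filepath).name,
                        "tokens": tokens,
                        "pos_tags": pos_tags,
                        "ner_tags": ner_tags,
                        "clause_tags": clause_tags,
                    }
                    guid += 1
                    tokens, pos_tags, ner_tags, clause_tags = [], [], [], []
            else:
                # Assumed layout: word, POS, NE, clause tag separated by tabs.
                word, pos, ner, clause = line.rstrip("\n").split("\t")
                tokens.append(word)
                pos_tags.append(pos)
                ner_tags.append(ner)
                clause_tags.append(clause)
    # Last example: only emit it if the file did not already end on a blank
    # line; without this guard an empty final record would be yielded at EOF.
    if tokens:
        yield guid, {
            "id": str(guid),
            "fname": Path(filepath).name,
            "tokens": tokens,
            "pos_tags": pos_tags,
            "ner_tags": ner_tags,
            "clause_tags": clause_tags,
        }
```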