Tasks: Text Classification
Modalities: Text
Formats: csv
Languages: English
Size: 10K - 100K
License:
File size: 2,364 Bytes
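The loading script below exposes three string features (sentence_id, sentence, label) over train, validation and test splits. As a minimal sketch, assuming the dataset is published on the Hugging Face Hub, it could be pulled with the datasets library; the repository id here is a placeholder, not the actual Hub path:

from datasets import load_dataset

# "your-username/multilang_dataset" is a placeholder repo id, not the real Hub path.
# Depending on the installed datasets version, loading a script-based dataset from
# the Hub may additionally require trust_remote_code=True.
dataset = load_dataset("your-username/multilang_dataset")
print(dataset)              # DatasetDict with train/validation/test splits
print(dataset["train"][0])  # {'sentence_id': ..., 'sentence': ..., 'label': ...}

The loading script itself: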
"""Multilang Dataset loading script."""
from datasets import DatasetDict, DatasetInfo, BuilderConfig, Version, GeneratorBasedBuilder
from datasets import SplitGenerator, Split, Features, Value
import os
_DESCRIPTION = """
This dataset includes multilingual data for language classification tasks across several languages.
"""
_CITATION = """\
@InProceedings{huggingface:multilang_dataset,
title = {Multilingual Text Dataset},
authors = {Your Name},
year = {2024}
}
"""
_LICENSE = "Your dataset's license here."
class MultilangDataset(GeneratorBasedBuilder):
"""A multilingual text dataset."""
BUILDER_CONFIGS = [
BuilderConfig(name="multilang_dataset", version=Version("1.0.0"), description="Multilingual dataset for text classification."),
]
DEFAULT_CONFIG_NAME = "multilang_dataset" # Default configuration name.
def _info(self):
return DatasetInfo(
description=_DESCRIPTION,
features=Features({
"Sentence_id": Value("string"),
"Text": Value("string"),
"class_label": Value("string"),
}),
supervised_keys=("Text", "class_label"),
homepage="https://www.example.com",
citation=_CITATION,
license=_LICENSE,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# Assumes your dataset is located in "."
data_dir = os.path.abspath(".")
splits = {"train": Split.TRAIN, "dev": Split.VALIDATION, "dev-test": Split.TEST}
return [
SplitGenerator(
name=splits[split],
gen_kwargs={
"filepath": os.path.join(data_dir, f"{split}.tsv"),
"split": splits[split]
},
)
for split in splits.keys()
]
def _generate_examples(self, filepath, split):
"""Yields examples."""
with open(filepath, encoding="utf-8") as f:
for id_, row in enumerate(f):
if id_ == 0: # Optionally skip header
continue
cols = row.strip().split('\t')
yield f"{split}_{id_}", {
"sentence_id": cols[0],
"sentence": cols[1],
"label": cols[2],
}
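The script expects three tab-separated columns (sentence_id, sentence, label) with one header row, in files named train.tsv, dev.tsv and dev-test.tsv located in the working directory. A minimal sketch of exercising the builder locally; the sample rows, label values and the script filename multilang_dataset.py are illustrative assumptions, not actual dataset content:

from pathlib import Path
from datasets import load_dataset

header = "sentence_id\tsentence\tlabel\n"
rows = [
    "1\tHello world\ten\n",        # illustrative rows, not real data
    "2\tBonjour le monde\tfr\n",
]
for split in ("train", "dev", "dev-test"):
    Path(f"{split}.tsv").write_text(header + "".join(rows), encoding="utf-8")

# Point load_dataset at the script file itself (assumed to be saved as multilang_dataset.py).
# Depending on the installed datasets version this may require trust_remote_code=True
# or an older release that still supports script-based builders.
dataset = load_dataset("multilang_dataset.py")
print(dataset["train"][0])  # {'sentence_id': '1', 'sentence': 'Hello world', 'label': 'en'}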