# Source: clef2024_checkthat_task1_en / clef24_dataset_en.py
# Uploaded by vsty ("Update clef24_dataset_en.py"), commit f5f5bc2 (verified), 2.36 kB
# NOTE: the lines above were file-viewer page chrome ("raw / history / blame") captured
# by scraping; they have been turned into comments so the module is valid Python.
"""Multilang Dataset loading script."""
from datasets import DatasetDict, DatasetInfo, BuilderConfig, Version, GeneratorBasedBuilder
from datasets import SplitGenerator, Split, Features, Value
import os
# Human-readable summary shown on the dataset card / in DatasetInfo.
_DESCRIPTION = """
This dataset includes multilingual data for language classification tasks across several languages.
"""
# BibTeX citation attached to the dataset.
# NOTE(review): "Your Name" is template placeholder text — fill in the real authors.
_CITATION = """\
@InProceedings{huggingface:multilang_dataset,
title = {Multilingual Text Dataset},
authors = {Your Name},
year = {2024}
}
"""
# NOTE(review): placeholder — replace with the dataset's actual license identifier.
_LICENSE = "Your dataset's license here."
class MultilangDataset(GeneratorBasedBuilder):
"""A multilingual text dataset."""
BUILDER_CONFIGS = [
BuilderConfig(name="multilang_dataset", version=Version("1.0.0"), description="Multilingual dataset for text classification."),
]
DEFAULT_CONFIG_NAME = "multilang_dataset" # Default configuration name.
def _info(self):
return DatasetInfo(
description=_DESCRIPTION,
features=Features({
"Sentence_id": Value("string"),
"Text": Value("string"),
"class_label": Value("string"),
}),
supervised_keys=("Text", "class_label"),
homepage="https://www.example.com",
citation=_CITATION,
license=_LICENSE,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# Assumes your dataset is located in "."
data_dir = os.path.abspath(".")
splits = {"train": Split.TRAIN, "dev": Split.VALIDATION, "dev-test": Split.TEST}
return [
SplitGenerator(
name=splits[split],
gen_kwargs={
"filepath": os.path.join(data_dir, f"{split}.tsv"),
"split": splits[split]
},
)
for split in splits.keys()
]
def _generate_examples(self, filepath, split):
"""Yields examples."""
with open(filepath, encoding="utf-8") as f:
for id_, row in enumerate(f):
if id_ == 0: # Optionally skip header
continue
cols = row.strip().split('\t')
yield f"{split}_{id_}", {
"sentence_id": cols[0],
"sentence": cols[1],
"label": cols[2],
}