from pathlib import Path
import datasets
import pandas as pd
from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_CITATION = """
@misc{myXNLI2023,
title = "myXNLI",
author = "akhtet",
year = "202",
url = "https://github.com/akhtet/myXNLI",
}
"""
_DATASETNAME = "myxnli"
_DESCRIPTION = """
The myXNLI corpus is a collection of Myanmar-language data for the Natural Language Inference (NLI) task, derived
from the XNLI and MultiNLI English datasets. The 7,500 sentence pairs of the XNLI English development and test sets
are human-translated into Myanmar, while the 392,702 sentence pairs of the MultiNLI English training data are
machine-translated. In addition, the corpus adds Myanmar translations to the XNLI 15-language parallel corpus,
creating a 16-language parallel corpus.
"""
_HOMEPAGE = "https://github.com/akhtet/myXNLI"
_LANGUAGES = ["mya"]
_LICENSE = Licenses.CC_BY_NC_4_0.value
_LOCAL = False
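
# Per-split parquet exports of the dataset hosted on the Hugging Face Hub;
# the hex suffixes below are part of the hosted filenames.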
_URLS = {
_DATASETNAME: {
"train": "https://huggingface.co/datasets/akhtet/myXNLI/resolve/main/data/train-00000-of-00001-2614419e00195781.parquet",
"dev": "https://huggingface.co/datasets/akhtet/myXNLI/resolve/main/data/validation-00000-of-00001-9c168eb31d1d810b.parquet",
"test": "https://huggingface.co/datasets/akhtet/myXNLI/resolve/main/data/test-00000-of-00001-0fd9f93baf8c9cdb.parquet",
},
}
_SUPPORTED_TASKS = [Tasks.TEXTUAL_ENTAILMENT]

_SOURCE_VERSION = "1.1.0"

_SEACROWD_VERSION = "2024.06.20"


class MyXNLIDataset(datasets.GeneratorBasedBuilder):
"""The myXNLI corpus is a collection of Myanmar language data designed for the Natural Language Inference task."""
SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
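
    # Two configurations are exposed: the raw "source" schema, which keeps the
    # original dataset columns, and the shared SEACrowd "pairs" schema used for
    # sentence-pair tasks across the SEACrowd catalogue.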
BUILDER_CONFIGS = [
SEACrowdConfig(
name=f"{_DATASETNAME}_source",
version=SOURCE_VERSION,
description=f"{_DATASETNAME} source schema",
schema="source",
subset_id=_DATASETNAME,
),
SEACrowdConfig(
name=f"{_DATASETNAME}_seacrowd_pairs",
version=SEACROWD_VERSION,
description=f"{_DATASETNAME} SEACrowd schema",
schema="seacrowd_pairs",
subset_id=_DATASETNAME,
),
]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:
if self.config.schema == "source":
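            # The source schema mirrors the original parquet columns: genre,
            # label, and the parallel English/Myanmar sentence pairs.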
features = datasets.Features(
{
"genre": datasets.Value("string"),
"label": datasets.ClassLabel(names=["contradiction", "entailment", "neutral"]),
"sentence1_en": datasets.Value("string"),
"sentence2_en": datasets.Value("string"),
"sentence1_my": datasets.Value("string"),
"sentence2_my": datasets.Value("string"),
}
)
elif self.config.schema == "seacrowd_pairs":
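            # `schemas.pairs_features` builds the standard SEACrowd sentence-pair
            # schema (id, text_1, text_2, label) with the given label names.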
            features = schemas.pairs_features(["contradiction", "entailment", "neutral"])

        return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> list[datasets.SplitGenerator]:
"""Returns SplitGenerators."""
urls = _URLS[_DATASETNAME]
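        # `download_and_extract` accepts this nested mapping of URLs and returns
        # a structure of the same shape whose leaves are local cached file paths.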
data_dir = dl_manager.download_and_extract(urls)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"filepath": data_dir, "split": "train"},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={"filepath": data_dir, "split": "test"},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={"filepath": data_dir, "split": "dev"},
),
]

    def _generate_examples(self, filepath: Path, split: str) -> tuple[int, dict]:
        """Yields (key, example) pairs for the requested split.

        `filepath` is the mapping of split names to local parquet paths
        returned by `dl_manager.download_and_extract`.
        """
        df = pd.read_parquet(filepath[split])
        if self.config.schema == "source":
            for i, row in df.iterrows():
                yield i, {
                    "genre": row["genre"],
                    "label": row["label"],
                    "sentence1_en": row["sentence1_en"],
                    "sentence2_en": row["sentence2_en"],
                    "sentence1_my": row["sentence1_my"],
                    "sentence2_my": row["sentence2_my"],
                }
        elif self.config.schema == "seacrowd_pairs":
            for i, row in df.iterrows():
                yield i, {
                    "id": str(i),
                    "text_1": row["sentence1_my"],
                    "text_2": row["sentence2_my"],
                    "label": row["label"],
                }
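

# A minimal usage sketch, not part of the loader itself: it assumes this file is
# saved as a standalone script and that the installed `datasets` version still
# supports script-based builders (recent versions additionally require
# `trust_remote_code=True`).
if __name__ == "__main__":
    dset = datasets.load_dataset(__file__, name=f"{_DATASETNAME}_seacrowd_pairs", split="validation")
    # Each example follows the SEACrowd pairs schema: id, text_1, text_2, label.
    print(dset[0])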