Languages: Burmese
holylovenia committed
Commit 123bdb6
1 Parent(s): bf39853

Upload myxnli.py with huggingface_hub
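
A commit like this is typically produced with the huggingface_hub client. A minimal sketch of such an upload follows; the repo_id is an illustrative placeholder, not taken from this page:

from huggingface_hub import HfApi

api = HfApi()
# Push the dataloader script to a dataset repo on the Hub.
api.upload_file(
    path_or_fileobj="myxnli.py",
    path_in_repo="myxnli.py",
    repo_id="SEACrowd/myxnli",  # assumption: replace with the actual dataset repo id
    repo_type="dataset",
    commit_message="Upload myxnli.py with huggingface_hub",
)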

Files changed (1)
  1. myxnli.py +143 -0
myxnli.py ADDED
@@ -0,0 +1,143 @@
+ from pathlib import Path
+
+ import datasets
+ import pandas as pd
+
+ from seacrowd.utils import schemas
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import Licenses, Tasks
+
+
+ _CITATION = """
+ @misc{myXNLI2023,
+     title = "myXNLI",
+     author = "akhtet",
+     year = "2023",
+     url = "https://github.com/akhtet/myXNLI",
+ }
+ """
+
+ _DATASETNAME = "myxnli"
+
+ _DESCRIPTION = """
+ The myXNLI corpus is a collection of Myanmar language data designed for the Natural Language Inference (NLI) task,
+ derived from the XNLI and MultiNLI English datasets. The 7,500 sentence pairs from the XNLI English development and
+ test sets are human-translated into Myanmar, while the 392,702 sentence pairs from the MultiNLI English training data
+ are machine-translated. In addition, it extends the XNLI 15-language parallel corpus with Myanmar translations,
+ creating a 16-language parallel corpus.
+ """
+
+ _HOMEPAGE = "https://github.com/akhtet/myXNLI"
+
+ _LANGUAGES = ["mya"]
+
+ _LICENSE = Licenses.CC_BY_NC_4_0.value
+
+ _LOCAL = False
+
+ _URLS = {
+     _DATASETNAME: {
+         "train": "https://huggingface.co/datasets/akhtet/myXNLI/resolve/main/data/train-00000-of-00001-2614419e00195781.parquet",
+         "dev": "https://huggingface.co/datasets/akhtet/myXNLI/resolve/main/data/validation-00000-of-00001-9c168eb31d1d810b.parquet",
+         "test": "https://huggingface.co/datasets/akhtet/myXNLI/resolve/main/data/test-00000-of-00001-0fd9f93baf8c9cdb.parquet",
+     },
+ }
+
+ _SUPPORTED_TASKS = [Tasks.TEXTUAL_ENTAILMENT]
+
+ _SOURCE_VERSION = "1.1.0"
+
+ _SEACROWD_VERSION = "2024.06.20"
+
+
+ class MyXNLIDataset(datasets.GeneratorBasedBuilder):
+     """The myXNLI corpus is a collection of Myanmar language data designed for the Natural Language Inference task."""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
+
+     BUILDER_CONFIGS = [
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_source",
+             version=SOURCE_VERSION,
+             description=f"{_DATASETNAME} source schema",
+             schema="source",
+             subset_id=_DATASETNAME,
+         ),
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_seacrowd_pairs",
+             version=SEACROWD_VERSION,
+             description=f"{_DATASETNAME} SEACrowd schema",
+             schema="seacrowd_pairs",
+             subset_id=_DATASETNAME,
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "genre": datasets.Value("string"),
+                     "label": datasets.ClassLabel(names=["contradiction", "entailment", "neutral"]),
+                     "sentence1_en": datasets.Value("string"),
+                     "sentence2_en": datasets.Value("string"),
+                     "sentence1_my": datasets.Value("string"),
+                     "sentence2_my": datasets.Value("string"),
+                 }
+             )
+
+         elif self.config.schema == "seacrowd_pairs":
+             features = schemas.pairs_features(["contradiction", "entailment", "neutral"])
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> list[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         urls = _URLS[_DATASETNAME]
+         data_dir = dl_manager.download_and_extract(urls)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"filepath": data_dir, "split": "train"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"filepath": data_dir, "split": "test"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"filepath": data_dir, "split": "dev"},
+             ),
+         ]
+
+     def _generate_examples(self, filepath: Path, split: str) -> tuple[int, dict]:
+         if self.config.schema == "source":
+             df = pd.read_parquet(filepath[split])
+             for i, row in df.iterrows():
+                 yield i, {
+                     "genre": row["genre"],
+                     "label": row["label"],
+                     "sentence1_en": row["sentence1_en"],
+                     "sentence2_en": row["sentence2_en"],
+                     "sentence1_my": row["sentence1_my"],
+                     "sentence2_my": row["sentence2_my"],
+                 }
+
+         elif self.config.schema == "seacrowd_pairs":
+             df = pd.read_parquet(filepath[split])
+             for i, row in df.iterrows():
+                 yield i, {
+                     "id": str(i),
+                     "text_1": row["sentence1_my"],
+                     "text_2": row["sentence2_my"],
+                     "label": row["label"],
+                 }
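
Once uploaded, the builder can be exercised with the standard datasets loading API using the config names defined above. A minimal sketch; the local script path and the trust_remote_code flag are assumptions about your environment and datasets version:

import datasets

# Load the SEACrowd pairs view (Myanmar premise/hypothesis with NLI labels) from the local dataloader script.
dset = datasets.load_dataset("myxnli.py", name="myxnli_seacrowd_pairs", trust_remote_code=True)

print(dset)              # splits: train, test, validation
print(dset["train"][0])  # fields: id, text_1, text_2, label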