holylovenia committed on
Commit a361845
1 Parent(s): 049fad8

Upload xnli.py with huggingface_hub

Files changed (1)
  1. xnli.py +214 -0
xnli.py ADDED
# Some code referenced from https://huggingface.co/datasets/xnli/blob/main/xnli.py

"""
The Cross-lingual Natural Language Inference (XNLI) corpus is a crowd-sourced collection of 5,000 test and 2,500 dev pairs for the MultiNLI corpus. The pairs are annotated with textual entailment and translated into 14 languages: French, Spanish, German, Greek, Bulgarian, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, Hindi, Swahili and Urdu. This results in 112.5k annotated pairs. Each premise can be associated with the corresponding hypothesis in the 15 languages, summing up to more than 1.5M combinations.
"""

import csv
import os
from pathlib import Path
from typing import Dict, List, Tuple

import datasets

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_CITATION = """\
@InProceedings{conneau2018xnli,
  author = "Conneau, Alexis
            and Rinott, Ruty
            and Lample, Guillaume
            and Williams, Adina
            and Bowman, Samuel R.
            and Schwenk, Holger
            and Stoyanov, Veselin",
  title = "XNLI: Evaluating Cross-lingual Sentence Representations",
  booktitle = "Proceedings of the 2018 Conference on Empirical Methods
               in Natural Language Processing",
  year = "2018",
  publisher = "Association for Computational Linguistics",
  location = "Brussels, Belgium",
}
"""

_DATASETNAME = "xnli"

_DESCRIPTION = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated into 14 different languages (some low-ish resource). As with MNLI, the goal is to predict textual entailment (does sentence A imply/contradict/neither sentence B) and is a classification task (given two sentences, predict one of three labels).
"""

_HOMEPAGE = "https://github.com/facebookresearch/XNLI"

# We follow the ISO 639-3 language codes (https://iso639-3.sil.org/code_tables/639/data)
_LANGUAGES = ["tha", "vie"]
_LANGUAGE_MAPPER = {"tha": "th", "vie": "vi"}

_LICENSE = Licenses.CC_BY_NC_4_0.value

_LOCAL = False

_URLS = {
    _DATASETNAME: {
        "train": "https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip",
        "test": "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip",
    }
}
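
# For orientation, the archives above unpack to roughly this layout (inferred
# from the paths assembled in _split_generators below, not re-verified against
# the zips):
#   XNLI-MT-1.0/multinli/multinli.train.{lang}.tsv   one machine-translated train file per language
#   XNLI-1.0/xnli.dev.tsv, XNLI-1.0/xnli.test.tsv    all languages in one file each,
#                                                    filtered by the "language" column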

_SUPPORTED_TASKS = [Tasks.TEXTUAL_ENTAILMENT]

_SOURCE_VERSION = "1.1.0"

_SEACROWD_VERSION = "2024.06.20"


class XNLIDataset(datasets.GeneratorBasedBuilder):
    """
    XNLI is an evaluation corpus for language transfer and cross-lingual sentence classification in 15 languages.
    In SeaCrowd, we currently cover only Thai and Vietnamese.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    subsets = ["xnli.tha", "xnli.vie"]

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{sub}_source",
            version=datasets.Version(_SOURCE_VERSION),
            description=f"{sub} source schema",
            schema="source",
            subset_id=f"{sub}",
        )
        for sub in subsets
    ] + [
        SEACrowdConfig(
            name=f"{sub}_seacrowd_pairs",
            version=datasets.Version(_SEACROWD_VERSION),
            description=f"{sub} SEACrowd schema",
            schema="seacrowd_pairs",
            subset_id=f"{sub}",
        )
        for sub in subsets
    ]
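
    # The two comprehensions above produce four configs in total:
    #   xnli.tha_source, xnli.vie_source,
    #   xnli.tha_seacrowd_pairs, and xnli.vie_seacrowd_pairs.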

    DEFAULT_CONFIG_NAME = "xnli.vie_source"
    labels = ["contradiction", "entailment", "neutral"]

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "premise": datasets.Value("string"),
                    "hypothesis": datasets.Value("string"),
                    "label": datasets.ClassLabel(names=self.labels),
                }
            )

        elif self.config.schema == "seacrowd_pairs":
            features = schemas.pairs_features(self.labels)

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""

        urls = _URLS[_DATASETNAME]
        data_dir = dl_manager.download_and_extract(urls)

        xnli_train = os.path.join(data_dir["train"], "XNLI-MT-1.0", "multinli")
        train_data_path = os.path.join(xnli_train, "multinli.train.{}.tsv")

        xnli_test = os.path.join(data_dir["test"], "XNLI-1.0")
        val_data_path = os.path.join(xnli_test, "xnli.dev.tsv")
        test_data_path = os.path.join(xnli_test, "xnli.test.tsv")

        # e.g. "xnli.vie_source" -> "vie"
        lang = self.config.name.split("_")[0].split(".")[-1]

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": train_data_path,
                    "split": "train",
                    "language": _LANGUAGE_MAPPER[lang],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": val_data_path,
                    "split": "dev",
                    "language": _LANGUAGE_MAPPER[lang],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": test_data_path,
                    "split": "test",
                    "language": _LANGUAGE_MAPPER[lang],
                },
            ),
        ]

    def _generate_examples(self, filepath: Path, split: str, language: str) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples."""

        if self.config.schema == "source":
            if split == "train":
                # Per-language train files use "premise"/"hypo" columns and spell the
                # label "contradictory"; normalize it to "contradiction".
                with open(filepath.format(language), encoding="utf-8") as file:
                    reader = csv.DictReader(file, delimiter="\t", quoting=csv.QUOTE_NONE)
                    for row_idx, row in enumerate(reader):
                        key = str(row_idx)
                        yield key, {
                            "premise": row["premise"],
                            "hypothesis": row["hypo"],
                            "label": row["label"].replace("contradictory", "contradiction"),
                        }
            else:
                # dev/test files contain all languages; keep only rows for the requested one.
                with open(filepath, encoding="utf-8") as f:
                    reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
                    for row in reader:
                        if row["language"] == language:
                            yield row["pairID"], {
                                "premise": row["sentence1"],
                                "hypothesis": row["sentence2"],
                                "label": row["gold_label"],
                            }
        elif self.config.schema == "seacrowd_pairs":
            if split == "train":
                with open(filepath.format(language), encoding="utf-8") as file:
                    reader = csv.DictReader(file, delimiter="\t", quoting=csv.QUOTE_NONE)
                    for row_idx, row in enumerate(reader):
                        yield str(row_idx), {
                            "id": str(row_idx),
                            "text_1": row["premise"],
                            "text_2": row["hypo"],
                            "label": row["label"].replace("contradictory", "contradiction"),
                        }
            else:
                with open(filepath, encoding="utf-8") as f:
                    reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
                    skip = set()
                    for row in reader:
                        if row["language"] == language:
                            # Deduplicate on pairID so example keys stay unique.
                            if row["pairID"] in skip:
                                continue
                            skip.add(row["pairID"])
                            yield row["pairID"], {
                                "id": row["pairID"],
                                "text_1": row["sentence1"],
                                "text_2": row["sentence2"],
                                "label": row["gold_label"],
                            }
        else:
            raise ValueError(f"Invalid config: {self.config.name}")
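
For reference, a minimal sketch of how this loader might be exercised once the file is available locally. The script path here is an assumption about the caller's setup, and it assumes a datasets version that still supports script-based loaders; the config names come from BUILDER_CONFIGS above, and the seacrowd package imported at the top of the file must be installed:

    import datasets

    # "xnli.py" is assumed to be the local path to this script.
    dset = datasets.load_dataset("xnli.py", name="xnli.vie_seacrowd_pairs")

    # Each example follows the seacrowd_pairs schema built in _generate_examples.
    print(dset["train"][0])  # {"id": ..., "text_1": ..., "text_2": ..., "label": ...}

Any of the four config names works; omitting name falls back to DEFAULT_CONFIG_NAME ("xnli.vie_source").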