Datasets: JaNLI
Tasks: Text Classification
Sub-tasks: natural-language-inference
Languages: Japanese
Size: 10K - 100K
License: CC BY-SA 4.0
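The loading script below builds both configurations from a single TSV file in the JaNLI repository. For a quick look at the raw data before reading the script, the file can be loaded directly with pandas; this is an inspection sketch only, with the URL and parsing options mirroring _DOWNLOAD_URL and _split_generators below.

# Inspection sketch: read the raw JaNLI TSV directly.
# The URL and read options are copied from the loading script below.
import pandas as pd

raw = pd.read_table(
    "https://raw.githubusercontent.com/verypluming/JaNLI/main/janli.tsv",
    header=0,
    sep="\t",
    index_col=0,
)
# Expect columns such as sentence_A_Ja, sentence_B_Ja, entailment_label_Ja,
# heuristics, number_of_NPs, semtag, and split.
print(raw.columns.tolist())
print(raw["split"].value_counts())  # number of train vs. test rows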
import datasets as ds
import pandas as pd
_CITATION = """\ | |
@InProceedings{yanaka-EtAl:2021:blackbox, | |
author = {Yanaka, Hitomi and Mineshima, Koji}, | |
title = {Assessing the Generalization Capacity of Pre-trained Language Models through Japanese Adversarial Natural Language Inference}, | |
booktitle = {Proceedings of the 2021 EMNLP Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP (BlackboxNLP2021)}, | |
year = {2021}, | |
} | |
""" | |
_DESCRIPTION = "The JaNLI (Japanese Adversarial NLI) dataset, inspired by the English HANS dataset, is designed to require an understanding of Japanese linguistic phenomena and to expose the vulnerabilities of models."

_HOMEPAGE = "https://github.com/verypluming/JaNLI"

_LICENSE = "CC BY-SA 4.0"

_DOWNLOAD_URL = "https://raw.githubusercontent.com/verypluming/JaNLI/main/janli.tsv"
class JaNLIDataset(ds.GeneratorBasedBuilder):
    VERSION = ds.Version("1.0.0")
    DEFAULT_CONFIG_NAME = "base"

    # Two configurations: "base" renames the columns to conventional NLI names,
    # while "original" keeps the column names of the source TSV.
    BUILDER_CONFIGS = [
        ds.BuilderConfig(
            name="base",
            version=VERSION,
            description="A version adopting the column names of a typical NLI dataset.",
        ),
        ds.BuilderConfig(
            name="original",
            version=VERSION,
            description="The original version retaining the unaltered column names.",
        ),
    ]
    def _info(self) -> ds.DatasetInfo:
        # The feature schema depends on the selected configuration.
        if self.config.name == "base":
            features = ds.Features(
                {
                    "id": ds.Value("int64"),
                    "premise": ds.Value("string"),
                    "hypothesis": ds.Value("string"),
                    "label": ds.ClassLabel(names=["entailment", "non-entailment"]),
                    "heuristics": ds.Value("string"),
                    "number_of_NPs": ds.Value("int32"),
                    "semtag": ds.Value("string"),
                }
            )
        elif self.config.name == "original":
            features = ds.Features(
                {
                    "id": ds.Value("int64"),
                    "sentence_A_Ja": ds.Value("string"),
                    "sentence_B_Ja": ds.Value("string"),
                    "entailment_label_Ja": ds.ClassLabel(names=["entailment", "non-entailment"]),
                    "heuristics": ds.Value("string"),
                    "number_of_NPs": ds.Value("int32"),
                    "semtag": ds.Value("string"),
                }
            )
        return ds.DatasetInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            features=features,
        )
    def _split_generators(self, dl_manager: ds.DownloadManager):
        data_path = dl_manager.download_and_extract(_DOWNLOAD_URL)
        df: pd.DataFrame = pd.read_table(data_path, header=0, sep="\t", index_col=0)
        # Keep the TSV's index as an explicit "id" column.
        df["id"] = df.index

        if self.config.name == "base":
            # Rename the source columns to conventional NLI column names.
            df = df.rename(
                columns={
                    "sentence_A_Ja": "premise",
                    "sentence_B_Ja": "hypothesis",
                    "entailment_label_Ja": "label",
                }
            )

        # The TSV's "split" column marks train/test membership.
        return [
            ds.SplitGenerator(
                name=ds.Split.TRAIN,
                gen_kwargs={"df": df[df["split"] == "train"]},
            ),
            ds.SplitGenerator(
                name=ds.Split.TEST,
                gen_kwargs={"df": df[df["split"] == "test"]},
            ),
        ]
    def _generate_examples(self, df: pd.DataFrame):
        # Drop the split marker; every remaining row becomes one example.
        df = df.drop("split", axis=1)
        for i, row in enumerate(df.to_dict("records")):
            yield i, row
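Assuming the script above is saved locally (here as janli.py, a hypothetical filename) and a version of the datasets library that still supports dataset loading scripts, a minimal usage sketch looks like this:

# Usage sketch; "janli.py" is an assumed local filename for the script above.
from datasets import load_dataset

# "base" config: premise / hypothesis / label columns.
janli = load_dataset("janli.py", name="base")
print(janli)  # DatasetDict with "train" and "test" splits

example = janli["train"][0]
print(example["premise"], example["hypothesis"])
# "label" is a ClassLabel stored as an integer; map it back to its name.
print(janli["train"].features["label"].int2str(example["label"]))

# "original" config: keeps the raw column names from janli.tsv.
janli_original = load_dataset("janli.py", name="original")
print(janli_original["test"].features["entailment_label_Ja"].names)  # ['entailment', 'non-entailment']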