"""The Adversarial NLI Corpus.""" |


import json
import os

import datasets


_CITATION = """\
@InProceedings{nie2019adversarial,
    title={Adversarial NLI: A New Benchmark for Natural Language Understanding},
    author={Nie, Yixin
        and Williams, Adina
        and Dinan, Emily
        and Bansal, Mohit
        and Weston, Jason
        and Kiela, Douwe},
    booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
    year = "2020",
    publisher = "Association for Computational Linguistics",
}
"""

_DESCRIPTION = """\
The Adversarial Natural Language Inference (ANLI) corpus is a large-scale NLI benchmark dataset
collected via an iterative, adversarial human-and-model-in-the-loop procedure.
ANLI is much more difficult than its predecessors, including SNLI and MNLI.
It contains three rounds, each with train/dev/test splits.
"""
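
# The two configurations ("minority_examples" and "partial_input") each expose six
# splits: train/validation/test in "biased" and "anti_biased" variants (see
# _split_generators below).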


class ANLIConfig(datasets.BuilderConfig):
    """BuilderConfig for ANLI."""

    def __init__(self, **kwargs):
        """BuilderConfig for ANLI.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(version=datasets.Version("0.1.0", ""), **kwargs)


class ANLI(datasets.GeneratorBasedBuilder):
    """ANLI: The ANLI Dataset."""

    BUILDER_CONFIGS = [
        ANLIConfig(
            name=bias_amplified_splits_type,
            description="",
        )
        for bias_amplified_splits_type in ["minority_examples", "partial_input"]
    ]
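
    # The config name doubles as the data directory: _split_generators downloads
    # <config_name>/<split>.jsonl for each split.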

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "round": datasets.Value("string"),
                    "uid": datasets.Value("string"),
                    "premise": datasets.Value("string"),
                    "hypothesis": datasets.Value("string"),
                    "label": datasets.features.ClassLabel(names=["entailment", "neutral", "contradiction"]),
                    "reason": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/facebookresearch/anli/",
            citation=_CITATION,
        )
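
    # Helper for building a text vocabulary: streams each example's premise and
    # hypothesis joined by a single space.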
    def _vocab_text_gen(self, filepath):
        for _, ex in self._generate_examples(filepath):
            yield " ".join([ex["premise"], ex["hypothesis"]])

    def _split_generators(self, dl_manager):
        split_names = [
            "train.biased",
            "train.anti_biased",
            "validation.biased",
            "validation.anti_biased",
            "test.biased",
            "test.anti_biased",
        ]
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={"filepath": dl_manager.download(os.path.join(self.config.name, f"{split_name}.jsonl"))},
            )
            for split_name in split_names
        ]
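
    # Each data file is JSON Lines. A record looks roughly like this (values are
    # illustrative, not taken from the real data):
    #   {"round": "R1", "uid": "...", "premise": "...", "hypothesis": "...",
    #    "label": "neutral", "reason": "..."}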

    def _generate_examples(self, filepath):
        """Generate examples.

        Args:
            filepath: a string

        Yields:
            (key, example) tuples, where example is a dict with "round", "uid",
            "premise", "hypothesis", "label" and "reason" string entries
        """
        with open(filepath, "rb") as f:
            for line in f:
                line = line.strip().decode("utf-8")
                if not line:
                    continue
                item = json.loads(line)

                # "reason" may be absent in the raw data; default to an empty string.
                reason_text = item.get("reason", "")

                yield f'{item["round"]}-{item["uid"]}', {
                    "round": item["round"],
                    "uid": item["uid"],
                    "premise": item["premise"],
                    "hypothesis": item["hypothesis"],
                    "label": item["label"],
                    "reason": reason_text,
                }
|
|
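if __name__ == "__main__":
    # Minimal smoke test (a sketch, not part of the original script): assumes the
    # <config_name>/<split>.jsonl files sit next to this script and that the
    # installed `datasets` version still supports loading from a local script path.
    ds = datasets.load_dataset(__file__, "minority_examples", split="train.biased")
    print(ds)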