---
license: cc-by-4.0
configs:
  - config_name: default
    data_files:
      - split: train
        path: data/train-*
      - split: validation
        path: data/validation-*
      - split: test
        path: data/test-*
dataset_info:
  features:
    - name: premise
      dtype: string
    - name: hypothesis
      dtype: string
    - name: label
      dtype: string
    - name: config
      dtype: string
  splits:
    - name: train
      num_bytes: 4691316
      num_examples: 25232
    - name: validation
      num_bytes: 801878
      num_examples: 4624
    - name: test
      num_bytes: 1224540
      num_examples: 7216
  download_size: 956275
  dataset_size: 6717734
---
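To load the data with the 🤗 Datasets library, a minimal sketch (the repo id `sileod/resnli` is assumed from this card's location on the Hub; adjust if needed):

```python
from datasets import load_dataset

# Repo id assumed from this card's location; adjust if needed.
dataset = load_dataset("sileod/resnli")

# Splits match the metadata above: train (25,232), validation (4,624),
# and test (7,216) examples, each with four string fields:
# premise, hypothesis, label, and config.
print(dataset)
print(dataset["train"][0])
```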

ResNLI combines WikiResNLI (controlled, synthetic) and NatResNLI (naturally occurring), two NLI datasets covering explicit and implicit realizations of "respectively" readings, e.g. "Niels Bohr and Kurt Cobain were born in Copenhagen and Seattle, respectively". Original code and data: https://github.com/ruixiangcui/WikiResNLI_NatResNLI
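The `config` field records which subset or construction each example comes from; the exact values are not listed on this card. A quick sketch to enumerate them, assuming `dataset` was loaded as above:

```python
from collections import Counter

# Count examples per value of the `config` field in the training split.
config_counts = Counter(dataset["train"]["config"])
for name, count in sorted(config_counts.items()):
    print(f"{name}: {count}")
```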

```bibtex
@inproceedings{cui-etal-2023-failure,
    title = "What does the Failure to Reason with {``}Respectively{''} in Zero/Few-Shot Settings Tell Us about Language Models?",
    author = "Cui, Ruixiang  and
      Lee, Seolhwa  and
      Hershcovich, Daniel  and
      S{\o}gaard, Anders",
    booktitle = "Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
    month = jul,
    year = "2023",
    address = "Toronto, Canada",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.acl-long.489",
    pages = "8786--8800",
    abstract = "Humans can effortlessly understand the coordinate structure of sentences such as {``}Niels Bohr and Kurt Cobain were born in Copenhagen and Seattle, *respectively*{''}. In the context of natural language inference (NLI), we examine how language models (LMs) reason with respective readings (Gawron and Kehler, 2004) from two perspectives: syntactic-semantic and commonsense-world knowledge. We propose a controlled synthetic dataset WikiResNLI and a naturally occurring dataset NatResNLI to encompass various explicit and implicit realizations of {``}respectively{''}. We show that fine-tuned NLI models struggle with understanding such readings without explicit supervision. While few-shot learning is easy in the presence of explicit cues, longer training is required when the reading is evoked implicitly, leaving models to rely on common sense inferences. Furthermore, our fine-grained analysis indicates models fail to generalize across different constructions. To conclude, we demonstrate that LMs still lag behind humans in generalizing to the long tail of linguistic constructions.",
}
```