---
configs:
  - config_name: default
    data_files:
      - split: train
        path: data/train-*
      - split: validation
        path: data/validation-*
      - split: test
        path: data/test-*
dataset_info:
  features:
    - name: premise
      dtype: string
    - name: hypothesis
      dtype: string
    - name: label
      dtype: string
  splits:
    - name: train
      num_bytes: 19241020
      num_examples: 16000
    - name: validation
      num_bytes: 2359422
      num_examples: 2000
    - name: test
      num_bytes: 2368137
      num_examples: 2000
  download_size: 713871
  dataset_size: 23968579
---

# Dataset Card for "LogicNLI"
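
The dataset provides three string columns, `premise`, `hypothesis`, and `label`, split into train (16,000 examples), validation (2,000), and test (2,000), as described in the metadata above. The snippet below is a minimal loading sketch using the `datasets` library; the repository id is a hypothetical placeholder, so substitute the actual Hub path of this dataset.

```python
from datasets import load_dataset

# Minimal loading sketch. "sileod/LogicNLI" is a hypothetical repository id;
# replace it with the actual Hugging Face Hub path of this dataset.
ds = load_dataset("sileod/LogicNLI")

print(ds)  # DatasetDict with "train", "validation", and "test" splits

example = ds["train"][0]
print(example["premise"])     # string: the facts and rules to reason over
print(example["hypothesis"])  # string: the statement to classify
print(example["label"])       # string: the gold relation label
```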

```bibtex
@inproceedings{tian-etal-2021-diagnosing,
    title = "Diagnosing the First-Order Logical Reasoning Ability Through {L}ogic{NLI}",
    author = "Tian, Jidong  and
      Li, Yitian  and
      Chen, Wenqing  and
      Xiao, Liqiang  and
      He, Hao  and
      Jin, Yaohui",
    editor = "Moens, Marie-Francine  and
      Huang, Xuanjing  and
      Specia, Lucia  and
      Yih, Scott Wen-tau",
    booktitle = "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing",
    month = nov,
    year = "2021",
    address = "Online and Punta Cana, Dominican Republic",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.emnlp-main.303",
    doi = "10.18653/v1/2021.emnlp-main.303",
    pages = "3738--3747",
    abstract = "Recently, language models (LMs) have achieved significant performance on many NLU tasks, which has spurred widespread interest for their possible applications in the scientific and social area. However, LMs have faced much criticism of whether they are truly capable of reasoning in NLU. In this work, we propose a diagnostic method for first-order logic (FOL) reasoning with a new proposed benchmark, LogicNLI. LogicNLI is an NLI-style dataset that effectively disentangles the target FOL reasoning from commonsense inference and can be used to diagnose LMs from four perspectives: accuracy, robustness, generalization, and interpretability. Experiments on BERT, RoBERTa, and XLNet, have uncovered the weaknesses of these LMs on FOL reasoning, which motivates future exploration to enhance the reasoning ability.",
}
```

https://github.com/omnilabNLP/LogicNLI