"""
This 🤗 dataset provides data for the GenBench CBT task 'The ICL consistency test' (see https://github.com/GenBench/genbench_cbt/tree/main/src/genbench/tasks/icl_consistency_test).

The ICL consistency test measures the consistency of LLM predictions on the same data points across many different, semantically equivalent prompting setups.
The score of the associated metric (Cohen's kappa) can be understood as a measure of a model's prediction consistency in the face of task-irrelevant information.

For an easy evaluation of any 🤗 model, we refer to the code provided in the GenBench task. For in-depth information on the task, we refer to the associated
publications (Weber et al., 2023a, 2023b) and the respective GenBench doc.md (https://github.com/GenBench/genbench_cbt/blob/main/src/genbench/tasks/icl_consistency_test/doc.md).

Evaluation on the relevant metrics can be done via the example_evaluation.py script in the GenBench repository.
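
Example of loading the data with the 🤗 datasets library (a minimal sketch: the dataset identifier below is a placeholder and should be replaced with the
actual location of this dataset; the "mnli" config is loaded analogously):

    from datasets import load_dataset

    data = load_dataset("<namespace>/icl_consistency_test", "anli", split="test")
    # each example pairs one underlying data point (data_ID) with one prompting setup (setup_ID)
    print(data[0]["data_ID"], data[0]["setup_ID"])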

- Weber, L., Bruni, E., & Hupkes, D. (2023a, December). Mind the instructions: a holistic evaluation of consistency and interactions in prompt-based learning.
  In Proceedings of the 27th Conference on Computational Natural Language Learning (CoNLL) (pp. 294-313).
- Weber, L., Bruni, E., & Hupkes, D. (2023b). The ICL Consistency Test. arXiv preprint arXiv:2312.04945.
"""

import json

import datasets
|
_CITATION = """\
@inproceedings{weber2023mind,
    title={Mind the instructions: a holistic evaluation of consistency and interactions in prompt-based learning},
    author={Weber, Lucas and Bruni, Elia and Hupkes, Dieuwke},
    booktitle={Proceedings of the 27th Conference on Computational Natural Language Learning (CoNLL)},
    pages={294--313},
    year={2023}
}
@article{weber2023icl,
    title={The ICL Consistency Test},
    author={Weber, Lucas and Bruni, Elia and Hupkes, Dieuwke},
    journal={arXiv preprint arXiv:2312.04945},
    year={2023}
}
"""

_DESCRIPTION = """\
In prompting, models are sensitive to task-irrelevant information in their prompt. This test can be used to quantify this sensitivity of any 🤗 model.
The ICL consistency test does this by measuring a model's prediction consistency across many different semantically equivalent prompting setups.
"""

_HOMEPAGE = "https://github.com/GenBench/genbench_cbt/blob/main/src/genbench/tasks/icl_consistency_test/doc.md"

_LICENSE = ""

_URL = "https://raw.githubusercontent.com/LucWeber/icl_consistency_data/main/data/"
_URLS = {
    "anli": _URL + "genbench_all_anli.jsonl",
    "mnli": _URL + "genbench_all_glue%2Bmnli.jsonl",
}


class ICLConsistencyTest(datasets.GeneratorBasedBuilder):
    """
    In prompting, models are sensitive to task-irrelevant information in their prompt. This test can be used to quantify this sensitivity of any 🤗 model.
    The ICL consistency test does this by measuring a model's prediction consistency across many different semantically equivalent prompting setups.
    """

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="anli", version=VERSION, description="This part of the ICL consistency test covers data points from ANLI"),
        datasets.BuilderConfig(name="mnli", version=VERSION, description="This part of the ICL consistency test covers data points from MNLI"),
    ]

    def _info(self):
        features = datasets.Features(
            {
                # the model input (prompt) for this example
                "input": datasets.Value("string"),
                # the gold label, as a string and as an integer
                "target": datasets.Value("string"),
                "target_numeric": datasets.Value("int32"),
                # identifies the underlying data point; the same data_ID recurs across prompting setups
                "data_ID": datasets.Value("int32"),
                # identifies the prompting setup the data point is embedded in
                "setup_ID": datasets.Value("string"),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=("input", "target"),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # download the jsonl file for the selected configuration ("anli" or "mnli")
        url = _URLS[self.config.name]
        data_path = dl_manager.download_and_extract(url)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": data_path, "split": "test"},
            ),
        ]

    def _generate_examples(self, filepath, split):
        # every line of the jsonl file is one example: a data point embedded in one prompting setup
        with open(filepath, encoding="utf-8") as f:
            for key, row in enumerate(f):
                data = json.loads(row)
                yield key, {
                    "input": data["input"],
                    "target": data["target"],
                    "target_numeric": data["target_numeric"],
                    "data_ID": data["data_ID"],
                    "setup_ID": data["setup_ID"],
                }
|
|
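# The block below is illustrative only: a minimal, unofficial sketch of how prediction
# consistency across prompting setups could be quantified with Cohen's kappa. The
# reference implementation is the example_evaluation.py script in the GenBench
# repository; this sketch additionally assumes scikit-learn is installed, and the
# prediction dictionary used here is made-up toy data.
if __name__ == "__main__":
    from itertools import combinations

    from sklearn.metrics import cohen_kappa_score

    def mean_pairwise_kappa(predictions_by_setup):
        """Average Cohen's kappa over all pairs of prompting setups.

        `predictions_by_setup` maps setup_ID -> {data_ID: predicted label}.
        """
        kappas = []
        for setup_a, setup_b in combinations(sorted(predictions_by_setup), 2):
            # compare only data points that were predicted under both setups
            shared = sorted(set(predictions_by_setup[setup_a]) & set(predictions_by_setup[setup_b]))
            preds_a = [predictions_by_setup[setup_a][i] for i in shared]
            preds_b = [predictions_by_setup[setup_b][i] for i in shared]
            kappas.append(cohen_kappa_score(preds_a, preds_b))
        return sum(kappas) / len(kappas)

    # toy predictions: three setups over the same four data points
    toy_predictions = {
        "setup_1": {0: 0, 1: 1, 2: 2, 3: 0},
        "setup_2": {0: 0, 1: 1, 2: 2, 3: 1},
        "setup_3": {0: 0, 1: 2, 2: 2, 3: 0},
    }
    print(f"Mean pairwise Cohen's kappa: {mean_pairwise_kappa(toy_predictions):.3f}")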