File size: 5,597 Bytes
87dce2c 38ac005 87dce2c 73783a4 87dce2c |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 |
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This 🤗 dataset provides data for the GenBench CBT task 'The ICL consistency test' (see https://github.com/GenBench/genbench_cbt/tree/main/src/genbench/tasks/icl_consistency_test).
The ICL consistency test measures the consistency of LLM predictions on the same data points across many different equivalent prompting setups.
The score in the associated metric (Cohen's kappa) can be understood as a measure of a model's prediction consistency in the face of task-irrelevant information.
For an easy evaluation of any 🤗 models, we refer to the code provided in the GenBench task. For in-depth information on the task, we refer to the associated
publications (Weber et al., 2023a, 2023b) and the respective GenBench doc.md (https://github.com/GenBench/genbench_cbt/blob/main/src/genbench/tasks/icl_consistency_test/doc.md).
Evaluation on the relevant metrics can be done via the example_evaluation.py script in the GenBench repository.
- Weber, L., Bruni, E., & Hupkes, D. (2023, December). Mind the instructions: a holistic evaluation of consistency and interactions in prompt-based learning.
In Proceedings of the 27th Conference on Computational Natural Language Learning (CoNLL) (pp. 294-313).
- Weber, L., Bruni, E., & Hupkes, D. (2023). The ICL Consistency Test. arXiv preprint arXiv:2312.04945.
"""
import json
import os
import datasets
_CITATION = """\
@inproceedings{weber2023mind,
title={Mind the instructions: a holistic evaluation of consistency and interactions in prompt-based learning},
author={Weber, Lucas and Bruni, Elia and Hupkes, Dieuwke},
booktitle={Proceedings of the 27th Conference on Computational Natural Language Learning (CoNLL)},
pages={294--313},
year={2023}
},
@article{weber2023icl,
title={The ICL Consistency Test},
author={Weber, Lucas and Bruni, Elia and Hupkes, Dieuwke},
journal={arXiv preprint arXiv:2312.04945},
year={2023}
}
"""
_DESCRIPTION = """\
In prompting, models are sensitive to task-irrelevant information in their prompt. This test can be used to quantify this sensitivity of any 🤗 model.
The ICL consistency test does this by measuring a model's prediction consistency across many different semantically equivalent prompting setups.
"""
_HOMEPAGE = "https://github.com/GenBench/genbench_cbt/blob/main/src/genbench/tasks/icl_consistency_test/doc.md"
_LICENSE = ""
_URL = "https://raw.githubusercontent.com/LucWeber/icl_consistency_data/main/data/" #"https://huggingface.co/datasets/LucasWeber/icl_consistency_test/blob/main/"
_URLS = {
"anli": _URL + "genbench_all_anli.jsonl",
"mnli": _URL + "genbench_all_glue%2Bmnli.jsonl",
}
class ICLConsistencyTest(datasets.GeneratorBasedBuilder):
    """
    In prompting, models are sensitive to task-irrelevant information in their prompt. This test can be used to quantify this sensitivity of any 🤗 model.
    The ICL consistency test does this by measuring a model's prediction consistency across many different semantically equivalent prompting setups.
    """

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="anli", version=VERSION, description="This part of the ICL consistency test covers data points from ANLI"),
        datasets.BuilderConfig(name="mnli", version=VERSION, description="This part of the ICL consistency test covers data points from MNLI"),
    ]

    def _info(self):
        """Describe the dataset schema: two text columns, two int columns, plus the setup identifier string."""
        # Map each column name to its arrow dtype; the Features mapping is
        # built from this spec so the schema stays in one place.
        column_dtypes = {
            "input": "string",
            "target": "string",
            "target_numeric": "int32",
            "data_ID": "int32",
            "setup_ID": "string",
        }
        features = datasets.Features({column: datasets.Value(dtype) for column, dtype in column_dtypes.items()})
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=("input", "target"),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the jsonl file for the selected config and expose it as a single TEST split."""
        source_url = _URLS[self.config.name]
        local_path = dl_manager.download_and_extract(source_url)
        test_split = datasets.SplitGenerator(
            name=datasets.Split.TEST,
            gen_kwargs={"filepath": local_path, "split": "test"},
        )
        return [test_split]

    def _generate_examples(self, filepath, split):
        """Yield (key, example) pairs, one per jsonl line of the downloaded file.

        `split` is part of the standard builder signature; only one split exists here.
        """
        # Column order matters: it matches the Features declaration in _info.
        columns = ("input", "target", "target_numeric", "data_ID", "setup_ID")
        with open(filepath, encoding="utf-8") as jsonl_file:
            for example_index, line in enumerate(jsonl_file):
                record = json.loads(line)
                yield example_index, {column: record[column] for column in columns}
|