"""Blimp Metric.""" |
|
|
|
import datasets |
|
import evaluate |
|
import numpy as np |
|
import torch |
|
from evaluate import logging |
|
from torch.nn import CrossEntropyLoss |
|
from transformers import AutoModelForCausalLM, AutoTokenizer |
|
|
|
_CITATION = """\ |
|
@article{warstadt2020blimp, |
|
author = {Warstadt, Alex and Parrish, Alicia and Liu, Haokun and Mohananey, Anhad and Peng, Wei and Wang, Sheng-Fu and Bowman, Samuel R.}, |
|
title = {BLiMP: The Benchmark of Linguistic Minimal Pairs for English}, |
|
journal = {Transactions of the Association for Computational Linguistics}, |
|
volume = {8}, |
|
number = {}, |
|
pages = {377-392}, |
|
year = {2020}, |
|
doi = {10.1162/tacl\_a\_00321}, |
|
URL = {https://doi.org/10.1162/tacl_a_00321}, |
|
eprint = {https://doi.org/10.1162/tacl_a_00321}, |
|
abstract = { We introduce The Benchmark of Linguistic Minimal Pairs (BLiMP),1 a challenge set for evaluating the linguistic knowledge of language models (LMs) on major grammatical phenomena in English. BLiMP consists of 67 individual datasets, each containing 1,000 minimal pairs—that is, pairs of minimally different sentences that contrast in grammatical acceptability and isolate specific phenomenon in syntax, morphology, or semantics. We generate the data according to linguist-crafted grammar templates, and human aggregate agreement with the labels is 96.4\%. We evaluate n-gram, LSTM, and Transformer (GPT-2 and Transformer-XL) LMs by observing whether they assign a higher probability to the acceptable sentence in each minimal pair. We find that state-of-the-art models identify morphological contrasts related to agreement reliably, but they struggle with some subtle semantic and syntactic phenomena, such as negative polarity items and extraction islands. } |
|
} |
|
""" |
|
|
|
_DESCRIPTION = """ |
|
BLiMP is a challenge set for evaluating what language models (LMs) know about major grammatical phenomena in English. |
|
BLiMP consists of 67 sub-datasets, each containing 1000 minimal pairs isolating specific contrasts in syntax, morphology, or semantics. |
|
The data is automatically generated according to expert-crafted grammars. Aggregate human agreement with the labels is 96.4%. |
|
We use BLiMP to evaluate an n-gram LM, LSTM LM, GPT-2, and Transformer-XL. |
|
|
|
For more info see https://github.com/alexwarstadt/blimp. |
|
""" |
|
|
|
_KWARGS_DESCRIPTION = """ |
|
Args: |
|
    model_id (str): model used for calculating BLiMP scores (loaded with AutoModelForCausalLM)
|
batch_size (int): the batch size to run texts through the model. Defaults to 16. |
|
    device (str): device to run on; defaults to 'cuda' when available, then 'mps', otherwise 'cpu'
|
Returns: |
|
blimp: dictionary containing the blimp scores for each of the 67 sub-datasets, as well as the overall accuracy. |
|
An LM’s overall accuracy on BLiMP is simply the proportion of the 67,000 minimal pairs in which the model assigns a higher probability to the acceptable sentence. |
|
Examples: |
|
    A minimal usage sketch (the "./blimp.py" path is an assumption; point evaluate.load at wherever this module is saved):
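        >>> blimp = evaluate.load("./blimp.py", module_type="metric")
        >>> input_texts = ["The cats sleep.", "The cats sleeps."]
        >>> results = blimp.compute(model_id="gpt2", predictions=input_texts)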
|
""" |
|
|
|
|
|
@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) |
|
class Blimp(evaluate.Metric):
|
def _info(self): |
|
return evaluate.MetricInfo( |
|
module_type="metric", |
|
description=_DESCRIPTION, |
|
citation=_CITATION, |
|
inputs_description=_KWARGS_DESCRIPTION, |
|
features=datasets.Features( |
|
{ |
|
"predictions": datasets.Value("string"), |
|
} |
|
), |
|
reference_urls=[ |
|
"https://github.com/alexwarstadt/blimp", |
|
"https://huggingface.co/datasets/nyu-mll/blimp", |
|
], |
|
) |
|
|
|
def _compute( |
|
self, |
|
predictions, |
|
model_id, |
|
batch_size: int = 16, |
|
add_start_token: bool = True, |
|
device=None, |
|
max_length=None, |
|
): |
|
if device is not None: |
|
            assert device in ["gpu", "cpu", "cuda", "mps"], (
                "device should be one of 'gpu', 'cpu', 'cuda', or 'mps'."
            )
|
if device == "gpu": |
|
device = "cuda" |
|
else: |
|
device = ( |
|
"cuda" |
|
if torch.cuda.is_available() |
|
                else ("mps" if torch.backends.mps.is_available() else "cpu")
|
) |
|
|
|
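        # load the causal LM used for scoring, and its tokenizer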
model = AutoModelForCausalLM.from_pretrained(model_id) |
|
model = model.to(device) |
|
|
|
tokenizer = AutoTokenizer.from_pretrained(model_id) |
|
|
|
|
|
|
|
|
|
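        # if batch_size > 1 (which generally requires padding) and the tokenizer has no
        # pad_token, reuse one of its existing special tokens as the padding token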
if tokenizer.pad_token is None and batch_size > 1: |
|
existing_special_tokens = list( |
|
tokenizer.special_tokens_map_extended.values() |
|
) |
|
|
|
assert len(existing_special_tokens) > 0, ( |
|
"If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." |
|
) |
|
|
|
tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]}) |
|
|
|
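        # when a <BOS> token will be prepended, leave room for it within max_length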
if add_start_token and max_length: |
|
|
|
assert tokenizer.bos_token is not None, ( |
|
"Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" |
|
) |
|
max_tokenized_len = max_length - 1 |
|
else: |
|
max_tokenized_len = max_length |
|
|
|
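        # tokenize all inputs in a single pass, padding (and truncating if max_length is set)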
encodings = tokenizer( |
|
predictions, |
|
add_special_tokens=False, |
|
padding=True, |
|
truncation=True if max_tokenized_len else False, |
|
max_length=max_tokenized_len, |
|
return_tensors="pt", |
|
return_attention_mask=True, |
|
).to(device) |
|
|
|
encoded_texts = encodings["input_ids"] |
|
attn_masks = encodings["attention_mask"] |
|
|
|
|
|
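        # check that each input is long enough for the shifted loss computation below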
if add_start_token: |
|
assert torch.all(torch.ge(attn_masks.sum(1), 1)), ( |
|
"Each input text must be at least one token long." |
|
) |
|
else: |
|
assert torch.all(torch.ge(attn_masks.sum(1), 2)), ( |
|
"When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." |
|
) |
|
|
|
ppls = [] |
|
loss_fct = CrossEntropyLoss(reduction="none") |
|
|
|
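        # score the inputs batch by batch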
for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)): |
|
end_index = min(start_index + batch_size, len(encoded_texts)) |
|
encoded_batch = encoded_texts[start_index:end_index] |
|
attn_mask = attn_masks[start_index:end_index] |
|
|
|
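            # prepend a <BOS> token to every sequence and extend the attention mask to cover it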
if add_start_token: |
|
bos_tokens_tensor = torch.tensor( |
|
[[tokenizer.bos_token_id]] * encoded_batch.size(dim=0) |
|
).to(device) |
|
encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1) |
|
attn_mask = torch.cat( |
|
[ |
|
torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to( |
|
device |
|
), |
|
attn_mask, |
|
], |
|
dim=1, |
|
) |
|
|
|
labels = encoded_batch |
|
|
|
with torch.no_grad(): |
|
out_logits = model(encoded_batch, attention_mask=attn_mask).logits |
|
|
|
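            # shift logits and labels so that position i predicts token i + 1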
shift_logits = out_logits[..., :-1, :].contiguous() |
|
shift_labels = labels[..., 1:].contiguous() |
|
shift_attention_mask_batch = attn_mask[..., 1:].contiguous() |
|
|
|
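            # per-sequence perplexity: exp of the mean cross-entropy over non-padding tokens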
perplexity_batch = torch.exp( |
|
( |
|
loss_fct(shift_logits.transpose(1, 2), shift_labels) |
|
* shift_attention_mask_batch |
|
).sum(1) |
|
/ shift_attention_mask_batch.sum(1) |
|
) |
|
|
|
ppls += perplexity_batch.tolist() |
|
|
|
return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)} |
|
|