yu-val-weiss committed
Commit b1b0ea9 · 1 Parent(s): c91ab9d

change to blimp naming

Files changed (2)
  1. app.py +1 -2
  2. perplexity.py → blimp.py +73 -69
app.py CHANGED
@@ -1,6 +1,5 @@
  import evaluate
  from evaluate.utils import launch_gradio_widget
 
-
- module = evaluate.load("perplexity", module_type="metric")
+ module = evaluate.load("pico-lm/blimp", module_type="metric")
  launch_gradio_widget(module)
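For context, a minimal usage sketch of the renamed module (not part of the commit). It assumes the metric is called like any evaluate metric via compute, with the model_id, predictions, and batch_size arguments documented in _KWARGS_DESCRIPTION below; gpt2 and the example pair are placeholders, and the keys of the returned dictionary are whatever _compute yields at this commit.

# Sketch only: load the renamed metric and score one minimal pair.
import evaluate

blimp = evaluate.load("pico-lm/blimp", module_type="metric")
results = blimp.compute(
    model_id="gpt2",  # any causal LM on the Hub
    predictions=[
        "These casseroles disgust Kayla.",   # acceptable sentence (illustrative)
        "These casseroles disgusts Kayla.",  # unacceptable counterpart
    ],
    batch_size=16,
)
print(results)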
perplexity.py → blimp.py RENAMED
@@ -11,76 +11,51 @@
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  # See the License for the specific language governing permissions and
  # limitations under the License.
- """Perplexity Metric."""
+ """Blimp Metric."""
 
  import datasets
+ import evaluate
  import numpy as np
  import torch
+ from evaluate import logging
  from torch.nn import CrossEntropyLoss
  from transformers import AutoModelForCausalLM, AutoTokenizer
 
- import evaluate
- from evaluate import logging
-
-
  _CITATION = """\
-
+ @article{warstadt2020blimp,
+ author = {Warstadt, Alex and Parrish, Alicia and Liu, Haokun and Mohananey, Anhad and Peng, Wei and Wang, Sheng-Fu and Bowman, Samuel R.},
+ title = {BLiMP: The Benchmark of Linguistic Minimal Pairs for English},
+ journal = {Transactions of the Association for Computational Linguistics},
+ volume = {8},
+ number = {},
+ pages = {377-392},
+ year = {2020},
+ doi = {10.1162/tacl\_a\_00321},
+ URL = {https://doi.org/10.1162/tacl_a_00321},
+ eprint = {https://doi.org/10.1162/tacl_a_00321},
+ abstract = { We introduce The Benchmark of Linguistic Minimal Pairs (BLiMP),1 a challenge set for evaluating the linguistic knowledge of language models (LMs) on major grammatical phenomena in English. BLiMP consists of 67 individual datasets, each containing 1,000 minimal pairs—that is, pairs of minimally different sentences that contrast in grammatical acceptability and isolate specific phenomenon in syntax, morphology, or semantics. We generate the data according to linguist-crafted grammar templates, and human aggregate agreement with the labels is 96.4\%. We evaluate n-gram, LSTM, and Transformer (GPT-2 and Transformer-XL) LMs by observing whether they assign a higher probability to the acceptable sentence in each minimal pair. We find that state-of-the-art models identify morphological contrasts related to agreement reliably, but they struggle with some subtle semantic and syntactic phenomena, such as negative polarity items and extraction islands. }
+ }
  """
 
  _DESCRIPTION = """
- Perplexity (PPL) is one of the most common metrics for evaluating language models.
- It is defined as the exponentiated average negative log-likelihood of a sequence, calculated with exponent base `e`.
+ BLiMP is a challenge set for evaluating what language models (LMs) know about major grammatical phenomena in English.
+ BLiMP consists of 67 sub-datasets, each containing 1000 minimal pairs isolating specific contrasts in syntax, morphology, or semantics.
+ The data is automatically generated according to expert-crafted grammars. Aggregate human agreement with the labels is 96.4%.
+ We use BLiMP to evaluate an n-gram LM, LSTM LM, GPT-2, and Transformer-XL.
 
- For more information, see https://huggingface.co/docs/transformers/perplexity
+ For more info see https://github.com/alexwarstadt/blimp.
  """
 
  _KWARGS_DESCRIPTION = """
  Args:
- model_id (str): model used for calculating Perplexity
- NOTE: Perplexity can only be calculated for causal language models.
- This includes models such as gpt2, causal variations of bert,
- causal versions of t5, and more (the full list can be found
- in the AutoModelForCausalLM documentation here:
- https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
-
- predictions (list of str): input text, each separate text snippet
- is one list entry.
+ model_id (str): model used for calculating Blimp
  batch_size (int): the batch size to run texts through the model. Defaults to 16.
- add_start_token (bool): whether to add the start token to the texts,
- so the perplexity can include the probability of the first word. Defaults to True.
  device (str): device to run on, defaults to 'cuda' when available
  Returns:
- perplexity: dictionary containing the perplexity scores for the texts
- in the input list, as well as the mean perplexity. If one of the input texts is
- longer than the max input length of the model, then it is truncated to the
- max length for the perplexity computation.
+ blimp: dictionary containing the blimp scores for each of the 67 sub-datasets, as well as the overall accuracy.
+ An LM’s overall accuracy on BLiMP is simply the proportion of the 67,000 minimal pairs in which the model assigns a higher probability to the acceptable sentence.
  Examples:
- Example 1:
- >>> perplexity = evaluate.load("perplexity", module_type="metric")
- >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
- >>> results = perplexity.compute(model_id='gpt2',
- ... add_start_token=False,
- ... predictions=input_texts) # doctest:+ELLIPSIS
- >>> print(list(results.keys()))
- ['perplexities', 'mean_perplexity']
- >>> print(round(results["mean_perplexity"], 0))
- 647.0
- >>> print(round(results["perplexities"][0], 0))
- 32.0
-
- Example 2:
- >>> from datasets import load_dataset
- >>> perplexity = evaluate.load("perplexity", module_type="metric")
- >>> input_texts = load_dataset("wikitext", "wikitext-2-raw-v1", split="test")["text"][:10] # doctest: +SKIP
- >>> input_texts = [s for s in input_texts if s!='']
- >>> results = perplexity.compute(model_id='gpt2',
- ... predictions=input_texts)
- >>> print(list(results.keys()))
- ['perplexities', 'mean_perplexity']
- >>> print(round(results["mean_perplexity"], 2)) # doctest: +SKIP
- 576.76
- >>> print(round(results["perplexities"][0], 2)) # doctest: +SKIP
- 889.28
+ TODO: examples.
  """
 
 
@@ -97,19 +72,33 @@ class Perplexity(evaluate.Metric):
  "predictions": datasets.Value("string"),
  }
  ),
- reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
+ reference_urls=[
+ "https://github.com/alexwarstadt/blimp",
+ "https://huggingface.co/datasets/nyu-mll/blimp",
+ ],
  )
 
  def _compute(
- self, predictions, model_id, batch_size: int = 16, add_start_token: bool = True, device=None, max_length=None
+ self,
+ predictions,
+ model_id,
+ batch_size: int = 16,
+ add_start_token: bool = True,
+ device=None,
+ max_length=None,
  ):
-
  if device is not None:
- assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
+ assert device in ["gpu", "cpu", "cuda", "mps"], (
+ "device should be either gpu, cpu or mps."
+ )
  if device == "gpu":
  device = "cuda"
  else:
- device = "cuda" if torch.cuda.is_available() else "cpu"
+ device = (
+ "cuda"
+ if torch.cuda.is_available()
+ else ("mps" if torch.mps.is_available() else "cpu")
+ )
 
  model = AutoModelForCausalLM.from_pretrained(model_id)
  model = model.to(device)
@@ -120,19 +109,21 @@ class Perplexity(evaluate.Metric):
  # if there is not an already assigned pad_token, assign an existing
  # special token to also be the padding token
  if tokenizer.pad_token is None and batch_size > 1:
- existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
+ existing_special_tokens = list(
+ tokenizer.special_tokens_map_extended.values()
+ )
  # check that the model already has at least one special token defined
- assert (
- len(existing_special_tokens) > 0
- ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
+ assert len(existing_special_tokens) > 0, (
+ "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
+ )
  # assign one of the special tokens to also be the pad token
  tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})
 
  if add_start_token and max_length:
  # leave room for <BOS> token to be added:
- assert (
- tokenizer.bos_token is not None
- ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
+ assert tokenizer.bos_token is not None, (
+ "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
+ )
  max_tokenized_len = max_length - 1
  else:
  max_tokenized_len = max_length
@@ -152,11 +143,13 @@ class Perplexity(evaluate.Metric):
 
  # check that each input is long enough:
  if add_start_token:
- assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
+ assert torch.all(torch.ge(attn_masks.sum(1), 1)), (
+ "Each input text must be at least one token long."
+ )
  else:
- assert torch.all(
- torch.ge(attn_masks.sum(1), 2)
- ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
+ assert torch.all(torch.ge(attn_masks.sum(1), 2)), (
+ "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
+ )
 
  ppls = []
  loss_fct = CrossEntropyLoss(reduction="none")
@@ -167,10 +160,18 @@ class Perplexity(evaluate.Metric):
  attn_mask = attn_masks[start_index:end_index]
 
  if add_start_token:
- bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
+ bos_tokens_tensor = torch.tensor(
+ [[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)
+ ).to(device)
  encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
  attn_mask = torch.cat(
- [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
+ [
+ torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(
+ device
+ ),
+ attn_mask,
+ ],
+ dim=1,
  )
 
  labels = encoded_batch
@@ -183,7 +184,10 @@ class Perplexity(evaluate.Metric):
  shift_attention_mask_batch = attn_mask[..., 1:].contiguous()
 
  perplexity_batch = torch.exp(
- (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
+ (
+ loss_fct(shift_logits.transpose(1, 2), shift_labels)
+ * shift_attention_mask_batch
+ ).sum(1)
  / shift_attention_mask_batch.sum(1)
  )
 
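The Returns section above defines overall BLiMP accuracy as the share of minimal pairs where the model assigns the higher probability to the acceptable sentence. The sketch below (not part of this commit) shows one way the per-sentence perplexities produced by the loop above could be reduced to that number. It assumes perplexity is used as the sentence score, so "higher probability" corresponds to lower perplexity, and it borrows the sentence_good / sentence_bad field names from the nyu-mll/blimp dataset listed in reference_urls.

# Sketch only: reduce paired perplexities to a BLiMP-style accuracy.
import numpy as np

def pair_accuracy(ppl_good, ppl_bad):
    """Fraction of pairs whose acceptable sentence gets the lower perplexity."""
    ppl_good, ppl_bad = np.asarray(ppl_good), np.asarray(ppl_bad)
    return float(np.mean(ppl_good < ppl_bad))

# Hypothetical wiring for one sub-dataset (config name and field names assumed):
# from datasets import load_dataset
# pairs = load_dataset("nyu-mll/blimp", "adjunct_island", split="train")
# score pairs["sentence_good"] and pairs["sentence_bad"] with the metric above,
# then compare the two perplexity lists:
print(pair_accuracy([21.3, 35.7, 18.0], [48.9, 30.2, 55.4]))  # illustrative numbers; prints 0.666...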