Jon Gauthier committed
Commit 70f4226 · 1 Parent(s): aab1635

Get token aggregation working; just need to support predictions

Files changed (2):
  1. syntaxgym.py +123 -17
  2. test.py +18 -6
syntaxgym.py CHANGED
@@ -5,13 +5,16 @@ SyntaxGym dataset as used in Hu et al. (2020).
 """
 
 
+from collections import defaultdict
 from copy import deepcopy
 import json
 from pathlib import Path
 import re
-from typing import List
+from typing import List, Tuple
 
 import datasets
+import numpy as np
+import torch
 
 from .prediction import Prediction
 
@@ -39,6 +42,15 @@ for suite_f in Path("test_suites").glob("*.json"):
 SUITE_JSONS = {suite["meta"]["name"]: suite for suite in SUITE_JSONS}
 
 
+def condition_to_string(cond):
+    ret = " ".join([region["content"].lstrip()
+                    for region in cond["regions"]
+                    if region["content"].strip() != ""])
+    ret = re.sub(r"\s+,", ",", ret)
+
+    return ret
+
+
 class SyntaxGymSuiteConfig(datasets.BuilderConfig):
 
     def __init__(self, suite_json, version=datasets.Version("1.0.0"), **kwargs):
@@ -68,7 +80,7 @@ class SyntaxGym(datasets.GeneratorBasedBuilder):
         }
 
         features = {
-            "item_number": datasets.Value("string"),
+            "item_number": datasets.Value("int32"),
             "conditions": datasets.Sequence(condition_spec)
         }
 
@@ -95,10 +107,7 @@ class SyntaxGym(datasets.GeneratorBasedBuilder):
         for item in suite_json["items"]:
             # Convert to sentence input.
             for cond in item["conditions"]:
-                cond["content"] = " ".join([region["content"].lstrip()
-                                            for region in cond["regions"]
-                                            if region["content"].strip() != ""])
-                cond["content"] = re.sub(r"\s+,", ",", cond["content"])
+                cond["content"] = condition_to_string(cond)
 
             yield item["item_number"], item
 
@@ -117,16 +126,18 @@ class SyntaxGymMetric(datasets.Metric):
     ]
 
     def _info(self):
+        seq = datasets.Sequence
         features = datasets.Features({
-            "conditions": datasets.Sequence({
-                "condition_name": datasets.Value("string"),
-                "regions": datasets.Sequence({
-                    "region_number": datasets.Value("int32"),
-                    "metric_value": {
-                        "sum": datasets.Value("float32")
-                    },
-                }),
-            })
+            # surprisals: 3d float array
+            "surprisals": seq(seq(datasets.Value("float"))),
+
+            # TODO necessary? can assume it remains sorted?
+            "condition_names": datasets.Value("string"),
+
+            "input_ids": seq(datasets.Value("int32")),
+
+            # offset mapping: 3d int array
+            "offset_mapping": seq(seq(datasets.Value("int32"))),
         })
         return datasets.MetricInfo(
             description="TODO",
@@ -135,5 +146,100 @@ class SyntaxGymMetric(datasets.Metric):
             features=features,
         )
 
-    def _compute(self, region_surprisals):
-        suite_with_results = deepcopy(self.suite)
+    def _compute(self, surprisals, item_number, condition_names,
+                 input_ids, offset_mapping):
+        # surprisals: B * T * V
+        surprisals = torch.tensor(surprisals)
+        assert surprisals.ndim == 3
+
+        # input_ids: B * T
+        input_ids = torch.tensor(input_ids)
+        assert input_ids.ndim == 2
+
+        # Get surprisals of expected words.
+        surps_shifted = surprisals[:, :-1, :]
+        expected_ids = input_ids[:, 1:]
+
+        # TODO: check this logic
+        tt = expected_ids.unsqueeze(2)
+        surprisals = torch.gather(surps_shifted, 2, expected_ids.unsqueeze(2)) \
+            .squeeze(2)
+        # This is the original, which works but not with multiple axes in expected_ids
+        # surprisals = surps_shifted[range(surps_shifted.shape[0]), expected_ids]
+
+        # surprisals is now B * (T - 1)
+
+        #### aggregate
+        region_totals = {condition_name: defaultdict(float)
+                         for condition_name in condition_names}
+        region2tokens = self.compute_region_token_mapping(
+            item_number, condition_names, input_ids, offset_mapping)
+
+        for i, (i_cond, i_inputs) in enumerate(zip(condition_names, input_ids)):
+            for region_number, region_tokens in region2tokens[i_cond].items():
+                for token in region_tokens:
+                    if token < surprisals.shape[1]:
+                        region_totals[i_cond][region_number] += surprisals[i, token]
+                    else:
+                        # TODO don't think this is an issue, just should clean
+                        # up the aggregation output
+                        print("Warning: exceeded ", token)
+
+        region_totals = {c: dict(totals) for c, totals in region_totals.items()}
+
+    def get_region_edges(self, item_number, condition_name):
+        """
+        Get left edge of each region as a character index.
+        """
+        # NB this is coupled with `condition_to_string` logic of course
+
+        # DEV bad, just reindex
+        item = next(item for item in self.suite["items"]
+                    if item["item_number"] == item_number)
+        cond = next(cond for cond in item["conditions"]
+                    if cond["condition_name"] == condition_name)
+
+        idx = 0
+        ret = []
+        for r_idx, region in enumerate(cond["regions"]):
+            ret.append(idx)
+
+            content = region["content"]
+            region_size = len(content)
+            if content.strip() != "" and r_idx != 0 and not content.startswith(","):
+                # Add joining space
+                region_size += 1
+
+            idx += region_size
+
+        return ret
+
+    def compute_region_token_mapping(self, item_number, condition_names, input_ids,
+                                     offset_mapping: List[Tuple[int, int]]):
+        # input_ids: B * T
+        # offset_mapping: B * T * 2
+
+        region2tokens = {cond: defaultdict(list) for cond in condition_names}
+
+        input_ids = input_ids.detach()
+        for i_cond, i_tokens, i_offsets in zip(condition_names, input_ids, offset_mapping):
+            region_edges = self.get_region_edges(item_number, i_cond)
+
+            t_cursor, r_cursor = 0, 0
+            while t_cursor < i_tokens.shape[0]:
+                # token = i_tokens[t_cursor]
+                token_char_start, token_char_end = i_offsets[t_cursor]
+
+                region_start = region_edges[r_cursor]
+                region_end = region_edges[r_cursor + 1] \
+                    if r_cursor + 1 < len(region_edges) else np.inf
+
+                # NB region boundaries are left edges, hence the >= here.
+                if token_char_start >= region_end:
+                    r_cursor += 1
+                    continue
+
+                region2tokens[i_cond][r_cursor + 1].append(t_cursor)
+                t_cursor += 1
+
+        return region2tokens
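
The gather step marked "TODO: check this logic" in _compute above can be sanity-checked in isolation. A minimal sketch (not part of the commit), assuming a hypothetical batch of one sentence with four tokens over a five-word vocabulary:

import torch

surprisals = torch.rand(1, 4, 5)          # B * T * V surprisal over the vocabulary
input_ids = torch.tensor([[2, 0, 3, 1]])  # B * T token ids

surps_shifted = surprisals[:, :-1, :]     # positions 0..T-2 predict the next token
expected_ids = input_ids[:, 1:]           # the tokens actually observed next

# Pick out, at each position, the surprisal assigned to the observed next token.
token_surprisals = torch.gather(surps_shifted, 2,
                                expected_ids.unsqueeze(2)).squeeze(2)

assert token_surprisals.shape == (1, 3)   # B * (T - 1)
assert token_surprisals[0, 0] == surprisals[0, 0, input_ids[0, 1]]
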
test.py CHANGED
@@ -1,4 +1,5 @@
 import datasets
+import numpy as np
 import transformers
 import torch
 
@@ -6,25 +7,36 @@ import torch
 dataset = datasets.load_dataset("syntaxgym.py", "mvrr_mod")
 metric = datasets.load_metric("syntaxgym.py", "mvrr_mod")
 
-tokenizer = transformers.AutoTokenizer.from_pretrained("gpt2")
+# model_ref = "gpt2"
+model_ref = "hf-internal-testing/tiny-random-gpt_neo"
+
+tokenizer = transformers.AutoTokenizer.from_pretrained(model_ref)
 # DEV
 tokenizer.pad_token = tokenizer.eos_token
 
-model = transformers.AutoModelForCausalLM.from_pretrained("gpt2")
+model = transformers.AutoModelForCausalLM.from_pretrained(model_ref)
 model.eval()
 
 
 for item in dataset["test"]:
     # TODO full preprocessing setup
+    condition_names = item["conditions"]["condition_name"]
     tokenized = tokenizer(item["conditions"]["content"], return_tensors="pt",
-                          padding=True)
+                          padding=True, return_offsets_mapping=True)
 
     print(item)
     print(tokenized)
+    print(tokenized["offset_mapping"].shape)
 
     with torch.no_grad():
-        print(model(tokenized["input_ids"]))
-
-        # TODO eval
+        # Pre-softmax predictive distribution (shape B * T * V)
+        output = model(tokenized["input_ids"])[0]
+        surprisals = -output.log_softmax(dim=2) / np.log(2)
+
+        result = metric.compute(surprisals=surprisals,
+                                item_number=item["item_number"],
+                                condition_names=condition_names,
+                                input_ids=tokenized["input_ids"],
+                                offset_mapping=tokenized["offset_mapping"])
 
     break
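
For context on the return_offsets_mapping=True flag used above: with a fast tokenizer, each token comes back with its character (start, end) span in the input string, which is what lets the metric line tokens up against the character-level region edges. A minimal sketch (not part of the commit), assuming a hypothetical three-region condition and a fast GPT-2 tokenizer; the assignment rule mirrors compute_region_token_mapping:

import transformers

tokenizer = transformers.AutoTokenizer.from_pretrained("gpt2")

regions = ["The child", "kicked", "the ball"]   # hypothetical condition regions
text = "The child kicked the ball"              # space-joined, as in condition_to_string
region_edges = [0, 9, 16]                       # left character edge of each region
                                                # (joining space counted with the region that follows)

encoded = tokenizer(text, return_offsets_mapping=True)
for token_id, (start, end) in zip(encoded["input_ids"], encoded["offset_mapping"]):
    # Assign each token to the last region whose left edge lies at or before its
    # start offset; regions are numbered from 1, as in region2tokens.
    region = max(i for i, edge in enumerate(region_edges) if start >= edge) + 1
    print(f"{tokenizer.decode([token_id])!r:>10}  chars {start:2d}-{end:2d}  -> region {region}")
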