""" |
|
SyntaxGym dataset as used in Hu et al. (2020). |
|
""" |
from collections import defaultdict
from copy import deepcopy
import json
from pathlib import Path
import re
from typing import List, Dict, Tuple
from typing_extensions import TypedDict

import datasets
from datasets import logging
import numpy as np
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

from .prediction import Prediction

_CITATION = """
@inproceedings{Hu:et-al:2020,
    author = {Hu, Jennifer and Gauthier, Jon and Qian, Peng and Wilcox, Ethan and Levy, Roger},
    title = {A systematic assessment of syntactic generalization in neural language models},
    booktitle = {Proceedings of the Association for Computational Linguistics},
    year = {2020}
}
"""

_DESCRIPTION = ""

_PROJECT_URL = "https://syntaxgym.org"
_DOWNLOAD_URL = "https://raw.githubusercontent.com/cpllab/syntactic-generalization/nextflow/test_suites/json/"

def condition_to_string(cond):
    """Render a condition's regions as a single sentence string."""
    ret = " ".join([region["content"].lstrip()
                    for region in cond["regions"]
                    if region["content"].strip() != ""])
    # Drop the joining space that would otherwise precede a comma.
    ret = re.sub(r"\s+,", ",", ret)
    return ret
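
# For illustration: a condition whose region contents are
# ["The dog", "", ",", "barked ."] renders as "The dog, barked .".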
class SyntaxGymSuiteConfig(datasets.BuilderConfig):

    def __init__(self, name, version=datasets.Version("1.0.0"), **kwargs):
        description = f"SyntaxGym test suite {name}.\n" + _DESCRIPTION
        super().__init__(name=name, description=description, version=version,
                         **kwargs)

SUITE_DATASET_CONDITION_SPEC = {
    "condition_name": datasets.Value("string"),
    "content": datasets.Value("string"),
    "regions": datasets.Sequence({
        "region_number": datasets.Value("int32"),
        "content": datasets.Value("string")
    })
}

SUITE_DATASET_SPEC = {
    "item_number": datasets.Value("int32"),
    "conditions": datasets.Sequence(SUITE_DATASET_CONDITION_SPEC),
    "predictions": datasets.Sequence(datasets.Value("string")),
}
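
# Each generated example follows SUITE_DATASET_SPEC: the suite item number,
# the item's conditions (condition name, rendered sentence, numbered regions),
# and the suite's prediction formulas as raw strings.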
class SyntaxGym(datasets.GeneratorBasedBuilder):

    SUITES = [
        "center_embed", "center_embed_mod",
        "cleft", "cleft_modifier",
        "fgd_hierarchy", "fgd_object",
        "fgd_pp", "fgd_subject",
        "mvrr", "mvrr_mod",
        "npi_orc_any", "npi_orc_ever", "npi_src_any", "npi_src_ever",
        "npz_ambig", "npz_ambig_mod", "npz_obj", "npz_obj_mod",
        "number_orc", "number_prep", "number_src",
        "reflexive_orc_fem", "reflexive_orc_masc",
        "reflexive_prep_fem", "reflexive_prep_masc",
        "reflexive_src_fem", "reflexive_src_masc",
        "subordination", "subordination_orc-orc",
        "subordination_pp-pp", "subordination_src-src",
    ]
    BUILDER_CONFIGS = [SyntaxGymSuiteConfig(suite_name)
                       for suite_name in SUITES]

    def _info(self):
        citation = f"SyntaxGym citation:\n{_CITATION}"

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(SUITE_DATASET_SPEC),
            homepage=_PROJECT_URL,
            citation=citation,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        download_url = _DOWNLOAD_URL + f"{self.config.name}.json"
        downloaded_file = dl_manager.download_and_extract(download_url)
        return [datasets.SplitGenerator(name=datasets.Split.TEST,
                                        gen_kwargs={"filepath": downloaded_file})]

    def _generate_examples(self, filepath):
        with open(filepath, "r", encoding="utf-8") as f:
            suite_json = json.load(f)

        # Prediction formulas are defined once per suite and shared by all items.
        predictions = [p["formula"] for p in suite_json["predictions"]]

        for item in suite_json["items"]:
            # Render each condition's regions into a single sentence string.
            for cond in item["conditions"]:
                cond["content"] = condition_to_string(cond)

            item["predictions"] = predictions

            yield item["item_number"], item

class SyntaxGymMetricResult(TypedDict):
    prediction_results: List[List[bool]]
    region_totals: List[Dict[Tuple[str, int], float]]
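
    # prediction_results: for each item, one boolean per prediction formula.
    # region_totals: for each item, the summed surprisal (in bits) of every
    # (condition_name, region_number) pair.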
class SyntaxGymMetric(datasets.Metric):
    """
    SyntaxGym prediction evaluation metric.
    """

    def _info(self):
        features = datasets.Features({
            "suite": SUITE_DATASET_SPEC
        })
        return datasets.MetricInfo(
            description="SyntaxGym prediction evaluation metric.",
            citation=_CITATION,
            inputs_description=(
                "A SyntaxGym test suite, as produced by the SyntaxGym dataset "
                "loader, passed as `suite`."
            ),
            features=features,
        )

    def _compute(self, suite, model_id, batch_size: int = 16,
                 add_start_token=True, device=None
                 ) -> SyntaxGymMetricResult:
        # NB: batch_size and add_start_token are currently unused.
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"]
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)
        model.eval()

        tokenizer = AutoTokenizer.from_pretrained(model_id)
        # Ensure the tokenizer has a pad token (GPT-2-style tokenizers define none).
        tokenizer.pad_token = tokenizer.eos_token

        results = {"prediction_results": [], "region_totals": []}
        for item in logging.tqdm(suite):
            result_single = self._compute_single(item, tokenizer, model, device)
            for k in ["prediction_results", "region_totals"]:
                results[k].append(result_single[k])

        return results

    def _compute_single(self, item, tokenizer, model, device):
        tokenized = tokenizer(item["conditions"]["content"],
                              padding=True,
                              return_tensors="pt",
                              return_offsets_mapping=True).to(device)

        # input_ids: one row per condition of this item.
        input_ids = tokenized["input_ids"]
        assert input_ids.ndim == 2

        # Token-level surprisals, in bits.
        with torch.no_grad():
            logits = model(input_ids).logits
            surprisals = -logits.log_softmax(dim=2) / np.log(2)

        # surprisals: batch * tokens * vocabulary
        assert surprisals.ndim == 3

        # Keep only the surprisal of each observed token: drop the prediction
        # made after the final token and align each prediction with the token
        # it scores.
        surps_shifted = surprisals[:, :-1, :]
        expected_ids = input_ids[:, 1:]
        surprisals = torch.gather(surps_shifted, 2, expected_ids.unsqueeze(2)) \
            .squeeze(2)
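
        # surprisals now has shape (batch, n_tokens - 1): entry [i, j] is the
        # surprisal, in bits, of token j + 1 of sequence i given the tokens
        # before it.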
        # Aggregate token-level surprisals into per-region totals.
        condition_names = item["conditions"]["condition_name"]
        region_totals = {condition_name: defaultdict(float)
                         for condition_name in condition_names}
        region2tokens = self.compute_region_token_mapping(
            item, input_ids, tokenized["offset_mapping"])

        for i, (i_cond, i_inputs) in enumerate(zip(condition_names, input_ids)):
            for region_number, region_tokens in region2tokens[i_cond].items():
                for token in region_tokens:
                    if token == 0:
                        # The first token has no preceding context, so its
                        # surprisal is undefined; skip it.
                        continue

                    # surprisals is shifted one position relative to input_ids,
                    # so the surprisal of token `token` lives at index `token - 1`.
                    assert token - 1 < surprisals.shape[1], \
                        "%s %s" % (token, surprisals.shape[1])
                    region_totals[i_cond][region_number] += surprisals[i, token - 1]

        # Flatten to a mapping keyed by (condition_name, region_number).
        region_totals = {(condition_name, region_number): float(total)
                         for condition_name, totals in region_totals.items()
                         for region_number, total in totals.items()}

        results = {
            "prediction_results": [
                Prediction(i, formula, "sum").formula(region_totals)
                for i, formula in enumerate(item["predictions"])
            ],
            "region_totals": region_totals
        }
        return results

    def get_region_edges(self, item, condition_idx):
        """
        Get the left edge of each region as a character index into the
        condition's rendered sentence.
        """
        regions = item["conditions"]["regions"][condition_idx]

        idx = 0
        ret = []
        for r_idx, region_content in enumerate(regions["content"]):
            ret.append(idx)

            region_size = len(region_content)
            if region_content.strip() != "" and r_idx != 0 and not region_content.startswith(","):
                # Count the joining space that condition_to_string places
                # before this region (there is none before the first region or
                # before a region that starts with a comma).
                region_size += 1

            idx += region_size

        return ret
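
    # For the illustrative condition used above (regions
    # ["The dog", "", ",", "barked ."], rendered as "The dog, barked ."),
    # the edges are [0, 7, 7, 8]: a region's edge also covers the joining
    # space before it, so " barked ." begins at index 8.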
    def compute_region_token_mapping(self, item, input_ids: torch.LongTensor,
                                     offset_mapping: List[Tuple[int, int]]
                                     ) -> Dict[str, Dict[int, List[int]]]:
        """
        Map each condition's regions onto the tokens that realize them.

        Returns a dict of the form
        ``condition_name -> region_number (1-based) -> token indices``,
        where token indices point into the corresponding row of ``input_ids``.
        """
        condition_names = item["conditions"]["condition_name"]
        region2tokens = {cond: defaultdict(list) for cond in condition_names}

        max_long = torch.iinfo(torch.int64).max

        input_ids = input_ids.detach()
        for i_cond, (i_tokens, i_offsets) in enumerate(zip(input_ids, offset_mapping)):
            region_edges = self.get_region_edges(item, i_cond)

            t_cursor, r_cursor = 0, 0
            while t_cursor < i_tokens.shape[0]:
                token_char_start, token_char_end = i_offsets[t_cursor]

                region_start = region_edges[r_cursor]
                region_end = region_edges[r_cursor + 1] \
                    if r_cursor + 1 < len(region_edges) else max_long

                # Region edges are left edges: once a token starts at or past
                # the next region's left edge, advance the region cursor.
                if token_char_start >= region_end:
                    r_cursor += 1
                    continue

                region2tokens[condition_names[i_cond]][r_cursor + 1].append(t_cursor)
                t_cursor += 1

        return region2tokens
|