# coding=utf-8
"""
SyntaxGym dataset as used in Hu et al. (2020).
"""
import json
import re
from typing import List

import datasets


_CITATION = """ | |
@inproceedings{Hu:et-al:2020, | |
author = {Hu, Jennifer and Gauthier, Jon and Qian, Peng and Wilcox, Ethan and Levy, Roger}, | |
title = {A systematic assessment of syntactic generalization in neural language models}, | |
booktitle = {Proceedings of the Association of Computational Linguistics}, | |
year = {2020} | |
} | |
""" | |
_DESCRIPTION = (
    "SyntaxGym is a framework for targeted syntactic evaluation of neural "
    "language models, introduced in Hu et al. (2020)."
)
_PROJECT_URL = "https://syntaxgym.org"
_DOWNLOAD_URL = "https://raw.githubusercontent.com/cpllab/syntactic-generalization/nextflow/test_suites/json/"


def condition_to_string(cond):
    """Flatten a condition's regions into a single sentence string."""
    ret = " ".join([region["content"].lstrip()
                    for region in cond["regions"]
                    if region["content"].strip() != ""])
    # Remove the space that the join introduces before punctuation.
    ret = re.sub(r"\s+([.,])", r"\1", ret)
    return ret
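
# Illustrative example (hypothetical region contents, not drawn from a real
# suite): non-empty regions are joined with spaces, and punctuation is
# re-attached to the preceding token.
#
#   cond = {"regions": [{"content": "The dog"},
#                       {"content": ""},
#                       {"content": "barked"},
#                       {"content": "."}]}
#   condition_to_string(cond)  # -> "The dog barked."
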
class SyntaxGymSuiteConfig(datasets.BuilderConfig):
    """Configuration for loading a single SyntaxGym test suite."""

    def __init__(self, name, version=datasets.Version("1.0.0"), **kwargs):
        description = f"SyntaxGym test suite {name}.\n" + _DESCRIPTION
        super().__init__(name=name, description=description, version=version,
                         **kwargs)


class SyntaxGymAll2020SuitesConfig(datasets.BuilderConfig):
    """Configuration for loading all Hu et al. (2020) suites at once."""

    def __init__(self, version=datasets.Version("1.0.0"), **kwargs):
        super().__init__(
            name="all-2020",
            description="All SyntaxGym test suites from Hu et al. (2020).\n" + _DESCRIPTION,
            version=version,
            **kwargs)


SUITE_DATASET_CONDITION_SPEC = {
    "condition_name": datasets.Value("string"),
    "content": datasets.Value("string"),
    "regions": datasets.Sequence({
        "region_number": datasets.Value("int32"),
        "content": datasets.Value("string")
    })
}

SUITE_DATASET_SPEC = {
    "suite_name": datasets.Value("string"),
    "item_number": datasets.Value("int32"),
    "conditions": datasets.Sequence(SUITE_DATASET_CONDITION_SPEC),
    "predictions": datasets.Sequence(datasets.Value("string")),
}
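
# Sketch of a generated example under this spec (field values are
# illustrative, not taken from a real suite). Note that consumers see
# ``datasets.Sequence`` over a dict of features as a dict of lists rather
# than a list of dicts.
#
#   {
#       "suite_name": "mvrr",
#       "item_number": 1,
#       "conditions": [
#           {"condition_name": "reduced_ambig",
#            "content": "The horse raced past the barn fell.",
#            "regions": [{"region_number": 1, "content": "The horse"}, ...]},
#           ...
#       ],
#       "predictions": ["..."],
#   }
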
class SyntaxGym(datasets.GeneratorBasedBuilder):

    SUITES = [
        "center_embed", "center_embed_mod",
        "cleft", "cleft_modifier",
        "fgd_hierarchy", "fgd_object",
        "fgd_pp", "fgd_subject",
        "mvrr", "mvrr_mod",
        "npi_orc_any", "npi_orc_ever", "npi_src_any", "npi_src_ever",
        "npz_ambig", "npz_ambig_mod", "npz_obj", "npz_obj_mod",
        "number_orc", "number_prep", "number_src",
        "reflexive_orc_fem", "reflexive_orc_masc",
        "reflexive_prep_fem", "reflexive_prep_masc",
        "reflexive_src_fem", "reflexive_src_masc",
        "subordination", "subordination_orc-orc",
        "subordination_pp-pp", "subordination_src-src",
    ]

    BUILDER_CONFIGS = (
        [SyntaxGymSuiteConfig(suite_name) for suite_name in SUITES]
        + [SyntaxGymAll2020SuitesConfig()]
    )
    DEFAULT_CONFIG_NAME = "all-2020"

    def _info(self):
        citation = f"SyntaxGym citation:\n{_CITATION}"
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(SUITE_DATASET_SPEC),
            homepage=_PROJECT_URL,
            citation=citation,
        )

    def _download_suite(self, name, dl_manager: datasets.DownloadManager):
        return dl_manager.download_and_extract(_DOWNLOAD_URL + f"{name}.json")

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        if isinstance(self.config, SyntaxGymAll2020SuitesConfig):
            # The "all-2020" config concatenates every suite into one split.
            paths = [self._download_suite(suite_name, dl_manager)
                     for suite_name in self.SUITES]
        else:
            paths = [self._download_suite(self.config.name, dl_manager)]
        return [datasets.SplitGenerator(name=datasets.Split.TEST,
                                        gen_kwargs={"paths": paths})]

    def _generate_examples(self, paths):
        for path in paths:
            with open(path, "r", encoding="utf-8") as f:
                suite_json = json.load(f)

            suite_name = suite_json["meta"]["name"]
            predictions = [p["formula"] for p in suite_json["predictions"]]
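            # Each prediction formula is a SyntaxGym expression comparing
            # per-region surprisals across conditions, e.g. (illustrative)
            # "(6;%reduced_ambig%) > (6;%unreduced_ambig%)".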
            for item in suite_json["items"]:
                # Flatten each condition's region list into a single
                # sentence string.
                for cond in item["conditions"]:
                    cond["content"] = condition_to_string(cond)

                item["suite_name"] = suite_name
                item["predictions"] = predictions
                # Qualify keys by suite name so they remain unique when
                # multiple suites share a split (the "all-2020" config).
                yield f"{suite_name}/{item['item_number']}", item