Modalities: Text
Libraries: Datasets
File size: 4,846 Bytes
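
A minimal usage sketch for this loading script (the local path "syntaxgym.py"
is an assumption for illustration; recent versions of the Datasets library may
also require passing trust_remote_code=True):

    from datasets import load_dataset

    # Load a single test suite; config names come from SyntaxGym.SUITES below.
    suite = load_dataset("syntaxgym.py", "center_embed", split="test")
    print(suite[0]["suite_name"], suite[0]["item_number"])

    # The default "all-2020" config loads every suite from Hu et al. (2020).
    all_suites = load_dataset("syntaxgym.py", "all-2020", split="test")
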
# coding=utf-8

"""
SyntaxGym dataset as used in Hu et al. (2020).
"""


import json
import re
from typing import List

import datasets


_CITATION = """
@inproceedings{Hu:et-al:2020,
  author = {Hu, Jennifer and Gauthier, Jon and Qian, Peng and Wilcox, Ethan and Levy, Roger},
  title = {A systematic assessment of syntactic generalization in neural language models},
  booktitle = {Proceedings of the Association for Computational Linguistics},
  year = {2020}
}
"""

_DESCRIPTION = ("SyntaxGym test suites for evaluating syntactic generalization "
                "in neural language models, as used in Hu et al. (2020).")


_PROJECT_URL = "https://syntaxgym.org"
_DOWNLOAD_URL = "https://raw.githubusercontent.com/cpllab/syntactic-generalization/nextflow/test_suites/json/"


def condition_to_string(cond):
    """Join a condition's non-empty region contents into one sentence string,
    collapsing any whitespace left before `.` or `,`."""
    ret = " ".join([region["content"].lstrip()
                    for region in cond["regions"]
                    if region["content"].strip() != ""])
    ret = re.sub(r"\s+([.,])", r"\1", ret)

    return ret


class SyntaxGymSuiteConfig(datasets.BuilderConfig):
    """BuilderConfig for a single SyntaxGym test suite."""

    def __init__(self, name, version=datasets.Version("1.0.0"), **kwargs):
        description = f"SyntaxGym test suite {name}.\n" + _DESCRIPTION
        super().__init__(name=name, description=description, version=version,
                         **kwargs)


class SyntaxGymAll2020SuitesConfig(datasets.BuilderConfig):
    """Aggregate BuilderConfig covering all test suites from Hu et al. (2020)."""

    def __init__(self, version=datasets.Version("1.0.0"), **kwargs):
        super().__init__(
            name="all-2020",
            description="All SyntaxGym test suites from Hu et al. (2020).\n" + _DESCRIPTION,
            version=version,
            **kwargs)


SUITE_DATASET_CONDITION_SPEC = {
    "condition_name": datasets.Value("string"),
    "content": datasets.Value("string"),
    "regions": datasets.Sequence({
        "region_number": datasets.Value("int32"),
        "content": datasets.Value("string")
    })
}

SUITE_DATASET_SPEC = {
    "suite_name": datasets.Value("string"),
    "item_number": datasets.Value("int32"),
    "conditions": datasets.Sequence(SUITE_DATASET_CONDITION_SPEC),
    "predictions": datasets.Sequence(datasets.Value("string")),
}
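
# Illustrative shape of one generated example under SUITE_DATASET_SPEC
# (field values below are made-up placeholders, not content from a real suite):
#
#   {
#       "suite_name": "center_embed",
#       "item_number": 1,
#       "conditions": [
#           {"condition_name": "...",
#            "content": "The author that the critics liked ...",
#            "regions": [{"region_number": 1, "content": "The author"}, ...]},
#       ],
#       "predictions": ["..."],   # prediction formula strings from the suite JSON
#   }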


class SyntaxGym(datasets.GeneratorBasedBuilder):
    """Dataset builder for SyntaxGym test suites; every config exposes a single "test" split."""

    SUITES = [
        "center_embed", "center_embed_mod",
        "cleft", "cleft_modifier",
        "fgd_hierarchy", "fgd_object",
        "fgd_pp", "fgd_subject",
        "mvrr", "mvrr_mod",
        "npi_orc_any", "npi_orc_ever", "npi_src_any", "npi_src_ever",
        "npz_ambig", "npz_ambig_mod", "npz_obj", "npz_obj_mod",
        "number_orc", "number_prep", "number_src",
        "reflexive_orc_fem", "reflexive_orc_masc",
        "reflexive_prep_fem", "reflexive_prep_masc",
        "reflexive_src_fem", "reflexive_src_masc",
        "subordination", "subordination_orc-orc",
        "subordination_pp-pp", "subordination_src-src",
    ]
    BUILDER_CONFIGS = \
        [SyntaxGymSuiteConfig(suite_name) for suite_name in SUITES] + \
        [SyntaxGymAll2020SuitesConfig()]
    DEFAULT_CONFIG_NAME = "all-2020"

    def _info(self):
        # Per-suite reference citations are not attached to the config here,
        # so only the general SyntaxGym citation is included.
        citation = f"SyntaxGym citation:\n{_CITATION}"

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(SUITE_DATASET_SPEC),
            homepage=_PROJECT_URL,
            citation=citation,
        )

    def _download_suite(self, name, dl_manager: datasets.DownloadManager):
        return dl_manager.download_and_extract(_DOWNLOAD_URL + f"{name}.json")

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        if isinstance(self.config, SyntaxGymAll2020SuitesConfig):
            paths = [self._download_suite(suite_name, dl_manager) for suite_name in self.SUITES]
        else:
            paths = [self._download_suite(self.config.name, dl_manager)]
        
        return [datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"paths": paths})]

    def _generate_examples(self, paths):
        for path in paths:
            with open(path, "r", encoding="utf-8") as f:
                suite_json = json.load(f)

            suite_name = suite_json["meta"]["name"]
            predictions = [p["formula"] for p in suite_json["predictions"]]

            for item in suite_json["items"]:
                # Convert to sentence input.
                for cond in item["conditions"]:
                    cond["content"] = condition_to_string(cond)

                item["suite_name"] = suite_name
                item["predictions"] = predictions

                yield f"{suite_name}/{item['item_number']}", item
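

if __name__ == "__main__":
    # Minimal smoke test of condition_to_string with a hand-built condition;
    # region contents are illustrative, not taken from an actual suite.
    example_condition = {
        "regions": [
            {"region_number": 1, "content": "The author"},
            {"region_number": 2, "content": "that the critics liked"},
            {"region_number": 3, "content": ""},
            {"region_number": 4, "content": "won the award"},
            {"region_number": 5, "content": "."},
        ]
    }
    # Prints: The author that the critics liked won the award.
    print(condition_to_string(example_condition))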