import os
from copy import deepcopy

import datasets


_CITATION = """\
@InProceedings{boyd2018wnut,
  author    = {Adriane Boyd},
  title     = {Using Wikipedia Edits in Low Resource Grammatical Error Correction},
  booktitle = {Proceedings of the 4th Workshop on Noisy User-generated Text},
  publisher = {Association for Computational Linguistics},
  year      = {2018},
  url       = {http://aclweb.org/anthology/W18-6111}
}
"""

_DESCRIPTION = """\
Falko-MERLIN is a German grammatical error correction corpus built from learner essays (Falko) and exam texts (MERLIN).
"""

_HOMEPAGE = "https://github.com/adrianeboyd/boyd-wnut2018"

_LICENSE = "Creative Commons Attribution Share Alike 4.0 International"

_URLS = {
    "falko_merlin_wikipedia": "http://www.sfs.uni-tuebingen.de/~adriane/download/wnut2018/data.tar.gz"
}


class FalkoMERLIN(datasets.GeneratorBasedBuilder):
    """Falko-MERLIN grammatical error correction corpus (Boyd, 2018)."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        features = datasets.Features(
            {
                "src_tokens": datasets.Sequence(datasets.Value("string")),
                "tgt_tokens": datasets.Sequence(datasets.Value("string")),
                "corrections": [{
                    "idx_src": datasets.Sequence(datasets.Value("int32")),
                    "idx_tgt": datasets.Sequence(datasets.Value("int32")),
                    "corr_type": datasets.Value("string")
                }]
            }
        )
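
        # Illustrative shape of a single example under this schema (the tokens
        # and error type below are made up for illustration, not corpus data):
        # {
        #     "src_tokens": ["Das", "ist", "eine", "Satz", "."],
        #     "tgt_tokens": ["Das", "ist", "ein", "Satz", "."],
        #     "corrections": [{"idx_src": [2], "idx_tgt": [2], "corr_type": "R:DET"}],
        # }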

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        url = _URLS["falko_merlin_wikipedia"]
        data_dir = dl_manager.download_and_extract(url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"file_path": os.path.join(data_dir, "data", "fm-train.m2")},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"file_path": os.path.join(data_dir, "data", "fm-dev.m2")},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"file_path": os.path.join(data_dir, "data", "fm-test.m2")}
            )
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, file_path):
        skip_edits = {"noop", "UNK", "Um"}  # annotation types that carry no actual edit
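        # The .m2 files follow the standard M2 annotation format: each example
        # starts with a tokenised source line ("S <tokens>"), followed by zero
        # or more annotation lines of the form
        #   A <start> <end>|||<error type>|||<correction>|||...|||<annotator id>
        # and consecutive examples are separated by an empty line. A made-up
        # illustration (not taken from the corpus):
        #   S Das ist eine Satz .
        #   A 2 3|||R:DET|||ein|||REQUIRED|||-NONE-|||0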
        with open(file_path, "r", encoding="utf-8") as f:
            idx_ex = 0
            # `offset` tracks how far target indices are shifted relative to the
            # source sentence by the edits applied so far.
            src_sent, tgt_sent, corrections, offset = None, None, [], 0
            for idx_line, _line in enumerate(f):
                line = _line.strip()

                if len(line) > 0:
                    prefix, remainder = line[0], line[2:]
                    if prefix == "S":
                        src_sent = remainder.split(" ")
                        tgt_sent = deepcopy(src_sent)

                    elif prefix == "A":
                        annotation_data = remainder.split("|||")
                        idx_start, idx_end = map(int, annotation_data[0].split(" "))
                        edit_type, edit_text = annotation_data[1], annotation_data[2]
                        if edit_type in skip_edits:
                            continue

                        formatted_correction = {
                            "idx_src": list(range(idx_start, idx_end)),
                            "idx_tgt": [],
                            "corr_type": edit_type
                        }
                        annotator_id = int(annotation_data[-1])
                        # The corpus is singly annotated; only annotator 0 is expected.
                        assert annotator_id == 0

                        # An empty correction or "-NONE-" marks a pure deletion of the source span.
                        removal = len(edit_text) == 0 or edit_text == "-NONE-"
                        if removal:
                            for idx_to_remove in range(idx_start, idx_end):
                                del tgt_sent[offset + idx_to_remove]
                                offset -= 1

                        else:  # replacement/insertion
                            edit_tokens = edit_text.split(" ")
                            len_diff = len(edit_tokens) - (idx_end - idx_start)

                            formatted_correction["idx_tgt"] = list(
                                range(offset + idx_start, offset + idx_end + len_diff))
                            tgt_sent[offset + idx_start: offset + idx_end] = edit_tokens
                            offset += len_diff

                        corrections.append(formatted_correction)

                else:  # empty line, indicating end of example
                    if src_sent is not None:
                        yield idx_ex, {
                            "src_tokens": src_sent,
                            "tgt_tokens": tgt_sent,
                            "corrections": corrections
                        }
                        idx_ex += 1
                    src_sent, tgt_sent, corrections, offset = None, None, [], 0

            # Yield the last example if the file does not end with an empty line.
            if src_sent is not None:
                yield idx_ex, {
                    "src_tokens": src_sent,
                    "tgt_tokens": tgt_sent,
                    "corrections": corrections
                }
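

if __name__ == "__main__":
    # A minimal usage sketch, not part of the original script. Loading this
    # file directly requires a `datasets` release that still supports
    # script-based datasets (older versions, or `trust_remote_code=True`
    # on newer 2.x releases).
    dataset = datasets.load_dataset(__file__)
    print(dataset["train"][0]["src_tokens"])
    print(dataset["train"][0]["corrections"])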