Theoreticallyhugo committed
Commit a4a7178 · 1 Parent(s): ea649c0

simple is working but not properly tested

Files changed (1)
  1. Stab-Gurevych-Essays.py +464 -0
Stab-Gurevych-Essays.py ADDED
@@ -0,0 +1,464 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # TODO: Address all TODOs and remove all explanatory comments
+ """Loader for the Stab and Gurevych argument-annotated essays corpus."""
+
+
+ import re
+ import tempfile
+ from pathlib import Path
+ from zipfile import ZipFile
+
+ import requests
+
+ import datasets
+
+ # TODO: Add BibTeX citation
+ # Find for instance the citation on arxiv or on the dataset repo/website
+ _CITATION = """\
+ @InProceedings{huggingface:dataset,
+     title = {A great new dataset},
+     author = {huggingface, Inc.},
+     year = {2020}
+ }
+ """
+
+ # TODO: Add description of the dataset here
+ # You can copy an official description
+ _DESCRIPTION = """\
+ This dataset contains 402 argumentative essays from non-native English
+ speakers, annotated with argument components (major claims, claims, and
+ premises).
+ """
+
+ # TODO: Add a link to an official homepage for the dataset here
+ _HOMEPAGE = ""
+
+ # TODO: Add the licence for the dataset here if you can find it
+ _LICENSE = ""
+
+ # TODO: Add link to the official dataset URLs here
+ # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
+ # This can be an arbitrary nested dict/list of URLs (see below in the `_split_generators` method).
+ _URLS = {
+     "tu_darmstadt": "https://tudatalib.ulb.tu-darmstadt.de/bitstream/handle/tudatalib/2422/ArgumentAnnotatedEssays-2.0.zip?sequence=1&isAllowed=y",
+ }
+
+
+
+ # The name of the dataset class usually matches the script name in CamelCase instead of snake_case.
+ class StabGurevychEssays(datasets.GeneratorBasedBuilder):
+     """Token-level argument-mining dataset over the Stab and Gurevych essays."""
+
+     VERSION = datasets.Version("1.1.0")
+
+     temp_dir = tempfile.TemporaryDirectory()
+
+     # This is an example of a dataset with multiple configurations.
+     # If you don't want/need to define several sub-sets in your dataset,
+     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
+
+     # If you need to make complex sub-parts in the dataset with configurable options,
+     # you can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig.
+     # BUILDER_CONFIG_CLASS = MyBuilderConfig
+
+     # You will be able to load one or the other of the configurations in the following list with
+     # data = datasets.load_dataset('my_dataset', 'first_domain')
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(
+             name="full_labels",
+             version=VERSION,
+             description="get all the data conveyed by the labels: O, B-Claim, I-Claim, etc.",
+         ),
+         datasets.BuilderConfig(
+             name="spans",
+             version=VERSION,
+             description="get the spans only: O, B-Span, I-Span.",
+         ),
+         datasets.BuilderConfig(
+             name="simple",
+             version=VERSION,
+             description="get the labels without B/I: O, MajorClaim, Claim, Premise.",
+         ),
+         datasets.BuilderConfig(
+             name="sep_tok",
+             version=VERSION,
+             description="get the labels without B/I (O, Claim, Premise, etc.)"
+             + " and insert separator tokens <s> ... </s>.",
+         ),
+         datasets.BuilderConfig(
+             name="sep_tok_full_labels",
+             version=VERSION,
+             description="get the labels with B/I (O, I-Claim, I-Premise, etc.)"
+             + " and insert separator tokens <s> ... </s>.",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "full_labels"  # It's not mandatory to have a default configuration; just use one if it makes sense.
+
+     def _info(self):
+         # This method specifies the datasets.DatasetInfo object, which documents the dataset's columns and their types.
+         # All configurations share the same columns; they differ only in the label set used for "ner_tags".
+         label_names = {
+             "full_labels": [
+                 "O",
+                 "B-MajorClaim",
+                 "I-MajorClaim",
+                 "B-Claim",
+                 "I-Claim",
+                 "B-Premise",
+                 "I-Premise",
+             ],
+             "spans": [
+                 "O",
+                 "B",
+                 "I",
+             ],
+             "simple": [
+                 "O",
+                 "X_placeholder_X",
+                 "MajorClaim",
+                 "Claim",
+                 "Premise",
+             ],
+             "sep_tok": [
+                 "O",
+                 "X_placeholder_X",
+                 "MajorClaim",
+                 "Claim",
+                 "Premise",
+             ],
+             "sep_tok_full_labels": [
+                 "O",
+                 "B-MajorClaim",
+                 "I-MajorClaim",
+                 "B-Claim",
+                 "I-Claim",
+                 "B-Premise",
+                 "I-Premise",
+             ],
+         }[self.config.name]
+
+         features = datasets.Features(
+             {
+                 "id": datasets.Value("int16"),
+                 "tokens": datasets.Sequence(datasets.Value("string")),
+                 "ner_tags": datasets.Sequence(datasets.ClassLabel(names=label_names)),
+                 "text": datasets.Value("string"),
+                 "span_begins": datasets.Sequence(datasets.Value("int16")),
+                 "span_ends": datasets.Sequence(datasets.Value("int16")),
+             }
+         )
+
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types.
+             features=features,
+             # If there's a common (input, target) tuple from the features, uncomment the
+             # supervised_keys line below and specify them. They'll be used if
+             # as_supervised=True in builder.as_dataset.
+             # supervised_keys=("sentence", "label"),
+             # Homepage of the dataset for documentation
+             homepage=_HOMEPAGE,
+             # License for the dataset if available
+             license=_LICENSE,
+             # Citation for the dataset
+             citation=_CITATION,
+         )
+
+     def __load_data(self):
+         # set up paths
+         save_dir = Path(self.temp_dir.name)
+         save_file = Path("essays.zip")
+
+         # get the url to the data
+         url = _URLS["tu_darmstadt"]
+         # download the data
+         r = requests.get(url, stream=True)
+         # fail early on a bad download
+         r.raise_for_status()
+         # save the data to the temporary dir
+         with open(save_dir / save_file, "wb") as fd:
+             for chunk in r.iter_content(chunk_size=128):
+                 fd.write(chunk)
+         # recursively unzip the archive and any nested zip files
+         for glob_path in save_dir.rglob("*.zip"):
+             with ZipFile(glob_path, "r") as zip_ref:
+                 zip_ref.extractall(glob_path.parent)
+         return save_dir
+
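+     # Resulting layout under the temporary dir (inferred from the paths used
+     # in _generate_examples below; illustrative, not verified here):
+     #   essays.zip
+     #   ArgumentAnnotatedEssays-2.0/
+     #       brat-project-final/
+     #           essay001.txt, essay001.ann, ...
+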
+     def __range_generator(self, train=0.8, test=0.2):
+         """
+         Returns three range objects over the essay ids: train, test, and
+         validate, where the size of the validation range is whatever the
+         other two ranges leave over.
+         """
+         # the essay ids start at 1, hence the ranges do too
+         return (
+             range(1, int(403 * train)),  # train
+             range(int(403 * train), int(403 * (train + test))),  # test
+             range(int(403 * (train + test)), 403),  # validate
+         )
+
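+     # Worked example: with the 0.7/0.2 arguments used below, this returns
+     #   train    -> range(1, 282)    (essays 1-281)
+     #   test     -> range(282, 362)  (essays 282-361)
+     #   validate -> range(362, 403)  (essays 362-402)
+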
+     def _split_generators(self, _):
+         data_dir = self.__load_data()
+
+         # split the essays into train, test, and validation ranges; with
+         # train=1.0 the dataset returns a "train" split only, which allows
+         # for e.g. 5-fold cross-validation
+         train, test, validate = self.__range_generator(0.7, 0.2)
+
+         # only non-empty splits are returned; the gen_kwargs below are
+         # passed on to _generate_examples
+         splits = [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "data_dir": data_dir,
+                     "id_range": train,
+                 },
+             ),
+         ]
+         if len(validate) > 0:
+             splits.append(
+                 datasets.SplitGenerator(
+                     name=datasets.Split.VALIDATION,
+                     gen_kwargs={
+                         "data_dir": data_dir,
+                         "id_range": validate,
+                     },
+                 )
+             )
+         if len(test) > 0:
+             splits.append(
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TEST,
+                     gen_kwargs={
+                         "data_dir": data_dir,
+                         "id_range": test,
+                     },
+                 )
+             )
+         return splits
+
+     def _get_essay(self, id: int, data_dir: Path):
+         essay_stem = f"essay{str(id).rjust(3, '0')}"
+         return (
+             data_dir.joinpath(f"{essay_stem}.txt").read_text(),
+             data_dir.joinpath(f"{essay_stem}.ann").read_text(),
+         )
+
+     def _parse_raw_ann(self, raw_ann: str):
+         raw_anns = raw_ann.split("\n")
+         clean_anns = []
+         for cur_raw_ann in raw_anns:
+             matches = re.match(r".+\t(.+) (.+) (.+)\t(.+)", cur_raw_ann)
+             if matches is not None:
+                 clean_anns.append(
+                     (
+                         matches.group(1),
+                         int(matches.group(2)),
+                         int(matches.group(3)),
+                         matches.group(4),
+                     )
+                 )
+         # sort the spans by their start offset before returning
+         return sorted(clean_anns, key=lambda x: x[1])
+
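+     # For reference, a typical component line in a brat .ann file (values are
+     # illustrative) looks like
+     #   T1<TAB>MajorClaim 503 575<TAB>we should attach more importance to ...
+     # and parses to ("MajorClaim", 503, 575, "we should attach more importance to ...").
+     # Relation and stance lines ("R1 ...", "A1 ...") contain only one tab, so
+     # the regex above skips them.
+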
+     def _tokenise(self, text, clean_anns):
+         # find spans
+         previous_end = 0
+         spans = []
+         # for every annotated span, first add the non-span text before it
+         for clean_ann in clean_anns:
+             spans.append(("O", text[previous_end:clean_ann[1]]))
+             spans.append((clean_ann[0], text[clean_ann[1]:clean_ann[2]]))
+             # debug check: if we're picking up the wrong text...
+             if spans[-1][1] != clean_ann[3]:
+                 print(spans[-1][1])
+                 input(clean_ann[3])
+             previous_end = clean_ann[2]
+         # add whatever is left over as non-span text
+         spans.append(("O", text[previous_end:]))
+
+         tokens = []
+         labels = []
+         # tokenise the spans
+         for span in spans:
+             span_tokens = span[1].split()
+             label = span[0]
+             if self.config.name == "simple":
+                 # with simple, the label is already correct
+                 pass
+             elif self.config.name == "sep_tok":
+                 # with sep_tok, the label is correct, but a sep tok needs to be inserted
+                 # TODO: make sure to include the sep tok!!!
+                 pass
+             elif self.config.name == "spans":
+                 if label != "O":
+                     label = "I"
+                 # TODO: make sure to include the B as well as the I
+             elif self.config.name == "full_labels":
+                 # TODO: ensure I and B
+                 pass
+             elif self.config.name == "sep_tok_full_labels":
+                 # TODO: ensure I and B
+                 # TODO: make sure to include the sep tok!!!
+                 pass
+             labels.append([label] * len(span_tokens))
+             tokens.append(span_tokens)
+
+         # flatten the per-span lists of tokens and labels before returning
+         tokens = [token for inner_list in tokens for token in inner_list]
+         labels = [label for inner_list in labels for label in inner_list]
+         return tokens, labels
+
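+     # One possible shape for the sep_tok TODOs above (a sketch, not the final
+     # implementation): wrap each labelled span in separator tokens that are
+     # themselves tagged "O", e.g.
+     #   if self.config.name.startswith("sep_tok") and label != "O":
+     #       span_tokens = ["<s>"] + span_tokens + ["</s>"]
+     #       span_labels = ["O"] + [label] * (len(span_tokens) - 2) + ["O"]
+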
+     def _process_essay(self, id, data_dir: Path):
+         # TODO: get the logic in here; everything else is taken care of, I think
+         text, raw_ann = self._get_essay(id, data_dir)
+         clean_anns = self._parse_raw_ann(raw_ann)
+         tokens, labels = self._tokenise(text, clean_anns)
+
+         return {
+             "id": id,
+             "tokens": tokens,
+             "ner_tags": labels,
+             "text": text,
+             "span_begins": [ann[1] for ann in clean_anns],
+             "span_ends": [ann[2] for ann in clean_anns],
+         }
+
+     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     def _generate_examples(self, data_dir: Path, id_range: range):
+         # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
+         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
+
+         data_dir = data_dir.joinpath("ArgumentAnnotatedEssays-2.0", "brat-project-final")
+
+         for id in id_range:
+             yield id, self._process_essay(id, data_dir)
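
For reference, a minimal usage sketch for the "simple" configuration the commit message refers to. The local script path and the `trust_remote_code` flag are assumptions about how the script is loaded, not part of this commit:

from datasets import load_dataset

# load the "simple" configuration from the local script
essays = load_dataset("./Stab-Gurevych-Essays.py", "simple", trust_remote_code=True)
# token/label pairs for the first essay of the train split
print(essays["train"][0]["tokens"][:10])
print(essays["train"][0]["ner_tags"][:10])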