elenanereiss committed
Commit 8a2d3e4 · 1 Parent(s): 078193c

Create new file

Files changed (1)
  1. german-ler.py +185 -0
german-ler.py ADDED
@@ -0,0 +1,185 @@
+ # coding=utf-8
+ # Copyright 2020 HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ import datasets
+
+
+ _DESCRIPTION = """\
+ A dataset of Legal Documents from German federal court decisions for Named Entity Recognition. The dataset is human-annotated with 19 fine-grained entity classes. The dataset consists of approx. 67,000 sentences and contains 54,000 annotated entities.
+ """
+
+ _HOMEPAGE_URL = "https://github.com/elenanereiss/Legal-Entity-Recognition"
+ _CITATION = """\
+ @inproceedings{leitner2019fine,
+     author    = {Elena Leitner and Georg Rehm and Julian Moreno-Schneider},
+     title     = {{Fine-grained Named Entity Recognition in Legal Documents}},
+     booktitle = {Semantic Systems. The Power of AI and Knowledge Graphs.
+                  Proceedings of the 15th International Conference (SEMANTiCS 2019)},
+     year      = 2019,
+     editor    = {Maribel Acosta and Philippe Cudré-Mauroux and Maria Maleshkova
+                  and Tassilo Pellegrini and Harald Sack and York Sure-Vetter},
+     keywords  = {aip},
+     publisher = {Springer},
+     series    = {Lecture Notes in Computer Science},
+     number    = {11702},
+     address   = {Karlsruhe, Germany},
+     month     = 9,
+     note      = {10/11 September 2019},
+     pages     = {272--287},
+     pdf       = {https://link.springer.com/content/pdf/10.1007%2F978-3-030-33220-4_20.pdf}
+ }
+ """
+
+ _URL = {
+     "train": "https://raw.githubusercontent.com/elenanereiss/Legal-Entity-Recognition/master/data/ler_train.conll",
+     "dev": "https://raw.githubusercontent.com/elenanereiss/Legal-Entity-Recognition/master/data/ler_dev.conll",
+     "test": "https://raw.githubusercontent.com/elenanereiss/Legal-Entity-Recognition/master/data/ler_test.conll",
+ }
+ _VERSION = "1.0.0"
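+
+ # Note on the expected file layout: the .conll files above contain one token per line in two
+ # whitespace-separated columns (token, BIO tag), with a blank line between sentences; this is
+ # the layout that _generate_examples below parses. The tokens and tags in this sketch are made
+ # up for illustration, not taken from the corpus:
+ #
+ #     Bundesgerichtshof B-GRT
+ #     ,                 O
+ #     Urteil            O
+ #
+ #     Der               O
+ #     Kläger            O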
+
+
+ class German_LER(datasets.GeneratorBasedBuilder):
+     VERSION = datasets.Version(_VERSION)
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "tokens": datasets.Sequence(datasets.Value("string")),
+                     "ner_tags": datasets.Sequence(
+                         datasets.features.ClassLabel(
+                             names=[
+                                 "B-AN",
+                                 "B-EUN",
+                                 "B-GRT",
+                                 "B-GS",
+                                 "B-INN",
+                                 "B-LD",
+                                 "B-LDS",
+                                 "B-LIT",
+                                 "B-MRK",
+                                 "B-ORG",
+                                 "B-PER",
+                                 "B-RR",
+                                 "B-RS",
+                                 "B-ST",
+                                 "B-STR",
+                                 "B-UN",
+                                 "B-VO",
+                                 "B-VS",
+                                 "B-VT",
+                                 "I-AN",
+                                 "I-EUN",
+                                 "I-GRT",
+                                 "I-GS",
+                                 "I-INN",
+                                 "I-LD",
+                                 "I-LDS",
+                                 "I-LIT",
+                                 "I-MRK",
+                                 "I-ORG",
+                                 "I-PER",
+                                 "I-RR",
+                                 "I-RS",
+                                 "I-ST",
+                                 "I-STR",
+                                 "I-UN",
+                                 "I-VO",
+                                 "I-VS",
+                                 "I-VT",
+                                 "O",
+                             ]
+                         )
+                     ),
+                 },
+             ),
+             supervised_keys=None,
+             homepage=_HOMEPAGE_URL,
+             citation=_CITATION,
+         )
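+
+     # Note: "ner_tags" is encoded as a sequence of ClassLabel integer ids. The feature object of a
+     # loaded split can map ids back to tag strings, e.g. (illustrative; `ds` is a hypothetical
+     # handle to the loaded dataset):
+     #
+     #     tag_feature = ds["train"].features["ner_tags"].feature
+     #     tag_feature.int2str(0)  # -> "B-AN"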
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+
+         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs.
+         # It can accept any type or nested list/dict and will return the same structure with each URL
+         # replaced by a path to a local file. By default, archives are extracted and a path to the cached
+         # folder where they were extracted is returned instead of the archive.
+         data_dir = dl_manager.download_and_extract(_URL)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={"datapath": data_dir["train"], "split": "train"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={"datapath": data_dir["test"], "split": "test"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={"datapath": data_dir["dev"], "split": "dev"},
+             ),
+         ]
+
+     def _generate_examples(self, datapath, split):
+         # datapath is the local path of the downloaded .conll file for this split,
+         # as passed via gen_kwargs in _split_generators.
+         sentence_counter = 0
+         with open(datapath, encoding="utf-8") as f:
+             current_words = []
+             current_labels = []
+             for row in f:
+                 row = row.rstrip()
+                 row_split = row.split()
+                 if len(row_split) == 2:
+                     # Token lines have two whitespace-separated columns: token and BIO tag.
+                     token, label = row_split
+                     current_words.append(token)
+                     current_labels.append(label)
+                 else:
+                     # A blank line marks the end of a sentence.
+                     if not current_words:
+                         continue
+                     assert len(current_words) == len(current_labels), "token and label counts do not match"
+                     sentence = (
+                         sentence_counter,
+                         {
+                             "id": str(sentence_counter),
+                             "tokens": current_words,
+                             "ner_tags": current_labels,
+                         },
+                     )
+                     sentence_counter += 1
+                     current_words = []
+                     current_labels = []
+                     yield sentence
+
+             # If the file does not end with a blank line, yield the last sentence as well.
+             if current_words:
+                 sentence = (
+                     sentence_counter,
+                     {
+                         "id": str(sentence_counter),
+                         "tokens": current_words,
+                         "ner_tags": current_labels,
+                     },
+                 )
+                 yield sentence
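
A minimal usage sketch (not part of the commit above): once the script is available, the splits can be loaded with the standard datasets API; "path/to/german-ler.py" is a placeholder for a local copy of the file.

    from datasets import load_dataset

    # Loads train/validation/test as defined in _split_generators.
    ds = load_dataset("path/to/german-ler.py")
    print(ds["train"][0]["tokens"])
    print(ds["train"][0]["ner_tags"])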