elenanereiss committed on
Commit
f6e2638
·
1 Parent(s): dc55e3c

Update german-ler.py

Browse files
Files changed (1) hide show
  1. german-ler.py +39 -2
german-ler.py CHANGED
@@ -105,6 +105,27 @@ class German_LER(datasets.GeneratorBasedBuilder):
105
  ]
106
  )
107
  ),
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
108
  },
109
  ),
110
  supervised_keys=None,
@@ -138,12 +159,23 @@ class German_LER(datasets.GeneratorBasedBuilder):
138
  ),
139
  ]
140
 
141
-
 
 
 
 
 
 
 
 
 
 
142
  def _generate_examples(self, datapath, split):
143
  sentence_counter = 0
144
  with open(datapath, encoding="utf-8") as f:
145
  current_words = []
146
  current_labels = []
 
147
  for row in f:
148
  row = row.rstrip()
149
  row_split = row.split()
@@ -151,24 +183,28 @@ class German_LER(datasets.GeneratorBasedBuilder):
151
  token, label = row_split
152
  current_words.append(token)
153
  current_labels.append(label)
 
154
  else:
155
  if not current_words:
156
  continue
157
  assert len(current_words) == len(current_labels), "word len doesnt match label length"
 
158
  sentence = (
159
  sentence_counter,
160
  {
161
  "id": str(sentence_counter),
162
  "tokens": current_words,
163
  "ner_tags": current_labels,
 
164
  },
165
  )
166
  sentence_counter += 1
167
  current_words = []
168
  current_labels = []
 
169
  yield sentence
170
 
171
- # if something remains:
172
  if current_words:
173
  sentence = (
174
  sentence_counter,
@@ -176,6 +212,7 @@ class German_LER(datasets.GeneratorBasedBuilder):
176
  "id": str(sentence_counter),
177
  "tokens": current_words,
178
  "ner_tags": current_labels,
 
179
  },
180
  )
181
  yield sentence
 
105
  ]
106
  )
107
  ),
108
+ "ner_coarse_tags": datasets.Sequence(
109
+ datasets.features.ClassLabel(
110
+ names=[
111
+ "B-LIT",
112
+ "B-LOC",
113
+ "B-NRM",
114
+ "B-ORG",
115
+ "B-PER",
116
+ "B-REG",
117
+ "B-RS",
118
+ "I-LIT",
119
+ "I-LOC",
120
+ "I-NRM",
121
+ "I-ORG",
122
+ "I-PER",
123
+ "I-REG",
124
+ "I-RS",
125
+ "O",
126
+ ]
127
+ )
128
+ ),
129
  },
130
  ),
131
  supervised_keys=None,
 
159
  ),
160
  ]
161
 
162
+ def generate_coarse_tags(label):
163
+ if label == 'O': return label
164
+
165
+ bio, fine_tag = label.split("-")
166
+ if fine_tag in ['PER', 'RR', 'AN']: return bio + '-PER'
167
+ elif fine_tag in ['LD', 'ST', 'STR', 'LDS']: return bio + '-LOC'
168
+ elif fine_tag in ['ORG', 'UN', 'INN', 'GRT', 'MRK']: return bio + '-ORG'
169
+ elif fine_tag in ['GS', 'VO', 'EUN']: return bio + '-NRM'
170
+ elif fine_tag in ['VS', 'VT']: return bio + '-REG'
171
+ else: return label
172
+
173
  def _generate_examples(self, datapath, split):
174
  sentence_counter = 0
175
  with open(datapath, encoding="utf-8") as f:
176
  current_words = []
177
  current_labels = []
178
+ current_coarse_labels = []
179
  for row in f:
180
  row = row.rstrip()
181
  row_split = row.split()
 
183
  token, label = row_split
184
  current_words.append(token)
185
  current_labels.append(label)
186
+ current_coarse_labels.append(generate_coarse_tags(label))
187
  else:
188
  if not current_words:
189
  continue
190
  assert len(current_words) == len(current_labels), "word len doesnt match label length"
191
+ assert len(current_words) == len(current_coarse_labels), "word len doesnt match coarse label length"
192
  sentence = (
193
  sentence_counter,
194
  {
195
  "id": str(sentence_counter),
196
  "tokens": current_words,
197
  "ner_tags": current_labels,
198
+ "ner_coarse_tags": current_coarse_labels,
199
  },
200
  )
201
  sentence_counter += 1
202
  current_words = []
203
  current_labels = []
204
+ current_coarse_labels = []
205
  yield sentence
206
 
207
+ # last sentence
208
  if current_words:
209
  sentence = (
210
  sentence_counter,
 
212
  "id": str(sentence_counter),
213
  "tokens": current_words,
214
  "ner_tags": current_labels,
215
+ "ner_coarse_tags": current_coarse_labels,
216
  },
217
  )
218
  yield sentence