changed tokenization
ref_seg_ger.py (+3, -1)
```diff
@@ -57,7 +57,7 @@ _URLS = {
 
 _LABELS = [
     'publisher', 'source', 'url', 'other', 'author', 'editor', 'lpage',
-    'volume', 'year', 'issue', 'title', 'fpage', 'identfier'
+    'volume', 'year', 'issue', 'title', 'fpage', 'edition'
 ]
 
 _FEATURES = datasets.Features(
```
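The label list most likely ends up inside the `datasets.Features` declaration that this hunk cuts off. A minimal sketch of the usual token-classification pattern, assuming `_LABELS` feeds a `ClassLabel` (the real `_FEATURES` body is not shown in this diff, and the `tokens`/`labels` column names are hypothetical):

```python
import datasets

_LABELS = [
    'publisher', 'source', 'url', 'other', 'author', 'editor', 'lpage',
    'volume', 'year', 'issue', 'title', 'fpage', 'edition'
]

# Typical layout for a token-classification dataset script: one string
# sequence for the tokens, one ClassLabel sequence for their tags.
_FEATURES = datasets.Features({
    'tokens': datasets.Sequence(datasets.Value('string')),
    'labels': datasets.Sequence(datasets.ClassLabel(names=_LABELS)),
})
```

Because `ClassLabel` only accepts names from its fixed list, any row still tagged with the removed label would fail to encode, which is what the second hunk guards against.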
```diff
@@ -229,6 +229,8 @@ class RefSeg(datasets.GeneratorBasedBuilder):
 
             # tokenized_input = row['token'].split(' ')
             tkn = self.TOKENIZER.pre_tokenize_str(row['token'])
+            if row['label'] == 'identfier':
+                row['label'] = 'other'
             if not tkn:
                 continue
             tokenized_input, offsets = zip(*tkn)
```
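Two things happen here: rows still carrying the misspelled `identfier` tag are folded into `other` (since that name no longer appears in `_LABELS`), and the pre-tokenizer output is unpacked into parallel token/offset tuples. `pre_tokenize_str` comes from the `tokenizers` library and returns `(token, (start, end))` pairs; which pre-tokenizer `self.TOKENIZER` is built from is not shown in the diff, so `Whitespace()` below is an assumption used only to illustrate the contract:

```python
from tokenizers.pre_tokenizers import Whitespace

pre_tok = Whitespace()  # stand-in for self.TOKENIZER; the actual pre-tokenizer is not shown

tkn = pre_tok.pre_tokenize_str('Berlin: Springer, 1997.')
# [('Berlin', (0, 6)), (':', (6, 7)), ('Springer', (8, 16)),
#  (',', (16, 17)), ('1997', (18, 22)), ('.', (22, 23))]

tokenized_input, offsets = zip(*tkn)
# tokenized_input -> ('Berlin', ':', 'Springer', ',', '1997', '.')
# offsets         -> ((0, 6), (6, 7), (8, 16), (16, 17), (18, 22), (22, 23))

# A whitespace-only or empty row yields [], and unpacking zip(*[]) raises
# ValueError; hence the `if not tkn: continue` guard above.
```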