init
Browse files
experiments/model_finetuning_ner.py
CHANGED
@@ -102,30 +102,30 @@ def main(
|
|
102 |
|
103 |
def tokenize_and_align_labels(examples):
|
104 |
tokens = [[preprocess(model, w) for w in t] for t in examples["text_tokenized"]]
|
105 |
-
if tokenizer.is_fast:
|
106 |
-
|
107 |
-
|
108 |
-
|
109 |
-
|
110 |
-
|
111 |
-
|
112 |
-
|
113 |
-
|
114 |
-
|
115 |
-
|
116 |
-
|
117 |
-
|
118 |
-
|
119 |
-
else:
|
120 |
-
|
121 |
-
|
122 |
-
|
123 |
-
|
124 |
-
|
125 |
-
|
126 |
-
|
127 |
-
|
128 |
-
|
129 |
|
130 |
|
131 |
|
|
|
102 |
|
103 |
def tokenize_and_align_labels(examples):
    """Tokenize pre-split sentences and align NER labels to sub-word tokens.

    Operates on a batch from a Hugging Face dataset. Expects each example to
    provide ``text_tokenized`` (a list of word strings) and
    ``gold_label_sequence`` (one label per word).

    Relies on ``tokenizer``, ``model``, ``preprocess``, and
    ``align_labels_with_tokens`` from the enclosing scope.

    Returns the tokenizer output (``input_ids``, attention mask, ...) with an
    added ``labels`` key holding per-sub-token label sequences.
    """
    # Normalize each word before tokenization (model-specific preprocessing).
    tokens = [[preprocess(model, w) for w in t] for t in examples["text_tokenized"]]

    # NOTE(review): this path requires a *fast* tokenizer — word_ids() below
    # is only available on fast tokenizers. The old slow-tokenizer branch was
    # dead commented-out code duplicating the same logic and has been removed.
    tokenized_inputs = tokenizer(
        tokens,
        truncation=True,
        is_split_into_words=True,  # inputs are already word-split lists
        padding="max_length",
        max_length=128)

    all_labels = examples["gold_label_sequence"]
    new_labels = []
    for ind, labels in enumerate(all_labels):
        # Map each sub-token back to its source word so word-level labels
        # can be expanded to sub-token level.
        word_ids = tokenized_inputs.word_ids(ind)
        new_labels.append(align_labels_with_tokens(labels, word_ids))

    tokenized_inputs["labels"] = new_labels
    return tokenized_inputs
|
129 |
|
130 |
|
131 |
|