asahi417 committed
Commit ea2c2e9 · 1 Parent(s): c2f3058
Files changed (1)
  1. experiments/model_finetuning_ner.py +24 -24
experiments/model_finetuning_ner.py CHANGED
@@ -102,30 +102,30 @@ def main(
 
     def tokenize_and_align_labels(examples):
         tokens = [[preprocess(model, w) for w in t] for t in examples["text_tokenized"]]
-        if tokenizer.is_fast:
-            tokenized_inputs = tokenizer(
-                tokens,
-                truncation=True,
-                is_split_into_words=True,
-                padding="max_length",
-                max_length=128)
-            all_labels = examples["gold_label_sequence"]
-            new_labels = []
-            for ind, labels in enumerate(all_labels):
-                word_ids = tokenized_inputs.word_ids(ind)
-                new_labels.append(align_labels_with_tokens(labels, word_ids))
-            tokenized_inputs["labels"] = new_labels
-            return tokenized_inputs
-        else:
-            tokenized_inputs = tokenizer(
-                tokens, )
-            all_labels = examples["gold_label_sequence"]
-            new_labels = []
-            for ind, labels in enumerate(all_labels):
-                word_ids = tokenized_inputs.word_ids(ind)
-                new_labels.append(align_labels_with_tokens(labels, word_ids))
-            tokenized_inputs["labels"] = new_labels
-            return tokenized_inputs
+        # if tokenizer.is_fast:
+        tokenized_inputs = tokenizer(
+            tokens,
+            truncation=True,
+            is_split_into_words=True,
+            padding="max_length",
+            max_length=128)
+        all_labels = examples["gold_label_sequence"]
+        new_labels = []
+        for ind, labels in enumerate(all_labels):
+            word_ids = tokenized_inputs.word_ids(ind)
+            new_labels.append(align_labels_with_tokens(labels, word_ids))
+        tokenized_inputs["labels"] = new_labels
+        return tokenized_inputs
+        # else:
+        #     tokenized_inputs = tokenizer(
+        #         tokens)
+        #     all_labels = examples["gold_label_sequence"]
+        #     new_labels = []
+        #     for ind, labels in enumerate(all_labels):
+        #         word_ids = tokenized_inputs.word_ids(ind)
+        #         new_labels.append(align_labels_with_tokens(labels, word_ids))
+        #     tokenized_inputs["labels"] = new_labels
+        #     return tokenized_inputs
 
 
 
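The commit drops the slow-tokenizer fallback rather than fixing it, which is consistent with how transformers works: BatchEncoding.word_ids() is implemented only for fast (Rust-backed) tokenizers, so the deleted else branch would have raised a ValueError at tokenized_inputs.word_ids(ind) anyway. A minimal sketch of the surviving fast path in isolation, using bert-base-cased as a placeholder checkpoint (the checkpoint the script actually loads is not shown in this hunk):

from transformers import AutoTokenizer

# "bert-base-cased" is a placeholder; the script's real checkpoint is not in this hunk.
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
assert tokenizer.is_fast  # word_ids() below requires a Rust-backed tokenizer

tokenized_inputs = tokenizer(
    [["John", "lives", "in", "New", "York"]],  # pre-tokenized, like examples["text_tokenized"]
    truncation=True,
    is_split_into_words=True,
    padding="max_length",
    max_length=128)

# Maps each sub-token of sequence 0 back to its source word; None marks
# special tokens and padding. A slow tokenizer raises ValueError here.
print(tokenized_inputs.word_ids(0)[:8])  # e.g. [None, 0, 1, 2, 3, 4, None, None]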
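The hunk also relies on align_labels_with_tokens, which is defined elsewhere in the script. For readers of this commit, here is a hypothetical sketch of the usual implementation from the Hugging Face token-classification recipe, assuming an IOB2 label scheme where B- tags have odd ids and the matching I- tag is the next id; the repository's actual helper may differ:

def align_labels_with_tokens(labels, word_ids):
    # Expand word-level labels to sub-token level (hypothetical sketch;
    # follows the Hugging Face token-classification tutorial convention).
    new_labels = []
    current_word = None
    for word_id in word_ids:
        if word_id is None:
            # Special tokens and padding get -100 so the loss ignores them.
            new_labels.append(-100)
        elif word_id != current_word:
            # First sub-token of a new word keeps the word's original label.
            current_word = word_id
            new_labels.append(labels[word_id])
        else:
            # Later sub-tokens of the same word: convert B-X (odd id) to I-X.
            label = labels[word_id]
            new_labels.append(label + 1 if label % 2 == 1 else label)
    return new_labels

Under that label-scheme assumption, align_labels_with_tokens([1, 0], [None, 0, 0, 1, None]) returns [-100, 1, 2, 0, -100]: special tokens are masked with -100, and the second sub-token of word 0 is switched from B (1) to I (2).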