init
Browse files
- experiments/main.sh +4 -8
- experiments/model_finetuning_ner.py +12 -12
experiments/main.sh
CHANGED
@@ -4,10 +4,9 @@ MODEL="cardiffnlp/twitter-roberta-base-2021-124m"
 MODEL="roberta-base"
 # topic, ner, nerd[hw]
 MODEL="cardiffnlp/twitter-roberta-base-2022-154m"
-
-# topic [hawk], ner, nerd[ukri]
+# topic, ner, nerd[ukri]
 MODEL="jhu-clsp/bernice"
-# topic, ner [hawk]
+# topic, ner [hawk], nerd[hw]
 MODEL="vinai/bertweet-base"
 
 
@@ -36,8 +35,10 @@ python model_finetuning_ner.py -m "${MODEL}" -d "ner_random0_seed1"
 python model_finetuning_ner.py -m "${MODEL}" -d "ner_random1_seed1"
 python model_finetuning_ner.py -m "${MODEL}" -d "ner_random2_seed1"
 python model_finetuning_ner.py -m "${MODEL}" -d "ner_random3_seed1"
+
 python model_finetuning_ner.py -m "${MODEL}" -d "ner_random0_seed2"
 python model_finetuning_ner.py -m "${MODEL}" -d "ner_random1_seed2"
+
 python model_finetuning_ner.py -m "${MODEL}" -d "ner_random2_seed2"
 python model_finetuning_ner.py -m "${MODEL}" -d "ner_random3_seed2"
 
@@ -49,14 +50,9 @@ python model_finetuning_topic.py -m "${MODEL}" -d "topic_random2_seed0"
 python model_finetuning_topic.py -m "${MODEL}" -d "topic_random3_seed0"
 python model_finetuning_topic.py -m "${MODEL}" -d "topic_random0_seed1"
 python model_finetuning_topic.py -m "${MODEL}" -d "topic_random1_seed1"
-[ulri]
 python model_finetuning_topic.py -m "${MODEL}" -d "topic_random2_seed1"
 python model_finetuning_topic.py -m "${MODEL}" -d "topic_random3_seed1"
 python model_finetuning_topic.py -m "${MODEL}" -d "topic_random0_seed2"
-[stone]
 python model_finetuning_topic.py -m "${MODEL}" -d "topic_random1_seed2"
-rm -rf ckpt
 python model_finetuning_topic.py -m "${MODEL}" -d "topic_random2_seed2"
-rm -rf ckpt
 python model_finetuning_topic.py -m "${MODEL}" -d "topic_random3_seed2"
-rm -rf ckpt
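The script above spells out one fine-tuning run per line over every combination of task, random split, and seed. As a minimal sketch (not part of the commit), assuming the "{task}_random{split}_seed{seed}" dataset-naming convention holds across the whole grid, the same sweep could be generated programmatically; the loop bounds (4 splits, 3 seeds) are read off the commands above and the driver itself is our illustration:

# Hypothetical driver for the sweep that main.sh enumerates by hand.
import subprocess

MODEL = "vinai/bertweet-base"  # any of the MODEL values in the script
for task in ("ner", "topic"):
    for seed in range(3):
        for split in range(4):
            dataset = f"{task}_random{split}_seed{seed}"
            subprocess.run(
                ["python", f"model_finetuning_{task}.py",
                 "-m", MODEL, "-d", dataset],
                check=True,
            )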
experiments/model_finetuning_ner.py
CHANGED
@@ -110,18 +110,18 @@ def main(
                        max_length=128)
     all_labels = examples["gold_label_sequence"]
     new_labels = []
-    if tokenizer.is_fast:
-        for ind, labels in enumerate(all_labels):
-            word_ids = tokenized_inputs.word_ids(ind)
-            new_labels.append(align_labels_with_tokens(labels, word_ids))
-    else:
-        for token, label in zip(tokens, all_labels):
-            tmp_labels = [-100]
-            for to, la in zip(token, label):
-                to_tokenized = tokenizer.tokenize(to)
-                tmp_labels += [la] * len(to_tokenized)
-            tmp_labels = tmp_labels + [-100] * (128 - len(tmp_labels))
-            new_labels.append(tmp_labels)
+    # if tokenizer.is_fast:
+    #     for ind, labels in enumerate(all_labels):
+    #         word_ids = tokenized_inputs.word_ids(ind)
+    #         new_labels.append(align_labels_with_tokens(labels, word_ids))
+    # else:
+    for token, label in zip(tokens, all_labels):
+        tmp_labels = [-100]
+        for to, la in zip(token, label):
+            to_tokenized = tokenizer.tokenize(to)
+            tmp_labels += [la] * len(to_tokenized)
+        tmp_labels = tmp_labels + [-100] * (128 - len(tmp_labels))
+        new_labels.append(tmp_labels)
     tokenized_inputs["labels"] = new_labels
     return tokenized_inputs
 
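The new code path drops the fast-tokenizer word_ids() alignment and aligns labels manually: position 0 (the leading special token) gets the ignore index -100, each word's label is repeated once per subword piece returned by tokenizer.tokenize, and the sequence is padded with -100 up to the max_length of 128 (here `tokens` is presumably the pre-split word sequences from `examples`). Below is a self-contained sketch of that scheme; the helper name and the toy inputs are ours, and the sketch also truncates sequences longer than max_length, an edge case the diff's padding arithmetic leaves unhandled:

# Sketch of the manual subword/label alignment used in the new code path.
from transformers import AutoTokenizer

MAX_LENGTH = 128

def align_word_labels(tokenizer, words, word_labels, max_length=MAX_LENGTH):
    labels = [-100]  # leading special token (<s> / [CLS]) is ignored in the loss
    for word, label in zip(words, word_labels):
        pieces = tokenizer.tokenize(word)
        labels += [label] * len(pieces)  # one label copy per subword piece
    labels += [-100] * (max_length - len(labels))  # pad the tail with the ignore index
    return labels[:max_length]  # truncation guard; the diff assumes no overflow

tokenizer = AutoTokenizer.from_pretrained("roberta-base", use_fast=False)
words = ["Cardiff", "University", "is", "in", "Wales"]
word_labels = [3, 4, 0, 0, 5]  # toy label ids
print(align_word_labels(tokenizer, words, word_labels)[:10])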