init

Changed files:
- experiments/main.sh (+34 -53)
- experiments/model_finetuning_ner.py (+13 -17)

experiments/main.sh
CHANGED
@@ -1,56 +1,40 @@
-# topic
-MODEL="cardiffnlp/twitter-roberta-base-2022-154m"
-# topic [hawk]
+# topic, ner
 MODEL="cardiffnlp/twitter-roberta-base-2021-124m"
-# topic
+# topic, ner
+MODEL="roberta-base"
+# topic, ner [ukri]
+MODEL="cardiffnlp/twitter-roberta-base-2022-154m"
+# topic [hawk], ner [ukri]
 MODEL="jhu-clsp/bernice"
-# topic [
+# topic, ner [as it's not fasttokenizer, a bit tricky...]
 MODEL="vinai/bertweet-base"
-# topic [hawk], ner [stone]
-MODEL="roberta-base"
 
 
 # NER
-python model_finetuning_ner.py -m "${MODEL}" -d "ner_temporal"
-rm -rf ckpt
-python model_finetuning_ner.py -m "${MODEL}" -d "ner_random0_seed0"
-rm -rf ckpt
-python model_finetuning_ner.py -m "${MODEL}" -d "ner_random1_seed0"
-rm -rf ckpt
-python model_finetuning_ner.py -m "${MODEL}" -d "ner_random2_seed0"
-rm -rf ckpt
-python model_finetuning_ner.py -m "${MODEL}" -d "ner_random3_seed0"
-rm -rf ckpt
-python model_finetuning_ner.py -m "${MODEL}" -d "ner_random0_seed1"
-rm -rf ckpt
-python model_finetuning_ner.py -m "${MODEL}" -d "ner_random1_seed1"
-rm -rf ckpt
-python model_finetuning_ner.py -m "${MODEL}" -d "ner_random2_seed1"
-rm -rf ckpt
-python model_finetuning_ner.py -m "${MODEL}" -d "ner_random3_seed1"
-rm -rf ckpt
-python model_finetuning_ner.py -m "${MODEL}" -d "ner_random0_seed2"
-rm -rf ckpt
-python model_finetuning_ner.py -m "${MODEL}" -d "ner_random1_seed2"
-rm -rf ckpt
-python model_finetuning_ner.py -m "${MODEL}" -d "ner_random2_seed2"
-rm -rf ckpt
-python model_finetuning_ner.py -m "${MODEL}" -d "ner_random3_seed2"
-rm -rf ckpt
+python model_finetuning_ner.py -m "${MODEL}" -d "ner_temporal" --skip-train --skip-test
+python model_finetuning_ner.py -m "${MODEL}" -d "ner_random0_seed0" --skip-train --skip-test
+python model_finetuning_ner.py -m "${MODEL}" -d "ner_random1_seed0" --skip-train --skip-test
+python model_finetuning_ner.py -m "${MODEL}" -d "ner_random2_seed0" --skip-train --skip-test
+python model_finetuning_ner.py -m "${MODEL}" -d "ner_random3_seed0" --skip-train --skip-test
+python model_finetuning_ner.py -m "${MODEL}" -d "ner_random0_seed1" --skip-train --skip-test
+python model_finetuning_ner.py -m "${MODEL}" -d "ner_random1_seed1" --skip-train --skip-test
+python model_finetuning_ner.py -m "${MODEL}" -d "ner_random2_seed1" --skip-train --skip-test
+python model_finetuning_ner.py -m "${MODEL}" -d "ner_random3_seed1" --skip-train --skip-test
+python model_finetuning_ner.py -m "${MODEL}" -d "ner_random0_seed2" --skip-train --skip-test
+python model_finetuning_ner.py -m "${MODEL}" -d "ner_random1_seed2" --skip-train --skip-test
+python model_finetuning_ner.py -m "${MODEL}" -d "ner_random2_seed2" --skip-train --skip-test
+python model_finetuning_ner.py -m "${MODEL}" -d "ner_random3_seed2" --skip-train --skip-test
 
 # NERD
 python model_finetuning_nerd.py -m "${MODEL}" -d "nerd_temporal"
-
-python model_finetuning_nerd.py -m "${MODEL}" -d "nerd_random0_seed0" --skip-train --skip-test
+python model_finetuning_nerd.py -m "${MODEL}" -d "nerd_random0_seed0"
 python model_finetuning_nerd.py -m "${MODEL}" -d "nerd_random1_seed0"
 python model_finetuning_nerd.py -m "${MODEL}" -d "nerd_random2_seed0"
 python model_finetuning_nerd.py -m "${MODEL}" -d "nerd_random3_seed0"
-
 python model_finetuning_nerd.py -m "${MODEL}" -d "nerd_random0_seed1"
 python model_finetuning_nerd.py -m "${MODEL}" -d "nerd_random1_seed1"
 python model_finetuning_nerd.py -m "${MODEL}" -d "nerd_random2_seed1"
 python model_finetuning_nerd.py -m "${MODEL}" -d "nerd_random3_seed1"
-
 python model_finetuning_nerd.py -m "${MODEL}" -d "nerd_random0_seed2"
 python model_finetuning_nerd.py -m "${MODEL}" -d "nerd_random1_seed2"
 python model_finetuning_nerd.py -m "${MODEL}" -d "nerd_random2_seed2"
@@ -58,19 +42,16 @@ python model_finetuning_nerd.py -m "${MODEL}" -d "nerd_random3_seed2"
 
 
 # TOPIC
-python model_finetuning_topic.py -m "${MODEL}" -d "topic_temporal"
-
-python model_finetuning_topic.py -m "${MODEL}" -d "topic_random0_seed0"
-python model_finetuning_topic.py -m "${MODEL}" -d "topic_random1_seed0"
-python model_finetuning_topic.py -m "${MODEL}" -d "topic_random2_seed0"
-python model_finetuning_topic.py -m "${MODEL}" -d "topic_random3_seed0"
-
-python model_finetuning_topic.py -m "${MODEL}" -d "topic_random0_seed1"
-python model_finetuning_topic.py -m "${MODEL}" -d "topic_random1_seed1"
-python model_finetuning_topic.py -m "${MODEL}" -d "topic_random2_seed1"
-python model_finetuning_topic.py -m "${MODEL}" -d "topic_random3_seed1"
-
-python model_finetuning_topic.py -m "${MODEL}" -d "topic_random0_seed2"
-python model_finetuning_topic.py -m "${MODEL}" -d "topic_random1_seed2"
-python model_finetuning_topic.py -m "${MODEL}" -d "topic_random2_seed2"
-python model_finetuning_topic.py -m "${MODEL}" -d "topic_random3_seed2"
+python model_finetuning_topic.py -m "${MODEL}" -d "topic_temporal" --skip-train --skip-test
+python model_finetuning_topic.py -m "${MODEL}" -d "topic_random0_seed0" --skip-train --skip-test
+python model_finetuning_topic.py -m "${MODEL}" -d "topic_random1_seed0" --skip-train --skip-test
+python model_finetuning_topic.py -m "${MODEL}" -d "topic_random2_seed0" --skip-train --skip-test
+python model_finetuning_topic.py -m "${MODEL}" -d "topic_random3_seed0" --skip-train --skip-test
+python model_finetuning_topic.py -m "${MODEL}" -d "topic_random0_seed1" --skip-train --skip-test
+python model_finetuning_topic.py -m "${MODEL}" -d "topic_random1_seed1" --skip-train --skip-test
+python model_finetuning_topic.py -m "${MODEL}" -d "topic_random2_seed1" --skip-train --skip-test
+python model_finetuning_topic.py -m "${MODEL}" -d "topic_random3_seed1" --skip-train --skip-test
+python model_finetuning_topic.py -m "${MODEL}" -d "topic_random0_seed2" --skip-train --skip-test
+python model_finetuning_topic.py -m "${MODEL}" -d "topic_random1_seed2" --skip-train --skip-test
+python model_finetuning_topic.py -m "${MODEL}" -d "topic_random2_seed2" --skip-train --skip-test
+python model_finetuning_topic.py -m "${MODEL}" -d "topic_random3_seed2" --skip-train --skip-test
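The per-dataset lines above follow a regular pattern (one temporal split plus a 4x3 grid of random splits and seeds), so the same grid can be written as a loop. A minimal sketch of the loop form, assuming the same script and flag names the commit uses; the commit itself keeps the explicit list:

#!/bin/bash
MODEL="cardiffnlp/twitter-roberta-base-2021-124m"

# One temporal split, then every ner_random{split}_seed{seed} combination.
python model_finetuning_ner.py -m "${MODEL}" -d "ner_temporal" --skip-train --skip-test
for seed in 0 1 2; do
  for split in 0 1 2 3; do
    python model_finetuning_ner.py -m "${MODEL}" -d "ner_random${split}_seed${seed}" --skip-train --skip-test
  done
done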
experiments/model_finetuning_ner.py
CHANGED
@@ -17,7 +17,7 @@ from glob import glob
 import numpy as np
 import evaluate
 from datasets import load_dataset
-from transformers import
+from transformers import AutoTokenizer, AutoModelForTokenClassification, TrainingArguments, Trainer
 from huggingface_hub import Repository
 
 logging.basicConfig(format="%(asctime)s %(levelname)-8s %(message)s", level=logging.INFO, datefmt="%Y-%m-%d %H:%M:%S")
@@ -102,7 +102,6 @@ def main(
 
     def tokenize_and_align_labels(examples):
        tokens = [[preprocess(model, w) for w in t] for t in examples["text_tokenized"]]
-        # if tokenizer.is_fast:
        tokenized_inputs = tokenizer(
            tokens,
            truncation=True,
@@ -111,23 +110,20 @@ def main(
            max_length=128)
        all_labels = examples["gold_label_sequence"]
        new_labels = []
-        for ind, labels in enumerate(all_labels):
-            word_ids = tokenized_inputs.word_ids(ind)
-            new_labels.append(align_labels_with_tokens(labels, word_ids))
+        if tokenizer.is_fast:
+            for ind, labels in enumerate(all_labels):
+                word_ids = tokenized_inputs.word_ids(ind)
+                new_labels.append(align_labels_with_tokens(labels, word_ids))
+        else:
+            for token, label in zip(tokens, all_labels):
+                tmp_labels = [-100]
+                for to, la in zip(token, label):  # pair each pre-tokenized word with its gold label
+                    to_tokenized = tokenizer.tokenize(to)
+                    tmp_labels += [la] * len(to_tokenized)
+                tmp_labels = tmp_labels + [-100] * (128 - len(tmp_labels))
+                new_labels.append(tmp_labels)
        tokenized_inputs["labels"] = new_labels
        return tokenized_inputs
-        # else:
-        #     tokenized_inputs = tokenizer(
-        #         tokens)
-        #     all_labels = examples["gold_label_sequence"]
-        #     new_labels = []
-        #     for ind, labels in enumerate(all_labels):
-        #         word_ids = tokenized_inputs.word_ids(ind)
-        #         new_labels.append(align_labels_with_tokens(labels, word_ids))
-        #     tokenized_inputs["labels"] = new_labels
-        #     return tokenized_inputs
-
-
 
     dataset = load_dataset(dataset, dataset_type)
     tokenized_datasets = dataset.map(lambda x: tokenize_and_align_labels(x), batched=True)
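The new else branch exists because slow tokenizers do not provide BatchEncoding.word_ids(), so the word-to-subword label alignment has to be rebuilt by re-tokenizing one word at a time; this is the vinai/bertweet-base case flagged in main.sh. A minimal self-contained sketch of that fallback, with made-up words and label ids (only the model name and max_length=128 come from the script above):

from transformers import AutoTokenizer

# Force the slow tokenizer explicitly; bertweet-base is the slow-tokenizer
# case noted in main.sh.
tokenizer = AutoTokenizer.from_pretrained("vinai/bertweet-base", use_fast=False)

words = ["Cardiff", "welcomes", "everyone"]  # one pre-tokenized example (illustrative)
labels = [3, 0, 0]                           # gold label id per word (illustrative)

aligned = [-100]  # -100 masks the leading special token in the loss
for word, label in zip(words, labels):
    subwords = tokenizer.tokenize(word)   # a word may split into several subwords
    aligned += [label] * len(subwords)    # repeat the word's label for each piece
aligned += [-100] * (128 - len(aligned))  # pad the label row out to max_length=128

Like the committed code, this sketch repeats the full label on every subword rather than converting B- tags to I- on continuations, and it assumes the label row never exceeds 128 entries; both are simplifications worth keeping in mind.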