init
experiments/model_finetuning_nerd.py
CHANGED
@@ -1,20 +1,23 @@
-"""
-
-
-python
-
-python
-python
-
-python
-
-python
-python
-
-python
-
-python
-python
+"""Experiment.
+
+```
+python model_finetuning_nerd.py -m "roberta-base" -d "nerd_temporal"
+
+python model_finetuning_nerd.py -m "roberta-base" -d "nerd_random0_seed0"
+python model_finetuning_nerd.py -m "roberta-base" -d "nerd_random1_seed0"
+python model_finetuning_nerd.py -m "roberta-base" -d "nerd_random2_seed0"
+python model_finetuning_nerd.py -m "roberta-base" -d "nerd_random3_seed0"
+
+python model_finetuning_nerd.py -m "roberta-base" -d "nerd_random0_seed1"
+python model_finetuning_nerd.py -m "roberta-base" -d "nerd_random1_seed1"
+python model_finetuning_nerd.py -m "roberta-base" -d "nerd_random2_seed1"
+python model_finetuning_nerd.py -m "roberta-base" -d "nerd_random3_seed1"
+
+python model_finetuning_nerd.py -m "roberta-base" -d "nerd_random0_seed2"
+python model_finetuning_nerd.py -m "roberta-base" -d "nerd_random1_seed2"
+python model_finetuning_nerd.py -m "roberta-base" -d "nerd_random2_seed2"
+python model_finetuning_nerd.py -m "roberta-base" -d "nerd_random3_seed2"
+```
 """
 import argparse
 import json
@@ -26,34 +29,12 @@ from shutil import copyfile
 from glob import glob

 import numpy as np
-
+import evaluate
+from datasets import load_dataset
 from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
 from huggingface_hub import Repository

 logging.basicConfig(format="%(asctime)s %(levelname)-8s %(message)s", level=logging.INFO, datefmt="%Y-%m-%d %H:%M:%S")
-
-LABEL2ID = {
-    "arts_&_culture": 0,
-    "business_&_entrepreneurs": 1,
-    "celebrity_&_pop_culture": 2,
-    "diaries_&_daily_life": 3,
-    "family": 4,
-    "fashion_&_style": 5,
-    "film_tv_&_video": 6,
-    "fitness_&_health": 7,
-    "food_&_dining": 8,
-    "gaming": 9,
-    "learning_&_educational": 10,
-    "music": 11,
-    "news_&_social_concern": 12,
-    "other_hobbies": 13,
-    "relationships": 14,
-    "science_&_technology": 15,
-    "sports": 16,
-    "travel_&_adventure": 17,
-    "youth_&_student_life": 18
-}
-ID2LABEL = {v: k for k, v in LABEL2ID.items()}
 EVAL_STEP = 500
 RANDOM_SEED = 42
 N_TRIALS = 10
@@ -65,45 +46,49 @@ def sigmoid(x):

 def main(
         dataset: str = "tweettemposhift/tweet_temporal_shift",
-        dataset_type: str = "
+        dataset_type: str = "nerd_temporal",
         model: str = "roberta-base",
         skip_train: bool = False,
         skip_test: bool = False,
         skip_upload: bool = False):

+    model_alias = f"nerd-{dataset_type}-{os.path.basename(model)}"
+    output_dir = f"ckpt/{model_alias}"
+    best_model_path = pj(output_dir, "best_model")
+
     tokenizer = AutoTokenizer.from_pretrained(model)
-    model = AutoModelForSequenceClassification.from_pretrained(
-        model, id2label=ID2LABEL, label2id=LABEL2ID, num_labels=len(LABEL2ID), problem_type="multi_label_classification"
-    )
     dataset = load_dataset(dataset, dataset_type)
     tokenized_datasets = dataset.map(
-        lambda x: tokenizer(
+        lambda x: tokenizer(
+            [f"[target] {a}, [definition] {b}, [text] {c}" for a, b, c in zip(x["target"], x["definition"], x["text"])],
+            padding="max_length",
+            truncation=True,
+            max_length=256),
+        batched=True
     )
-
-
+    tokenized_datasets = tokenized_datasets.rename_column("gold_label_binary", "label")
+    metric_accuracy = evaluate.load("accuracy")
+    metric_f1 = evaluate.load("f1")

     def compute_metric_search(eval_pred):
         logits, labels = eval_pred
-        predictions = np.
-        return
+        predictions = np.argmax(logits, axis=-1)
+        return metric_accuracy.compute(predictions=predictions, references=labels, average="micro")

     def compute_metric_all(eval_pred):
         logits, labels = eval_pred
-        predictions = np.
+        predictions = np.argmax(logits, axis=-1)
         return {
             "f1": metric_f1.compute(predictions=predictions, references=labels, average="micro")["f1"],
             "f1_macro": metric_f1.compute(predictions=predictions, references=labels, average="macro")["f1"],
             "accuracy": metric_accuracy.compute(predictions=predictions, references=labels)["accuracy"]
         }

-    model_alias = f"topic-{dataset_type}-{os.path.basename(model)}"
-    output_dir = f"ckpt/{model_alias}"
-    best_model_path = pj(output_dir, "best_model")

     if not skip_train:
         logging.info("training model")
         trainer = Trainer(
-            model=model,
+            model=AutoModelForSequenceClassification.from_pretrained(model, num_labels=2),
             args=TrainingArguments(
                 output_dir=output_dir,
                 evaluation_strategy="steps",
@@ -114,11 +99,7 @@ def main(
             eval_dataset=tokenized_datasets["validation"],
             compute_metrics=compute_metric_search,
             model_init=lambda x: AutoModelForSequenceClassification.from_pretrained(
-                model,
-                return_dict=True,
-                num_labels=len(LABEL2ID),
-                id2label=ID2LABEL,
-                label2id=LABEL2ID
+                model, return_dict=True, num_labels=2,
             )
         )

@@ -129,10 +110,9 @@ def main(
                     "per_device_train_batch_size", [8, 16, 32]
                 ),
             },
-            local_dir="./hyperparameter_search_cache",
            direction="maximize",
            backend="optuna",
-
+            n_trials=N_TRIALS
         )
         for n, v in best_run.hyperparameters.items():
             setattr(trainer.args, n, v)
@@ -151,15 +131,8 @@ def main(
     else:
         metric = {}
         for single_test in test_split:
-            model = AutoModelForSequenceClassification.from_pretrained(
-                best_model_path,
-                num_labels=len(LABEL2ID),
-                problem_type="multi_label_classification",
-                id2label=ID2LABEL,
-                label2id=LABEL2ID
-            )
             trainer = Trainer(
-                model=
+                model=AutoModelForSequenceClassification.from_pretrained(best_model_path, num_labels=2),
                 args=TrainingArguments(
                     output_dir=output_dir,
                     evaluation_strategy="no",
@@ -177,15 +150,9 @@ def main(
     if not skip_upload:
         logging.info("uploading to huggingface")
         model_organization = "tweettemposhift"
-
-            best_model_path,
-            num_labels=len(LABEL2ID),
-            problem_type="multi_label_classification",
-            id2label=ID2LABEL,
-            label2id=LABEL2ID
-        )
+        model_instance = AutoModelForSequenceClassification.from_pretrained(best_model_path, num_labels=2)
         tokenizer = AutoTokenizer.from_pretrained(best_model_path)
-
+        model_instance.push_to_hub(f"{model_organization}/{model_alias}", use_auth_token=True)
         tokenizer.push_to_hub(f"{model_organization}/{model_alias}", use_auth_token=True)
         repo = Repository(model_alias, f"{model_organization}/{model_alias}")
         for i in glob(f"{best_model_path}/*"):
@@ -197,7 +164,7 @@ def main(
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(description="Fine-tuning language model.")
     parser.add_argument("-m", "--model", help="transformer LM", default="roberta-base", type=str)
-    parser.add_argument("-d", "--dataset-type", help='dataset type', default="
+    parser.add_argument("-d", "--dataset-type", help='dataset type', default="nerd_temporal", type=str)
     parser.add_argument("--skip-train", action="store_true")
     parser.add_argument("--skip-test", action="store_true")
     parser.add_argument("--skip-upload", action="store_true")
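For reference, a minimal sketch of what the new preprocessing in model_finetuning_nerd.py does to one record. The [target]/[definition]/[text] template, the 256-token limit, and the gold_label_binary field come from the diff above; the example values below are made up and the snippet is illustrative, not part of the commit.

```
# Illustrative only: one hypothetical record pushed through the same template
# and tokenizer settings used in the map() call above.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("roberta-base")

example = {
    "target": "apple",                       # entity surface form (made-up value)
    "definition": "the technology company",  # candidate sense definition (made-up value)
    "text": "Just picked up the new apple phone.",
    "gold_label_binary": 1,                  # renamed to "label" by the script
}

encoded = tokenizer(
    f"[target] {example['target']}, [definition] {example['definition']}, [text] {example['text']}",
    padding="max_length",
    truncation=True,
    max_length=256,
)
print(len(encoded["input_ids"]))  # 256: padded/truncated to the fixed length
```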
experiments/model_finetuning_topic.py
CHANGED
@@ -1,4 +1,6 @@
-"""
+"""Experiment.
+
+```
 python model_finetuning_topic.py -m "roberta-base" -d "topic_temporal"

 python model_finetuning_topic.py -m "roberta-base" -d "topic_random0_seed0"
@@ -15,6 +17,7 @@ python model_finetuning_topic.py -m "roberta-base" -d "topic_random0_seed2"
 python model_finetuning_topic.py -m "roberta-base" -d "topic_random1_seed2"
 python model_finetuning_topic.py -m "roberta-base" -d "topic_random2_seed2"
 python model_finetuning_topic.py -m "roberta-base" -d "topic_random3_seed2"
+```
 """
 import argparse
 import json
@@ -88,12 +91,12 @@ def main(

     def compute_metric_search(eval_pred):
         logits, labels = eval_pred
-        predictions = np.array([[int(sigmoid(j) > 0.5) for j in
+        predictions = np.array([[int(sigmoid(j) > 0.5) for j in lo] for lo in logits])
         return metric_f1.compute(predictions=predictions, references=labels, average="micro")

     def compute_metric_all(eval_pred):
         logits, labels = eval_pred
-        predictions = np.array([[int(sigmoid(j) > 0.5) for j in
+        predictions = np.array([[int(sigmoid(j) > 0.5) for j in lo] for lo in logits])
         return {
             "f1": metric_f1.compute(predictions=predictions, references=labels, average="micro")["f1"],
             "f1_macro": metric_f1.compute(predictions=predictions, references=labels, average="macro")["f1"],
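For reference, a minimal sketch of the repaired multi-label thresholding in compute_metric_search / compute_metric_all, run on dummy logits rather than real model outputs (the sigmoid helper mirrors the one defined in the script):

```
# Illustrative only: per-label sigmoid scores thresholded at 0.5, as in the
# fixed comprehension above. The logits here are dummy values.
import numpy as np

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

logits = np.array([[2.3, -1.1, 0.2],    # one row of per-label scores per example
                   [-0.4, 1.7, -2.0]])
predictions = np.array([[int(sigmoid(j) > 0.5) for j in lo] for lo in logits])
print(predictions)  # [[1 0 1]
                    #  [0 1 0]]
```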
experiments/requirements.txt
CHANGED
@@ -1,8 +1,8 @@
-torch
-scikit-learn
-numpy
-datasets
-transformers
-huggingface_hub
-optuna
-accelerate
+torch==2.1.0
+scikit-learn==1.3.1
+numpy==1.24.4
+datasets==2.14.5
+transformers==4.34.1
+huggingface_hub==0.17.3
+optuna==3.4.0
+accelerate==0.4.1
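The pinned set above installs the usual way; a typical invocation, assuming the commands are run from the experiments/ directory as in the module docstrings:

```
pip install -r requirements.txt
python model_finetuning_nerd.py -m "roberta-base" -d "nerd_temporal"
```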