asahi417 committed
Commit ff2b314 · 1 Parent(s): 80c77f9
experiments/model_finetuning_topic.py ADDED
@@ -0,0 +1,194 @@
+ import argparse
+ import json
+ import logging
+ import math
+ import os
+ from os.path import join as pj
+ from shutil import copyfile
+ from glob import glob
+
+ import numpy as np
+ from datasets import load_dataset, load_metric
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
+ from huggingface_hub import Repository
+
+ logging.basicConfig(format="%(asctime)s %(levelname)-8s %(message)s", level=logging.INFO, datefmt="%Y-%m-%d %H:%M:%S")
+
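+ # the 19 topic labels of the classification task, mapped to class ids; the task is multi-label, so a tweet can carry several of them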
+ LABEL2ID = {
+     "arts_&_culture": 0,
+     "business_&_entrepreneurs": 1,
+     "celebrity_&_pop_culture": 2,
+     "diaries_&_daily_life": 3,
+     "family": 4,
+     "fashion_&_style": 5,
+     "film_tv_&_video": 6,
+     "fitness_&_health": 7,
+     "food_&_dining": 8,
+     "gaming": 9,
+     "learning_&_educational": 10,
+     "music": 11,
+     "news_&_social_concern": 12,
+     "other_hobbies": 13,
+     "relationships": 14,
+     "science_&_technology": 15,
+     "sports": 16,
+     "travel_&_adventure": 17,
+     "youth_&_student_life": 18
+ }
+ ID2LABEL = {v: k for k, v in LABEL2ID.items()}
+ EVAL_STEP = 500
+ RANDOM_SEED = 42
+ N_TRIALS = 10
+
+
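+ # logistic function: maps a raw logit to a probability in (0, 1) for per-label thresholding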
+ def sigmoid(x):
+     return 1 / (1 + math.exp(-x))
+
+
+ def main(
+         dataset: str = "tweettemposhift/tweet_temporal_shift",
+         dataset_type: str = "topic_temporal",
+         model: str = "roberta-base",
+         skip_train: bool = False,
+         skip_test: bool = False,
+         skip_upload: bool = False):
+
+     model_name = model  # keep the checkpoint name as a string; `model` is rebound to model instances below
+     tokenizer = AutoTokenizer.from_pretrained(model_name)
+     model = AutoModelForSequenceClassification.from_pretrained(
+         model_name, id2label=ID2LABEL, label2id=LABEL2ID, num_labels=len(LABEL2ID), problem_type="multi_label_classification"
+     )
+     dataset = load_dataset(dataset, dataset_type)
+     tokenized_datasets = dataset.map(
+         lambda x: tokenizer(x["text"], padding="max_length", truncation=True, max_length=256), batched=True
+     )
+     metric_accuracy = load_metric("accuracy", "multilabel")
+     metric_f1 = load_metric("f1", "multilabel")
+
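+     # micro-averaged F1 on the validation split is the objective maximized during the hyperparameter search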
+     def compute_metric_search(eval_pred):
+         logits, labels = eval_pred
+         predictions = np.array([[int(sigmoid(j) > 0.5) for j in i] for i in logits])
+         return metric_f1.compute(predictions=predictions, references=labels, average="micro")
+
+     def compute_metric_all(eval_pred):
+         logits, labels = eval_pred
+         predictions = np.array([[int(sigmoid(j) > 0.5) for j in i] for i in logits])
+         return {
+             "f1": metric_f1.compute(predictions=predictions, references=labels, average="micro")["f1"],
+             "f1_macro": metric_f1.compute(predictions=predictions, references=labels, average="macro")["f1"],
+             "accuracy": metric_accuracy.compute(predictions=predictions, references=labels)["accuracy"]
+         }
+
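+     # artifacts go under ckpt/<alias>; with the default arguments the alias is "topic_topic_temporal_roberta-base"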
+     model_alias = f"topic_{dataset_type}_{os.path.basename(model_name)}"
+     output_dir = f"ckpt/{model_alias}"
+     best_model_path = pj(output_dir, "best_model")
+
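+     # Optuna hyperparameter search over learning rate and batch size, followed by a final run with the best configuration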
+     if not skip_train:
+         logging.info("training model")
+         trainer = Trainer(
+             model=model,
+             args=TrainingArguments(
+                 output_dir=output_dir,
+                 evaluation_strategy="steps",
+                 eval_steps=EVAL_STEP,
+                 seed=RANDOM_SEED
+             ),
+             train_dataset=tokenized_datasets["train"],
+             eval_dataset=tokenized_datasets["validation"],
+             compute_metrics=compute_metric_search,
+             model_init=lambda x: AutoModelForSequenceClassification.from_pretrained(
+                 model_name,
+                 return_dict=True,
+                 num_labels=len(LABEL2ID),
+                 problem_type="multi_label_classification",
+                 id2label=ID2LABEL,
+                 label2id=LABEL2ID
+             )
+         )
+
+         best_run = trainer.hyperparameter_search(
+             hp_space=lambda trial: {
+                 "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
+                 "per_device_train_batch_size": trial.suggest_categorical(
+                     "per_device_train_batch_size", [8, 16, 32]
+                 ),
+             },
+             direction="maximize",
+             backend="optuna",
+             n_trials=N_TRIALS
+         )
+         for n, v in best_run.hyperparameters.items():
+             setattr(trainer.args, n, v)
+         trainer.train()
+         trainer.save_model(best_model_path)
+
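+     # evaluate on the standard test split and, for temporal settings, on the four additional splits (test_1-test_4); results are cached in summary.json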
+     if not skip_test:
+         logging.info("testing model")
+         test_split = ["test"]
+         if dataset_type.endswith("temporal"):
+             test_split += ["test_1", "test_2", "test_3", "test_4"]
+         summary_file = pj(output_dir, "summary.json")
+         if os.path.exists(summary_file):
+             with open(summary_file) as f:
+                 metric = json.load(f)
+         else:
+             metric = {}
+         for single_test in test_split:
+             model = AutoModelForSequenceClassification.from_pretrained(
+                 best_model_path,
+                 num_labels=len(LABEL2ID),
+                 problem_type="multi_label_classification",
+                 id2label=ID2LABEL,
+                 label2id=LABEL2ID
+             )
+             trainer = Trainer(
+                 model=model,
+                 args=TrainingArguments(
+                     output_dir=output_dir,
+                     evaluation_strategy="no",
+                     seed=RANDOM_SEED
+                 ),
+                 train_dataset=tokenized_datasets["train"],
+                 eval_dataset=tokenized_datasets[single_test],
+                 compute_metrics=compute_metric_all
+             )
+             metric.update({f"{single_test}/{k}": v for k, v in trainer.evaluate().items()})
+         logging.info(json.dumps(metric, indent=4))
+         with open(summary_file, "w") as f:
+             json.dump(metric, f)
+
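+     # push the fine-tuned model and tokenizer to the tweettemposhift organization on the Hugging Face Hub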
+     if not skip_upload:
+         logging.info("uploading to huggingface")
+         model_organization = "tweettemposhift"
+         model = AutoModelForSequenceClassification.from_pretrained(
+             best_model_path,
+             num_labels=len(LABEL2ID),
+             problem_type="multi_label_classification",
+             id2label=ID2LABEL,
+             label2id=LABEL2ID
+         )
+         tokenizer = AutoTokenizer.from_pretrained(best_model_path)
+         model.push_to_hub(f"{model_organization}/{model_alias}", use_auth_token=True)
+         tokenizer.push_to_hub(f"{model_organization}/{model_alias}", use_auth_token=True)
+         repo = Repository(model_alias, f"{model_organization}/{model_alias}")
+         for i in glob(f"{best_model_path}/*"):
+             if not os.path.exists(f"{model_alias}/{os.path.basename(i)}"):
+                 copyfile(i, f"{model_alias}/{os.path.basename(i)}")
+         repo.push_to_hub()
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser(description="Fine-tuning language model.")
+     parser.add_argument("-m", "--model", help="transformer LM", default="roberta-base", type=str)
+     parser.add_argument("-d", "--dataset-type", help="dataset configuration (e.g. topic_temporal)", default="topic_temporal", type=str)
+     parser.add_argument("--skip-train", action="store_true")
+     parser.add_argument("--skip-test", action="store_true")
+     parser.add_argument("--skip-upload", action="store_true")
+     opt = parser.parse_args()
+     main(
+         dataset_type=opt.dataset_type,
+         model=opt.model,
+         skip_train=opt.skip_train,
+         skip_test=opt.skip_test,
+         skip_upload=opt.skip_upload,
+     )
+
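For reference, a minimal inference sketch that mirrors the sigmoid-and-0.5-threshold convention used in compute_metric_search above. The checkpoint name is illustrative only: it follows the model_alias pattern for the default arguments and assumes the upload step has been run.

import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# illustrative repo id, derived from model_alias with the default arguments
checkpoint = "tweettemposhift/topic_topic_temporal_roberta-base"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSequenceClassification.from_pretrained(checkpoint)

inputs = tokenizer("Tickets for the reunion tour sold out in minutes!", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits[0]
# a label is predicted when its sigmoid probability exceeds 0.5, matching the training metrics
predicted = [model.config.id2label[i] for i, p in enumerate(torch.sigmoid(logits).tolist()) if p > 0.5]
print(predicted)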
experiments/requirements.txt ADDED
@@ -0,0 +1,4 @@
+ numpy
+ datasets
+ transformers
+ huggingface_hub
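Note: these four packages cover only the script's direct imports. A sketch of the additional runtime dependencies the script appears to assume (not listed in the file above):

torch           # required by the transformers Trainer
optuna          # backend for trainer.hyperparameter_search
scikit-learn    # backs the "f1" and "accuracy" metrics loaded via load_metric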