asahi417 committed
Commit e54e3de · 1 Parent(s): 0dae77e
experiments/model_finetuning_nerd.py ADDED
@@ -0,0 +1,212 @@
+"""
+python model_finetuning_topic.py -m "roberta-base" -d "topic_temporal"
+
+python model_finetuning_topic.py -m "roberta-base" -d "topic_random0_seed0"
+python model_finetuning_topic.py -m "roberta-base" -d "topic_random1_seed0"
+python model_finetuning_topic.py -m "roberta-base" -d "topic_random2_seed0"
+python model_finetuning_topic.py -m "roberta-base" -d "topic_random3_seed0"
+
+python model_finetuning_topic.py -m "roberta-base" -d "topic_random0_seed1"
+python model_finetuning_topic.py -m "roberta-base" -d "topic_random1_seed1"
+python model_finetuning_topic.py -m "roberta-base" -d "topic_random2_seed1"
+python model_finetuning_topic.py -m "roberta-base" -d "topic_random3_seed1"
+
+python model_finetuning_topic.py -m "roberta-base" -d "topic_random0_seed2"
+python model_finetuning_topic.py -m "roberta-base" -d "topic_random1_seed2"
+python model_finetuning_topic.py -m "roberta-base" -d "topic_random2_seed2"
+python model_finetuning_topic.py -m "roberta-base" -d "topic_random3_seed2"
+"""
+import argparse
+import json
+import logging
+import math
+import os
+from os.path import join as pj
+from shutil import copyfile
+from glob import glob
+
+import numpy as np
+from datasets import load_dataset, load_metric
+from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
+from huggingface_hub import Repository
+
+logging.basicConfig(format="%(asctime)s %(levelname)-8s %(message)s", level=logging.INFO, datefmt="%Y-%m-%d %H:%M:%S")
+
+LABEL2ID = {
+    "arts_&_culture": 0,
+    "business_&_entrepreneurs": 1,
+    "celebrity_&_pop_culture": 2,
+    "diaries_&_daily_life": 3,
+    "family": 4,
+    "fashion_&_style": 5,
+    "film_tv_&_video": 6,
+    "fitness_&_health": 7,
+    "food_&_dining": 8,
+    "gaming": 9,
+    "learning_&_educational": 10,
+    "music": 11,
+    "news_&_social_concern": 12,
+    "other_hobbies": 13,
+    "relationships": 14,
+    "science_&_technology": 15,
+    "sports": 16,
+    "travel_&_adventure": 17,
+    "youth_&_student_life": 18
+}
+ID2LABEL = {v: k for k, v in LABEL2ID.items()}
+EVAL_STEP = 500
+RANDOM_SEED = 42
+N_TRIALS = 10
+
+
+def sigmoid(x):
+    return 1 / (1 + math.exp(-x))
+
+
+def main(
+        dataset: str = "tweettemposhift/tweet_temporal_shift",
+        dataset_type: str = "topic_temporal",
+        model: str = "roberta-base",
+        skip_train: bool = False,
+        skip_test: bool = False,
+        skip_upload: bool = False):
+
+    model_alias = f"topic-{dataset_type}-{os.path.basename(model)}"
+    output_dir = f"ckpt/{model_alias}"
+    best_model_path = pj(output_dir, "best_model")
+
+    tokenizer = AutoTokenizer.from_pretrained(model)
+    model = AutoModelForSequenceClassification.from_pretrained(
+        model, id2label=ID2LABEL, label2id=LABEL2ID, num_labels=len(LABEL2ID), problem_type="multi_label_classification"
+    )
+    dataset = load_dataset(dataset, dataset_type)
+    tokenized_datasets = dataset.map(
+        lambda x: tokenizer(x["text"], padding="max_length", truncation=True, max_length=256), batched=True
+    )
+    metric_accuracy = load_metric("accuracy", "multilabel")
+    metric_f1 = load_metric("f1", "multilabel")
+
+    def compute_metric_search(eval_pred):
+        logits, labels = eval_pred
+        predictions = np.array([[int(sigmoid(j) > 0.5) for j in i] for i in logits])
+        return metric_f1.compute(predictions=predictions, references=labels, average="micro")
+
+    def compute_metric_all(eval_pred):
+        logits, labels = eval_pred
+        predictions = np.array([[int(sigmoid(j) > 0.5) for j in i] for i in logits])
+        return {
+            "f1": metric_f1.compute(predictions=predictions, references=labels, average="micro")["f1"],
+            "f1_macro": metric_f1.compute(predictions=predictions, references=labels, average="macro")["f1"],
+            "accuracy": metric_accuracy.compute(predictions=predictions, references=labels)["accuracy"]
+        }
+
+    if not skip_train:
+        logging.info("training model")
+        trainer = Trainer(
+            model=model,
+            args=TrainingArguments(
+                output_dir=output_dir,
+                evaluation_strategy="steps",
+                eval_steps=EVAL_STEP,
+                seed=RANDOM_SEED
+            ),
+            train_dataset=tokenized_datasets["train"],
+            eval_dataset=tokenized_datasets["validation"],
+            compute_metrics=compute_metric_search,
+            model_init=lambda x: AutoModelForSequenceClassification.from_pretrained(
+                model,
+                return_dict=True,
+                num_labels=len(LABEL2ID),
+                id2label=ID2LABEL,
+                label2id=LABEL2ID
+            )
+        )
+
+        best_run = trainer.hyperparameter_search(
+            hp_space=lambda trial: {
+                "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
+                "per_device_train_batch_size": trial.suggest_categorical(
+                    "per_device_train_batch_size", [8, 16, 32]
+                ),
+            },
+            # local_dir="./hyperparameter_search_cache",  # Ray Tune option, not accepted by the optuna backend
+            direction="maximize",
+            backend="optuna",
+            n_trials=N_TRIALS
+        )
+        for n, v in best_run.hyperparameters.items():
+            setattr(trainer.args, n, v)
+        trainer.train()
+        trainer.save_model(best_model_path)
+
+    if not skip_test:
+        logging.info("testing model")
+        test_split = ["test"]
+        if dataset_type.endswith("temporal"):
+            test_split += ["test_1", "test_2", "test_3", "test_4"]
+        summary_file = pj(output_dir, "summary.json")
+        if os.path.exists(summary_file):
+            with open(summary_file) as f:
+                metric = json.load(f)
+        else:
+            metric = {}
+        for single_test in test_split:
+            model = AutoModelForSequenceClassification.from_pretrained(
+                best_model_path,
+                num_labels=len(LABEL2ID),
+                problem_type="multi_label_classification",
+                id2label=ID2LABEL,
+                label2id=LABEL2ID
+            )
+            trainer = Trainer(
+                model=model,
+                args=TrainingArguments(
+                    output_dir=output_dir,
+                    evaluation_strategy="no",
+                    seed=RANDOM_SEED
+                ),
+                train_dataset=tokenized_datasets["train"],
+                eval_dataset=tokenized_datasets[single_test],
+                compute_metrics=compute_metric_all
+            )
+            metric.update({f"{single_test}/{k}": v for k, v in trainer.evaluate().items()})
+        logging.info(json.dumps(metric, indent=4))
+        with open(summary_file, "w") as f:
+            json.dump(metric, f)
+
+    if not skip_upload:
+        logging.info("uploading to huggingface")
+        model_organization = "tweettemposhift"
+        model = AutoModelForSequenceClassification.from_pretrained(
+            best_model_path,
+            num_labels=len(LABEL2ID),
+            problem_type="multi_label_classification",
+            id2label=ID2LABEL,
+            label2id=LABEL2ID
+        )
+        tokenizer = AutoTokenizer.from_pretrained(best_model_path)
+        model.push_to_hub(f"{model_organization}/{model_alias}", use_auth_token=True)
+        tokenizer.push_to_hub(f"{model_organization}/{model_alias}", use_auth_token=True)
+        repo = Repository(model_alias, f"{model_organization}/{model_alias}")
+        for i in glob(f"{best_model_path}/*"):
+            if not os.path.exists(f"{model_alias}/{os.path.basename(i)}"):
+                copyfile(i, f"{model_alias}/{os.path.basename(i)}")
+        repo.push_to_hub()
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description="Fine-tuning language model.")
+    parser.add_argument("-m", "--model", help="transformer LM", default="roberta-base", type=str)
+    parser.add_argument("-d", "--dataset-type", help="dataset type", default="topic_temporal", type=str)
+    parser.add_argument("--skip-train", action="store_true")
+    parser.add_argument("--skip-test", action="store_true")
+    parser.add_argument("--skip-upload", action="store_true")
+    opt = parser.parse_args()
+    main(
+        dataset_type=opt.dataset_type,
+        model=opt.model,
+        skip_train=opt.skip_train,
+        skip_test=opt.skip_test,
+        skip_upload=opt.skip_upload,
+    )
+
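The metric helpers in the new script decode raw logits into multi-hot label vectors with an element-wise sigmoid and a 0.5 threshold, so each of the 19 topics is predicted independently. A minimal standalone sketch of that step (the logits below are invented for illustration):

import math

import numpy as np


def sigmoid(x):
    return 1 / (1 + math.exp(-x))


# Two examples over three labels; any number of labels per row can be active.
logits = np.array([[2.3, -1.7, 0.4], [-3.0, 0.9, -0.2]])

# Same thresholding as compute_metric_search / compute_metric_all.
predictions = np.array([[int(sigmoid(j) > 0.5) for j in i] for i in logits])
print(predictions)  # [[1 0 1]
                    #  [0 1 0]]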
experiments/model_finetuning_topic.py CHANGED
@@ -1,5 +1,20 @@
 """
-python model_finetuning_topic.py -m "roberta-base" -d "temporal_topic"
+python model_finetuning_topic.py -m "roberta-base" -d "topic_temporal"
+
+python model_finetuning_topic.py -m "roberta-base" -d "topic_random0_seed0"
+python model_finetuning_topic.py -m "roberta-base" -d "topic_random1_seed0"
+python model_finetuning_topic.py -m "roberta-base" -d "topic_random2_seed0"
+python model_finetuning_topic.py -m "roberta-base" -d "topic_random3_seed0"
+
+python model_finetuning_topic.py -m "roberta-base" -d "topic_random0_seed1"
+python model_finetuning_topic.py -m "roberta-base" -d "topic_random1_seed1"
+python model_finetuning_topic.py -m "roberta-base" -d "topic_random2_seed1"
+python model_finetuning_topic.py -m "roberta-base" -d "topic_random3_seed1"
+
+python model_finetuning_topic.py -m "roberta-base" -d "topic_random0_seed2"
+python model_finetuning_topic.py -m "roberta-base" -d "topic_random1_seed2"
+python model_finetuning_topic.py -m "roberta-base" -d "topic_random2_seed2"
+python model_finetuning_topic.py -m "roberta-base" -d "topic_random3_seed2"
 """
 import argparse
 import json
@@ -56,6 +71,10 @@ def main(
         skip_test: bool = False,
         skip_upload: bool = False):
 
+    model_alias = f"topic-{dataset_type}-{os.path.basename(model)}"
+    output_dir = f"ckpt/{model_alias}"
+    best_model_path = pj(output_dir, "best_model")
+
     tokenizer = AutoTokenizer.from_pretrained(model)
     model = AutoModelForSequenceClassification.from_pretrained(
         model, id2label=ID2LABEL, label2id=LABEL2ID, num_labels=len(LABEL2ID), problem_type="multi_label_classification"
@@ -81,10 +100,6 @@ def main(
             "accuracy": metric_accuracy.compute(predictions=predictions, references=labels)["accuracy"]
         }
 
-    model_alias = f"topic-{dataset_type}-{os.path.basename(model)}"
-    output_dir = f"ckpt/{model_alias}"
-    best_model_path = pj(output_dir, "best_model")
-
     if not skip_train:
         logging.info("training model")
         trainer = Trainer(
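Besides the expanded usage docstring, the functional change here is moving the model_alias / output_dir / best_model_path block above the line that rebinds model from the checkpoint name to the loaded model object. A short sketch of why the order matters (the checkpoint name is an arbitrary example):

import os

model = "cardiffnlp/twitter-roberta-base"  # still a string at this point
model_alias = f"topic-topic_temporal-{os.path.basename(model)}"
print(model_alias)  # topic-topic_temporal-twitter-roberta-base

# Once `model = AutoModelForSequenceClassification.from_pretrained(model, ...)` runs,
# `model` is a PreTrainedModel instance and os.path.basename(model) raises a TypeError,
# so the alias has to be built while `model` still holds the checkpoint name.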
experiments/requirements.txt CHANGED
@@ -1,3 +1,5 @@
+torch
+scikit-learn
 numpy
 datasets
 transformers
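torch and scikit-learn are new requirements: torch is the backend the transformers Trainer runs on, and scikit-learn is what the "f1" and "accuracy" metrics pulled in via load_metric use under the hood. A quick environment check (not part of the repo, just a sketch):

import importlib

# scikit-learn is imported as "sklearn"
for pkg in ("torch", "sklearn", "numpy", "datasets", "transformers"):
    module = importlib.import_module(pkg)
    print(pkg, getattr(module, "__version__", "unknown"))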