asahi417 committed
Commit 40423d5 · Parent: b615d10
experiments/model_finetuning_emoji.py ADDED
File without changes (new empty file)
experiments/model_finetuning_hate.py ADDED
@@ -0,0 +1,187 @@
+ """Experiment.
+
+ ```
+ python model_finetuning_hate.py -m "roberta-base" -d "hate_temporal"
+ ```
+ """
+ import argparse
+ import json
+ import logging
+ import math
+ import os
+ import re
+ from os.path import join as pj
+ from shutil import copyfile, rmtree
+ from glob import glob
+
+ import numpy as np
+ import evaluate
+ from datasets import load_dataset
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
+ from huggingface_hub import Repository
+
+ logging.basicConfig(format="%(asctime)s %(levelname)-8s %(message)s", level=logging.INFO, datefmt="%Y-%m-%d %H:%M:%S")
+ os.environ["WANDB_DISABLED"] = "true"
+
+ EVAL_STEP = 500
+ RANDOM_SEED = 42
+ N_TRIALS = 10
+ URL_RE = re.compile(r"https?:\/\/[\w\.\/\?\=\d&#%_:/-]+")
+ HANDLE_RE = re.compile(r"@\w+")
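+ # the two regexes normalise @-handles and URLs into the placeholder tokens
+ # each model family was pretrained with (see the per-model functions below)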
+
+
+ def preprocess_bernice(text):
+     text = HANDLE_RE.sub("@USER", text)
+     text = URL_RE.sub("HTTPURL", text)
+     return text
+
+
+ def preprocess_timelm(text):
+     text = HANDLE_RE.sub("@user", text)
+     text = URL_RE.sub("http", text)
+     return text
+
+
+ def preprocess(model_name, text):
+     if model_name == "jhu-clsp/bernice":
+         return preprocess_bernice(text)
+     if "twitter-roberta-base" in model_name:
+         return preprocess_timelm(text)
+     return text
+
+
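+ # note: not referenced elsewhere in this script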
+ def sigmoid(x):
+     return 1 / (1 + math.exp(-x))
+
+
+ def main(
+         dataset: str = "tweettemposhift/tweet_temporal_shift",
+         dataset_type: str = "hate_temporal",
+         model: str = "roberta-base",
+         skip_train: bool = False,
+         skip_test: bool = False,
+         skip_upload: bool = False):
+
+     model_alias = f"hate-{dataset_type}-{os.path.basename(model)}"
+     output_dir = f"ckpt/{model_alias}"
+     best_model_path = pj(output_dir, "best_model")
+
+     tokenizer = AutoTokenizer.from_pretrained(model)
+     dataset = load_dataset(dataset, dataset_type)
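+     # tweets are short, so everything is truncated/padded to 64 subword
+     # tokens, with the model-specific normalisation applied first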
+     tokenized_datasets = dataset.map(
+         lambda x: tokenizer(
+             [preprocess(model, t) for t in x["text"]],
+             padding="max_length",
+             truncation=True,
+             max_length=64),
+         batched=True
+     )
+     tokenized_datasets = tokenized_datasets.rename_column("gold_label_binary", "label")
+     metric_accuracy = evaluate.load("accuracy")
+     metric_f1 = evaluate.load("f1")
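+
+     # accuracy alone drives the hyperparameter search; the final test-time
+     # evaluation reports both accuracy and binary F1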
+     def compute_metric_search(eval_pred):
+         logits, labels = eval_pred
+         predictions = np.argmax(logits, axis=-1)
+         return metric_accuracy.compute(predictions=predictions, references=labels)
+
+     def compute_metric_all(eval_pred):
+         logits, labels = eval_pred
+         predictions = np.argmax(logits, axis=-1)
+         return {
+             "f1": metric_f1.compute(predictions=predictions, references=labels)["f1"],
+             "accuracy": metric_accuracy.compute(predictions=predictions, references=labels)["accuracy"]
+         }
+
+     if not skip_train:
+         logging.info("training model")
+         trainer = Trainer(
+             args=TrainingArguments(
+                 output_dir=output_dir,
+                 evaluation_strategy="steps",
+                 eval_steps=EVAL_STEP,
+                 seed=RANDOM_SEED
+             ),
+             train_dataset=tokenized_datasets["train"],
+             eval_dataset=tokenized_datasets["validation"],
+             compute_metrics=compute_metric_search,
+             model_init=lambda trial: AutoModelForSequenceClassification.from_pretrained(
+                 model, return_dict=True, num_labels=2,
+             )
+         )
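+
+         # Optuna searches learning rate (log-uniform) and batch size; the
+         # best trial's values are copied back into the trainer args below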
+         best_run = trainer.hyperparameter_search(
+             hp_space=lambda trial: {
+                 "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
+                 "per_device_train_batch_size": trial.suggest_categorical(
+                     "per_device_train_batch_size", [8, 16, 32]
+                 ),
+             },
+             direction="maximize",
+             backend="optuna",
+             n_trials=N_TRIALS
+         )
+         for n, v in best_run.hyperparameters.items():
+             setattr(trainer.args, n, v)
+         trainer.train()
+         trainer.save_model(best_model_path)
+
+     if not skip_test:
+         logging.info("testing model")
+         test_split = ["test"]
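+         # temporal configurations come with four period-wise test sets
+         # (test_1 .. test_4) in addition to the full test split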
+         if dataset_type.endswith("temporal"):
+             test_split += ["test_1", "test_2", "test_3", "test_4"]
+         summary_file = pj(best_model_path, "summary.json")
+         if os.path.exists(summary_file):
+             with open(summary_file) as f:
+                 metric = json.load(f)
+         else:
+             metric = {}
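+         # one evaluation-only Trainer per test split; the fine-tuned
+         # checkpoint is reloaded from best_model_path each time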
+         for single_test in test_split:
+             trainer = Trainer(
+                 model=AutoModelForSequenceClassification.from_pretrained(best_model_path, num_labels=2),
+                 args=TrainingArguments(
+                     output_dir=output_dir,
+                     evaluation_strategy="no",
+                     seed=RANDOM_SEED
+                 ),
+                 train_dataset=tokenized_datasets["train"],
+                 eval_dataset=tokenized_datasets[single_test],
+                 compute_metrics=compute_metric_all
+             )
+             metric.update({f"{single_test}/{k}": v for k, v in trainer.evaluate().items()})
+         logging.info(json.dumps(metric, indent=4))
+         with open(summary_file, "w") as f:
+             json.dump(metric, f)
+
+     if not skip_upload:
+         logging.info("uploading to huggingface")
+         model_organization = "tweettemposhift"
+         model_instance = AutoModelForSequenceClassification.from_pretrained(best_model_path, num_labels=2)
+         model_instance.push_to_hub(f"{model_organization}/{model_alias}", use_auth_token=True)
+         tokenizer.push_to_hub(f"{model_organization}/{model_alias}", use_auth_token=True)
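+         # remaining checkpoint files (e.g. summary.json) are synced through
+         # a local git clone of the model repository, then the clone is removed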
+         repo = Repository(model_alias, f"{model_organization}/{model_alias}")
+         for i in glob(f"{best_model_path}/*"):
+             if not os.path.exists(f"{model_alias}/{os.path.basename(i)}"):
+                 copyfile(i, f"{model_alias}/{os.path.basename(i)}")
+         repo.push_to_hub()
+         rmtree(model_alias)
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser(description="Fine-tuning language model.")
+     parser.add_argument("-m", "--model", help="transformer LM", default="roberta-base", type=str)
+     parser.add_argument("-d", "--dataset-type", help="dataset type", default="hate_temporal", type=str)
+     parser.add_argument("--skip-train", action="store_true")
+     parser.add_argument("--skip-test", action="store_true")
+     parser.add_argument("--skip-upload", action="store_true")
+     opt = parser.parse_args()
+     main(
+         dataset_type=opt.dataset_type,
+         model=opt.model,
+         skip_train=opt.skip_train,
+         skip_test=opt.skip_test,
+         skip_upload=opt.skip_upload,
+     )
statistics.py CHANGED
@@ -5,7 +5,7 @@ from transformers import AutoTokenizer
 
 tokenizer = AutoTokenizer.from_pretrained("roberta-base")
 stats = []
- for i in ["hate_temporal", "nerd_temporal", "ner_temporal", "topic_temporal", "sentiment_small_temporal"]:
+ for i in ["emoji_temporal", "hate_temporal", "nerd_temporal", "ner_temporal", "topic_temporal", "sentiment_small_temporal"]:
     for s in ["train", "validation", "test"]:
         dataset = load_dataset("tweettemposhift/tweet_temporal_shift", i, split=s)
         df = dataset.to_pandas()
@@ -26,6 +26,7 @@ for i in ["hate_temporal", "nerd_temporal", "ner_temporal", "topic_temporal", "sentiment_small_temporal"]:
 df = pd.DataFrame(stats)
 print(df)
 pretty_name = {
+     "emoji_temporal": "Emoji",
     "hate_temporal": "Hate",
    "nerd_temporal": "NERD",
    "ner_temporal": "NER",