"""Experiment: fine-tune a language model for binary sentiment classification.

```
python model_finetuning_sentiment.py -m "roberta-base" -d "sentiment_temporal"
```

"""
import argparse
import json
import logging
import math
import os
import re
from os.path import join as pj
from shutil import copyfile, rmtree
from glob import glob

import numpy as np
import evaluate
from datasets import load_dataset
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
from huggingface_hub import Repository

logging.basicConfig(format="%(asctime)s %(levelname)-8s %(message)s", level=logging.INFO, datefmt="%Y-%m-%d %H:%M:%S")
os.environ["WANDB_DISABLED"] = "true"  # keep runs local; no wandb reporting

EVAL_STEP = 500
RANDOM_SEED = 42
N_TRIALS = 10
# Twitter handles and URLs are masked before tokenization (see preprocess_*).
URL_RE = re.compile(r"https?://[\w./?=&#%_:-]+")
HANDLE_RE = re.compile(r"@\w+")


def preprocess_bernice(text):
    """Mask handles and URLs with the placeholders used in Bernice pre-training."""
    text = HANDLE_RE.sub("@USER", text)
    text = URL_RE.sub("HTTPURL", text)
    return text


def preprocess_timelm(text):
    """Mask handles and URLs with the placeholders used by the TimeLM models."""
    text = HANDLE_RE.sub("@user", text)
    text = URL_RE.sub("http", text)
    return text


def preprocess(model_name, text):
    """Route to the model-specific tweet normalization, defaulting to a no-op."""
    if model_name == "jhu-clsp/bernice":
        return preprocess_bernice(text)
    if "twitter-roberta-base" in model_name:
        return preprocess_timelm(text)
    return text
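
# Example (hypothetical input):
#   preprocess("jhu-clsp/bernice", "@alice check https://example.com")
#   -> "@USER check HTTPURL"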


def sigmoid(x):
    """Logistic function mapping a raw logit to a probability in (0, 1)."""
    return 1 / (1 + math.exp(-x))


def main(
        dataset: str = "tweettemposhift/tweet_temporal_shift",
        dataset_type: str = "sentiment_temporal",
        model: str = "roberta-base",
        skip_train: bool = False,
        skip_test: bool = False,
        skip_upload: bool = False):
    model_alias = f"sentiment-{dataset_type}-{os.path.basename(model)}"
    output_dir = f"ckpt/{model_alias}"
    best_model_path = pj(output_dir, "best_model")

    tokenizer = AutoTokenizer.from_pretrained(model)
    dataset_instance = load_dataset(dataset, dataset_type)
    tokenized_datasets = dataset_instance.map(
        lambda x: tokenizer(
            [preprocess(model, t) for t in x["text"]],
            padding="max_length",
            truncation=True,
            max_length=64),  # tweets are short; a 64-token budget suffices
        batched=True
    )
    tokenized_datasets = tokenized_datasets.rename_column("gold_label_binary", "label")
    metric_accuracy = evaluate.load("accuracy")
    metric_f1 = evaluate.load("f1")
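
    # Accuracy alone guides the hyperparameter search; the final test-time
    # metric additionally reports F1 over the same argmax predictions.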

    def compute_metric_search(eval_pred):
        logits, labels = eval_pred
        predictions = np.argmax(logits, axis=-1)
        return metric_accuracy.compute(predictions=predictions, references=labels)

    def compute_metric_all(eval_pred):
        logits, labels = eval_pred
        predictions = np.argmax(logits, axis=-1)
        return {
            "f1": metric_f1.compute(predictions=predictions, references=labels)["f1"],
            "accuracy": metric_accuracy.compute(predictions=predictions, references=labels)["accuracy"]
        }

    if not skip_train:
        logging.info("training model")
        # Use `model_init` only (not a fixed `model`): Trainer warns if both
        # are given, and each Optuna trial must start from a fresh checkpoint.
        trainer = Trainer(
            args=TrainingArguments(
                output_dir=output_dir,
                evaluation_strategy="steps",
                eval_steps=EVAL_STEP,
                seed=RANDOM_SEED
            ),
            train_dataset=tokenized_datasets["train"],
            eval_dataset=tokenized_datasets["validation"],
            compute_metrics=compute_metric_search,
            model_init=lambda trial: AutoModelForSequenceClassification.from_pretrained(
                model, return_dict=True, num_labels=2,
            )
        )

        best_run = trainer.hyperparameter_search(
            hp_space=lambda trial: {
                "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
                "per_device_train_batch_size": trial.suggest_categorical(
                    "per_device_train_batch_size", [8, 16, 32]
                ),
            },
            direction="maximize",
            backend="optuna",
            n_trials=N_TRIALS
        )
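        # Adopt the best trial's hyperparameters, then retrain once with that
        # configuration and keep the resulting checkpoint.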
        for n, v in best_run.hyperparameters.items():
            setattr(trainer.args, n, v)
        trainer.train()
        trainer.save_model(best_model_path)

    if not skip_test:
        logging.info("testing model")
        test_split = ["test"]
        if dataset_type.endswith("temporal"):
            test_split += ["test_1", "test_2", "test_3", "test_4"]
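        # Temporal dataset variants ship four extra test splits
        # (test_1 ... test_4), assumed here to cover successive time periods,
        # alongside the full test set.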
        summary_file = pj(best_model_path, "summary.json")
        if os.path.exists(summary_file):
            with open(summary_file) as f:
                metric = json.load(f)
        else:
            metric = {}
        for single_test in test_split:
            trainer = Trainer(
                model=AutoModelForSequenceClassification.from_pretrained(best_model_path, num_labels=2),
                args=TrainingArguments(
                    output_dir=output_dir,
                    evaluation_strategy="no",
                    seed=RANDOM_SEED
                ),
                train_dataset=tokenized_datasets["train"],
                eval_dataset=tokenized_datasets[single_test],
                compute_metrics=compute_metric_all
            )
            metric.update({f"{single_test}/{k}": v for k, v in trainer.evaluate().items()})
        logging.info(json.dumps(metric, indent=4))
        with open(summary_file, "w") as f:
            json.dump(metric, f)
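
    # Upload stage: push the fine-tuned model and tokenizer to the Hub, then
    # clone the repo locally to add any remaining artifacts (e.g. summary.json).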
    if not skip_upload:
        logging.info("uploading to huggingface")
        model_organization = "tweettemposhift"
        model_instance = AutoModelForSequenceClassification.from_pretrained(best_model_path, num_labels=2)
        model_instance.push_to_hub(f"{model_organization}/{model_alias}", use_auth_token=True)
        tokenizer.push_to_hub(f"{model_organization}/{model_alias}", use_auth_token=True)
        repo = Repository(model_alias, clone_from=f"{model_organization}/{model_alias}")
        for i in glob(f"{best_model_path}/*"):
            if not os.path.exists(f"{model_alias}/{os.path.basename(i)}"):
                copyfile(i, f"{model_alias}/{os.path.basename(i)}")
        repo.push_to_hub()
        rmtree(model_alias)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Fine-tuning language model.")
    parser.add_argument("-m", "--model", help="transformer LM", default="roberta-base", type=str)
    parser.add_argument("-d", "--dataset-type", help="dataset type", default="sentiment_temporal", type=str)
    parser.add_argument("--skip-train", action="store_true")
    parser.add_argument("--skip-test", action="store_true")
    parser.add_argument("--skip-upload", action="store_true")
    opt = parser.parse_args()
    main(
        dataset_type=opt.dataset_type,
        model=opt.model,
        skip_train=opt.skip_train,
        skip_test=opt.skip_test,
        skip_upload=opt.skip_upload,
    )