"""Experiment.
```
python model_finetuning_ner.py -m "roberta-base" -d "ner_temporal"
```
"""
import argparse
import json
import logging
import math
import os
import re
from os.path import join as pj
from shutil import copyfile, rmtree
from glob import glob
import numpy as np
import evaluate
from datasets import load_dataset
from transformers import AutoTokenizer, AutoModelForTokenClassification, TrainingArguments, Trainer
from huggingface_hub import Repository
logging.basicConfig(format="%(asctime)s %(levelname)-8s %(message)s", level=logging.INFO, datefmt="%Y-%m-%d %H:%M:%S")
os.environ["WANDB_DISABLED"] = "true"
EVAL_STEP = 500
RANDOM_SEED = 42
N_TRIALS = 10
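# regular expressions used to normalise URLs and user mentions in tweets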
URL_RE = re.compile(r"https?://[\w.?=&#%_:/-]+")
HANDLE_RE = re.compile(r"@\w+")
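# BIO tag inventory covering the seven tweet NER entity types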
LABEL2ID = {
"B-corporation": 0,
"B-creative_work": 1,
"B-event": 2,
"B-group": 3,
"B-location": 4,
"B-person": 5,
"B-product": 6,
"I-corporation": 7,
"I-creative_work": 8,
"I-event": 9,
"I-group": 10,
"I-location": 11,
"I-person": 12,
"I-product": 13,
"O": 14
}
ID2LABEL = {v: k for k, v in LABEL2ID.items()}
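# model-specific tweet normalisation: Bernice expects "@USER"/"HTTPURL" placeholders,
# while the TimeLM twitter-roberta checkpoints expect "@user"/"http"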
def preprocess_bernice(text):
text = HANDLE_RE.sub("@USER", text)
text = URL_RE.sub("HTTPURL", text)
return text
def preprocess_timelm(text):
text = HANDLE_RE.sub("@user", text)
text = URL_RE.sub("http", text)
return text
def preprocess(model_name, text):
if model_name == "jhu-clsp/bernice":
return preprocess_bernice(text)
if "twitter-roberta-base" in model_name:
return preprocess_timelm(text)
return text
def sigmoid(x):
    # note: not referenced elsewhere in this script
    return 1 / (1 + math.exp(-x))
def main(
dataset: str = "tweettemposhift/tweet_temporal_shift",
dataset_type: str = "ner_temporal",
model: str = "roberta-base",
skip_train: bool = False,
skip_test: bool = False,
skip_upload: bool = False):
model_alias = f"ner-{dataset_type}-{os.path.basename(model)}"
output_dir = f"ckpt/{model_alias}"
best_model_path = pj(output_dir, "best_model")
tokenizer = AutoTokenizer.from_pretrained(model, add_prefix_space=True, use_fast=True)
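    # tokenise the pre-split tweets and expand each word-level NER label to all of its
    # sub-word pieces, padding/truncating tokens and labels to a fixed length of 128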
def tokenize_and_align_labels(examples):
tokens = [[preprocess(model, w) for w in t] for t in examples["text_tokenized"]]
tokenized_inputs = tokenizer(
tokens,
truncation=True,
is_split_into_words=True,
padding="max_length",
max_length=128)
all_labels = examples["gold_label_sequence"]
new_labels = []
        for word_list, label_list in zip(tokens, all_labels):
            # the leading -100 masks the sequence-start special token added by the tokenizer
            tmp_labels = [-100]
            for word, label_id in zip(word_list, label_list):
                # repeat the word-level label for every sub-word piece the word is split into
                tmp_labels += [label_id] * len(tokenizer.tokenize(word))
            # truncate or pad the label sequence to the fixed input length of 128
            if len(tmp_labels) > 128:
                tmp_labels = tmp_labels[:128]
            else:
                tmp_labels = tmp_labels + [-100] * (128 - len(tmp_labels))
            new_labels.append(tmp_labels)
tokenized_inputs["labels"] = new_labels
return tokenized_inputs
dataset = load_dataset(dataset, dataset_type)
    tokenized_datasets = dataset.map(tokenize_and_align_labels, batched=True)
seqeval = evaluate.load("seqeval")
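    # span-level evaluation with seqeval: drop padded/special positions (label -100) and
    # map label ids back to BIO tags before computing precision/recall/F1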
def compute_metric_all(eval_pred):
logits, labels = eval_pred
predictions = np.argmax(logits, axis=-1)
labels_new, predictions_new = [], []
for label, prediction in zip(labels, predictions):
prediction = [p for la, p in zip(label, prediction) if la != -100]
label = [la for la in label if la != -100]
labels_new.append([ID2LABEL[la] for la in label])
predictions_new.append([ID2LABEL[p] for p in prediction])
return seqeval.compute(predictions=predictions_new, references=labels_new)
def compute_metric_search(eval_pred):
return {"overall_f1": compute_metric_all(eval_pred)["overall_f1"]}
if not skip_train:
logging.info("training model")
trainer = Trainer(
model=AutoModelForTokenClassification.from_pretrained(model, num_labels=len(LABEL2ID)),
args=TrainingArguments(
output_dir=output_dir,
evaluation_strategy="steps",
eval_steps=EVAL_STEP,
seed=RANDOM_SEED
),
train_dataset=tokenized_datasets["train"],
eval_dataset=tokenized_datasets["validation"],
compute_metrics=compute_metric_search,
model_init=lambda x: AutoModelForTokenClassification.from_pretrained(
model, return_dict=True, num_labels=len(LABEL2ID),
)
)
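        # search learning rate and batch size with Optuna, maximising span-level F1 on the validation split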
best_run = trainer.hyperparameter_search(
hp_space=lambda trial: {
"learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
"per_device_train_batch_size": trial.suggest_categorical(
"per_device_train_batch_size", [8, 16, 32]
),
},
direction="maximize",
backend="optuna",
n_trials=N_TRIALS
)
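        # apply the best hyper-parameters found by the search, retrain, and save the resulting checkpoint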
for n, v in best_run.hyperparameters.items():
setattr(trainer.args, n, v)
trainer.train()
trainer.save_model(best_model_path)
if not skip_test:
logging.info("testing model")
test_split = ["test"]
if dataset_type.endswith("temporal"):
test_split += ["test_1", "test_2", "test_3", "test_4"]
summary_file = pj(best_model_path, "summary.json")
if os.path.exists(summary_file):
with open(summary_file) as f:
metric = json.load(f)
else:
metric = {}
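        # evaluate the best checkpoint on every test split and store metrics under "<split>/<metric>" keys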
for single_test in test_split:
trainer = Trainer(
model=AutoModelForTokenClassification.from_pretrained(best_model_path, num_labels=len(LABEL2ID)),
args=TrainingArguments(
output_dir=output_dir,
evaluation_strategy="no",
seed=RANDOM_SEED
),
train_dataset=tokenized_datasets["train"],
eval_dataset=tokenized_datasets[single_test],
compute_metrics=compute_metric_all
)
metric.update({f"{single_test}/{k}": v for k, v in trainer.evaluate().items()})
logging.info(json.dumps(metric, indent=4))
with open(summary_file, "w") as f:
json.dump(metric, f)
if not skip_upload:
logging.info("uploading to huggingface")
model_organization = "tweettemposhift"
model_instance = AutoModelForTokenClassification.from_pretrained(best_model_path, num_labels=len(LABEL2ID))
model_instance.push_to_hub(f"{model_organization}/{model_alias}", use_auth_token=True)
tokenizer.push_to_hub(f"{model_organization}/{model_alias}", use_auth_token=True)
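        # clone the Hub repository locally, copy the checkpoint files (refreshing any existing summary.json), push, and clean up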
        repo = Repository(model_alias, clone_from=f"{model_organization}/{model_alias}")
for i in glob(f"{best_model_path}/*"):
if os.path.basename(i) == "summary.json" and os.path.exists(f"{model_alias}/{os.path.basename(i)}"):
os.remove(f"{model_alias}/{os.path.basename(i)}")
if not os.path.exists(f"{model_alias}/{os.path.basename(i)}"):
copyfile(i, f"{model_alias}/{os.path.basename(i)}")
repo.push_to_hub()
rmtree(model_alias)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Fine-tune a language model for NER.")
parser.add_argument("-m", "--model", help="transformer LM", default="roberta-base", type=str)
parser.add_argument("-d", "--dataset-type", help="dataset type", default="ner_temporal", type=str)
parser.add_argument("--skip-train", action="store_true")
parser.add_argument("--skip-test", action="store_true")
parser.add_argument("--skip-upload", action="store_true")
opt = parser.parse_args()
main(
dataset_type=opt.dataset_type,
model=opt.model,
skip_train=opt.skip_train,
skip_test=opt.skip_test,
skip_upload=opt.skip_upload,
)