|
""" |
|
Copyright (c) 2022, salesforce.com, inc. |
|
All rights reserved. |
|
SPDX-License-Identifier: BSD-3-Clause |
|
For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause |
|
""" |
|
|
|
import logging
import os

import numpy as np
import torch
import torch.distributed as dist
import wandb
from sklearn.metrics import cohen_kappa_score

from common.dist_utils import (
    get_rank,
    get_world_size,
    is_dist_avail_and_initialized,
    is_main_process,
)
from common.logger import MetricLogger, SmoothedValue
from common.registry import registry
from datasets.data_utils import prepare_sample
|
class BaseTask:
    def __init__(self, **kwargs):
        super().__init__()

        self.inst_id_key = "instance_id"
        self.wandb_initialized = False
|
|
|
    def init_wandb(self, cfg):
        # wandb.init() itself is expected to be called by the runner; this
        # flag only enables wandb logging on the main (rank-0) process.
        if is_main_process():
            self.wandb_initialized = True
|
|
|
    @classmethod
    def setup_task(cls, **kwargs):
        return cls()

    def build_model(self, cfg):
        model_config = cfg.model_cfg
        model_cls = registry.get_model_class(model_config.arch)
        return model_cls.from_config(model_config)
|
|
|
    def build_datasets(self, cfg):
        """
        Build a dictionary of datasets, keyed by split 'train', 'valid', 'test'.
        Datasets and annotations are downloaded automatically if they do not
        already exist.

        Args:
            cfg (common.config.Config): root config; its ``datasets_cfg`` entry
                maps dataset names to their builder configs.

        Returns:
            dict: Dictionary of torch.utils.data.Dataset objects by split.
        """
        datasets = dict()

        datasets_config = cfg.datasets_cfg

        assert len(datasets_config) > 0, "At least one dataset has to be specified."

        for name in datasets_config:
            dataset_config = datasets_config[name]
            builder = registry.get_builder_class(name)(dataset_config)
            dataset = builder.build_datasets()

            dataset["train"].name = name
            if "sample_ratio" in dataset_config:
                dataset["train"].sample_ratio = dataset_config.sample_ratio
            logging.info(f"Loaded dataset: {name} with {len(dataset['train'])} samples.")
            datasets[name] = dataset

        return datasets
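
    # Illustrative sketch only (names below are hypothetical, not part of this
    # repo's config schema): build_datasets() expects cfg.datasets_cfg shaped
    # roughly like
    #
    #   datasets:
    #     my_dataset:            # must match a name in the builder registry
    #       sample_ratio: 1.0    # optional, forwarded to the train split
    #
    # and returns {"my_dataset": {"train": Dataset, "valid": Dataset, ...}},
    # so downstream code indexes first by dataset name, then by split.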
|
|
|
    def train_step(self, model, samples):
        outputs = model(samples)
        loss, modal, task = outputs["loss"], outputs["modal"], outputs["task_type"]
        return loss, modal, task
|
|
|
    def valid_step(self, model, samples):
        """
        Validation step: compute per-sample predictions and collect the
        labels needed for the QWK (quadratic weighted kappa) calculation.
        """
        model.eval()

        with torch.no_grad():
            outputs = model(samples)
            loss = outputs["loss"]

            # Prefer raw logits; fall back to probabilities if the model
            # only exposes those.
            logits = outputs.get("logits", None)
            if logits is not None:
                preds = torch.argmax(logits, dim=1)
            else:
                probs = outputs.get("probs", None)
                if probs is None:
                    raise KeyError("valid_step expects 'logits' or 'probs' in model outputs.")
                preds = torch.argmax(probs, dim=1)

            labels = samples["labels"]
            score_labels = samples.get("score_labels", None)
            if score_labels is None:
                # Keep the zip() below well-defined when no score labels exist.
                score_labels = [None] * len(labels)

        return [{
            "loss": loss.item(),
            "pred": pred.item(),
            "logits": logits[idx] if logits is not None else None,
            "label": label.item(),
            "score_label": score_label.item() if score_label is not None else None,
            "modal": outputs["modal"],
            "task_type": outputs["task_type"],
        } for idx, (pred, label, score_label) in enumerate(zip(preds, labels, score_labels))]
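
    # Minimal sketch, not part of the original task API: one way to reduce the
    # per-sample dicts from valid_step() into the {'loss': ..., 'qwk': ...}
    # dict that after_evaluation() expects, using quadratic weighted kappa.
    # Subclasses are assumed to implement their own aggregation; this only
    # illustrates the shape of that contract.
    def _aggregate_eval_results_sketch(self, results):
        preds = [r["pred"] for r in results]
        labels = [r["label"] for r in results]
        return {
            "loss": float(np.mean([r["loss"] for r in results])),
            "qwk": cohen_kappa_score(labels, preds, weights="quadratic"),
        }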
|
|
|
|
|
|
|
|
|
    def before_evaluation(self, model, dataset, **kwargs):
        model.before_evaluation(dataset=dataset, task_type=type(self))
|
|
|
    def after_evaluation(self, val_result, **kwargs):
        loss = val_result["loss"]
        qwk = val_result["qwk"]
        val_log = {
            "agg_metrics": qwk,
            "loss": loss,
        }
        return val_log
|
|
|
    def inference_step(self):
        raise NotImplementedError
|
|
|
    def evaluation(self, model, data_loader, cuda_enabled=True):
        metric_logger = MetricLogger(delimiter=" ")
        header = "Evaluation"

        print_freq = 10

        results = []

        for samples in metric_logger.log_every(data_loader, print_freq, header):
            samples = prepare_sample(samples, cuda_enabled=cuda_enabled)

            eval_output = self.valid_step(model=model, samples=samples)
            results.extend(eval_output)

        if is_dist_avail_and_initialized():
            dist.barrier()

        # Results are per-rank here; save_result() merges them across ranks.
        return results
|
|
|
|
|
|
|
    def train_epoch(
        self,
        epoch,
        model,
        data_loader,
        optimizer,
        lr_scheduler,
        scaler=None,
        cuda_enabled=False,
        log_freq=50,
        accum_grad_iters=1,
    ):
        return_dict, metric_logger = self._train_inner_loop(
            epoch=epoch,
            iters_per_epoch=lr_scheduler.iters_per_epoch,
            model=model,
            data_loader=data_loader,
            optimizer=optimizer,
            scaler=scaler,
            lr_scheduler=lr_scheduler,
            log_freq=log_freq,
            cuda_enabled=cuda_enabled,
            accum_grad_iters=accum_grad_iters,
        )

        if is_main_process() and self.wandb_initialized:
            wandb.log({
                "epoch": epoch,
                "train/loss": float(return_dict["loss"]),
                "train/lr": float(return_dict["lr"]),
                "train/modal": return_dict["modal"],
                "train/task": return_dict["task"],
            })

        return return_dict, metric_logger
|
|
|
    def train_iters(
        self,
        epoch,
        start_iters,
        iters_per_inner_epoch,
        model,
        data_loader,
        optimizer,
        lr_scheduler,
        scaler=None,
        cuda_enabled=False,
        log_freq=50,
        accum_grad_iters=1,
    ):
        return self._train_inner_loop(
            epoch=epoch,
            start_iters=start_iters,
            iters_per_epoch=iters_per_inner_epoch,
            model=model,
            data_loader=data_loader,
            optimizer=optimizer,
            scaler=scaler,
            lr_scheduler=lr_scheduler,
            log_freq=log_freq,
            cuda_enabled=cuda_enabled,
            accum_grad_iters=accum_grad_iters,
        )
|
|
|
    def _train_inner_loop(
        self,
        epoch,
        iters_per_epoch,
        model,
        data_loader,
        optimizer,
        lr_scheduler,
        scaler=None,
        start_iters=None,
        log_freq=50,
        cuda_enabled=False,
        accum_grad_iters=1,
    ):
        """
        An inner training loop compatible with both epoch-based and iter-based
        training. When epoch-based, training stops after one epoch; when
        iter-based, it stops after iters_per_epoch iterations.
        """
|
        use_amp = scaler is not None

        if not hasattr(data_loader, "__next__"):
            # Convert to an iterator if the loader is epoch-based.
            data_loader = iter(data_loader)

        metric_logger = MetricLogger(delimiter=" ")
        metric_logger.add_meter("lr", SmoothedValue(window_size=1, fmt="{global_avg:.8e}"))
        metric_logger.add_meter("loss", SmoothedValue(window_size=1, fmt="{global_avg:.3f}"))

        logging.info(
            "Start training epoch {}, {} iters per inner epoch.".format(
                epoch, iters_per_epoch
            )
        )
        header = "Epoch: [{}]".format(epoch)
        if start_iters is None:
            # Epoch-based training.
            inner_epoch = epoch
        else:
            # Iter-based training: derive the inner epoch from the global step.
            inner_epoch = start_iters // iters_per_epoch
            header = header + "; inner epoch [{}]".format(inner_epoch)
|
|
|
        for i in metric_logger.log_every(range(iters_per_epoch), log_freq, header):
            # Redundant with range() but kept as a guard for iter-based runners.
            if i >= iters_per_epoch:
                break

            samples = next(data_loader)

            samples = prepare_sample(samples, cuda_enabled=cuda_enabled)
            samples.update(
                {
                    "epoch": inner_epoch,
                    "num_iters_per_epoch": iters_per_epoch,
                    "iters": i,
                }
            )

            lr_scheduler.step(cur_epoch=inner_epoch, cur_step=i)

            with torch.cuda.amp.autocast(enabled=use_amp):
                loss, modal, task = self.train_step(model=model, samples=samples)

            # Scale the loss so gradients average correctly over the
            # accumulation window.
            loss = loss / accum_grad_iters

            if use_amp:
                scaler.scale(loss).backward()
            else:
                loss.backward()

            # Update only every accum_grad_iters steps. With AMP, gradients
            # must be unscaled before clipping.
            if (i + 1) % accum_grad_iters == 0:
                if use_amp:
                    scaler.unscale_(optimizer)
                    torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
                    scaler.step(optimizer)
                    scaler.update()
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
                    optimizer.step()
                optimizer.zero_grad()
|
|
|
|
|
metric_logger.update(loss=loss.item()) |
|
metric_logger.update(lr=optimizer.param_groups[0]["lr"]) |
|
metric_logger.update(modal=modal) |
|
metric_logger.update(task=task) |
|
|
|
if is_main_process() and self.wandb_initialized: |
|
wandb.log({ |
|
"train/step": i + epoch * iters_per_epoch, |
|
"train/loss_step": loss.item(), |
|
"train/lr_step": optimizer.param_groups[0]["lr"], |
|
"train/modal_step": modal, |
|
"train/task_step": task, |
|
}) |
|
|
|
|
|
|
|
        # Gather stats from all processes and report the averaged values.
        metric_logger.synchronize_between_processes()
        logging.info("Averaged stats: " + str(metric_logger.global_avg()))
        return_dict = {}
        for k, meter in metric_logger.meters.items():
            if not isinstance(meter, str):
                return_dict.update({k: "{:.3f}".format(meter.global_avg)})

        return_dict.update({"modal": modal, "task": task})
        return return_dict, metric_logger
|
|
|
    @staticmethod
    def save_result(result, result_dir, filename, remove_duplicate=""):
        import json

        result_file = os.path.join(
            result_dir, "%s_rank%d.json" % (filename, get_rank())
        )
        final_result_file = os.path.join(result_dir, "%s.json" % filename)

        with open(result_file, "w") as f:
            json.dump(result, f)

        if is_dist_avail_and_initialized():
            dist.barrier()

        if is_main_process():
            logging.warning("rank %d starts merging results." % get_rank())

            # Combine results from all processes.
            result = []

            for rank in range(get_world_size()):
                result_file = os.path.join(
                    result_dir, "%s_rank%d.json" % (filename, rank)
                )
                with open(result_file, "r") as f:
                    res = json.load(f)
                result += res

            if remove_duplicate:
                result_new = []
                id_list = []
                for res in result:
                    if res[remove_duplicate] not in id_list:
                        id_list.append(res[remove_duplicate])
                        result_new.append(res)
                result = result_new

            with open(final_result_file, "w") as f:
                json.dump(result, f)
            print("result file saved to %s" % final_result_file)

            if wandb.run is not None:
                # Log where the merged results live rather than the raw list
                # of dicts, which is not a wandb-loggable metric type.
                wandb.log({
                    "final_results_path": final_result_file,
                    "num_final_results": len(result),
                })

        return final_result_file
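

# Minimal usage sketch (hypothetical names, for illustration only): a runner
# is assumed to drive a task roughly like this.
#
#   task = registry.get_task_class(cfg.run_cfg.task).setup_task(cfg=cfg)
#   datasets = task.build_datasets(cfg)
#   model = task.build_model(cfg)
#   for epoch in range(num_epochs):
#       task.train_epoch(epoch, model, train_loader, optimizer, lr_scheduler)
#       results = task.evaluation(model, val_loader)
#       val_log = task.after_evaluation(task._aggregate_eval_results_sketch(results))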
|
|