"""
 Copyright (c) 2022, salesforce.com, inc.
 All rights reserved.
 SPDX-License-Identifier: BSD-3-Clause
 For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""

import logging
import time

import numpy as np
import torch
import torch.distributed as dist
import wandb
from sklearn.metrics import (
    accuracy_score,
    cohen_kappa_score,
    confusion_matrix,
    f1_score,
    precision_score,
    recall_score,
    roc_auc_score,
)
from sklearn.preprocessing import label_binarize

from common.dist_utils import (
    get_rank,
    get_world_size,
    is_dist_avail_and_initialized,
    is_main_process,
)
from common.logger import MetricLogger, SmoothedValue
from common.registry import registry
from datasets.data_utils import prepare_sample
from datasets.datasets.dataloader_utils import MultiIterLoader
from tasks.base_task import BaseTask


def prob_to_continuous(probs):
    """Convert a probability distribution over ordinal classes to a continuous
    score by taking its expected value."""
    return sum(i * p for i, p in enumerate(probs))
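

# Example (illustrative): for class probabilities [0.1, 0.2, 0.3, 0.4] over the
# four ordinal classes, prob_to_continuous returns
# 0 * 0.1 + 1 * 0.2 + 2 * 0.3 + 3 * 0.4 = 2.0, a score between classes 2 and 3.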


@registry.register_task("report_generation")
class ReportGenerationTask(BaseTask):
    def __init__(self):
        super().__init__()

    def calculate_cindex(self, predictions, ground_truths):
        """Calculate the concordance index (C-index) for ordinal predictions.

        Each row of ``predictions`` is collapsed to a continuous score via its
        expected value; the C-index is the fraction of pairs with different
        labels whose scores are ordered the same way as their labels.
        """
        predictions = [prob_to_continuous(probs) for probs in predictions.tolist()]

        n = len(predictions)
        concordant = 0
        total_pairs = 0

        # Compare every pair of samples with different labels; a pair counts as
        # concordant when the predicted scores are ordered like the labels.
        for i in range(n):
            for j in range(i + 1, n):
                if ground_truths[i] != ground_truths[j]:
                    total_pairs += 1
                    if (predictions[i] < predictions[j] and ground_truths[i] < ground_truths[j]) or \
                       (predictions[i] > predictions[j] and ground_truths[i] > ground_truths[j]):
                        concordant += 1

        return float(concordant / total_pairs) if total_pairs > 0 else 0.0
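
    # Illustrative check: with expected-value scores [0.4, 1.3, 2.6] and labels
    # [0, 1, 3], all three comparable pairs are ordered consistently, so the
    # C-index is 3/3 = 1.0. Tied scores count against concordance here, since
    # neither strict inequality holds for them.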

    def evaluation_backup(self, model, data_loader, cuda_enabled=True):
        if not hasattr(data_loader, "__next__"):
            data_loader = iter(data_loader)

        metric_logger = MetricLogger(delimiter=" ")
        header = "Evaluation"
        print_freq = 10

        results = []

        for samples in metric_logger.log_every(data_loader, print_freq, header):
            samples = prepare_sample(samples, cuda_enabled=cuda_enabled)
            eval_output = self.valid_step(model=model, samples=samples)
            results.extend(eval_output)

        if is_dist_avail_and_initialized():
            dist.barrier()

        return results

    def after_evaluation(self, val_result, **kwargs):
        # 'agg_metrics' is the single scalar the surrounding runner compares
        # across epochs for model selection; the C-index fills that role here.
        loss = val_result['loss']
        cindex = val_result['cindex']
        val_log = {
            'agg_metrics': cindex
        }
        return val_log

    def evaluation(self, model, data_loader, cuda_enabled=True):
        metric_logger = MetricLogger(delimiter=" ")
        header = "Evaluation"
        print_freq = 10

        results = []

        # For a MultiIterLoader, estimate the number of batches from its first
        # underlying loader; otherwise use the loader's own length.
        if isinstance(data_loader, MultiIterLoader):
            total_batches = len(next(iter(data_loader.loaders)))
        else:
            total_batches = len(data_loader)

        for i in range(total_batches):
            try:
                samples = next(data_loader)
                samples = prepare_sample(samples, cuda_enabled=cuda_enabled)
                batch_results = self.valid_step(model=model, samples=samples)
                results.extend(batch_results)
            except StopIteration:
                logging.warning(f"StopIteration at batch {i}")
                break

        if is_dist_avail_and_initialized():
            # Gather every rank's result list and flatten, so each rank
            # computes the metrics below over the full evaluation set.
            gathered_results = [None for _ in range(get_world_size())]
            dist.all_gather_object(gathered_results, results)
            results = [item for sublist in gathered_results for item in sublist]
            dist.barrier()

        all_logits = [r['logits'] for r in results]
        all_preds = [r['pred'] for r in results]
        all_labels = [r['label'] for r in results]
        all_losses = [r['loss'] for r in results]
        if hasattr(model, 'loss_type') and model.loss_type == 'mse':
            all_score_labels = [r['score_label'] for r in results]

        def convert_to_class(value):
            # Map a continuous score onto one of the four ordinal classes.
            if value < 10:
                return 0
            elif value < 100:
                return 1
            elif value < 400:
                return 2
            else:
                return 3

        all_logits = torch.cat(all_logits, dim=0).cpu().numpy()
        y_true = np.array(all_labels)
        y_pred = np.array(all_preds)

        # Ordinal agreement (QWK, C-index) plus accuracy and confusion matrix.
        qwk = cohen_kappa_score(y_true, y_pred, weights="quadratic")
        cindex = self.calculate_cindex(all_logits, y_true)
        accuracy = accuracy_score(y_true, y_pred)
        conf_matrix = confusion_matrix(y_true, y_pred)

        # One-vs-rest binarization over the four classes for per-class AUC.
        y_true_bin = label_binarize(y_true, classes=range(4))
        y_pred_bin = label_binarize(y_pred, classes=range(4))

        auc_scores = {}
        for i in range(4):
            try:
                auc_scores[f'auc_class_{i}'] = roc_auc_score(y_true_bin[:, i], y_pred_bin[:, i])
            except ValueError:
                # roc_auc_score raises when only one class is present; fall back to 0.
                auc_scores[f'auc_class_{i}'] = 0.0

        auc_scores['auc_macro'] = sum(auc_scores.values()) / 4
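
        # Note: these AUCs are computed from the hard one-hot predictions
        # (y_pred_bin), i.e. from a single operating point, not from predicted
        # probabilities; passing softmaxed logits to roc_auc_score would give
        # the usual threshold-free ROC AUC instead.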

        # Per-class and macro-averaged precision/recall/F1.
        f1_per_class = f1_score(y_true, y_pred, average=None)
        precision_per_class = precision_score(y_true, y_pred, average=None)
        recall_per_class = recall_score(y_true, y_pred, average=None)

        f1_macro = f1_score(y_true, y_pred, average='macro')
        precision_macro = precision_score(y_true, y_pred, average='macro')
        recall_macro = recall_score(y_true, y_pred, average='macro')

        metrics = {
            'loss': np.mean(all_losses),
            'qwk': qwk,
            'cindex': cindex,
            'accuracy': accuracy,
            'conf_matrix': conf_matrix,
            'agg_metrics': cindex,
            **auc_scores,
            **{f'f1_class_{i}': score for i, score in enumerate(f1_per_class)},
            **{f'precision_class_{i}': score for i, score in enumerate(precision_per_class)},
            **{f'recall_class_{i}': score for i, score in enumerate(recall_per_class)},
            'f1_macro': f1_macro,
            'precision_macro': precision_macro,
            'recall_macro': recall_macro,
        }

        if is_main_process():
            logging.info("Evaluation Results:")
            logging.info(f"Loss: {metrics['loss']:.4f}")
            logging.info(f"QWK Score: {qwk:.4f}")
            logging.info(f"C-Index: {cindex:.4f}")
            logging.info(f"Accuracy: {accuracy:.4f}")
            logging.info(f"Macro F1: {f1_macro:.4f}")
            logging.info(f"Macro Precision: {precision_macro:.4f}")
            logging.info(f"Macro Recall: {recall_macro:.4f}")
            logging.info(f"Macro AUC: {auc_scores['auc_macro']:.4f}")

            logging.info("\nPer-class metrics:")
            for i in range(4):
                logging.info(f"\nClass {i}:")
                logging.info(f"F1: {f1_per_class[i]:.4f}")
                logging.info(f"Precision: {precision_per_class[i]:.4f}")
                logging.info(f"Recall: {recall_per_class[i]:.4f}")
                logging.info(f"AUC: {auc_scores[f'auc_class_{i}']:.4f}")

            logging.info("\nConfusion Matrix:")
            logging.info(conf_matrix)

            # Log to wandb from the main process only; wandb is assumed to be
            # initialized there whenever self.wandb_initialized is set.
            if self.wandb_initialized:
                wandb_log = {
                    'val/qwk': qwk,
                    'val/cindex': cindex,
                    'val/accuracy': accuracy,
                    'val/f1_macro': f1_macro,
                    'val/precision_macro': precision_macro,
                    'val/recall_macro': recall_macro,
                    'val/auc_macro': auc_scores['auc_macro'],
                    **{f'val/f1_class_{i}': score for i, score in enumerate(f1_per_class)},
                    **{f'val/precision_class_{i}': score for i, score in enumerate(precision_per_class)},
                    **{f'val/recall_class_{i}': score for i, score in enumerate(recall_per_class)},
                    **{f'val/auc_class_{i}': auc_scores[f'auc_class_{i}'] for i in range(4)},
                    'val/confusion_matrix': wandb.plot.confusion_matrix(
                        probs=None,
                        y_true=y_true,
                        preds=y_pred,
                        class_names=[str(i) for i in range(4)]
                    )
                }

                wandb.log(wandb_log)

        return metrics
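

if __name__ == "__main__":
    # Minimal smoke test (illustrative only, not part of the task API): checks
    # the expected-value conversion and the C-index on a toy batch. It assumes
    # the task can be constructed directly, which holds as long as
    # BaseTask.__init__ needs no arguments (as the no-arg super().__init__()
    # call above implies).
    dummy_logits = np.array([
        [0.7, 0.2, 0.1, 0.0],  # expected value 0.4
        [0.1, 0.6, 0.2, 0.1],  # expected value 1.3
        [0.0, 0.1, 0.2, 0.7],  # expected value 2.6
    ])
    dummy_labels = [0, 1, 3]
    task = ReportGenerationTask()
    # All three comparable pairs are ordered consistently, so this prints 1.0.
    print(task.calculate_cindex(dummy_logits, dummy_labels))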