import os
import json

import numpy as np
import torch
import torch.nn.functional as F
from tqdm import tqdm
from sklearn.metrics import (
    accuracy_score,
    confusion_matrix,
    cohen_kappa_score,
    roc_auc_score,
    f1_score,
    precision_score,
    recall_score,
)
from sklearn.preprocessing import label_binarize

from models.medqwen import MedQwen


def pad_or_clip_images(images, target_size=(32, 3, 224, 224)):
    """Pad (with zeros) or clip the slice dimension so every study has target_size[0] slices."""
    current_size = images.size()
    if current_size[0] < target_size[0]:
        # F.pad pads dimensions from last to first; the final (0, pad_size) pair pads dim 0
        pad_size = target_size[0] - current_size[0]
        padded_images = F.pad(images, (0, 0, 0, 0, 0, 0, 0, pad_size))
    elif current_size[0] > target_size[0]:
        padded_images = images[:target_size[0]]
    else:
        padded_images = images
    return padded_images


def prob_to_continuous(probs):
    """Convert a probability distribution to a continuous value using the expected class index."""
    return sum(i * p for i, p in enumerate(probs))


def calculate_cindex(predictions, ground_truths):
    """Calculate the concordance index (C-index) for ordinal predictions."""
    n = len(predictions)
    concordant = 0
    total_pairs = 0
    for i in range(n):
        for j in range(i + 1, n):
            # Only compare pairs whose ground truths differ
            if ground_truths[i] != ground_truths[j]:
                total_pairs += 1
                if (predictions[i] < predictions[j] and ground_truths[i] < ground_truths[j]) or \
                   (predictions[i] > predictions[j] and ground_truths[i] > ground_truths[j]):
                    concordant += 1
    return float(concordant / total_pairs) if total_pairs > 0 else 0.0


def calculate_metrics(predictions, all_probabilities, ground_truths):
    """
    Calculate comprehensive metrics for 4-class classification, including:
    - Overall accuracy
    - Per-class accuracy
    - QWK (Quadratic Weighted Kappa)
    - AUC scores (per-class and macro)
    - F1 scores (per-class and macro)
    - Precision scores (per-class and macro)
    - Recall scores (per-class and macro)
    """
    # Convert inputs to numpy arrays if they aren't already
    predictions = np.array(predictions)
    ground_truths = np.array(ground_truths)
    all_probabilities = np.array(all_probabilities)

    # Overall accuracy
    overall_accuracy = accuracy_score(ground_truths, predictions)

    # Confusion matrix; fix the label set so the matrix is always 4x4,
    # even if a class is missing from this evaluation split
    conf_matrix = confusion_matrix(ground_truths, predictions, labels=list(range(4)))

    # Quadratic Weighted Kappa
    qwk = cohen_kappa_score(ground_truths, predictions, weights="quadratic")

    # Per-class accuracy (i.e., per-class recall: correct predictions over true class count)
    per_class_accuracy = {}
    for class_idx in range(4):
        true_positives = conf_matrix[class_idx, class_idx]
        total_samples = conf_matrix[class_idx, :].sum()
        if total_samples > 0:
            class_accuracy = true_positives / total_samples
        else:
            class_accuracy = 0.0
        per_class_accuracy[f'class_{class_idx}_accuracy'] = float(class_accuracy)

    # One-vs-rest AUC scores; binarize the ground-truth labels first
    y_true_bin = label_binarize(ground_truths, classes=list(range(4)))
    auc_scores = {}
    for i in range(4):
        try:
            auc_scores[f'auc_class_{i}'] = float(roc_auc_score(y_true_bin[:, i], all_probabilities[:, i]))
        except ValueError:
            # roc_auc_score raises if a class is absent from the ground truth
            auc_scores[f'auc_class_{i}'] = 0.0

    # Macro-averaged AUC
    auc_scores['auc_macro'] = float(sum(auc_scores[f'auc_class_{i}'] for i in range(4)) / 4)

    # Per-class F1, precision, and recall; fix the label set so the arrays align with class indices 0-3
    f1_per_class = f1_score(ground_truths, predictions, average=None, labels=list(range(4)))
    precision_per_class = precision_score(ground_truths, predictions, average=None, labels=list(range(4)))
    recall_per_class = recall_score(ground_truths, predictions, average=None, labels=list(range(4)))

    # Macro averages
    f1_macro = f1_score(ground_truths, predictions, average='macro')
    precision_macro = precision_score(ground_truths, predictions, average='macro')
    recall_macro = recall_score(ground_truths, predictions, average='macro')

    # Compile all metrics
    metrics = {
        'overall_accuracy': float(overall_accuracy),
        'qwk': float(qwk),
        **per_class_accuracy,
        **auc_scores,
        **{f'f1_class_{i}': float(score) for i, score in enumerate(f1_per_class)},
        **{f'precision_class_{i}': float(score) for i, score in enumerate(precision_per_class)},
        **{f'recall_class_{i}': float(score) for i, score in enumerate(recall_per_class)},
        'f1_macro': float(f1_macro),
        'precision_macro': float(precision_macro),
        'recall_macro': float(recall_macro),
        'confusion_matrix': conf_matrix.tolist()
    }

    # Convert probabilities to continuous (expected-value) predictions
    continuous_preds = [prob_to_continuous(probs) for probs in all_probabilities.tolist()]

    # C-index and regression-style errors on the continuous predictions
    metrics['cindex'] = calculate_cindex(continuous_preds, ground_truths)
    mae = np.mean(np.abs(np.array(continuous_preds) - np.array(ground_truths)))
    mse = np.mean((np.array(continuous_preds) - np.array(ground_truths)) ** 2)
    metrics['continuous_mae'] = float(mae)
    metrics['continuous_mse'] = float(mse)

    return metrics


def headct_inference(input_jsonl_file, save_json_file, checkpoint_file, img_root_dir, model_id):
    # Fix random seeds for reproducible inference
    torch.manual_seed(42)
    torch.cuda.manual_seed_all(42)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    print('Load Model')
    model = MedQwen(model_id)
    state_dict = torch.load(checkpoint_file, map_location='cpu')['model']
    print('Load Checkpoint')
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    print('missing_keys', missing_keys)
    print('unexpected_keys', unexpected_keys)

    model = model.to("cuda")
    model.eval()

    save_data_dict = {}

    # Lists to store predictions and ground truths for metric calculation
    all_predictions = []
    all_ground_truths = []
    all_probabilities = []

    with torch.no_grad():
        with open(input_jsonl_file, 'r') as file:
            for line in tqdm(file):
                data = json.loads(line)
                patient_id = data['patient_id']
                study_id = data['study_id']
                time_difference_days = data['time_difference_days']
                calcium_score = data['calcium_score']

                if patient_id not in save_data_dict:
                    save_data_dict[patient_id] = {}

                if study_id in save_data_dict[patient_id]:
                    # Study already processed: reuse its stored predictions for the metrics
                    study_data = save_data_dict[patient_id][study_id]
                    all_predictions.append(study_data['prediction'])
                    all_ground_truths.append(study_data['ground_truth'])
                    all_probabilities.append(study_data['probabilities'])
                    continue

                label = data['calcium_score_label']
                image_path = img_root_dir + data['image_path_list'][0]

                try:
                    pth_data = torch.load(image_path, weights_only=True)
                    pth_data = pad_or_clip_images(pth_data)
                    input_samples = {
                        # pth_data is already a tensor, so add the batch dimension directly
                        # instead of re-wrapping it with torch.tensor()
                        'input_images': pth_data.unsqueeze(0).to("cuda"),
                        'modal': 'head CT',
                        'labels': torch.tensor([label], dtype=torch.long).to("cuda"),
                        'task_type': 'agatston'
                    }
                except Exception as e:
                    print(f"Error processing {image_path}: {e}")
                    continue

                output = model(input_samples)
                probabilities = F.softmax(output['logits'], dim=1)
                model_prediction = torch.argmax(probabilities, dim=1).item()
                continuous_pred = prob_to_continuous(probabilities[0].tolist())

                # Store predictions and ground truth
                all_predictions.append(model_prediction)
                all_ground_truths.append(label)
                all_probabilities.append(probabilities[0].tolist())

                save_data_dict[patient_id][study_id] = {
                    'prediction': model_prediction,
                    'ground_truth': label,
                    'probabilities': probabilities[0].tolist(),
                    'continuous_prediction': continuous_pred,
                    'calcium_score': calcium_score,
                    'correct': model_prediction == label,
                    'time_difference_days': time_difference_days
                }

    # Make sure the output directory exists before writing results
    out_dir = os.path.dirname(save_json_file)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)

    # Save detailed per-study results
    with open(save_json_file, 'w') as f:
        json.dump(save_data_dict, f, indent=2)

    save_metric_data_dict = {}

    # Calculate metrics after processing all samples
    if all_predictions:
        metrics = calculate_metrics(all_predictions, all_probabilities, all_ground_truths)
        save_metric_data_dict['metrics'] = {
            **metrics,
            'total_samples': len(all_predictions)
        }

        # Print comprehensive metrics
        print("\nEvaluation Results:")
        print(f"Total samples: {len(all_predictions)}")
        print("\nOverall Metrics:")
        print(f"Accuracy: {metrics['overall_accuracy']:.4f}")
        print(f"QWK Score: {metrics['qwk']:.4f}")
        print(f"C-Index: {metrics['cindex']:.4f}")
        print(f"Macro F1: {metrics['f1_macro']:.4f}")
        print(f"Macro Precision: {metrics['precision_macro']:.4f}")
        print(f"Macro Recall: {metrics['recall_macro']:.4f}")
        print(f"Macro AUC: {metrics['auc_macro']:.4f}")
        print(f"Continuous MAE: {metrics['continuous_mae']:.4f}")
        print(f"Continuous MSE: {metrics['continuous_mse']:.4f}")

        print("\nPer-class metrics:")
        for i in range(4):
            print(f"\nClass {i}:")
            print(f"Accuracy: {metrics[f'class_{i}_accuracy']:.4f}")
            print(f"F1: {metrics[f'f1_class_{i}']:.4f}")
            print(f"Precision: {metrics[f'precision_class_{i}']:.4f}")
            print(f"Recall: {metrics[f'recall_class_{i}']:.4f}")
            print(f"AUC: {metrics[f'auc_class_{i}']:.4f}")

        print("\nConfusion Matrix:")
        print(np.array(metrics['confusion_matrix']))

    # Save metrics alongside the detailed results
    with open(save_json_file.replace(".json", "_metric.json"), 'w') as f:
        json.dump(save_metric_data_dict, f, indent=2)


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description='Head CT Agatston calcium score inference')
    parser.add_argument('--input_json_file', type=str,
                        default='/home/xiz569/rajpurkarlab/home/xiz569/code/ongoing/2024_HeadCT/agatston/src_agatston/data/5_fold/test.jsonl')
    parser.add_argument('--save_json_file', type=str,
                        default='./output/headct_swin/20241221070/result/epoch_0.json')
    parser.add_argument('--checkpoint_file', type=str,
                        default='./output/headct_swin/20241221070/checkpoint_0.pth')
    parser.add_argument('--checkpoint_dir', type=str,
                        default='./output/headct_swin/20241221070')
    parser.add_argument('--img_root_dir', type=str,
                        default='/home/xiz569/rajpurkarlab/home/xiz569/code/ongoing/2024_GMAI/data/headct/dataset/images_preprocessed')
    parser.add_argument('--model_id', type=str, default="swin")
    args = parser.parse_args()

    # Evaluate every checkpoint folder under checkpoint_dir, skipping folders
    # whose best-epoch results already exist
    for checkpoint_folder in os.listdir(args.checkpoint_dir):
        args.checkpoint_file = os.path.join(args.checkpoint_dir, checkpoint_folder, 'checkpoint_best.pth')
        args.save_json_file = os.path.join(args.checkpoint_dir, checkpoint_folder, "result", "best_epoch.json")
        if os.path.exists(args.save_json_file):
            continue
        headct_inference(args.input_json_file, args.save_json_file, args.checkpoint_file,
                         args.img_root_dir, args.model_id)
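
# Example invocation (a sketch: the script filename "headct_inference.py" and the
# /path/to/... placeholders are illustrative, not taken from the original repository;
# only the flag names and the checkpoint_dir default come from the argparse setup above):
#
#   python headct_inference.py \
#       --input_json_file /path/to/5_fold/test.jsonl \
#       --checkpoint_dir ./output/headct_swin/20241221070 \
#       --img_root_dir /path/to/images_preprocessed \
#       --model_id swin
#
# For each checkpoint folder this writes <folder>/result/best_epoch.json with per-study
# predictions and <folder>/result/best_epoch_metric.json with the aggregate metrics.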