# HeadCT-FM: src/agatston/inference_binary.py
import os
import json
import numpy as np
from tqdm import tqdm
import torch
import torch.nn.functional as F
from sklearn.metrics import (
accuracy_score, confusion_matrix, cohen_kappa_score,
roc_auc_score, f1_score, precision_score, recall_score
)
from models.medqwen import MedQwen
def pad_or_clip_images(images, target_size=(32, 3, 224, 224)):
    """Zero-pad or clip a volume along the first (slice) dimension to target_size[0] slices."""
    current_size = images.size()
    if current_size[0] < target_size[0]:
        # F.pad consumes (left, right) pairs from the last dimension backwards,
        # so the final (0, pad_size) pair pads the first (slice) dimension
        pad_size = target_size[0] - current_size[0]
        padded_images = F.pad(images, (0, 0, 0, 0, 0, 0, 0, pad_size))
    elif current_size[0] > target_size[0]:
        padded_images = images[:target_size[0]]
    else:
        padded_images = images
    return padded_images
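# Illustrative sanity check (hypothetical shapes, not part of the original
# script): a 40-slice volume is clipped and a 20-slice volume is zero-padded,
# both ending at 32 slices.
# >>> pad_or_clip_images(torch.zeros(40, 3, 224, 224)).shape
# torch.Size([32, 3, 224, 224])
# >>> pad_or_clip_images(torch.zeros(20, 3, 224, 224)).shape
# torch.Size([32, 3, 224, 224])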
def convert_to_binary(value):
"""Convert four-class values to binary (1 if >= 1, 0 if < 1)"""
return 1 if value >= 1 else 0
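# Illustrative mapping (assuming labels 0-3 are the four Agatston severity
# classes, so any non-zero class counts as calcium present):
# >>> [convert_to_binary(v) for v in (0, 1, 2, 3)]
# [0, 1, 1, 1]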
def calculate_metrics_binary(predictions, probabilities, ground_truths):
"""
Calculate comprehensive metrics for binary classification including:
- Overall accuracy
- AUC
- F1 score
- Precision
- Recall
- Confusion matrix
"""
# Convert inputs to numpy arrays if they aren't already
predictions = np.array(predictions)
ground_truths = np.array(ground_truths)
# Calculate overall accuracy
overall_accuracy = accuracy_score(ground_truths, predictions)
# Calculate confusion matrix
conf_matrix = confusion_matrix(ground_truths, predictions)
    # Calculate AUC from the positive-class probabilities; roc_auc_score
    # raises ValueError when only one class is present, so fall back to 0.0
try:
auc = float(roc_auc_score(ground_truths, probabilities))
except ValueError:
auc = 0.0
# Calculate F1, Precision, and Recall
f1 = f1_score(ground_truths, predictions)
precision = precision_score(ground_truths, predictions)
recall = recall_score(ground_truths, predictions)
# Calculate QWK
qwk = cohen_kappa_score(ground_truths, predictions)
# Compile all metrics
metrics = {
'overall_accuracy': float(overall_accuracy),
'auc': float(auc),
'f1_score': float(f1),
'precision': float(precision),
'recall': float(recall),
'qwk': float(qwk),
'confusion_matrix': conf_matrix.tolist(),
'positives_count': int(np.sum(ground_truths == 1)),
'negatives_count': int(np.sum(ground_truths == 0))
}
return metrics
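# Minimal usage sketch with made-up values: three of the four predictions
# match the ground truth, so overall accuracy is 0.75.
# >>> m = calculate_metrics_binary([1, 0, 1, 0], [0.9, 0.2, 0.7, 0.4], [1, 0, 0, 0])
# >>> round(m['overall_accuracy'], 2)
# 0.75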
def headct_inference(input_jsonl_file, save_json_file, checkpoint_file, img_root_dir, model_id):
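    # Fix RNG seeds and disable cudnn autotuning so repeated evaluations of
    # the same checkpoint are reproducible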
torch.manual_seed(42)
torch.cuda.manual_seed_all(42)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
print('Load Model')
model = MedQwen(model_id)
    print('Load Checkpoint')
    state_dict = torch.load(checkpoint_file, map_location='cpu')['model']
missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
print('missing_keys', missing_keys)
print('unexpected_keys', unexpected_keys)
model = model.to("cuda")
model.eval()
save_data_dict = {}
# Lists to store predictions and ground truths for metric calculation
all_predictions_original = [] # Store original 4-class predictions
all_ground_truths_original = [] # Store original 4-class ground truths
all_probabilities_original = [] # Store original 4-class probabilities
# Lists for binary classification
all_predictions_binary = [] # Store binary predictions
all_ground_truths_binary = [] # Store binary ground truths
all_probabilities_binary = [] # Store probability of positive class
with torch.no_grad():
with open(input_jsonl_file, 'r') as file:
for line in tqdm(file):
data = json.loads(line)
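                # Each JSONL record is expected to carry: patient_id,
                # study_id, time_difference_days, calcium_score,
                # calcium_score_label (0-3), and image_path_list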
patient_id = data['patient_id']
study_id = data['study_id']
time_difference_days = data['time_difference_days']
calcium_score = data['calcium_score']
if patient_id not in save_data_dict:
save_data_dict[patient_id] = {}
if study_id in save_data_dict[patient_id]:
# If study already exists, get its predictions for metrics calculation
study_data = save_data_dict[patient_id][study_id]
# Original 4-class data
all_predictions_original.append(study_data['prediction_original'])
all_ground_truths_original.append(study_data['ground_truth_original'])
all_probabilities_original.append(study_data['probabilities_original'])
# Binary data
all_predictions_binary.append(study_data['prediction_binary'])
all_ground_truths_binary.append(study_data['ground_truth_binary'])
all_probabilities_binary.append(study_data['probability_positive'])
continue
original_label = data['calcium_score_label']
binary_label = convert_to_binary(original_label) # Convert to binary
image_path = img_root_dir + data['image_path_list'][0]
try:
pth_data = torch.load(image_path, weights_only=True)
pth_data = pad_or_clip_images(pth_data)
                    input_samples = {
                        # pth_data is already a tensor, so avoid wrapping it in
                        # torch.tensor(), which copies and raises a UserWarning
                        'input_images': pth_data.unsqueeze(0).to("cuda"),
                        'modal': 'head CT',
                        'labels': torch.tensor([original_label], dtype=torch.long).to("cuda"),
                        'task_type': 'agatston'
                    }
except Exception as e:
print(f"Error processing {image_path}: {e}")
continue
output = model(input_samples)
original_probabilities = F.softmax(output['logits'], dim=1)
original_prediction = torch.argmax(original_probabilities, dim=1).item()
# Convert model prediction to binary
binary_prediction = convert_to_binary(original_prediction)
                # Probability of the positive class: sum over classes 1-3,
                # which equals 1 - P(class 0)
                positive_probability = float(original_probabilities[0, 1:].sum().item())
# Store original predictions
all_predictions_original.append(original_prediction)
all_ground_truths_original.append(original_label)
all_probabilities_original.append(original_probabilities[0].tolist())
# Store binary predictions
all_predictions_binary.append(binary_prediction)
all_ground_truths_binary.append(binary_label)
all_probabilities_binary.append(positive_probability)
save_data_dict[patient_id][study_id] = {
'prediction_original': original_prediction,
'ground_truth_original': original_label,
'probabilities_original': original_probabilities[0].tolist(),
'prediction_binary': binary_prediction,
'ground_truth_binary': binary_label,
'probability_positive': positive_probability,
'correct_binary': binary_prediction == binary_label,
'calcium_score': calcium_score,
'time_difference_days': time_difference_days
}
# Save detailed results
with open(save_json_file, 'w') as f:
json.dump(save_data_dict, f, indent=2)
save_metric_data_dict = {}
# Calculate binary metrics
if all_predictions_binary:
binary_metrics = calculate_metrics_binary(
all_predictions_binary,
all_probabilities_binary,
all_ground_truths_binary
)
save_metric_data_dict['binary_metrics'] = {
**binary_metrics,
'total_samples': len(all_predictions_binary)
}
# Print binary metrics
print("\nBinary Classification Results (≥1 vs <1):")
print(f"Total samples: {len(all_predictions_binary)}")
print(f"Positive samples (≥1): {binary_metrics['positives_count']}")
print(f"Negative samples (<1): {binary_metrics['negatives_count']}")
print(f"Accuracy: {binary_metrics['overall_accuracy']:.4f}")
print(f"AUC: {binary_metrics['auc']:.4f}")
print(f"F1 Score: {binary_metrics['f1_score']:.4f}")
print(f"Precision: {binary_metrics['precision']:.4f}")
print(f"Recall: {binary_metrics['recall']:.4f}")
print(f"QWK: {binary_metrics['qwk']:.4f}")
print("\nConfusion Matrix (Binary):")
print(np.array(binary_metrics['confusion_matrix']))
    # Save metrics alongside the detailed results
    with open(save_json_file.replace(".json", "_metric.json"), 'w') as f:
json.dump(save_metric_data_dict, f, indent=2)
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser(description='HeadCT Agatston binary-classification inference')
parser.add_argument('--input_json_file', type=str, default='/home/xiz569/rajpurkarlab/home/xiz569/code/ongoing/2024_HeadCT/agatston/src_agatston/data/5_fold/test.jsonl')
parser.add_argument('--save_json_file', type=str, default='./output/headct_swin/20241221070/result/epoch_0.json')
parser.add_argument('--checkpoint_file', type=str, default='./output/headct_swin/20241221070/checkpoint_0.pth')
parser.add_argument('--checkpoint_dir', type=str, default='./output/headct_swin/20241221070')
parser.add_argument('--img_root_dir', type=str, default='/home/xiz569/rajpurkarlab/home/xiz569/code/ongoing/2024_GMAI/data/headct/dataset/images_preprocessed')
parser.add_argument('--model_id', type=str, default="swin")
args = parser.parse_args()
    for checkpoint_folder in os.listdir(args.checkpoint_dir):
        run_dir = os.path.join(args.checkpoint_dir, checkpoint_folder)
        # os.listdir may also return plain files; only descend into run folders
        if not os.path.isdir(run_dir):
            continue
        args.checkpoint_file = os.path.join(run_dir, 'checkpoint_best.pth')
        args.save_json_file = os.path.join(run_dir, "result", "best_epoch_binary.json")
        # Skip runs whose results were already written
        if os.path.exists(args.save_json_file):
            continue
# Create result directory if it doesn't exist
os.makedirs(os.path.dirname(args.save_json_file), exist_ok=True)
headct_inference(args.input_json_file, args.save_json_file, args.checkpoint_file, args.img_root_dir, args.model_id)
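# Example invocation (paths are placeholders, not the shipped defaults):
#   python inference_binary.py \
#       --checkpoint_dir ./output/headct_swin/20241221070 \
#       --input_json_file /path/to/test.jsonl \
#       --img_root_dir /path/to/images_preprocessed \
#       --model_id swin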