{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "### Generating Train-Val Split from Dataset" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import os\n", "import shutil\n", "import random\n", "import multiprocessing\n", "from copy import deepcopy\n", "\n", "def recursive_copy_dicom(src_folder, dest_folder, file_counter):\n", " \"\"\"\n", " Recursively finds and copies DICOM files from the source to the destination folder, renaming them sequentially.\n", " \n", " :param src_folder: The source folder containing DICOM files (including subdirectories).\n", " :param dest_folder: The destination folder where the files will be copied and renamed.\n", " :param file_counter: The sequential counter for renaming files.\n", " :return: List of renamed files for further splitting.\n", " \"\"\"\n", " renamed_files = []\n", "\n", " for root, dirs, files in os.walk(src_folder):\n", " for dicom_file in files:\n", " if dicom_file.lower().endswith('.dcm'):\n", " # Get full path of the source file\n", " src_file_path = os.path.join(root, dicom_file)\n", " \n", " # Create the new file path in the destination folder\n", " dest_file_path = os.path.join(dest_folder, f\"{file_counter}.dcm\")\n", " \n", " # Copy and rename the file\n", " shutil.copy(src_file_path, dest_file_path)\n", " \n", " # Append the renamed file to the list\n", " renamed_files.append(f\"{file_counter}.dcm\")\n", " \n", " # Increment the file counter for the next file\n", " file_counter += 1\n", "\n", " return renamed_files\n", "\n", "def split_and_transfer_files(file_list, dest_folder, split_factor):\n", " \"\"\"\n", " Splits the list of renamed files into train and val sets and moves them into the appropriate folders.\n", " \n", " :param file_list: List of renamed DICOM files.\n", " :param dest_folder: Destination folder where train and val subfolders will be created.\n", " :param split_factor: The ratio of files to go into the train subfolder.\n", " \"\"\"\n", " # Ensure the destination folder and subfolders exist\n", " train_folder = os.path.join(dest_folder, 'train')\n", " val_folder = os.path.join(dest_folder, 'val')\n", " \n", " if not os.path.exists(train_folder):\n", " os.makedirs(train_folder)\n", " \n", " if not os.path.exists(val_folder):\n", " os.makedirs(val_folder)\n", "\n", " # Shuffle the files for randomness\n", " random.shuffle(file_list)\n", "\n", " # Calculate the number of files for the train and validation sets\n", " split_index = int(len(file_list) * split_factor)\n", " \n", " # Split the files into train and val sets\n", " train_files = file_list[:split_index]\n", " val_files = file_list[split_index:]\n", "\n", " # Move the files to the respective folders\n", " for file in train_files:\n", " src_file = os.path.join(dest_folder, file)\n", " dest_file = os.path.join(train_folder, file)\n", " shutil.move(src_file, dest_file)\n", " print(f\"Moved {file} to train folder\")\n", " \n", " for file in val_files:\n", " src_file = os.path.join(dest_folder, file)\n", " dest_file = os.path.join(val_folder, file)\n", " shutil.move(src_file, dest_file)\n", " print(f\"Moved {file} to val folder\")\n", "\n", "def process_dicom_files(src_folder, dest_folder, split_factor):\n", " \"\"\"\n", " Recursively finds, renames, copies DICOM files, and splits them into train and val sets.\n", " \n", " :param src_folder: The source folder containing DICOM files (including subdirectories).\n", " :param dest_folder: The destination folder where the renamed files and the train/val split will be created.\n", " :param split_factor: The ratio of files to go into the train subfolder.\n", " \"\"\"\n", " # Ensure the destination folder exists\n", " if not os.path.exists(dest_folder):\n", " os.makedirs(dest_folder)\n", "\n", " # Initialize file counter\n", " file_counter = 1\n", "\n", " # Recursively copy DICOM files and rename them\n", " renamed_files = recursive_copy_dicom(src_folder, dest_folder, file_counter)\n", "\n", " # Step 2: Split the renamed files into train and val sets\n", " split_and_transfer_files(renamed_files, dest_folder, split_factor)\n", "\n", "# Example usage:\n", "src_folder = r\"F:\\TCIA\" # Replace with your source folder path\n", "dest_folder = r\"F:\\TCIA_Split\" # Destination folder for the renamed files and train/val split\n", "split_factor = 0.95 # 90% of files will go to 'train', 10% will go to 'val'\n", "\n", "# Perform the entire process\n", "process_dicom_files(src_folder, dest_folder, split_factor)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Faster Train-Val Split Generation" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import os\n", "import shutil\n", "import random\n", "import multiprocessing\n", "from concurrent.futures import ThreadPoolExecutor\n", "from copy import deepcopy\n", "\n", "def recursive_copy_dicom(src_folder, dest_folder, file_counter):\n", " \"\"\"\n", " Recursively finds and copies DICOM files from the source to the destination folder, renaming them sequentially.\n", " \"\"\"\n", " renamed_files = []\n", "\n", " for root, dirs, files in os.walk(src_folder):\n", " for dicom_file in files:\n", " if dicom_file.lower().endswith('.dcm'):\n", " # Get full path of the source file\n", " src_file_path = os.path.join(root, dicom_file)\n", " \n", " # Create the new file path in the destination folder\n", " dest_file_path = os.path.join(dest_folder, f\"{file_counter}.dcm\")\n", " \n", " # Copy and rename the file\n", " shutil.copy(src_file_path, dest_file_path)\n", " \n", " # Append the renamed file to the list\n", " renamed_files.append(f\"{file_counter}.dcm\")\n", " \n", " # Increment the file counter for the next file\n", " file_counter += 1\n", "\n", " return renamed_files\n", "\n", "def split_and_transfer_files(file_list, dest_folder, split_factor):\n", " \"\"\"\n", " Splits the list of renamed files into train and val sets and moves them into the appropriate folders.\n", " \"\"\"\n", " train_folder = os.path.join(dest_folder, 'train')\n", " val_folder = os.path.join(dest_folder, 'val')\n", " \n", " if not os.path.exists(train_folder):\n", " os.makedirs(train_folder)\n", " \n", " if not os.path.exists(val_folder):\n", " os.makedirs(val_folder)\n", "\n", " # Shuffle the files for randomness\n", " random.shuffle(file_list)\n", "\n", " # Calculate the number of files for the train and validation sets\n", " split_index = int(len(file_list) * split_factor)\n", " \n", " # Split the files into train and val sets\n", " train_files = file_list[:split_index]\n", " val_files = file_list[split_index:]\n", "\n", " # Move files in parallel using multiprocessing\n", " with multiprocessing.Pool(processes=multiprocessing.cpu_count()) as pool:\n", " pool.starmap(move_file, [(file, dest_folder, 'train') for file in train_files])\n", " pool.starmap(move_file, [(file, dest_folder, 'val') for file in val_files])\n", "\n", "def move_file(file, dest_folder, folder_name):\n", " \"\"\"Move file from the source to destination folder.\"\"\"\n", " src_file = os.path.join(dest_folder, file)\n", " dest_file = os.path.join(dest_folder, folder_name, file)\n", " shutil.move(src_file, dest_file)\n", " print(f\"Moved {file} to {folder_name} folder\")\n", "\n", "def process_dicom_files(src_folder, dest_folder, split_factor):\n", " \"\"\"\n", " Recursively finds, renames, copies DICOM files, and splits them into train and val sets.\n", " \"\"\"\n", " # Ensure the destination folder exists\n", " if not os.path.exists(dest_folder):\n", " os.makedirs(dest_folder)\n", "\n", " # Initialize file counter\n", " file_counter = 1\n", "\n", " # Recursively copy DICOM files and rename them\n", " renamed_files = recursive_copy_dicom(src_folder, dest_folder, file_counter)\n", "\n", " # Step 2: Split the renamed files into train and val sets\n", " split_and_transfer_files(renamed_files, dest_folder, split_factor)\n", "\n", "# Example usage:\n", "src_folder = r\"F:\\TCIA\" # Replace with your source folder path\n", "dest_folder = r\"D:\\TCIA_Split\" # Destination folder for the renamed files and train/val split\n", "split_factor = 0.95 # 90% of files will go to 'train', 10% will go to 'val'\n", "\n", "# Perform the entire process\n", "process_dicom_files(src_folder, dest_folder, split_factor)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Filtering through only 512 x 512 scans" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import os\n", "import pydicom\n", "\n", "def filter_dicom_images(input_dirs, min_size=512):\n", " for dir_path in input_dirs:\n", " total_images = 0\n", " filtered_images = 0\n", " \n", " # Use a list to store files to delete to avoid modifying directory during iteration\n", " files_to_delete = []\n", " \n", " for filename in os.listdir(dir_path):\n", " if filename.endswith('.dcm'):\n", " full_path = os.path.join(dir_path, filename)\n", " \n", " try:\n", " # Read DICOM file\n", " dcm = pydicom.dcmread(full_path)\n", " \n", " # Check image dimensions\n", " if dcm.pixel_array.shape[0] < min_size or dcm.pixel_array.shape[1] < min_size or dcm.pixel_array.shape[0] > min_size or dcm.pixel_array.shape[1] > min_size:\n", " files_to_delete.append(full_path)\n", " filtered_images += 1\n", " \n", " total_images += 1\n", " \n", " except Exception as e:\n", " print(f\"Error processing {filename}: {e}\")\n", " \n", " # Delete files\n", " for file_path in files_to_delete:\n", " os.remove(file_path)\n", " \n", " print(f\"Directory: {dir_path}\")\n", " print(f\"Total images: {total_images}\")\n", " print(f\"Images deleted: {filtered_images}\\n\")\n", "\n", "# Usage\n", "input_dirs = [\"./TCIA_Split/train\", \"./TCIA_Split/val\"]\n", "filter_dicom_images(input_dirs)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Basic U-Net" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import torch\n", "import torch.nn as nn\n", "import torch.nn.functional as F\n", "import pydicom\n", "import numpy as np\n", "from torch.utils.data import Dataset, DataLoader\n", "import os\n", "from torch.utils.checkpoint import checkpoint\n", "from tqdm import tqdm # Import tqdm for progress bar\n", "\n", "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n", "\n", "class MedicalImageDataset(Dataset):\n", " def __init__(self, dicom_dir):\n", " self.dicom_files = [os.path.join(dicom_dir, f) for f in os.listdir(dicom_dir) if f.endswith('.dcm')]\n", " \n", " def __len__(self):\n", " return len(self.dicom_files)\n", " \n", " def __getitem__(self, idx):\n", " # Read DICOM file and normalize\n", " dcm = pydicom.dcmread(self.dicom_files[idx])\n", " image = dcm.pixel_array.astype(float)\n", " image = (image - image.min()) / (image.max() - image.min())\n", " \n", " # Convert to tensor\n", " image_tensor = torch.from_numpy(image).float().unsqueeze(0)\n", " return image_tensor, image_tensor\n", "\n", "class UNetBlock(nn.Module):\n", " def __init__(self, in_channels, out_channels):\n", " super().__init__()\n", " self.conv1 = nn.Conv2d(in_channels, out_channels, 3, padding=1)\n", " self.bn1 = nn.BatchNorm2d(out_channels)\n", " self.conv2 = nn.Conv2d(out_channels, out_channels, 3, padding=1)\n", " self.bn2 = nn.BatchNorm2d(out_channels)\n", " \n", " def forward(self, x):\n", " x = F.relu(self.bn1(self.conv1(x)))\n", " x = F.relu(self.bn2(self.conv2(x)))\n", " return x\n", "\n", "class UNet(nn.Module):\n", " def __init__(self, in_channels=1, out_channels=1):\n", " super().__init__()\n", " # Encoder\n", " self.enc1 = UNetBlock(in_channels, 64)\n", " self.enc2 = UNetBlock(64, 128)\n", " self.enc3 = UNetBlock(128, 256)\n", " \n", " # Decoder\n", " self.dec3 = UNetBlock(256 + 128, 128) # Adjust for concatenation with skip connection\n", " self.dec2 = UNetBlock(128 + 64, 64) # Adjust for concatenation with skip connection\n", " self.dec1 = UNetBlock(64, out_channels)\n", " \n", " # Pooling and upsampling\n", " self.pool = nn.MaxPool2d(2, 2)\n", " self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)\n", " \n", " def forward(self, x):\n", " # Encoder path\n", " e1 = checkpoint(self.enc1, x)\n", " e2 = checkpoint(self.enc2, self.pool(e1))\n", " e3 = checkpoint(self.enc3, self.pool(e2))\n", " \n", " # Decoder path with skip connections\n", " d3 = self.upsample(e3)\n", " d3 = torch.cat([d3, e2], dim=1) # Concatenate along channels\n", " d3 = checkpoint(self.dec3, d3)\n", " \n", " d2 = self.upsample(d3)\n", " d2 = torch.cat([d2, e1], dim=1) # Concatenate along channels\n", " d2 = checkpoint(self.dec2, d2)\n", " \n", " d1 = self.dec1(d2) # No checkpointing for final output layer\n", " \n", " return d1\n", "\n", "def calculate_loss(model, dataloader, criterion):\n", " model.eval()\n", " total_loss = 0\n", " with torch.no_grad():\n", " for images, targets in dataloader:\n", " images, targets = images.to(device), targets.to(device)\n", " outputs = model(images)\n", " loss = criterion(outputs, targets)\n", " total_loss += loss.item()\n", " return total_loss / len(dataloader)\n", "\n", "def calculate_psnr(output, target, max_pixel=1.0):\n", " # Ensure the values are in the correct range\n", " mse = F.mse_loss(output, target)\n", " psnr = 20 * torch.log10(max_pixel / torch.sqrt(mse))\n", " return psnr.item()\n", "\n", "def calculate_loss_and_psnr(model, dataloader, criterion):\n", " model.eval()\n", " total_loss = 0\n", " total_psnr = 0\n", " num_batches = len(dataloader)\n", " \n", " with torch.no_grad():\n", " for images, targets in dataloader:\n", " images, targets = images.to(device), targets.to(device)\n", " outputs = model(images)\n", " \n", " # Calculate MSE loss\n", " loss = criterion(outputs, targets)\n", " total_loss += loss.item()\n", " \n", " # Calculate PSNR\n", " psnr = calculate_psnr(outputs, targets)\n", " total_psnr += psnr\n", " \n", " avg_loss = total_loss / num_batches\n", " avg_psnr = total_psnr / num_batches\n", " \n", " return avg_loss, avg_psnr\n", "\n", "best_val_loss = float('inf')\n", "best_model_path = 'best_model.pth'\n", "\n", "def train_unet(dicom_dir, val_dicom_dir, epochs=50, batch_size=4, grad_accumulation_steps=2):\n", " # Dataset and DataLoader\n", " dataset = MedicalImageDataset(dicom_dir)\n", " train_dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)\n", " val_dataset = MedicalImageDataset(val_dicom_dir)\n", " val_dataloader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)\n", " \n", " # Model, Loss, Optimizer\n", " model = UNet().to(device)\n", " criterion = nn.MSELoss()\n", " optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)\n", " \n", " # Training loop with tqdm\n", " for epoch in range(epochs):\n", " model.train()\n", " total_loss = 0\n", " optimizer.zero_grad()\n", " \n", " with tqdm(train_dataloader, unit=\"batch\", desc=f\"Epoch {epoch+1}/{epochs}\") as tepoch:\n", " for i, (images, targets) in enumerate(tepoch):\n", " images, targets = images.to(device), targets.to(device)\n", " \n", " # Forward pass\n", " outputs = model(images)\n", " loss = criterion(outputs, targets)\n", " loss.backward()\n", " \n", " # Gradient accumulation\n", " if (i + 1) % grad_accumulation_steps == 0 or (i + 1) == len(tepoch):\n", " optimizer.step()\n", " optimizer.zero_grad()\n", " \n", " total_loss += loss.item()\n", " \n", " # Update the tqdm progress bar with the current loss\n", " tepoch.set_postfix(loss=total_loss / ((i + 1) * batch_size))\n", " \n", " avg_train_loss = total_loss / len(train_dataloader)\n", " avg_val_loss, avg_val_psnr = calculate_loss_and_psnr(model, val_dataloader, criterion)\n", " \n", " print(f\"Epoch [{epoch+1}/{epochs}] - Train Loss: {avg_train_loss:.4f}, Validation Loss: {avg_val_loss:.4f}, Validation PSNR: {avg_val_psnr:.4f}\")\n", "\n", " if avg_val_loss < best_val_loss:\n", " best_val_loss = avg_val_loss\n", " torch.save(model.state_dict(), best_model_path)\n", " print(f\"Model saved with improved validation loss: {avg_val_loss:.4f}\")\n", " \n", " return model\n", "\n", "# Example usage with train and validation directories\n", "model = train_unet(r\"D:\\TCIA_Split\\train\", r\"D:\\TCIA_Split\\val\", epochs=50, batch_size=4, grad_accumulation_steps=8)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### U-Net Inference" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import torch\n", "import torch.nn as nn\n", "import pydicom\n", "import numpy as np\n", "import matplotlib.pyplot as plt\n", "import os\n", "\n", "# Import the UNet and related classes from the previous script\n", "# Replace with the actual import method\n", "\n", "def load_dicom_image(dicom_path):\n", " \"\"\"\n", " Load and normalize a DICOM image\n", " \n", " Args:\n", " dicom_path (str): Path to the DICOM file\n", " \n", " Returns:\n", " torch.Tensor: Normalized image tensor\n", " \"\"\"\n", " # Read DICOM file\n", " dcm = pydicom.dcmread(dicom_path)\n", " image = dcm.pixel_array.astype(float)\n", " \n", " # Normalize image\n", " image = (image - image.min()) / (image.max() - image.min())\n", " \n", " # Convert to tensor\n", " image_tensor = torch.from_numpy(image).float().unsqueeze(0).unsqueeze(0)\n", " return image_tensor\n", "\n", "def calculate_psnr(output, target, max_pixel=1.0):\n", " \"\"\"\n", " Calculate Peak Signal-to-Noise Ratio (PSNR)\n", " \n", " Args:\n", " output (torch.Tensor): Reconstructed image\n", " target (torch.Tensor): Original image\n", " max_pixel (float): Maximum pixel value\n", " \n", " Returns:\n", " float: PSNR value\n", " \"\"\"\n", " # Ensure the values are in the correct range\n", " mse = torch.nn.functional.mse_loss(output, target)\n", " psnr = 20 * torch.log10(max_pixel / torch.sqrt(mse))\n", " return psnr.item()\n", "\n", "def visualize_reconstruction(original_image, reconstructed_image, psnr):\n", " \"\"\"\n", " Visualize original and reconstructed images\n", " \n", " Args:\n", " original_image (torch.Tensor): Original image tensor\n", " reconstructed_image (torch.Tensor): Reconstructed image tensor\n", " psnr (float): Peak Signal-to-Noise Ratio\n", " \"\"\"\n", " # Convert tensors to numpy for visualization\n", " original = original_image.squeeze().cpu().numpy()\n", " reconstructed = reconstructed_image.squeeze().cpu().numpy()\n", " \n", " # Create subplot\n", " fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))\n", " \n", " # Plot original image\n", " im1 = ax1.imshow(original, cmap='gray')\n", " ax1.set_title('Original Image')\n", " plt.colorbar(im1, ax=ax1)\n", " \n", " # Plot reconstructed image\n", " im2 = ax2.imshow(reconstructed, cmap='gray')\n", " ax2.set_title(f'Reconstructed Image\\nPSNR: {psnr:.2f} dB')\n", " plt.colorbar(im2, ax=ax2)\n", " \n", " plt.tight_layout()\n", " plt.show()\n", "\n", "def inference_single_image(model_path, test_dicom_path):\n", " \"\"\"\n", " Perform inference on a single DICOM image\n", " \n", " Args:\n", " model_path (str): Path to the saved model weights\n", " test_dicom_path (str): Path to the test DICOM file\n", " \"\"\"\n", " # Set device\n", " device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n", " \n", " # Initialize model\n", " model = UNet().to(device)\n", " \n", " # Load saved model weights\n", " model.load_state_dict(torch.load(model_path))\n", " model.eval()\n", " \n", " # Load and preprocess test image\n", " with torch.no_grad():\n", " test_image = load_dicom_image(test_dicom_path).to(device)\n", " \n", " # Perform reconstruction\n", " reconstructed_image = model(test_image)\n", " \n", " # Calculate PSNR\n", " psnr = calculate_psnr(reconstructed_image, test_image)\n", "\n", " print(f\"PSNR: {psnr:.2f} dB\")\n", " \n", " # Visualize results\n", " visualize_reconstruction(test_image, reconstructed_image, psnr)\n", "\n", "# Example usage\n", "if __name__ == \"__main__\":\n", " # Paths to model and test image\n", " MODEL_PATH = r\"D:\\VSCODE\\PreSense\\best_model.pth\" # Path to your saved model\n", " TEST_DICOM_PATH = r\"D:\\VSCODE\\PreSense\\test.dcm\" # Replace with actual path to test DICOM\n", " \n", " # Run inference\n", " inference_single_image(MODEL_PATH, TEST_DICOM_PATH)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### U-Net Inference for Complete Scan" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import torch\n", "import torch.nn as nn\n", "import pydicom\n", "import numpy as np\n", "import os\n", "from tqdm import tqdm\n", "\n", "# Import the UNet and related classes from the previous script\n", "\n", "def load_dicom_image(dicom_path):\n", " \"\"\"\n", " Load and normalize a DICOM image\n", " \n", " Args:\n", " dicom_path (str): Path to the DICOM file\n", " \n", " Returns:\n", " torch.Tensor: Normalized image tensor\n", " \"\"\"\n", " # Read DICOM file\n", " dcm = pydicom.dcmread(dicom_path)\n", " image = dcm.pixel_array.astype(float)\n", " \n", " # Normalize image\n", " image = (image - image.min()) / (image.max() - image.min())\n", " \n", " # Convert to tensor\n", " image_tensor = torch.from_numpy(image).float().unsqueeze(0).unsqueeze(0)\n", " return image_tensor, dcm\n", "\n", "def save_reconstructed_dicom(image_tensor, original_dcm, output_path):\n", " \"\"\"\n", " Save reconstructed image as a DICOM file\n", " \n", " Args:\n", " image_tensor (torch.Tensor): Reconstructed image tensor\n", " original_dcm (pydicom.Dataset): Original DICOM dataset\n", " output_path (str): Path to save the reconstructed image\n", " \"\"\"\n", " # Convert tensor to numpy and scale back to original pixel range\n", " reconstructed_image = image_tensor.squeeze().cpu().numpy()\n", " \n", " # Scale to original pixel array range\n", " min_val = original_dcm.pixel_array.min()\n", " max_val = original_dcm.pixel_array.max()\n", " reconstructed_image = reconstructed_image * (max_val - min_val) + min_val\n", " \n", " # Create a copy of the original DICOM dataset\n", " ds = pydicom.Dataset()\n", " ds.update(original_dcm)\n", " \n", " # Set the new pixel data\n", " ds.PixelData = reconstructed_image.astype(original_dcm.pixel_array.dtype).tobytes()\n", " \n", " # Set transfer syntax to explicit VR little endian (common default)\n", " ds.file_meta = pydicom.Dataset()\n", " ds.file_meta.TransferSyntaxUID = pydicom.uid.ExplicitVRLittleEndian\n", " \n", " # Write the DICOM file\n", " pydicom.dcmwrite(output_path, ds)\n", "\n", "def calculate_psnr(output, target, max_pixel=1.0):\n", " \"\"\"\n", " Calculate Peak Signal-to-Noise Ratio (PSNR)\n", " \n", " Args:\n", " output (torch.Tensor): Reconstructed image\n", " target (torch.Tensor): Original image\n", " max_pixel (float): Maximum pixel value\n", " \n", " Returns:\n", " float: PSNR value\n", " \"\"\"\n", " # Ensure the values are in the correct range\n", " mse = torch.nn.functional.mse_loss(output, target)\n", " psnr = 20 * torch.log10(max_pixel / torch.sqrt(mse))\n", " return psnr.item()\n", "\n", "def batch_inference(model_path, input_dir, output_dir):\n", " \"\"\"\n", " Perform batch inference on all DICOM files in a directory\n", " \n", " Args:\n", " model_path (str): Path to the saved model weights\n", " input_dir (str): Directory containing input DICOM files\n", " output_dir (str): Directory to save reconstructed DICOM files\n", " \"\"\"\n", " # Create output directory if it doesn't exist\n", " os.makedirs(output_dir, exist_ok=True)\n", " \n", " # Set device\n", " device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n", " \n", " # Initialize model\n", " model = UNet().to(device)\n", " \n", " # Load saved model weights\n", " model.load_state_dict(torch.load(model_path))\n", " model.eval()\n", " \n", " # Get list of DICOM files\n", " dcm_files = [f for f in os.listdir(input_dir) if f.endswith('.dcm')]\n", " \n", " # Prepare for inference\n", " print(f\"Starting batch inference on {len(dcm_files)} DICOM files...\")\n", " \n", " # Store PSNR values\n", " psnr_values = {}\n", " \n", " # Perform inference\n", " with torch.no_grad():\n", " for dcm_file in tqdm(dcm_files, desc=\"Reconstructing Images\"):\n", " # Full paths\n", " input_path = os.path.join(input_dir, dcm_file)\n", " output_path = os.path.join(output_dir, dcm_file)\n", " \n", " # Load image\n", " test_image, original_dcm = load_dicom_image(input_path)\n", " test_image = test_image.to(device)\n", " \n", " # Perform reconstruction\n", " reconstructed_image = model(test_image)\n", " \n", " # Calculate PSNR\n", " psnr = calculate_psnr(reconstructed_image, test_image)\n", " psnr_values[dcm_file] = psnr\n", " \n", " # Save reconstructed image\n", " save_reconstructed_dicom(reconstructed_image, original_dcm, output_path)\n", " \n", " # Print PSNR values\n", " print(\"\\nPSNR Values:\")\n", " for filename, psnr in psnr_values.items():\n", " print(f\"{filename}: {psnr:.2f} dB\")\n", " \n", " # Calculate and print overall statistics\n", " psnr_list = list(psnr_values.values())\n", " print(f\"\\nPSNR Statistics:\")\n", " print(f\"Average PSNR: {np.mean(psnr_list):.2f} dB\")\n", " print(f\"Minimum PSNR: {np.min(psnr_list):.2f} dB\")\n", " print(f\"Maximum PSNR: {np.max(psnr_list):.2f} dB\")\n", "\n", "# Example usage\n", "if __name__ == \"__main__\":\n", " # Paths to model, input, and output directories\n", " MODEL_PATH = r\"D:\\VSCODE\\PreSense\\best_model.pth\" # Path to your saved model\n", " INPUT_DICOM_DIR = r\"D:\\Pancreatic Neuroendocrine\\manifest-1662644254281\\CTpred-Sunitinib-panNET\\PAN_01\\04-11-2001-NA-NA-29221\\3.000000-CEFC07AIDR 3D STD-16260\" # Directory with input DICOM files\n", " OUTPUT_DICOM_DIR = r\"D:\\VSCODE\\PreSense\\reconstructed_dicom\" # Directory to save reconstructed DICOM files\n", " \n", " # Run batch inference\n", " batch_inference(MODEL_PATH, INPUT_DICOM_DIR, OUTPUT_DICOM_DIR)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import torch\n", "import torch.nn as nn\n", "import pydicom\n", "import numpy as np\n", "import os\n", "from tqdm import tqdm\n", "from PIL import Image\n", "\n", "# Import the UNet and related classes from the previous script\n", "\n", "def load_dicom_image(dicom_path):\n", " \"\"\"\n", " Load and normalize a DICOM image\n", " \n", " Args:\n", " dicom_path (str): Path to the DICOM file\n", " \n", " Returns:\n", " torch.Tensor: Normalized image tensor\n", " \"\"\"\n", " # Read DICOM file\n", " dcm = pydicom.dcmread(dicom_path)\n", " image = dcm.pixel_array.astype(float)\n", " \n", " # Normalize image\n", " image = (image - image.min()) / (image.max() - image.min())\n", " \n", " # Convert to tensor\n", " image_tensor = torch.from_numpy(image).float().unsqueeze(0).unsqueeze(0)\n", " return image_tensor, dcm\n", "\n", "def save_reconstructed_image(image_tensor, output_path):\n", " \"\"\"\n", " Save reconstructed image as a JPEG file\n", " \n", " Args:\n", " image_tensor (torch.Tensor): Reconstructed image tensor\n", " output_path (str): Path to save the reconstructed JPEG image\n", " \"\"\"\n", " # Convert tensor to numpy array\n", " reconstructed_image = image_tensor.squeeze().cpu().numpy()\n", " \n", " # Scale back to the original pixel range (assuming input was normalized to [0, 1])\n", " reconstructed_image = np.uint8(reconstructed_image * 255)\n", " \n", " # Convert to PIL Image\n", " pil_image = Image.fromarray(reconstructed_image)\n", " \n", " # Save as JPEG\n", " pil_image.save(output_path, 'JPEG')\n", "\n", "def calculate_psnr(output, target, max_pixel=1.0):\n", " \"\"\"\n", " Calculate Peak Signal-to-Noise Ratio (PSNR)\n", " \n", " Args:\n", " output (torch.Tensor): Reconstructed image\n", " target (torch.Tensor): Original image\n", " max_pixel (float): Maximum pixel value\n", " \n", " Returns:\n", " float: PSNR value\n", " \"\"\"\n", " # Ensure the values are in the correct range\n", " mse = torch.nn.functional.mse_loss(output, target)\n", " psnr = 20 * torch.log10(max_pixel / torch.sqrt(mse))\n", " return psnr.item()\n", "\n", "def batch_inference(model_path, input_dir, output_dir):\n", " \"\"\"\n", " Perform batch inference on all DICOM files in a directory\n", " \n", " Args:\n", " model_path (str): Path to the saved model weights\n", " input_dir (str): Directory containing input DICOM files\n", " output_dir (str): Directory to save reconstructed JPEG images\n", " \"\"\"\n", " # Create output directory if it doesn't exist\n", " os.makedirs(output_dir, exist_ok=True)\n", " \n", " # Set device\n", " device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n", " \n", " # Initialize model\n", " model = UNet().to(device)\n", " \n", " # Load saved model weights\n", " model.load_state_dict(torch.load(model_path))\n", " model.eval()\n", " \n", " # Get list of DICOM files\n", " dcm_files = [f for f in os.listdir(input_dir) if f.endswith('.dcm')]\n", " \n", " # Prepare for inference\n", " print(f\"Starting batch inference on {len(dcm_files)} DICOM files...\")\n", " \n", " # Store PSNR values\n", " psnr_values = {}\n", " \n", " # Perform inference\n", " with torch.no_grad():\n", " for dcm_file in tqdm(dcm_files, desc=\"Reconstructing Images\"):\n", " # Full paths\n", " input_path = os.path.join(input_dir, dcm_file)\n", " output_path = os.path.join(output_dir, f\"{os.path.splitext(dcm_file)[0]}.jpg\") # Save as .jpg\n", " \n", " # Load image\n", " test_image, original_dcm = load_dicom_image(input_path)\n", " test_image = test_image.to(device)\n", " \n", " # Perform reconstruction\n", " reconstructed_image = model(test_image)\n", " \n", " # Calculate PSNR\n", " psnr = calculate_psnr(reconstructed_image, test_image)\n", " psnr_values[dcm_file] = psnr\n", " \n", " # Save reconstructed image as JPEG\n", " save_reconstructed_image(reconstructed_image, output_path)\n", " \n", " # Print PSNR values\n", " print(\"\\nPSNR Values:\")\n", " for filename, psnr in psnr_values.items():\n", " print(f\"{filename}: {psnr:.2f} dB\")\n", " \n", " # Calculate and print overall statistics\n", " psnr_list = list(psnr_values.values())\n", " print(f\"\\nPSNR Statistics:\")\n", " print(f\"Average PSNR: {np.mean(psnr_list):.2f} dB\")\n", " print(f\"Minimum PSNR: {np.min(psnr_list):.2f} dB\")\n", " print(f\"Maximum PSNR: {np.max(psnr_list):.2f} dB\")\n", "\n", "# Example usage\n", "if __name__ == \"__main__\":\n", " # Paths to model, input, and output directories\n", " MODEL_PATH = r\"D:\\VSCODE\\PreSense\\best_model.pth\" # Path to your saved model\n", " INPUT_DICOM_DIR = r\"D:\\Pancreatic Neuroendocrine\\manifest-1662644254281\\CTpred-Sunitinib-panNET\\PAN_01\\04-11-2001-NA-NA-29221\\3.000000-CEFC07AIDR 3D STD-16260\" # Directory with input DICOM files\n", " OUTPUT_JPEG_DIR = r\"D:\\VSCODE\\PreSense\\reconstructed_images\" # Directory to save reconstructed JPEG images\n", " \n", " # Run batch inference\n", " batch_inference(MODEL_PATH, INPUT_DICOM_DIR, OUTPUT_JPEG_DIR)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Small Reconstructor and Denoiser U-Net (smallRD)" ] }, { "cell_type": "code", "execution_count": 18, "metadata": {}, "outputs": [], "source": [ "import torch\n", "import torch.nn as nn\n", "import torch.nn.functional as F\n", "import pydicom\n", "import numpy as np\n", "from torch.utils.data import Dataset, DataLoader\n", "import os\n", "from torch.utils.checkpoint import checkpoint\n", "from tqdm import tqdm # Import tqdm for progress bar\n", "\n", "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n", "\n", "class MedicalImageDataset(Dataset):\n", " def __init__(self, dicom_dir):\n", " self.dicom_files = [os.path.join(dicom_dir, f) for f in os.listdir(dicom_dir) if f.endswith('.dcm')]\n", " \n", " def __len__(self):\n", " return len(self.dicom_files)\n", " \n", " def __getitem__(self, idx):\n", " # Read DICOM file and normalize\n", " dcm = pydicom.dcmread(self.dicom_files[idx])\n", " image = dcm.pixel_array.astype(float)\n", " image = (image - image.min()) / (image.max() - image.min())\n", " \n", " # Convert to tensor\n", " image_tensor = torch.from_numpy(image).float().unsqueeze(0)\n", " return image_tensor, image_tensor\n", "\n", "class UNetBlock(nn.Module):\n", " def __init__(self, in_channels, out_channels):\n", " super().__init__()\n", " self.conv1 = nn.Conv2d(in_channels, out_channels, 3, padding=1)\n", " self.bn1 = nn.BatchNorm2d(out_channels)\n", " self.conv2 = nn.Conv2d(out_channels, out_channels, 3, padding=1)\n", " self.bn2 = nn.BatchNorm2d(out_channels)\n", " \n", " def forward(self, x):\n", " x = F.relu(self.bn1(self.conv1(x)))\n", " x = F.relu(self.bn2(self.conv2(x)))\n", " return x\n", "\n", "class UNet(nn.Module):\n", " def __init__(self, in_channels=1, out_channels=1):\n", " super().__init__()\n", " # Encoder\n", " self.enc1 = UNetBlock(in_channels, 64)\n", " self.enc2 = UNetBlock(64, 128)\n", " self.enc3 = UNetBlock(128, 256)\n", " \n", " # Decoder\n", " self.dec3 = UNetBlock(256 + 128, 128) # Adjust for concatenation with skip connection\n", " self.dec2 = UNetBlock(128 + 64, 64) # Adjust for concatenation with skip connection\n", " self.dec1 = UNetBlock(64, out_channels)\n", " \n", " # Pooling and upsampling\n", " self.pool = nn.MaxPool2d(2, 2)\n", " self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)\n", " \n", " def forward(self, x):\n", " # Encoder path\n", " e1 = checkpoint(self.enc1, x)\n", " e2 = checkpoint(self.enc2, self.pool(e1))\n", " e3 = checkpoint(self.enc3, self.pool(e2))\n", " \n", " # Decoder path with skip connections\n", " d3 = self.upsample(e3)\n", " d3 = torch.cat([d3, e2], dim=1) # Concatenate along channels\n", " d3 = checkpoint(self.dec3, d3)\n", " \n", " d2 = self.upsample(d3)\n", " d2 = torch.cat([d2, e1], dim=1) # Concatenate along channels\n", " d2 = checkpoint(self.dec2, d2)\n", " \n", " d1 = self.dec1(d2) # No checkpointing for final output layer\n", " \n", " return d1\n", "\n", "def calculate_loss(model, dataloader, criterion):\n", " model.eval()\n", " total_loss = 0\n", " with torch.no_grad():\n", " for images, targets in dataloader:\n", " images, targets = images.to(device), targets.to(device)\n", " outputs = model(images)\n", " loss = criterion(outputs, targets)\n", " total_loss += loss.item()\n", " return total_loss / len(dataloader)\n", "\n", "def calculate_psnr(output, target, max_pixel=1.0):\n", " # Ensure the values are in the correct range\n", " mse = F.mse_loss(output, target)\n", " psnr = 20 * torch.log10(max_pixel / torch.sqrt(mse))\n", " return psnr.item()\n", "\n", "def calculate_loss_and_psnr(model, dataloader, criterion):\n", " model.eval()\n", " total_loss = 0\n", " total_psnr = 0\n", " num_batches = len(dataloader)\n", " \n", " with torch.no_grad():\n", " for images, targets in dataloader:\n", " images, targets = images.to(device), targets.to(device)\n", " outputs = model(images)\n", " \n", " # Calculate MSE loss\n", " loss = criterion(outputs, targets)\n", " total_loss += loss.item()\n", " \n", " # Calculate PSNR\n", " psnr = calculate_psnr(outputs, targets)\n", " total_psnr += psnr\n", " \n", " avg_loss = total_loss / num_batches\n", " avg_psnr = total_psnr / num_batches\n", " \n", " return avg_loss, avg_psnr\n", "\n", "class UNet(nn.Module):\n", " def __init__(self, in_channels=1, out_channels=1):\n", " super().__init__()\n", " # Encoder\n", " self.enc1 = UNetBlock(in_channels, 64)\n", " self.enc2 = UNetBlock(64, 128)\n", " self.enc3 = UNetBlock(128, 256)\n", " \n", " # Decoder\n", " self.dec3 = UNetBlock(256 + 128, 128) # Adjust for concatenation with skip connection\n", " self.dec2 = UNetBlock(128 + 64, 64) # Adjust for concatenation with skip connection\n", " self.dec1 = UNetBlock(64, out_channels)\n", " \n", " # Pooling and upsampling\n", " self.pool = nn.MaxPool2d(2, 2)\n", " self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)\n", " \n", " def forward(self, x):\n", " # Encoder path\n", " e1 = checkpoint(self.enc1, x)\n", " e2 = checkpoint(self.enc2, self.pool(e1))\n", " e3 = checkpoint(self.enc3, self.pool(e2))\n", " \n", " # Decoder path with skip connections\n", " d3 = self.upsample(e3)\n", " d3 = torch.cat([d3, e2], dim=1) # Concatenate along channels\n", " d3 = checkpoint(self.dec3, d3)\n", " \n", " d2 = self.upsample(d3)\n", " d2 = torch.cat([d2, e1], dim=1) # Concatenate along channels\n", " d2 = checkpoint(self.dec2, d2)\n", " \n", " d1 = self.dec1(d2) # No checkpointing for final output layer\n", " \n", " return d1\n", "\n", "class Reconstructor(nn.Module):\n", " def __init__(self, in_channels=1, out_channels=1):\n", " super().__init__()\n", " # Same UNet architecture for reconstruction\n", " self.unet = UNet(in_channels=in_channels, out_channels=out_channels)\n", " \n", " def forward(self, x):\n", " return self.unet(x)\n", "\n", "\n", "class Denoiser(nn.Module):\n", " def __init__(self, in_channels=1, out_channels=1):\n", " super().__init__()\n", " # Same UNet architecture for denoising\n", " self.unet = UNet(in_channels=in_channels, out_channels=out_channels)\n", " \n", " def forward(self, x):\n", " return self.unet(x)\n", " \n", "def train_reconstructor_and_denoiser(dicom_dir, val_dicom_dir, epochs=50, batch_size=4, grad_accumulation_steps=2):\n", " # Dataset and DataLoader\n", " dataset = MedicalImageDataset(dicom_dir)\n", " train_dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)\n", " val_dataset = MedicalImageDataset(val_dicom_dir)\n", " val_dataloader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)\n", " \n", " # Initialize both models\n", " reconstructor = Reconstructor().to(device)\n", " denoiser = Denoiser().to(device)\n", " \n", " # Loss functions for both models\n", " reconstructor_criterion = nn.MSELoss()\n", " denoiser_criterion = nn.MSELoss()\n", " \n", " # Optimizers for both models\n", " reconstructor_optimizer = torch.optim.Adam(reconstructor.parameters(), lr=0.0001)\n", " denoiser_optimizer = torch.optim.Adam(denoiser.parameters(), lr=0.0001)\n", " \n", " # Best validation loss initialization\n", " best_reconstructor_val_loss = float('inf')\n", " best_denoiser_val_loss = float('inf')\n", " best_reconstructor_model_path = 'best_reconstructor_model.pth'\n", " best_denoiser_model_path = 'best_denoiser_model.pth'\n", "\n", " # Training loop with tqdm\n", " for epoch in range(epochs):\n", " reconstructor.train()\n", " denoiser.train()\n", " \n", " reconstructor_total_loss = 0\n", " denoiser_total_loss = 0\n", " \n", " reconstructor_optimizer.zero_grad()\n", " denoiser_optimizer.zero_grad()\n", "\n", " with tqdm(train_dataloader, unit=\"batch\", desc=f\"Epoch {epoch+1}/{epochs}\") as tepoch:\n", " for i, (images, targets) in enumerate(tepoch):\n", " images, targets = images.to(device), targets.to(device)\n", " \n", " # Training Reconstructor\n", " reconstructor_outputs = reconstructor(images)\n", " reconstructor_loss = reconstructor_criterion(reconstructor_outputs, targets)\n", " reconstructor_loss.backward(retain_graph=True)\n", "\n", " # Gradient accumulation for reconstructor\n", " if (i + 1) % grad_accumulation_steps == 0 or (i + 1) == len(tepoch):\n", " reconstructor_optimizer.step()\n", " reconstructor_optimizer.zero_grad()\n", "\n", " reconstructor_total_loss += reconstructor_loss.item()\n", "\n", " # Training Denoiser (using output from Reconstructor as noisy input)\n", " noisy_images = reconstructor_outputs.detach() # Detach from the computation graph to avoid in-place error\n", " denoiser_outputs = denoiser(noisy_images)\n", " denoiser_loss = denoiser_criterion(denoiser_outputs, targets)\n", " denoiser_loss.backward()\n", "\n", " # Gradient accumulation for denoiser\n", " if (i + 1) % grad_accumulation_steps == 0 or (i + 1) == len(tepoch):\n", " denoiser_optimizer.step()\n", " denoiser_optimizer.zero_grad()\n", "\n", " denoiser_total_loss += denoiser_loss.item()\n", "\n", " # Update the tqdm progress bar with current loss\n", " tepoch.set_postfix(\n", " reconstructor_loss=reconstructor_total_loss / ((i + 1) * batch_size),\n", " denoiser_loss=denoiser_total_loss / ((i + 1) * batch_size)\n", " )\n", " \n", " # Calculate validation loss for both models\n", " avg_reconstructor_train_loss = reconstructor_total_loss / len(train_dataloader)\n", " avg_denoiser_train_loss = denoiser_total_loss / len(train_dataloader)\n", " \n", " avg_reconstructor_val_loss, avg_reconstructor_val_psnr = calculate_loss_and_psnr(reconstructor, val_dataloader, reconstructor_criterion)\n", " avg_denoiser_val_loss, avg_denoiser_val_psnr = calculate_loss_and_psnr(denoiser, val_dataloader, denoiser_criterion)\n", " \n", " print(f\"Epoch [{epoch+1}/{epochs}] - \"\n", " f\"Reconstructor Train Loss: {avg_reconstructor_train_loss:.4f}, \"\n", " f\"Denoiser Train Loss: {avg_denoiser_train_loss:.4f}, \"\n", " f\"Reconstructor Val Loss: {avg_reconstructor_val_loss:.4f}, \"\n", " f\"Denoiser Val Loss: {avg_denoiser_val_loss:.4f}, \"\n", " f\"Reconstructor Validation PSNR: {avg_reconstructor_val_psnr:.4f}, \"\n", " f\"Denoiser Validation PSNR: {avg_denoiser_val_psnr:.4f}\")\n", " \n", " # Save models if validation loss is improved\n", " if avg_reconstructor_val_loss < best_reconstructor_val_loss:\n", " best_reconstructor_val_loss = avg_reconstructor_val_loss\n", " torch.save(reconstructor.state_dict(), best_reconstructor_model_path)\n", " print(f\"Reconstructor model saved with improved validation loss: {avg_reconstructor_val_loss:.4f}\")\n", " \n", " if avg_denoiser_val_loss < best_denoiser_val_loss:\n", " best_denoiser_val_loss = avg_denoiser_val_loss\n", " torch.save(denoiser.state_dict(), best_denoiser_model_path)\n", " print(f\"Denoiser model saved with improved validation loss: {avg_denoiser_val_loss:.4f}\")\n", " \n", " return reconstructor, denoiser\n", "\n", "# Example usage with train and validation directories\n", "reconstructor_model, denoiser_model = train_reconstructor_and_denoiser(\n", " r\"D:/PN_Split/train\", r\"D:/PN_Split/val\", epochs=50, batch_size=20, grad_accumulation_steps=2\n", ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### smallRD Single Image Inference" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import torch\n", "import pydicom\n", "import numpy as np\n", "import matplotlib.pyplot as plt\n", "import os\n", "\n", "# Import the models from the previous script\n", "# Assuming they are defined or imported correctly\n", "\n", "def load_dicom_image(dicom_path):\n", " \"\"\"\n", " Load and normalize a DICOM image\n", " \n", " Args:\n", " dicom_path (str): Path to the DICOM file\n", " \n", " Returns:\n", " torch.Tensor: Normalized image tensor\n", " \"\"\"\n", " # Read DICOM file\n", " dcm = pydicom.dcmread(dicom_path)\n", " image = dcm.pixel_array.astype(float)\n", " \n", " # Normalize image\n", " image = (image - image.min()) / (image.max() - image.min())\n", " \n", " # Convert to tensor\n", " image_tensor = torch.from_numpy(image).float().unsqueeze(0).unsqueeze(0) # Add batch and channel dimensions\n", " return image_tensor\n", "\n", "def calculate_psnr(output, target, max_pixel=1.0):\n", " \"\"\"\n", " Calculate Peak Signal-to-Noise Ratio (PSNR)\n", " \n", " Args:\n", " output (torch.Tensor): Reconstructed image\n", " target (torch.Tensor): Original image\n", " max_pixel (float): Maximum pixel value\n", " \n", " Returns:\n", " float: PSNR value\n", " \"\"\"\n", " # Ensure the values are in the correct range\n", " mse = torch.nn.functional.mse_loss(output, target)\n", " psnr = 20 * torch.log10(max_pixel / torch.sqrt(mse))\n", " return psnr.item()\n", "\n", "def visualize_reconstruction(original_image, reconstructed_image, psnr):\n", " \"\"\"\n", " Visualize original and reconstructed images\n", " \n", " Args:\n", " original_image (torch.Tensor): Original image tensor\n", " reconstructed_image (torch.Tensor): Reconstructed image tensor\n", " psnr (float): Peak Signal-to-Noise Ratio\n", " \"\"\"\n", " # Convert tensors to numpy for visualization\n", " original = original_image.squeeze().cpu().numpy()\n", " reconstructed = reconstructed_image.squeeze().cpu().numpy()\n", " \n", " # Create subplot\n", " fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))\n", " \n", " # Plot original image\n", " im1 = ax1.imshow(original, cmap='gray')\n", " ax1.set_title('Original Image')\n", " plt.colorbar(im1, ax=ax1)\n", " \n", " # Plot reconstructed image\n", " im2 = ax2.imshow(reconstructed, cmap='gray')\n", " ax2.set_title(f'Reconstructed Image\\nPSNR: {psnr:.2f} dB')\n", " plt.colorbar(im2, ax=ax2)\n", " \n", " plt.tight_layout()\n", " plt.show()\n", "\n", "def inference_single_image(reconstructor_model_path, denoiser_model_path, test_dicom_path):\n", " \"\"\"\n", " Perform inference on a single DICOM image using both Reconstructor and Denoiser models.\n", " \n", " Args:\n", " reconstructor_model_path (str): Path to the saved Reconstructor model weights\n", " denoiser_model_path (str): Path to the saved Denoiser model weights\n", " test_dicom_path (str): Path to the test DICOM file\n", " \"\"\"\n", " # Set device\n", " device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n", " \n", " # Initialize models\n", " reconstructor = Reconstructor().to(device)\n", " denoiser = Denoiser().to(device)\n", " \n", " # Load saved model weights\n", " reconstructor.load_state_dict(torch.load(reconstructor_model_path))\n", " denoiser.load_state_dict(torch.load(denoiser_model_path))\n", " \n", " reconstructor.eval()\n", " denoiser.eval()\n", " \n", " # Load and preprocess test image\n", " with torch.no_grad():\n", " test_image = load_dicom_image(test_dicom_path).to(device)\n", " \n", " # Perform reconstruction\n", " reconstructed_image = reconstructor(test_image)\n", " \n", " # Perform denoising on the reconstructed image\n", " denoised_image = denoiser(reconstructed_image)\n", " \n", " # Calculate PSNR for both original and denoised outputs\n", " psnr_reconstructed = calculate_psnr(reconstructed_image, test_image)\n", " psnr_denoised = calculate_psnr(denoised_image, test_image)\n", "\n", " print(f\"PSNR (Reconstructed): {psnr_reconstructed:.2f} dB\")\n", " print(f\"PSNR (Denoised): {psnr_denoised:.2f} dB\")\n", " \n", " # Visualize results\n", " visualize_reconstruction(test_image, reconstructed_image, psnr_reconstructed)\n", " visualize_reconstruction(test_image, denoised_image, psnr_denoised)\n", "\n", "# Example usage\n", "if __name__ == \"__main__\":\n", " # Paths to models and test image\n", " RECONSTRUCTOR_MODEL_PATH = \"./small_reconstructor.pth\" # Path to your saved Reconstructor model\n", " DENOISER_MODEL_PATH = \"./small_denoiser.pth\" # Path to your saved Denoiser model\n", " TEST_DICOM_PATH = \"./test.dcm\" # Replace with actual path to test DICOM \n", " # Run inference\n", " inference_single_image(RECONSTRUCTOR_MODEL_PATH, DENOISER_MODEL_PATH, TEST_DICOM_PATH)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Medium Reconstructor and Denoiser U-Net (mediumRD)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import torch\n", "import torch.nn as nn\n", "import torch.nn.functional as F\n", "import pydicom\n", "import numpy as np\n", "from torch.utils.data import Dataset, DataLoader\n", "import os\n", "from torch.utils.checkpoint import checkpoint\n", "from tqdm import tqdm # Import tqdm for progress bar\n", "\n", "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n", "\n", "class MedicalImageDataset(Dataset):\n", " def __init__(self, dicom_dir):\n", " self.dicom_files = [os.path.join(dicom_dir, f) for f in os.listdir(dicom_dir) if f.endswith('.dcm')]\n", " \n", " def __len__(self):\n", " return len(self.dicom_files)\n", " \n", " def __getitem__(self, idx):\n", " # Read DICOM file and normalize\n", " dcm = pydicom.dcmread(self.dicom_files[idx])\n", " image = dcm.pixel_array.astype(float)\n", " image = (image - image.min()) / (image.max() - image.min())\n", " \n", " # Convert to tensor\n", " image_tensor = torch.from_numpy(image).float().unsqueeze(0)\n", " return image_tensor, image_tensor\n", "\n", "class UNetBlock(nn.Module):\n", " def __init__(self, in_channels, out_channels):\n", " super().__init__()\n", " self.conv1 = nn.Conv2d(in_channels, out_channels, 3, padding=1)\n", " self.bn1 = nn.BatchNorm2d(out_channels)\n", " self.conv2 = nn.Conv2d(out_channels, out_channels, 3, padding=1)\n", " self.bn2 = nn.BatchNorm2d(out_channels)\n", " self.conv2 = nn.Conv2d(out_channels, out_channels, 3, padding=1)\n", " self.bn2 = nn.BatchNorm2d(out_channels)\n", " \n", " def forward(self, x):\n", " x = F.relu(self.bn1(self.conv1(x)))\n", " x = F.relu(self.bn2(self.conv2(x)))\n", " return x\n", "\n", "class UNet(nn.Module):\n", " def __init__(self, in_channels=1, out_channels=1):\n", " super().__init__()\n", " # Encoder\n", " self.enc1 = UNetBlock(in_channels, 96)\n", " self.enc2 = UNetBlock(96, 192)\n", " self.enc3 = UNetBlock(192, 384)\n", " self.enc4 = UNetBlock(384, 784)\n", " \n", " # Decoder with learned upsampling (transposed convolutions)\n", " self.upconv4 = nn.ConvTranspose2d(784, 384, kernel_size=2, stride=2) # Learnable upsampling\n", " self.dec4 = UNetBlock(384 + 384, 384) # Adjust input channels after concatenation\n", "\n", " self.upconv3 = nn.ConvTranspose2d(384, 192, kernel_size=2, stride=2) # Learnable upsampling\n", " self.dec3 = UNetBlock(192 + 192, 192) # Adjust input channels after concatenation\n", "\n", " self.upconv2 = nn.ConvTranspose2d(192, 96, kernel_size=2, stride=2) # Learnable upsampling\n", " self.dec2 = UNetBlock(96 + 96, 96) # Adjust input channels after concatenation\n", "\n", " self.dec1 = UNetBlock(96, out_channels) # Final output\n", "\n", " self.pool = nn.MaxPool2d(2, 2)\n", " \n", " def forward(self, x):\n", " # Encoder path\n", " e1 = checkpoint(self.enc1, x)\n", " e2 = checkpoint(self.enc2, self.pool(e1))\n", " e3 = checkpoint(self.enc3, self.pool(e2))\n", " e4 = checkpoint(self.enc4, self.pool(e3))\n", " \n", " # Decoder path with learned upsampling and skip connections\n", " d4 = self.upconv4(e4) # Learnable upsampling\n", " d4 = torch.cat([d4, e3], dim=1) # Concatenate with encoder features\n", " d4 = checkpoint(self.dec4, d4)\n", "\n", " d3 = self.upconv3(d4) # Learnable upsampling\n", " d3 = torch.cat([d3, e2], dim=1) # Concatenate with encoder features\n", " d3 = checkpoint(self.dec3, d3)\n", "\n", " d2 = self.upconv2(d3) # Learnable upsampling\n", " d2 = torch.cat([d2, e1], dim=1) # Concatenate with encoder features\n", " d2 = checkpoint(self.dec2, d2)\n", " \n", " d1 = self.dec1(d2) # No checkpointing for final output layer\n", " \n", " return d1\n", "\n", "def calculate_loss(model, dataloader, criterion):\n", " model.eval()\n", " total_loss = 0\n", " with torch.no_grad():\n", " for images, targets in dataloader:\n", " images, targets = images.to(device), targets.to(device)\n", " outputs = model(images)\n", " loss = criterion(outputs, targets)\n", " total_loss += loss.item()\n", " return total_loss / len(dataloader)\n", "\n", "def calculate_psnr(output, target, max_pixel=1.0):\n", " # Ensure the values are in the correct range\n", " mse = F.mse_loss(output, target)\n", " psnr = 20 * torch.log10(max_pixel / torch.sqrt(mse))\n", " return psnr.item()\n", "\n", "def calculate_loss_and_psnr(model, dataloader, criterion):\n", " model.eval()\n", " total_loss = 0\n", " total_psnr = 0\n", " num_batches = len(dataloader)\n", " \n", " with torch.no_grad():\n", " for images, targets in dataloader:\n", " images, targets = images.to(device), targets.to(device)\n", " outputs = model(images)\n", " \n", " # Calculate MSE loss\n", " loss = criterion(outputs, targets)\n", " total_loss += loss.item()\n", " \n", " # Calculate PSNR\n", " psnr = calculate_psnr(outputs, targets)\n", " total_psnr += psnr\n", " \n", " avg_loss = total_loss / num_batches\n", " avg_psnr = total_psnr / num_batches\n", " \n", " return avg_loss, avg_psnr\n", "\n", "class Reconstructor(nn.Module):\n", " def __init__(self, in_channels=1, out_channels=1):\n", " super().__init__()\n", " # Same UNet architecture for reconstruction\n", " self.unet = UNet(in_channels=in_channels, out_channels=out_channels)\n", " \n", " def forward(self, x):\n", " return self.unet(x)\n", "\n", "\n", "class Denoiser(nn.Module):\n", " def __init__(self, in_channels=1, out_channels=1):\n", " super().__init__()\n", " # Same UNet architecture for denoising\n", " self.unet = UNet(in_channels=in_channels, out_channels=out_channels)\n", " \n", " def forward(self, x):\n", " return self.unet(x)\n", " \n", "import os\n", "\n", "def train_reconstructor_and_denoiser(dicom_dir, val_dicom_dir, epochs=50, batch_size=4, grad_accumulation_steps=2):\n", " # Dataset and DataLoader\n", " dataset = MedicalImageDataset(dicom_dir)\n", " train_dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)\n", " val_dataset = MedicalImageDataset(val_dicom_dir)\n", " val_dataloader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)\n", " \n", " # Initialize both models\n", " reconstructor = Reconstructor().to(device)\n", " denoiser = Denoiser().to(device)\n", " \n", " # Check if pre-trained models exist\n", " reconstructor_model_path = 'large_reconstructor.pth'\n", " denoiser_model_path = 'large_denoiser.pth'\n", "\n", " # Resume from existing models if they exist\n", " if os.path.exists(reconstructor_model_path):\n", " reconstructor.load_state_dict(torch.load(reconstructor_model_path))\n", " print(f\"Resumed training from {reconstructor_model_path}\")\n", " else:\n", " print(f\"No pre-trained reconstructor model found, starting from scratch.\")\n", " \n", " if os.path.exists(denoiser_model_path):\n", " denoiser.load_state_dict(torch.load(denoiser_model_path))\n", " print(f\"Resumed training from {denoiser_model_path}\")\n", " else:\n", " print(f\"No pre-trained denoiser model found, starting from scratch.\")\n", " \n", " # Loss functions for both models\n", " reconstructor_criterion = nn.MSELoss()\n", " denoiser_criterion = nn.MSELoss()\n", " \n", " # Optimizers for both models\n", " reconstructor_optimizer = torch.optim.Adam(reconstructor.parameters(), lr=0.0001)\n", " denoiser_optimizer = torch.optim.Adam(denoiser.parameters(), lr=0.0001)\n", " \n", " # Best validation loss initialization\n", " best_reconstructor_val_loss = float('inf')\n", " best_denoiser_val_loss = float('inf')\n", " best_reconstructor_model_path = 'best_reconstructor_model.pth'\n", " best_denoiser_model_path = 'best_denoiser_model.pth'\n", "\n", " # Training loop with tqdm\n", " for epoch in range(epochs):\n", " reconstructor.train()\n", " denoiser.train()\n", " \n", " reconstructor_total_loss = 0\n", " denoiser_total_loss = 0\n", " \n", " reconstructor_optimizer.zero_grad()\n", " denoiser_optimizer.zero_grad()\n", "\n", " with tqdm(train_dataloader, unit=\"batch\", desc=f\"Epoch {epoch+1}/{epochs}\") as tepoch:\n", " for i, (images, targets) in enumerate(tepoch):\n", " images, targets = images.to(device), targets.to(device)\n", " \n", " # Training Reconstructor\n", " reconstructor_outputs = reconstructor(images)\n", " reconstructor_loss = reconstructor_criterion(reconstructor_outputs, targets)\n", " reconstructor_loss.backward(retain_graph=True)\n", "\n", " # Gradient accumulation for reconstructor\n", " if (i + 1) % grad_accumulation_steps == 0 or (i + 1) == len(tepoch):\n", " reconstructor_optimizer.step()\n", " reconstructor_optimizer.zero_grad()\n", "\n", " reconstructor_total_loss += reconstructor_loss.item()\n", "\n", " # Training Denoiser (using output from Reconstructor as noisy input)\n", " noisy_images = reconstructor_outputs.detach() # Detach from the computation graph to avoid in-place error\n", " denoiser_outputs = denoiser(noisy_images)\n", " denoiser_loss = denoiser_criterion(denoiser_outputs, targets)\n", " denoiser_loss.backward()\n", "\n", " # Gradient accumulation for denoiser\n", " if (i + 1) % grad_accumulation_steps == 0 or (i + 1) == len(tepoch):\n", " denoiser_optimizer.step()\n", " denoiser_optimizer.zero_grad()\n", "\n", " denoiser_total_loss += denoiser_loss.item()\n", "\n", " # Update the tqdm progress bar with current loss\n", " tepoch.set_postfix(\n", " reconstructor_loss=reconstructor_total_loss / ((i + 1) * batch_size),\n", " denoiser_loss=denoiser_total_loss / ((i + 1) * batch_size)\n", " )\n", " \n", " # Calculate validation loss for both models\n", " avg_reconstructor_train_loss = reconstructor_total_loss / len(train_dataloader)\n", " avg_denoiser_train_loss = denoiser_total_loss / len(train_dataloader)\n", " \n", " avg_reconstructor_val_loss, _ = calculate_loss_and_psnr(reconstructor, val_dataloader, reconstructor_criterion)\n", " avg_denoiser_val_loss, _ = calculate_loss_and_psnr(denoiser, val_dataloader, denoiser_criterion)\n", " \n", " print(f\"Epoch [{epoch+1}/{epochs}] - \"\n", " f\"Reconstructor Train Loss: {avg_reconstructor_train_loss:.4f}, \"\n", " f\"Denoiser Train Loss: {avg_denoiser_train_loss:.4f}, \"\n", " f\"Reconstructor Val Loss: {avg_reconstructor_val_loss:.4f}, \"\n", " f\"Denoiser Val Loss: {avg_denoiser_val_loss:.4f}\")\n", " \n", " # Save models if validation loss is improved\n", " if avg_reconstructor_val_loss < best_reconstructor_val_loss:\n", " best_reconstructor_val_loss = avg_reconstructor_val_loss\n", " torch.save(reconstructor.state_dict(), reconstructor_model_path)\n", " print(f\"Reconstructor model saved with improved validation loss: {avg_reconstructor_val_loss:.4f}\")\n", " \n", " if avg_denoiser_val_loss < best_denoiser_val_loss:\n", " best_denoiser_val_loss = avg_denoiser_val_loss\n", " torch.save(denoiser.state_dict(), denoiser_model_path)\n", " print(f\"Denoiser model saved with improved validation loss: {avg_denoiser_val_loss:.4f}\")\n", " \n", " return reconstructor, denoiser\n", "\n", "# Example usage with train and validation directories\n", "reconstructor_model, denoiser_model = train_reconstructor_and_denoiser(\n", " \"./TCIA_Split/train\", \"./TCIA_Split/val\", epochs=50, batch_size=6, grad_accumulation_steps=16\n", ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### mediumRD Inference" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import torch\n", "import pydicom\n", "import numpy as np\n", "import matplotlib.pyplot as plt\n", "import os\n", "\n", "# Import the models from the previous script\n", "# Assuming they are defined or imported correctly\n", "\n", "def load_dicom_image(dicom_path):\n", " \"\"\"\n", " Load and normalize a DICOM image\n", " \n", " Args:\n", " dicom_path (str): Path to the DICOM file\n", " \n", " Returns:\n", " torch.Tensor: Normalized image tensor\n", " \"\"\"\n", " # Read DICOM file\n", " dcm = pydicom.dcmread(dicom_path)\n", " image = dcm.pixel_array.astype(float)\n", " \n", " # Normalize image\n", " image = (image - image.min()) / (image.max() - image.min())\n", " \n", " # Convert to tensor\n", " image_tensor = torch.from_numpy(image).float().unsqueeze(0).unsqueeze(0) # Add batch and channel dimensions\n", " return image_tensor\n", "\n", "def calculate_psnr(output, target, max_pixel=1.0):\n", " \"\"\"\n", " Calculate Peak Signal-to-Noise Ratio (PSNR)\n", " \n", " Args:\n", " output (torch.Tensor): Reconstructed image\n", " target (torch.Tensor): Original image\n", " max_pixel (float): Maximum pixel value\n", " \n", " Returns:\n", " float: PSNR value\n", " \"\"\"\n", " # Ensure the values are in the correct range\n", " mse = torch.nn.functional.mse_loss(output, target)\n", " psnr = 20 * torch.log10(max_pixel / torch.sqrt(mse))\n", " return psnr.item()\n", "\n", "def visualize_reconstruction(original_image, reconstructed_image, psnr):\n", " \"\"\"\n", " Visualize original and reconstructed images\n", " \n", " Args:\n", " original_image (torch.Tensor): Original image tensor\n", " reconstructed_image (torch.Tensor): Reconstructed image tensor\n", " psnr (float): Peak Signal-to-Noise Ratio\n", " \"\"\"\n", " # Convert tensors to numpy for visualization\n", " original = original_image.squeeze().cpu().numpy()\n", " reconstructed = reconstructed_image.squeeze().cpu().numpy()\n", " \n", " # Create subplot\n", " fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))\n", " \n", " # Plot original image\n", " im1 = ax1.imshow(original, cmap='gray')\n", " ax1.set_title('Original Image')\n", " plt.colorbar(im1, ax=ax1)\n", " \n", " # Plot reconstructed image\n", " im2 = ax2.imshow(reconstructed, cmap='gray')\n", " ax2.set_title(f'Reconstructed Image\\nPSNR: {psnr:.2f} dB')\n", " plt.colorbar(im2, ax=ax2)\n", " \n", " plt.tight_layout()\n", " plt.show()\n", "\n", "def inference_single_image(reconstructor_model_path, denoiser_model_path, test_dicom_path):\n", " \"\"\"\n", " Perform inference on a single DICOM image using both Reconstructor and Denoiser models.\n", " \n", " Args:\n", " reconstructor_model_path (str): Path to the saved Reconstructor model weights\n", " denoiser_model_path (str): Path to the saved Denoiser model weights\n", " test_dicom_path (str): Path to the test DICOM file\n", " \"\"\"\n", " # Set device\n", " device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n", " \n", " # Initialize models\n", " reconstructor = Reconstructor().to(device)\n", " denoiser = Denoiser().to(device)\n", " \n", " # Load saved model weights\n", " reconstructor.load_state_dict(torch.load(reconstructor_model_path))\n", " denoiser.load_state_dict(torch.load(denoiser_model_path))\n", " \n", " reconstructor.eval()\n", " denoiser.eval()\n", " \n", " # Load and preprocess test image\n", " with torch.no_grad():\n", " test_image = load_dicom_image(test_dicom_path).to(device)\n", " \n", " # Perform reconstruction\n", " reconstructed_image = reconstructor(test_image)\n", " \n", " # Perform denoising on the reconstructed image\n", " denoised_image = denoiser(reconstructed_image)\n", " \n", " # Calculate PSNR for both original and denoised outputs\n", " psnr_reconstructed = calculate_psnr(reconstructed_image, test_image)\n", " psnr_denoised = calculate_psnr(denoised_image, test_image)\n", "\n", " print(f\"PSNR (Reconstructed): {psnr_reconstructed:.2f} dB\")\n", " print(f\"PSNR (Denoised): {psnr_denoised:.2f} dB\")\n", " \n", " # Visualize results\n", " visualize_reconstruction(test_image, reconstructed_image, psnr_reconstructed)\n", " visualize_reconstruction(test_image, denoised_image, psnr_denoised)\n", "\n", "# Example usage\n", "if __name__ == \"__main__\":\n", " # Paths to models and test image\n", " RECONSTRUCTOR_MODEL_PATH = \"./medium_reconstructor.pth\" # Path to your saved Reconstructor model\n", " DENOISER_MODEL_PATH = \"./medium_denoiser.pth\" # Path to your saved Denoiser model\n", " TEST_DICOM_PATH = \"./test.dcm\" # Replace with actual path to test DICOM \n", " # Run inference\n", " inference_single_image(RECONSTRUCTOR_MODEL_PATH, DENOISER_MODEL_PATH, TEST_DICOM_PATH)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Larger Reconstructor U-Net (largeR)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import torch\n", "import torch.nn as nn\n", "import torch.nn.functional as F\n", "import pydicom\n", "import numpy as np\n", "from torch.utils.data import Dataset, DataLoader\n", "import os\n", "from torch.utils.checkpoint import checkpoint\n", "from tqdm import tqdm # Import tqdm for progress bar\n", "\n", "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n", "\n", "class MedicalImageDataset(Dataset):\n", " def __init__(self, dicom_dir):\n", " self.dicom_files = [os.path.join(dicom_dir, f) for f in os.listdir(dicom_dir) if f.endswith('.dcm')]\n", " \n", " def __len__(self):\n", " return len(self.dicom_files)\n", " \n", " def __getitem__(self, idx):\n", " # Read DICOM file and normalize\n", " dcm = pydicom.dcmread(self.dicom_files[idx])\n", " image = dcm.pixel_array.astype(float)\n", " image = (image - image.min()) / (image.max() - image.min())\n", " \n", " # Convert to tensor\n", " image_tensor = torch.from_numpy(image).float().unsqueeze(0)\n", " return image_tensor, image_tensor\n", "\n", "class UNetBlock(nn.Module):\n", " def __init__(self, in_channels, out_channels):\n", " super().__init__()\n", " self.conv1 = nn.Conv2d(in_channels, out_channels, 3, padding=1)\n", " self.bn1 = nn.BatchNorm2d(out_channels)\n", " self.conv2 = nn.Conv2d(out_channels, out_channels, 3, padding=1)\n", " self.bn2 = nn.BatchNorm2d(out_channels)\n", " self.conv3 = nn.Conv2d(out_channels, out_channels, 3, padding=1)\n", " self.bn3 = nn.BatchNorm2d(out_channels)\n", " self.conv4 = nn.Conv2d(out_channels, out_channels, 3, padding=1)\n", " self.bn4 = nn.BatchNorm2d(out_channels)\n", " \n", " def forward(self, x):\n", " x = F.relu(self.bn1(self.conv1(x)))\n", " x = F.relu(self.bn2(self.conv2(x)))\n", " x = F.relu(self.bn3(self.conv3(x)))\n", " x = F.relu(self.bn4(self.conv4(x)))\n", " return x\n", "\n", "class UNet(nn.Module):\n", " def __init__(self, in_channels=1, out_channels=1):\n", " super().__init__()\n", " # Encoder\n", " self.enc1 = UNetBlock(in_channels, 64)\n", " self.enc2 = UNetBlock(64, 128)\n", " self.enc3 = UNetBlock(128, 256)\n", " self.enc4 = UNetBlock(256, 512)\n", " self.enc5 = UNetBlock(512, 1024)\n", " \n", " # Decoder with learned upsampling (transposed convolutions)\n", "\n", " self.upconv5 = nn.ConvTranspose2d(1024, 512, kernel_size=2, stride=2) # Learnable upsampling\n", " self.dec5 = UNetBlock(512 + 512, 512) # Adjust input channels after concatenation\n", "\n", " self.upconv4 = nn.ConvTranspose2d(512, 256, kernel_size=2, stride=2) # Learnable upsampling\n", " self.dec4 = UNetBlock(256 + 256, 256) # Adjust input channels after concatenation\n", "\n", " self.upconv3 = nn.ConvTranspose2d(256, 128, kernel_size=2, stride=2) # Learnable upsampling\n", " self.dec3 = UNetBlock(128 + 128, 128) # Adjust input channels after concatenation\n", "\n", " self.upconv2 = nn.ConvTranspose2d(128, 64, kernel_size=2, stride=2) # Learnable upsampling\n", " self.dec2 = UNetBlock(64 + 64, 64) # Adjust input channels after concatenation\n", "\n", " self.dec1 = UNetBlock(64, out_channels) # Final output\n", "\n", " self.pool = nn.MaxPool2d(2, 2)\n", " \n", " def forward(self, x):\n", " # Encoder path\n", " e1 = checkpoint(self.enc1, x)\n", " e2 = checkpoint(self.enc2, self.pool(e1))\n", " e3 = checkpoint(self.enc3, self.pool(e2))\n", " e4 = checkpoint(self.enc4, self.pool(e3))\n", " e5 = checkpoint(self.enc5, self.pool(e4))\n", " \n", " # Decoder path with learned upsampling and skip connections\n", "\n", " d5 = self.upconv5(e5) # Learnable upsampling\n", "\n", " d5 = torch.cat([d5, e4], dim=1) # Concatenate with encoder features\n", " d5 = checkpoint(self.dec5, d5)\n", "\n", " d4 = self.upconv4(d5) # Learnable upsampling\n", "\n", " d4 = torch.cat([d4, e3], dim=1) # Concatenate with encoder features\n", " d4 = checkpoint(self.dec4, d4)\n", "\n", " d3 = self.upconv3(d4) # Learnable upsampling\n", "\n", " d3 = torch.cat([d3, e2], dim=1) # Concatenate with encoder features\n", " d3 = checkpoint(self.dec3, d3)\n", "\n", " d2 = self.upconv2(d3) # Learnable upsampling\n", "\n", " d2 = torch.cat([d2, e1], dim=1) # Concatenate with encoder features\n", " d2 = checkpoint(self.dec2, d2)\n", " \n", " d1 = self.dec1(d2) # No checkpointing for final output layer\n", " \n", " return d1\n", "\n", "def calculate_loss(model, dataloader, criterion):\n", " model.eval()\n", " total_loss = 0\n", " with torch.no_grad():\n", " for images, targets in dataloader:\n", " images, targets = images.to(device), targets.to(device)\n", " outputs = model(images)\n", " loss = criterion(outputs, targets)\n", " total_loss += loss.item()\n", " return total_loss / len(dataloader)\n", "\n", "def calculate_psnr(output, target, max_pixel=1.0):\n", " # Ensure the values are in the correct range\n", " mse = F.mse_loss(output, target)\n", " psnr = 20 * torch.log10(max_pixel / torch.sqrt(mse))\n", " return psnr.item()\n", "\n", "def calculate_loss_and_psnr(model, dataloader, criterion):\n", " model.eval()\n", " total_loss = 0\n", " total_psnr = 0\n", " num_batches = len(dataloader)\n", " \n", " with torch.no_grad():\n", " for images, targets in dataloader:\n", " images, targets = images.to(device), targets.to(device)\n", " outputs = model(images)\n", " \n", " # Calculate MSE loss\n", " loss = criterion(outputs, targets)\n", " total_loss += loss.item()\n", " \n", " # Calculate PSNR\n", " psnr = calculate_psnr(outputs, targets)\n", " total_psnr += psnr\n", " \n", " avg_loss = total_loss / num_batches\n", " avg_psnr = total_psnr / num_batches\n", " \n", " return avg_loss, avg_psnr\n", "\n", "class Reconstructor(nn.Module):\n", " def __init__(self, in_channels=1, out_channels=1):\n", " super().__init__()\n", " # Same UNet architecture for reconstruction\n", " self.unet = UNet(in_channels=in_channels, out_channels=out_channels)\n", " \n", " def forward(self, x):\n", " return self.unet(x)\n", "\n", "class Denoiser(nn.Module):\n", " def __init__(self, in_channels=1, out_channels=1):\n", " super().__init__()\n", " # Same UNet architecture for denoising\n", " self.unet = UNet(in_channels=in_channels, out_channels=out_channels)\n", " \n", " def forward(self, x):\n", " return self.unet(x)\n", " \n", "def train_reconstructor_and_denoiser(dicom_dir, val_dicom_dir, epochs=50, batch_size=4, grad_accumulation_steps=2):\n", " # Dataset and DataLoader\n", " dataset = MedicalImageDataset(dicom_dir)\n", " train_dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)\n", " val_dataset = MedicalImageDataset(val_dicom_dir)\n", " val_dataloader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)\n", " \n", " # Initialize both models\n", " reconstructor = Reconstructor().to(device)\n", " denoiser = Denoiser().to(device)\n", " \n", " # Loss functions for both models\n", " reconstructor_criterion = nn.MSELoss()\n", " denoiser_criterion = nn.MSELoss()\n", " \n", " # Optimizers for both models\n", " reconstructor_optimizer = torch.optim.Adam(reconstructor.parameters(), lr=0.0001)\n", " denoiser_optimizer = torch.optim.Adam(denoiser.parameters(), lr=0.0001)\n", " \n", " # Best validation loss initialization\n", " best_reconstructor_val_loss = float('inf')\n", " best_denoiser_val_loss = float('inf')\n", " best_reconstructor_model_path = 'largeR.pth'\n", " best_denoiser_model_path = 'largeD.pth'\n", "\n", " # Training loop with tqdm\n", " for epoch in range(epochs):\n", " reconstructor.train()\n", " denoiser.train()\n", " \n", " reconstructor_total_loss = 0\n", " denoiser_total_loss = 0\n", " \n", " reconstructor_optimizer.zero_grad()\n", " denoiser_optimizer.zero_grad()\n", "\n", " with tqdm(train_dataloader, unit=\"batch\", desc=f\"Epoch {epoch+1}/{epochs}\") as tepoch:\n", " for i, (images, targets) in enumerate(tepoch):\n", " images, targets = images.to(device), targets.to(device)\n", " \n", " # Training Reconstructor\n", " reconstructor_outputs = reconstructor(images)\n", " reconstructor_loss = reconstructor_criterion(reconstructor_outputs, targets)\n", " reconstructor_loss.backward(retain_graph=True)\n", "\n", " # Gradient accumulation for reconstructor\n", " if (i + 1) % grad_accumulation_steps == 0 or (i + 1) == len(tepoch):\n", " reconstructor_optimizer.step()\n", " reconstructor_optimizer.zero_grad()\n", "\n", " reconstructor_total_loss += reconstructor_loss.item()\n", "\n", " # Training Denoiser (using output from Reconstructor as noisy input)\n", " noisy_images = reconstructor_outputs.detach() # Detach from the computation graph to avoid in-place error\n", " denoiser_outputs = denoiser(noisy_images)\n", " denoiser_loss = denoiser_criterion(denoiser_outputs, targets)\n", " denoiser_loss.backward()\n", "\n", " # Gradient accumulation for denoiser\n", " if (i + 1) % grad_accumulation_steps == 0 or (i + 1) == len(tepoch):\n", " denoiser_optimizer.step()\n", " denoiser_optimizer.zero_grad()\n", "\n", " denoiser_total_loss += denoiser_loss.item()\n", "\n", " # Update the tqdm progress bar with current loss\n", " tepoch.set_postfix(\n", " reconstructor_loss=reconstructor_total_loss / ((i + 1) * batch_size),\n", " denoiser_loss=denoiser_total_loss / ((i + 1) * batch_size)\n", " )\n", " \n", " # Calculate validation loss for both models\n", " avg_reconstructor_train_loss = reconstructor_total_loss / len(train_dataloader)\n", " avg_denoiser_train_loss = denoiser_total_loss / len(train_dataloader)\n", " \n", " avg_reconstructor_val_loss, avg_reconstructor_val_psnr = calculate_loss_and_psnr(reconstructor, val_dataloader, reconstructor_criterion)\n", " avg_denoiser_val_loss, avg_denoiser_val_psnr = calculate_loss_and_psnr(denoiser, val_dataloader, denoiser_criterion)\n", " \n", " print(f\"Epoch [{epoch+1}/{epochs}] - \"\n", " f\"Reconstructor Train Loss: {avg_reconstructor_train_loss:.4f}, \"\n", " f\"Denoiser Train Loss: {avg_denoiser_train_loss:.4f}, \"\n", " f\"Reconstructor Val Loss: {avg_reconstructor_val_loss:.4f}, \"\n", " f\"Denoiser Val Loss: {avg_denoiser_val_loss:.4f}\")\n", " \n", " # Save models if validation loss is improved\n", " if avg_reconstructor_val_loss < best_reconstructor_val_loss:\n", " best_reconstructor_val_loss = avg_reconstructor_val_loss\n", " torch.save(reconstructor.state_dict(), best_reconstructor_model_path)\n", " print(f\"Reconstructor model saved with improved validation loss: {avg_reconstructor_val_loss:.4f} and PSNR: {avg_reconstructor_val_psnr}\")\n", " \n", " if avg_denoiser_val_loss < best_denoiser_val_loss:\n", " best_denoiser_val_loss = avg_denoiser_val_loss\n", " torch.save(denoiser.state_dict(), best_denoiser_model_path)\n", " print(f\"Denoiser model saved with improved validation loss: {avg_denoiser_val_loss:.4f} and PSNR: {avg_denoiser_val_psnr}\")\n", " \n", " return reconstructor, denoiser\n", "\n", "# Example usage with train and validation directories\n", "reconstructor_model, denoiser_model = train_reconstructor_and_denoiser(\n", " \"./TCIA_Split/train\", \"./TCIA_Split/val\", epochs=50, batch_size=8, grad_accumulation_steps=8\n", ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### largeR Inference" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import torch\n", "import pydicom\n", "import numpy as np\n", "import matplotlib.pyplot as plt\n", "import os\n", "\n", "# Import the models from the previous script\n", "# Assuming they are defined or imported correctly\n", "\n", "def load_dicom_image(dicom_path):\n", " \"\"\"\n", " Load and normalize a DICOM image\n", " \n", " Args:\n", " dicom_path (str): Path to the DICOM file\n", " \n", " Returns:\n", " torch.Tensor: Normalized image tensor\n", " \"\"\"\n", " # Read DICOM file\n", " dcm = pydicom.dcmread(dicom_path)\n", " image = dcm.pixel_array.astype(float)\n", " \n", " # Normalize image\n", " image = (image - image.min()) / (image.max() - image.min())\n", " \n", " # Convert to tensor\n", " image_tensor = torch.from_numpy(image).float().unsqueeze(0).unsqueeze(0) # Add batch and channel dimensions\n", " return image_tensor\n", "\n", "def calculate_psnr(output, target, max_pixel=1.0):\n", " \"\"\"\n", " Calculate Peak Signal-to-Noise Ratio (PSNR)\n", " \n", " Args:\n", " output (torch.Tensor): Reconstructed image\n", " target (torch.Tensor): Original image\n", " max_pixel (float): Maximum pixel value\n", " \n", " Returns:\n", " float: PSNR value\n", " \"\"\"\n", " # Ensure the values are in the correct range\n", " mse = torch.nn.functional.mse_loss(output, target)\n", " psnr = 20 * torch.log10(max_pixel / torch.sqrt(mse))\n", " return psnr.item()\n", "\n", "def visualize_reconstruction(original_image, reconstructed_image, psnr):\n", " \"\"\"\n", " Visualize original and reconstructed images\n", " \n", " Args:\n", " original_image (torch.Tensor): Original image tensor\n", " reconstructed_image (torch.Tensor): Reconstructed image tensor\n", " psnr (float): Peak Signal-to-Noise Ratio\n", " \"\"\"\n", " # Convert tensors to numpy for visualization\n", " original = original_image.squeeze().cpu().numpy()\n", " reconstructed = reconstructed_image.squeeze().cpu().numpy()\n", " \n", " # Create subplot\n", " fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))\n", " \n", " # Plot original image\n", " im1 = ax1.imshow(original, cmap='gray')\n", " ax1.set_title('Original Image')\n", " plt.colorbar(im1, ax=ax1)\n", " \n", " # Plot reconstructed image\n", " im2 = ax2.imshow(reconstructed, cmap='gray')\n", " ax2.set_title(f'Reconstructed Image\\nPSNR: {psnr:.2f} dB')\n", " plt.colorbar(im2, ax=ax2)\n", " \n", " plt.tight_layout()\n", " plt.show()\n", "\n", "def inference_single_image(reconstructor_model_path, test_dicom_path):\n", " \"\"\"\n", " Perform inference on a single DICOM image using both Reconstructor and Denoiser models.\n", " \n", " Args:\n", " reconstructor_model_path (str): Path to the saved Reconstructor model weights\n", " denoiser_model_path (str): Path to the saved Denoiser model weights\n", " test_dicom_path (str): Path to the test DICOM file\n", " \"\"\"\n", " # Set device\n", " device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n", " \n", " # Initialize models\n", " reconstructor = Reconstructor().to(device)\n", " \n", " # Load saved model weights\n", " reconstructor.load_state_dict(torch.load(reconstructor_model_path))\n", " \n", " reconstructor.eval()\n", " \n", " # Load and preprocess test image\n", " with torch.no_grad():\n", " test_image = load_dicom_image(test_dicom_path).to(device)\n", " \n", " # Perform reconstruction\n", " reconstructed_image = reconstructor(test_image)\n", " \n", " \n", " # Calculate PSNR for both original and denoised outputs\n", " psnr_reconstructed = calculate_psnr(reconstructed_image, test_image)\n", "\n", " print(f\"PSNR (Reconstructed): {psnr_reconstructed:.2f} dB\")\n", " \n", " # Visualize results\n", " visualize_reconstruction(test_image, reconstructed_image, psnr_reconstructed)\n", "\n", "# Example usage\n", "if __name__ == \"__main__\":\n", " # Paths to models and test image\n", " RECONSTRUCTOR_MODEL_PATH = \"./large_reconstructor.pth\" # Path to your saved Reconstructor model\n", " TEST_DICOM_PATH = \"./test.dcm\" # Replace with actual path to test DICOM \n", " # Run inference\n", " inference_single_image(RECONSTRUCTOR_MODEL_PATH, TEST_DICOM_PATH)" ] } ], "metadata": { "kernelspec": { "display_name": "tf", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.12.3" } }, "nbformat": 4, "nbformat_minor": 2 }