Upload mri_autoencoder.ipynb with huggingface_hub

mri_autoencoder.ipynb  CHANGED  (+311 -40)
@@ -261,7 +261,7 @@
 " dcm = pydicom.dcmread(full_path)\n",
 " \n",
 " # Check image dimensions\n",
-" if dcm.pixel_array.shape[0] < min_size or dcm.pixel_array.shape[1] < min_size:\n",
+" if dcm.pixel_array.shape[0] < min_size or dcm.pixel_array.shape[1] < min_size or dcm.pixel_array.shape[0] > min_size or dcm.pixel_array.shape[1] > min_size:\n",
 " files_to_delete.append(full_path)\n",
 " filtered_images += 1\n",
 " \n",
@@ -279,7 +279,7 @@
 " print(f\"Images deleted: {filtered_images}\\n\")\n",
 "\n",
 "# Usage\n",
-"input_dirs = [
+"input_dirs = [\"./TCIA_Split/train\", \"./TCIA_Split/val\"]\n",
 "filter_dicom_images(input_dirs)"
 ]
 },
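Editor's note: the committed change above turns the minimum-size filter into an exact-size filter; the new `<` / `>` chain is equivalent to `shape[0] != min_size or shape[1] != min_size`. `filter_dicom_images` itself lies outside this hunk; below is a hypothetical outline consistent with the calls shown (the `min_size` default and the `.dcm` suffix check are assumptions, not the notebook's code):

```python
import os
import pydicom

def filter_dicom_images(input_dirs, min_size=256):  # min_size default is an assumption
    """Delete DICOM files whose pixel array is not exactly min_size x min_size."""
    for root_dir in input_dirs:
        files_to_delete = []
        filtered_images = 0
        for root, _, files in os.walk(root_dir):
            for name in files:
                if not name.lower().endswith(".dcm"):  # suffix check is an assumption
                    continue
                full_path = os.path.join(root, name)
                dcm = pydicom.dcmread(full_path)
                # Equivalent to the committed <-or-> chain: keep only exact matches
                if dcm.pixel_array.shape[0] != min_size or dcm.pixel_array.shape[1] != min_size:
                    files_to_delete.append(full_path)
                    filtered_images += 1
        for path in files_to_delete:
            os.remove(path)
        print(f"Images deleted: {filtered_images}\n")
```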
@@ -928,7 +928,7 @@
 },
 {
 "cell_type": "code",
-"execution_count":
+"execution_count": 18,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -1347,9 +1347,9 @@
 "# Example usage\n",
 "if __name__ == \"__main__\":\n",
 " # Paths to models and test image\n",
-" RECONSTRUCTOR_MODEL_PATH =
-" DENOISER_MODEL_PATH =
-" TEST_DICOM_PATH =
+" RECONSTRUCTOR_MODEL_PATH = \"./small_reconstructor.pth\" # Path to your saved Reconstructor model\n",
+" DENOISER_MODEL_PATH = \"./small_denoiser.pth\" # Path to your saved Denoiser model\n",
+" TEST_DICOM_PATH = \"./test.dcm\" # Replace with actual path to test DICOM \n",
 " # Run inference\n",
 " inference_single_image(RECONSTRUCTOR_MODEL_PATH, DENOISER_MODEL_PATH, TEST_DICOM_PATH)"
 ]
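Editor's note: the inference cells in this commit call `torch.load(path)` with no `map_location`, so checkpoints saved on a GPU will fail to deserialize on a CPU-only machine. A hedged variant (model construction elided; the path is the one from the diff):

```python
import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Deserialize the checkpoint onto whatever device is actually available
state_dict = torch.load("./small_reconstructor.pth", map_location=device)
# reconstructor.load_state_dict(state_dict)  # model construction as in the notebook
```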
@@ -1644,7 +1644,150 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"###
+"### mediumRD Inference"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"import torch\n",
+"import pydicom\n",
+"import numpy as np\n",
+"import matplotlib.pyplot as plt\n",
+"import os\n",
+"\n",
+"# Import the models from the previous script\n",
+"# Assuming they are defined or imported correctly\n",
+"\n",
+"def load_dicom_image(dicom_path):\n",
+" \"\"\"\n",
+" Load and normalize a DICOM image\n",
+" \n",
+" Args:\n",
+" dicom_path (str): Path to the DICOM file\n",
+" \n",
+" Returns:\n",
+" torch.Tensor: Normalized image tensor\n",
+" \"\"\"\n",
+" # Read DICOM file\n",
+" dcm = pydicom.dcmread(dicom_path)\n",
+" image = dcm.pixel_array.astype(float)\n",
+" \n",
+" # Normalize image\n",
+" image = (image - image.min()) / (image.max() - image.min())\n",
+" \n",
+" # Convert to tensor\n",
+" image_tensor = torch.from_numpy(image).float().unsqueeze(0).unsqueeze(0) # Add batch and channel dimensions\n",
+" return image_tensor\n",
+"\n",
+"def calculate_psnr(output, target, max_pixel=1.0):\n",
+" \"\"\"\n",
+" Calculate Peak Signal-to-Noise Ratio (PSNR)\n",
+" \n",
+" Args:\n",
+" output (torch.Tensor): Reconstructed image\n",
+" target (torch.Tensor): Original image\n",
+" max_pixel (float): Maximum pixel value\n",
+" \n",
+" Returns:\n",
+" float: PSNR value\n",
+" \"\"\"\n",
+" # Ensure the values are in the correct range\n",
+" mse = torch.nn.functional.mse_loss(output, target)\n",
+" psnr = 20 * torch.log10(max_pixel / torch.sqrt(mse))\n",
+" return psnr.item()\n",
+"\n",
+"def visualize_reconstruction(original_image, reconstructed_image, psnr):\n",
+" \"\"\"\n",
+" Visualize original and reconstructed images\n",
+" \n",
+" Args:\n",
+" original_image (torch.Tensor): Original image tensor\n",
+" reconstructed_image (torch.Tensor): Reconstructed image tensor\n",
+" psnr (float): Peak Signal-to-Noise Ratio\n",
+" \"\"\"\n",
+" # Convert tensors to numpy for visualization\n",
+" original = original_image.squeeze().cpu().numpy()\n",
+" reconstructed = reconstructed_image.squeeze().cpu().numpy()\n",
+" \n",
+" # Create subplot\n",
+" fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))\n",
+" \n",
+" # Plot original image\n",
+" im1 = ax1.imshow(original, cmap='gray')\n",
+" ax1.set_title('Original Image')\n",
+" plt.colorbar(im1, ax=ax1)\n",
+" \n",
+" # Plot reconstructed image\n",
+" im2 = ax2.imshow(reconstructed, cmap='gray')\n",
+" ax2.set_title(f'Reconstructed Image\\nPSNR: {psnr:.2f} dB')\n",
+" plt.colorbar(im2, ax=ax2)\n",
+" \n",
+" plt.tight_layout()\n",
+" plt.show()\n",
+"\n",
+"def inference_single_image(reconstructor_model_path, denoiser_model_path, test_dicom_path):\n",
+" \"\"\"\n",
+" Perform inference on a single DICOM image using both Reconstructor and Denoiser models.\n",
+" \n",
+" Args:\n",
+" reconstructor_model_path (str): Path to the saved Reconstructor model weights\n",
+" denoiser_model_path (str): Path to the saved Denoiser model weights\n",
+" test_dicom_path (str): Path to the test DICOM file\n",
+" \"\"\"\n",
+" # Set device\n",
+" device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
+" \n",
+" # Initialize models\n",
+" reconstructor = Reconstructor().to(device)\n",
+" denoiser = Denoiser().to(device)\n",
+" \n",
+" # Load saved model weights\n",
+" reconstructor.load_state_dict(torch.load(reconstructor_model_path))\n",
+" denoiser.load_state_dict(torch.load(denoiser_model_path))\n",
+" \n",
+" reconstructor.eval()\n",
+" denoiser.eval()\n",
+" \n",
+" # Load and preprocess test image\n",
+" with torch.no_grad():\n",
+" test_image = load_dicom_image(test_dicom_path).to(device)\n",
+" \n",
+" # Perform reconstruction\n",
+" reconstructed_image = reconstructor(test_image)\n",
+" \n",
+" # Perform denoising on the reconstructed image\n",
+" denoised_image = denoiser(reconstructed_image)\n",
+" \n",
+" # Calculate PSNR for both original and denoised outputs\n",
+" psnr_reconstructed = calculate_psnr(reconstructed_image, test_image)\n",
+" psnr_denoised = calculate_psnr(denoised_image, test_image)\n",
+"\n",
+" print(f\"PSNR (Reconstructed): {psnr_reconstructed:.2f} dB\")\n",
+" print(f\"PSNR (Denoised): {psnr_denoised:.2f} dB\")\n",
+" \n",
+" # Visualize results\n",
+" visualize_reconstruction(test_image, reconstructed_image, psnr_reconstructed)\n",
+" visualize_reconstruction(test_image, denoised_image, psnr_denoised)\n",
+"\n",
+"# Example usage\n",
+"if __name__ == \"__main__\":\n",
+" # Paths to models and test image\n",
+" RECONSTRUCTOR_MODEL_PATH = \"./medium_reconstructor.pth\" # Path to your saved Reconstructor model\n",
+" DENOISER_MODEL_PATH = \"./medium_denoiser.pth\" # Path to your saved Denoiser model\n",
+" TEST_DICOM_PATH = \"./test.dcm\" # Replace with actual path to test DICOM \n",
+" # Run inference\n",
+" inference_single_image(RECONSTRUCTOR_MODEL_PATH, DENOISER_MODEL_PATH, TEST_DICOM_PATH)"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"### Larger Reconstructor U-Net (largeR)"
 ]
 },
 {
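Editor's note: the `calculate_psnr` helper added above implements PSNR = 20·log10(max_pixel / √MSE). A quick sanity check of that formula (values chosen for the check, not taken from the notebook); note also that the cell scores outputs against the *input* slice, so the reported PSNR reads as fidelity to the input rather than to a clean ground truth:

```python
import torch

# MSE = 0.01 with max_pixel = 1.0 should give exactly 20 dB:
# 20 * log10(1.0 / sqrt(0.01)) = 20 * log10(10) = 20
mse = torch.tensor(0.01)
psnr = 20 * torch.log10(1.0 / torch.sqrt(mse))
assert abs(psnr.item() - 20.0) < 1e-4
```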
@@ -1691,38 +1834,41 @@
 " self.bn2 = nn.BatchNorm2d(out_channels)\n",
 " self.conv3 = nn.Conv2d(out_channels, out_channels, 3, padding=1)\n",
 " self.bn3 = nn.BatchNorm2d(out_channels)\n",
+" self.conv4 = nn.Conv2d(out_channels, out_channels, 3, padding=1)\n",
+" self.bn4 = nn.BatchNorm2d(out_channels)\n",
 " \n",
 " def forward(self, x):\n",
 " x = F.relu(self.bn1(self.conv1(x)))\n",
 " x = F.relu(self.bn2(self.conv2(x)))\n",
 " x = F.relu(self.bn3(self.conv3(x)))\n",
+" x = F.relu(self.bn4(self.conv4(x)))\n",
 " return x\n",
 "\n",
 "class UNet(nn.Module):\n",
 " def __init__(self, in_channels=1, out_channels=1):\n",
 " super().__init__()\n",
 " # Encoder\n",
-" self.enc1 = UNetBlock(in_channels,
-" self.enc2 = UNetBlock(
-" self.enc3 = UNetBlock(
-" self.enc4 = UNetBlock(
-" self.enc5 = UNetBlock(
+" self.enc1 = UNetBlock(in_channels, 64)\n",
+" self.enc2 = UNetBlock(64, 128)\n",
+" self.enc3 = UNetBlock(128, 256)\n",
+" self.enc4 = UNetBlock(256, 512)\n",
+" self.enc5 = UNetBlock(512, 1024)\n",
 " \n",
 " # Decoder with learned upsampling (transposed convolutions)\n",
 "\n",
-" self.upconv5 = nn.ConvTranspose2d(
-" self.dec5 = UNetBlock(
+" self.upconv5 = nn.ConvTranspose2d(1024, 512, kernel_size=2, stride=2) # Learnable upsampling\n",
+" self.dec5 = UNetBlock(512 + 512, 512) # Adjust input channels after concatenation\n",
 "\n",
-" self.upconv4 = nn.ConvTranspose2d(
-" self.dec4 = UNetBlock(
+" self.upconv4 = nn.ConvTranspose2d(512, 256, kernel_size=2, stride=2) # Learnable upsampling\n",
+" self.dec4 = UNetBlock(256 + 256, 256) # Adjust input channels after concatenation\n",
 "\n",
-" self.upconv3 = nn.ConvTranspose2d(
-" self.dec3 = UNetBlock(
+" self.upconv3 = nn.ConvTranspose2d(256, 128, kernel_size=2, stride=2) # Learnable upsampling\n",
+" self.dec3 = UNetBlock(128 + 128, 128) # Adjust input channels after concatenation\n",
 "\n",
-" self.upconv2 = nn.ConvTranspose2d(
-" self.dec2 = UNetBlock(
+" self.upconv2 = nn.ConvTranspose2d(128, 64, kernel_size=2, stride=2) # Learnable upsampling\n",
+" self.dec2 = UNetBlock(64 + 64, 64) # Adjust input channels after concatenation\n",
 "\n",
-" self.dec1 = UNetBlock(
+" self.dec1 = UNetBlock(64, out_channels) # Final output\n",
 "\n",
 " self.pool = nn.MaxPool2d(2, 2)\n",
 " \n",
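Editor's note: each decoder stage halves the channel count with a ConvTranspose2d and then doubles it again by concatenating the matching encoder features, which is why each `dec` block above takes `c + c` input channels. A minimal shape check for the deepest stage, assuming input spatial dims divisible by 16:

```python
import torch
import torch.nn as nn

up5 = nn.ConvTranspose2d(1024, 512, kernel_size=2, stride=2)
e5 = torch.randn(1, 1024, 16, 16)  # bottleneck features
e4 = torch.randn(1, 512, 32, 32)   # skip features from enc4
d5 = torch.cat([up5(e5), e4], dim=1)
assert d5.shape == (1, 1024, 32, 32)  # 512 + 512 channels, spatial dims doubled
```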
@@ -1737,30 +1883,22 @@
 " # Decoder path with learned upsampling and skip connections\n",
 "\n",
 " d5 = self.upconv5(e5) # Learnable upsampling\n",
-"
-" # # Resize e4 to match d5's dimensions\n",
-" # e4 = F.interpolate(e4, size=(d5.size(2), d5.size(3)), mode='nearest')\n",
+"\n",
 " d5 = torch.cat([d5, e4], dim=1) # Concatenate with encoder features\n",
 " d5 = checkpoint(self.dec5, d5)\n",
 "\n",
 " d4 = self.upconv4(d5) # Learnable upsampling\n",
-"
-" # # Resize e3 to match d4's dimensions\n",
-" # e3 = F.interpolate(e3, size=(d4.size(2), d4.size(3)), mode='nearest')\n",
+"\n",
 " d4 = torch.cat([d4, e3], dim=1) # Concatenate with encoder features\n",
 " d4 = checkpoint(self.dec4, d4)\n",
 "\n",
 " d3 = self.upconv3(d4) # Learnable upsampling\n",
-"
-" # # Resize e2 to match d3's dimensions\n",
-" # e2 = F.interpolate(e2, size=(d3.size(2), d3.size(3)), mode='nearest')\n",
+"\n",
 " d3 = torch.cat([d3, e2], dim=1) # Concatenate with encoder features\n",
 " d3 = checkpoint(self.dec3, d3)\n",
 "\n",
 " d2 = self.upconv2(d3) # Learnable upsampling\n",
-"
-" # # Resize e1 to match d2's dimensions\n",
-" # e1 = F.interpolate(e1, size=(d2.size(2), d2.size(3)), mode='nearest')\n",
+"\n",
 " d2 = torch.cat([d2, e1], dim=1) # Concatenate with encoder features\n",
 " d2 = checkpoint(self.dec2, d2)\n",
 " \n",
@@ -1818,7 +1956,6 @@
 " def forward(self, x):\n",
 " return self.unet(x)\n",
 "\n",
-"\n",
 "class Denoiser(nn.Module):\n",
 " def __init__(self, in_channels=1, out_channels=1):\n",
 " super().__init__()\n",
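Editor's note: the decoder hunk above routes each block through `torch.utils.checkpoint.checkpoint`, which discards intermediate activations in the forward pass and recomputes them during backward, cutting peak memory for these wide blocks at the cost of extra compute. A self-contained sketch of the same pattern (the tiny block here is a stand-in, not the notebook's UNetBlock):

```python
import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint

block = nn.Sequential(nn.Conv2d(8, 8, 3, padding=1), nn.ReLU())
x = torch.randn(1, 8, 64, 64, requires_grad=True)
# Activations inside `block` are recomputed on backward instead of stored
y = checkpoint(block, x, use_reentrant=False)
y.sum().backward()
```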
@@ -1903,8 +2040,8 @@
 " avg_reconstructor_train_loss = reconstructor_total_loss / len(train_dataloader)\n",
 " avg_denoiser_train_loss = denoiser_total_loss / len(train_dataloader)\n",
 " \n",
-" avg_reconstructor_val_loss,
-" avg_denoiser_val_loss,
+" avg_reconstructor_val_loss, avg_reconstructor_val_psnr = calculate_loss_and_psnr(reconstructor, val_dataloader, reconstructor_criterion)\n",
+" avg_denoiser_val_loss, avg_denoiser_val_psnr = calculate_loss_and_psnr(denoiser, val_dataloader, denoiser_criterion)\n",
 " \n",
 " print(f\"Epoch [{epoch+1}/{epochs}] - \"\n",
 " f\"Reconstructor Train Loss: {avg_reconstructor_train_loss:.4f}, \"\n",
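Editor's note: `calculate_loss_and_psnr` is called above but defined outside this hunk. A hypothetical implementation consistent with its call sites; the batch layout, device handling, and the assumption that `criterion` is an MSE loss are all guesses, not the notebook's code:

```python
import torch

def calculate_loss_and_psnr(model, dataloader, criterion, device="cuda"):
    """Hypothetical: average validation loss and PSNR over a dataloader."""
    model.eval()
    total_loss = total_psnr = 0.0
    with torch.no_grad():
        for inputs, targets in dataloader:  # assumed (input, target) batches
            inputs, targets = inputs.to(device), targets.to(device)
            mse = criterion(model(inputs), targets)  # assumes an MSE criterion
            total_loss += mse.item()
            total_psnr += (20 * torch.log10(1.0 / torch.sqrt(mse))).item()
    n = len(dataloader)
    return total_loss / n, total_psnr / n
```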
@@ -1916,20 +2053,154 @@
 " if avg_reconstructor_val_loss < best_reconstructor_val_loss:\n",
 " best_reconstructor_val_loss = avg_reconstructor_val_loss\n",
 " torch.save(reconstructor.state_dict(), best_reconstructor_model_path)\n",
-" print(f\"Reconstructor model saved with improved validation loss: {avg_reconstructor_val_loss:.4f}\")\n",
+" print(f\"Reconstructor model saved with improved validation loss: {avg_reconstructor_val_loss:.4f} and PSNR: {avg_reconstructor_val_psnr}\")\n",
 " \n",
 " if avg_denoiser_val_loss < best_denoiser_val_loss:\n",
 " best_denoiser_val_loss = avg_denoiser_val_loss\n",
 " torch.save(denoiser.state_dict(), best_denoiser_model_path)\n",
-" print(f\"Denoiser model saved with improved validation loss: {avg_denoiser_val_loss:.4f}\")\n",
+" print(f\"Denoiser model saved with improved validation loss: {avg_denoiser_val_loss:.4f} and PSNR: {avg_denoiser_val_psnr}\")\n",
 " \n",
 " return reconstructor, denoiser\n",
 "\n",
 "# Example usage with train and validation directories\n",
 "reconstructor_model, denoiser_model = train_reconstructor_and_denoiser(\n",
-"
+" \"./TCIA_Split/train\", \"./TCIA_Split/val\", epochs=50, batch_size=8, grad_accumulation_steps=8\n",
 ")"
 ]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"### largeR Inference"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"import torch\n",
+"import pydicom\n",
+"import numpy as np\n",
+"import matplotlib.pyplot as plt\n",
+"import os\n",
+"\n",
+"# Import the models from the previous script\n",
+"# Assuming they are defined or imported correctly\n",
+"\n",
+"def load_dicom_image(dicom_path):\n",
+" \"\"\"\n",
+" Load and normalize a DICOM image\n",
+" \n",
+" Args:\n",
+" dicom_path (str): Path to the DICOM file\n",
+" \n",
+" Returns:\n",
+" torch.Tensor: Normalized image tensor\n",
+" \"\"\"\n",
+" # Read DICOM file\n",
+" dcm = pydicom.dcmread(dicom_path)\n",
+" image = dcm.pixel_array.astype(float)\n",
+" \n",
+" # Normalize image\n",
+" image = (image - image.min()) / (image.max() - image.min())\n",
+" \n",
+" # Convert to tensor\n",
+" image_tensor = torch.from_numpy(image).float().unsqueeze(0).unsqueeze(0) # Add batch and channel dimensions\n",
+" return image_tensor\n",
+"\n",
+"def calculate_psnr(output, target, max_pixel=1.0):\n",
+" \"\"\"\n",
+" Calculate Peak Signal-to-Noise Ratio (PSNR)\n",
+" \n",
+" Args:\n",
+" output (torch.Tensor): Reconstructed image\n",
+" target (torch.Tensor): Original image\n",
+" max_pixel (float): Maximum pixel value\n",
+" \n",
+" Returns:\n",
+" float: PSNR value\n",
+" \"\"\"\n",
+" # Ensure the values are in the correct range\n",
+" mse = torch.nn.functional.mse_loss(output, target)\n",
+" psnr = 20 * torch.log10(max_pixel / torch.sqrt(mse))\n",
+" return psnr.item()\n",
+"\n",
+"def visualize_reconstruction(original_image, reconstructed_image, psnr):\n",
+" \"\"\"\n",
+" Visualize original and reconstructed images\n",
+" \n",
+" Args:\n",
+" original_image (torch.Tensor): Original image tensor\n",
+" reconstructed_image (torch.Tensor): Reconstructed image tensor\n",
+" psnr (float): Peak Signal-to-Noise Ratio\n",
+" \"\"\"\n",
+" # Convert tensors to numpy for visualization\n",
+" original = original_image.squeeze().cpu().numpy()\n",
+" reconstructed = reconstructed_image.squeeze().cpu().numpy()\n",
+" \n",
+" # Create subplot\n",
+" fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))\n",
+" \n",
+" # Plot original image\n",
+" im1 = ax1.imshow(original, cmap='gray')\n",
+" ax1.set_title('Original Image')\n",
+" plt.colorbar(im1, ax=ax1)\n",
+" \n",
+" # Plot reconstructed image\n",
+" im2 = ax2.imshow(reconstructed, cmap='gray')\n",
+" ax2.set_title(f'Reconstructed Image\\nPSNR: {psnr:.2f} dB')\n",
+" plt.colorbar(im2, ax=ax2)\n",
+" \n",
+" plt.tight_layout()\n",
+" plt.show()\n",
+"\n",
+"def inference_single_image(reconstructor_model_path, test_dicom_path):\n",
+" \"\"\"\n",
+" Perform inference on a single DICOM image using both Reconstructor and Denoiser models.\n",
+" \n",
+" Args:\n",
+" reconstructor_model_path (str): Path to the saved Reconstructor model weights\n",
+" denoiser_model_path (str): Path to the saved Denoiser model weights\n",
+" test_dicom_path (str): Path to the test DICOM file\n",
+" \"\"\"\n",
+" # Set device\n",
+" device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
+" \n",
+" # Initialize models\n",
+" reconstructor = Reconstructor().to(device)\n",
+" \n",
+" # Load saved model weights\n",
+" reconstructor.load_state_dict(torch.load(reconstructor_model_path))\n",
+" \n",
+" reconstructor.eval()\n",
+" \n",
+" # Load and preprocess test image\n",
+" with torch.no_grad():\n",
+" test_image = load_dicom_image(test_dicom_path).to(device)\n",
+" \n",
+" # Perform reconstruction\n",
+" reconstructed_image = reconstructor(test_image)\n",
+" \n",
+" \n",
+" # Calculate PSNR for both original and denoised outputs\n",
+" psnr_reconstructed = calculate_psnr(reconstructed_image, test_image)\n",
+"\n",
+" print(f\"PSNR (Reconstructed): {psnr_reconstructed:.2f} dB\")\n",
+" \n",
+" # Visualize results\n",
+" visualize_reconstruction(test_image, reconstructed_image, psnr_reconstructed)\n",
+"\n",
+"# Example usage\n",
+"if __name__ == \"__main__\":\n",
+" # Paths to models and test image\n",
+" RECONSTRUCTOR_MODEL_PATH = \"./large_reconstructor.pth\" # Path to your saved Reconstructor model\n",
+" TEST_DICOM_PATH = \"./test.dcm\" # Replace with actual path to test DICOM \n",
+" # Run inference\n",
+" inference_single_image(RECONSTRUCTOR_MODEL_PATH, TEST_DICOM_PATH)"
+]
 }
 ],
 "metadata": {
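Editor's note: the training call above passes `grad_accumulation_steps=8` alongside `batch_size=8`, i.e. an effective batch of 64. A self-contained sketch of the accumulation pattern (the model, loss, and data here are stand-ins, not the notebook's):

```python
import torch
import torch.nn as nn

model = nn.Conv2d(1, 1, 3, padding=1)          # stand-in for the Reconstructor
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
criterion = nn.MSELoss()
batches = [(torch.randn(2, 1, 32, 32), torch.randn(2, 1, 32, 32)) for _ in range(16)]

accum = 8  # mirrors grad_accumulation_steps=8 in the training call
optimizer.zero_grad()
for step, (noisy, clean) in enumerate(batches):
    loss = criterion(model(noisy), clean) / accum  # scale so grads average over the window
    loss.backward()
    if (step + 1) % accum == 0:  # weight update once per accumulation window
        optimizer.step()
        optimizer.zero_grad()
```

One detail to flag in the same hunk: the largeR `inference_single_image` drops the denoiser from its signature, while its docstring still describes a `denoiser_model_path` argument.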
@@ -1948,7 +2219,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.
+"version": "3.12.3"
 }
 },
 "nbformat": 4,