aoxo committed on
Commit d13a6ed · verified · 1 Parent(s): 01d5636

Upload mri_autoencoder.ipynb

Files changed (1):
  1. mri_autoencoder.ipynb +1956 -0
mri_autoencoder.ipynb ADDED
@@ -0,0 +1,1956 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "metadata": {},
6
+ "source": [
7
+ "### Generating Train-Val Split from Dataset"
8
+ ]
9
+ },
10
+ {
11
+ "cell_type": "code",
12
+ "execution_count": null,
13
+ "metadata": {},
14
+ "outputs": [],
15
+ "source": [
16
+ "import os\n",
17
+ "import shutil\n",
18
+ "import random\n",
21
+ "\n",
22
+ "def recursive_copy_dicom(src_folder, dest_folder, file_counter):\n",
23
+ " \"\"\"\n",
24
+ " Recursively finds and copies DICOM files from the source to the destination folder, renaming them sequentially.\n",
25
+ " \n",
26
+ " :param src_folder: The source folder containing DICOM files (including subdirectories).\n",
27
+ " :param dest_folder: The destination folder where the files will be copied and renamed.\n",
28
+ " :param file_counter: The sequential counter for renaming files.\n",
29
+ " :return: List of renamed files for further splitting.\n",
30
+ " \"\"\"\n",
31
+ " renamed_files = []\n",
32
+ "\n",
33
+ " for root, dirs, files in os.walk(src_folder):\n",
34
+ " for dicom_file in files:\n",
35
+ " if dicom_file.lower().endswith('.dcm'):\n",
36
+ " # Get full path of the source file\n",
37
+ " src_file_path = os.path.join(root, dicom_file)\n",
38
+ " \n",
39
+ " # Create the new file path in the destination folder\n",
40
+ " dest_file_path = os.path.join(dest_folder, f\"{file_counter}.dcm\")\n",
41
+ " \n",
42
+ " # Copy and rename the file\n",
43
+ " shutil.copy(src_file_path, dest_file_path)\n",
44
+ " \n",
45
+ " # Append the renamed file to the list\n",
46
+ " renamed_files.append(f\"{file_counter}.dcm\")\n",
47
+ " \n",
48
+ " # Increment the file counter for the next file\n",
49
+ " file_counter += 1\n",
50
+ "\n",
51
+ " return renamed_files\n",
52
+ "\n",
53
+ "def split_and_transfer_files(file_list, dest_folder, split_factor):\n",
54
+ " \"\"\"\n",
55
+ " Splits the list of renamed files into train and val sets and moves them into the appropriate folders.\n",
56
+ " \n",
57
+ " :param file_list: List of renamed DICOM files.\n",
58
+ " :param dest_folder: Destination folder where train and val subfolders will be created.\n",
59
+ " :param split_factor: The ratio of files to go into the train subfolder.\n",
60
+ " \"\"\"\n",
61
+ " # Ensure the destination folder and subfolders exist\n",
62
+ " train_folder = os.path.join(dest_folder, 'train')\n",
63
+ " val_folder = os.path.join(dest_folder, 'val')\n",
64
+ " \n",
65
+ " if not os.path.exists(train_folder):\n",
66
+ " os.makedirs(train_folder)\n",
67
+ " \n",
68
+ " if not os.path.exists(val_folder):\n",
69
+ " os.makedirs(val_folder)\n",
70
+ "\n",
71
+ " # Shuffle the files for randomness\n",
72
+ " random.shuffle(file_list)\n",
73
+ "\n",
74
+ " # Calculate the number of files for the train and validation sets\n",
75
+ " split_index = int(len(file_list) * split_factor)\n",
76
+ " \n",
77
+ " # Split the files into train and val sets\n",
78
+ " train_files = file_list[:split_index]\n",
79
+ " val_files = file_list[split_index:]\n",
80
+ "\n",
81
+ " # Move the files to the respective folders\n",
82
+ " for file in train_files:\n",
83
+ " src_file = os.path.join(dest_folder, file)\n",
84
+ " dest_file = os.path.join(train_folder, file)\n",
85
+ " shutil.move(src_file, dest_file)\n",
86
+ " print(f\"Moved {file} to train folder\")\n",
87
+ " \n",
88
+ " for file in val_files:\n",
89
+ " src_file = os.path.join(dest_folder, file)\n",
90
+ " dest_file = os.path.join(val_folder, file)\n",
91
+ " shutil.move(src_file, dest_file)\n",
92
+ " print(f\"Moved {file} to val folder\")\n",
93
+ "\n",
94
+ "def process_dicom_files(src_folder, dest_folder, split_factor):\n",
95
+ " \"\"\"\n",
96
+ " Recursively finds, renames, copies DICOM files, and splits them into train and val sets.\n",
97
+ " \n",
98
+ " :param src_folder: The source folder containing DICOM files (including subdirectories).\n",
99
+ " :param dest_folder: The destination folder where the renamed files and the train/val split will be created.\n",
100
+ " :param split_factor: The ratio of files to go into the train subfolder.\n",
101
+ " \"\"\"\n",
102
+ " # Ensure the destination folder exists\n",
103
+ " if not os.path.exists(dest_folder):\n",
104
+ " os.makedirs(dest_folder)\n",
105
+ "\n",
106
+ " # Initialize file counter\n",
107
+ " file_counter = 1\n",
108
+ "\n",
109
+ " # Recursively copy DICOM files and rename them\n",
110
+ " renamed_files = recursive_copy_dicom(src_folder, dest_folder, file_counter)\n",
111
+ "\n",
112
+ " # Step 2: Split the renamed files into train and val sets\n",
113
+ " split_and_transfer_files(renamed_files, dest_folder, split_factor)\n",
114
+ "\n",
115
+ "# Example usage:\n",
116
+ "src_folder = r\"F:\\TCIA\" # Replace with your source folder path\n",
117
+ "dest_folder = r\"F:\\TCIA_Split\" # Destination folder for the renamed files and train/val split\n",
118
+ "split_factor = 0.95 # 90% of files will go to 'train', 10% will go to 'val'\n",
119
+ "\n",
120
+ "# Perform the entire process\n",
121
+ "process_dicom_files(src_folder, dest_folder, split_factor)"
122
+ ]
123
+ },
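+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "A quick sanity check of the split (a sketch; assumes the `dest_folder` used above). With `split_factor = 0.95`, the train/val counts should come out at roughly 95%/5% of the total."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "\n",
+ "# Count the .dcm files that ended up in each split (paths assumed from above)\n",
+ "for split in ('train', 'val'):\n",
+ " split_dir = os.path.join(r\"F:\\TCIA_Split\", split)\n",
+ " n = sum(1 for f in os.listdir(split_dir) if f.lower().endswith('.dcm'))\n",
+ " print(f\"{split}: {n} files\")"
+ ]
+ },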
124
+ {
125
+ "cell_type": "markdown",
126
+ "metadata": {},
127
+ "source": [
128
+ "### Faster Train-Val Split Generation"
129
+ ]
130
+ },
131
+ {
132
+ "cell_type": "code",
133
+ "execution_count": null,
134
+ "metadata": {},
135
+ "outputs": [],
136
+ "source": [
137
+ "import os\n",
138
+ "import shutil\n",
139
+ "import random\n",
+ "from concurrent.futures import ThreadPoolExecutor\n",
143
+ "\n",
144
+ "def recursive_copy_dicom(src_folder, dest_folder, file_counter):\n",
145
+ " \"\"\"\n",
146
+ " Recursively finds and copies DICOM files from the source to the destination folder, renaming them sequentially.\n",
147
+ " \"\"\"\n",
148
+ " renamed_files = []\n",
149
+ "\n",
150
+ " for root, dirs, files in os.walk(src_folder):\n",
151
+ " for dicom_file in files:\n",
152
+ " if dicom_file.lower().endswith('.dcm'):\n",
153
+ " # Get full path of the source file\n",
154
+ " src_file_path = os.path.join(root, dicom_file)\n",
155
+ " \n",
156
+ " # Create the new file path in the destination folder\n",
157
+ " dest_file_path = os.path.join(dest_folder, f\"{file_counter}.dcm\")\n",
158
+ " \n",
159
+ " # Copy and rename the file\n",
160
+ " shutil.copy(src_file_path, dest_file_path)\n",
161
+ " \n",
162
+ " # Append the renamed file to the list\n",
163
+ " renamed_files.append(f\"{file_counter}.dcm\")\n",
164
+ " \n",
165
+ " # Increment the file counter for the next file\n",
166
+ " file_counter += 1\n",
167
+ "\n",
168
+ " return renamed_files\n",
169
+ "\n",
170
+ "def split_and_transfer_files(file_list, dest_folder, split_factor):\n",
171
+ " \"\"\"\n",
172
+ " Splits the list of renamed files into train and val sets and moves them into the appropriate folders.\n",
173
+ " \"\"\"\n",
174
+ " train_folder = os.path.join(dest_folder, 'train')\n",
175
+ " val_folder = os.path.join(dest_folder, 'val')\n",
176
+ " \n",
177
+ " if not os.path.exists(train_folder):\n",
178
+ " os.makedirs(train_folder)\n",
179
+ " \n",
180
+ " if not os.path.exists(val_folder):\n",
181
+ " os.makedirs(val_folder)\n",
182
+ "\n",
183
+ " # Shuffle the files for randomness\n",
184
+ " random.shuffle(file_list)\n",
185
+ "\n",
186
+ " # Calculate the number of files for the train and validation sets\n",
187
+ " split_index = int(len(file_list) * split_factor)\n",
188
+ " \n",
189
+ " # Split the files into train and val sets\n",
190
+ " train_files = file_list[:split_index]\n",
191
+ " val_files = file_list[split_index:]\n",
192
+ "\n",
193
+ " # Move files in parallel using multiprocessing\n",
194
+ " with multiprocessing.Pool(processes=multiprocessing.cpu_count()) as pool:\n",
195
+ " pool.starmap(move_file, [(file, dest_folder, 'train') for file in train_files])\n",
196
+ " pool.starmap(move_file, [(file, dest_folder, 'val') for file in val_files])\n",
197
+ "\n",
198
+ "def move_file(file, dest_folder, folder_name):\n",
199
+ " \"\"\"Move file from the source to destination folder.\"\"\"\n",
200
+ " src_file = os.path.join(dest_folder, file)\n",
201
+ " dest_file = os.path.join(dest_folder, folder_name, file)\n",
202
+ " shutil.move(src_file, dest_file)\n",
203
+ " print(f\"Moved {file} to {folder_name} folder\")\n",
204
+ "\n",
205
+ "def process_dicom_files(src_folder, dest_folder, split_factor):\n",
206
+ " \"\"\"\n",
207
+ " Recursively finds, renames, copies DICOM files, and splits them into train and val sets.\n",
208
+ " \"\"\"\n",
209
+ " # Ensure the destination folder exists\n",
210
+ " if not os.path.exists(dest_folder):\n",
211
+ " os.makedirs(dest_folder)\n",
212
+ "\n",
213
+ " # Initialize file counter\n",
214
+ " file_counter = 1\n",
215
+ "\n",
216
+ " # Recursively copy DICOM files and rename them\n",
217
+ " renamed_files = recursive_copy_dicom(src_folder, dest_folder, file_counter)\n",
218
+ "\n",
219
+ " # Step 2: Split the renamed files into train and val sets\n",
220
+ " split_and_transfer_files(renamed_files, dest_folder, split_factor)\n",
221
+ "\n",
222
+ "# Example usage:\n",
223
+ "src_folder = r\"F:\\TCIA\" # Replace with your source folder path\n",
224
+ "dest_folder = r\"D:\\TCIA_Split\" # Destination folder for the renamed files and train/val split\n",
225
+ "split_factor = 0.95 # 90% of files will go to 'train', 10% will go to 'val'\n",
226
+ "\n",
227
+ "# Perform the entire process\n",
228
+ "process_dicom_files(src_folder, dest_folder, split_factor)"
229
+ ]
230
+ },
231
+ {
232
+ "cell_type": "markdown",
233
+ "metadata": {},
234
+ "source": [
235
+ "### Filtering through only 512 x 512 scans"
236
+ ]
237
+ },
238
+ {
239
+ "cell_type": "code",
240
+ "execution_count": null,
241
+ "metadata": {},
242
+ "outputs": [],
243
+ "source": [
244
+ "import os\n",
245
+ "import pydicom\n",
246
+ "\n",
247
+ "def filter_dicom_images(input_dirs, min_size=512):\n",
248
+ " for dir_path in input_dirs:\n",
249
+ " total_images = 0\n",
250
+ " filtered_images = 0\n",
251
+ " \n",
252
+ " # Use a list to store files to delete to avoid modifying directory during iteration\n",
253
+ " files_to_delete = []\n",
254
+ " \n",
255
+ " for filename in os.listdir(dir_path):\n",
256
+ " if filename.endswith('.dcm'):\n",
257
+ " full_path = os.path.join(dir_path, filename)\n",
258
+ " \n",
259
+ " try:\n",
260
+ " # Read DICOM file\n",
261
+ " dcm = pydicom.dcmread(full_path)\n",
262
+ " \n",
263
+ " # Check image dimensions\n",
264
+ " if dcm.pixel_array.shape[0] < min_size or dcm.pixel_array.shape[1] < min_size:\n",
265
+ " files_to_delete.append(full_path)\n",
266
+ " filtered_images += 1\n",
267
+ " \n",
268
+ " total_images += 1\n",
269
+ " \n",
270
+ " except Exception as e:\n",
271
+ " print(f\"Error processing {filename}: {e}\")\n",
272
+ " \n",
273
+ " # Delete files\n",
274
+ " for file_path in files_to_delete:\n",
275
+ " os.remove(file_path)\n",
276
+ " \n",
277
+ " print(f\"Directory: {dir_path}\")\n",
278
+ " print(f\"Total images: {total_images}\")\n",
279
+ " print(f\"Images deleted: {filtered_images}\\n\")\n",
280
+ "\n",
281
+ "# Usage\n",
282
+ "input_dirs = [r\"D:\\TCIA_Split\\train\", r\"D:\\TCIA_Split\\val\"]\n",
283
+ "filter_dicom_images(input_dirs)"
284
+ ]
285
+ },
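+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "A lighter-weight alternative (a sketch, not part of the pipeline above): the `Rows`/`Columns` header tags already give the image size, so reading with `stop_before_pixels=True` avoids decoding the pixel data and makes the size check much faster."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import pydicom\n",
+ "\n",
+ "def dicom_size(path):\n",
+ " # Read only the header; Rows/Columns describe the pixel matrix\n",
+ " ds = pydicom.dcmread(path, stop_before_pixels=True)\n",
+ " return int(ds.Rows), int(ds.Columns)\n",
+ "\n",
+ "# Example (hypothetical path):\n",
+ "# print(dicom_size(r\"D:\\TCIA_Split\\train\\1.dcm\")) # e.g. (512, 512)"
+ ]
+ },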
286
+ {
287
+ "cell_type": "markdown",
288
+ "metadata": {},
289
+ "source": [
290
+ "### Basic U-Net"
291
+ ]
292
+ },
293
+ {
294
+ "cell_type": "code",
295
+ "execution_count": null,
296
+ "metadata": {},
297
+ "outputs": [],
298
+ "source": [
299
+ "import torch\n",
300
+ "import torch.nn as nn\n",
301
+ "import torch.nn.functional as F\n",
302
+ "import pydicom\n",
303
+ "import numpy as np\n",
304
+ "from torch.utils.data import Dataset, DataLoader\n",
305
+ "import os\n",
306
+ "from torch.utils.checkpoint import checkpoint\n",
307
+ "from tqdm import tqdm # Import tqdm for progress bar\n",
308
+ "\n",
309
+ "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
310
+ "\n",
311
+ "class MedicalImageDataset(Dataset):\n",
312
+ " def __init__(self, dicom_dir):\n",
313
+ " self.dicom_files = [os.path.join(dicom_dir, f) for f in os.listdir(dicom_dir) if f.endswith('.dcm')]\n",
314
+ " \n",
315
+ " def __len__(self):\n",
316
+ " return len(self.dicom_files)\n",
317
+ " \n",
318
+ " def __getitem__(self, idx):\n",
319
+ " # Read DICOM file and normalize\n",
320
+ " dcm = pydicom.dcmread(self.dicom_files[idx])\n",
321
+ " image = dcm.pixel_array.astype(float)\n",
322
+ " image = (image - image.min()) / (image.max() - image.min())\n",
323
+ " \n",
324
+ " # Convert to tensor\n",
325
+ " image_tensor = torch.from_numpy(image).float().unsqueeze(0)\n",
326
+ " return image_tensor, image_tensor\n",
327
+ "\n",
328
+ "class UNetBlock(nn.Module):\n",
329
+ " def __init__(self, in_channels, out_channels):\n",
330
+ " super().__init__()\n",
331
+ " self.conv1 = nn.Conv2d(in_channels, out_channels, 3, padding=1)\n",
332
+ " self.bn1 = nn.BatchNorm2d(out_channels)\n",
333
+ " self.conv2 = nn.Conv2d(out_channels, out_channels, 3, padding=1)\n",
334
+ " self.bn2 = nn.BatchNorm2d(out_channels)\n",
335
+ " \n",
336
+ " def forward(self, x):\n",
337
+ " x = F.relu(self.bn1(self.conv1(x)))\n",
338
+ " x = F.relu(self.bn2(self.conv2(x)))\n",
339
+ " return x\n",
340
+ "\n",
341
+ "class UNet(nn.Module):\n",
342
+ " def __init__(self, in_channels=1, out_channels=1):\n",
343
+ " super().__init__()\n",
344
+ " # Encoder\n",
345
+ " self.enc1 = UNetBlock(in_channels, 64)\n",
346
+ " self.enc2 = UNetBlock(64, 128)\n",
347
+ " self.enc3 = UNetBlock(128, 256)\n",
348
+ " \n",
349
+ " # Decoder\n",
350
+ " self.dec3 = UNetBlock(256 + 128, 128) # Adjust for concatenation with skip connection\n",
351
+ " self.dec2 = UNetBlock(128 + 64, 64) # Adjust for concatenation with skip connection\n",
352
+ " self.dec1 = UNetBlock(64, out_channels)\n",
353
+ " \n",
354
+ " # Pooling and upsampling\n",
355
+ " self.pool = nn.MaxPool2d(2, 2)\n",
356
+ " self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)\n",
357
+ " \n",
358
+ " def forward(self, x):\n",
359
+ " # Encoder path\n",
360
+ " e1 = checkpoint(self.enc1, x)\n",
361
+ " e2 = checkpoint(self.enc2, self.pool(e1))\n",
362
+ " e3 = checkpoint(self.enc3, self.pool(e2))\n",
363
+ " \n",
364
+ " # Decoder path with skip connections\n",
365
+ " d3 = self.upsample(e3)\n",
366
+ " d3 = torch.cat([d3, e2], dim=1) # Concatenate along channels\n",
367
+ " d3 = checkpoint(self.dec3, d3)\n",
368
+ " \n",
369
+ " d2 = self.upsample(d3)\n",
370
+ " d2 = torch.cat([d2, e1], dim=1) # Concatenate along channels\n",
371
+ " d2 = checkpoint(self.dec2, d2)\n",
372
+ " \n",
373
+ " d1 = self.dec1(d2) # No checkpointing for final output layer\n",
374
+ " \n",
375
+ " return d1\n",
376
+ "\n",
377
+ "def calculate_loss(model, dataloader, criterion):\n",
378
+ " model.eval()\n",
379
+ " total_loss = 0\n",
380
+ " with torch.no_grad():\n",
381
+ " for images, targets in dataloader:\n",
382
+ " images, targets = images.to(device), targets.to(device)\n",
383
+ " outputs = model(images)\n",
384
+ " loss = criterion(outputs, targets)\n",
385
+ " total_loss += loss.item()\n",
386
+ " return total_loss / len(dataloader)\n",
387
+ "\n",
388
+ "def calculate_psnr(output, target, max_pixel=1.0):\n",
389
+ " # Ensure the values are in the correct range\n",
390
+ " mse = F.mse_loss(output, target)\n",
391
+ " psnr = 20 * torch.log10(max_pixel / torch.sqrt(mse))\n",
392
+ " return psnr.item()\n",
393
+ "\n",
394
+ "def calculate_loss_and_psnr(model, dataloader, criterion):\n",
395
+ " model.eval()\n",
396
+ " total_loss = 0\n",
397
+ " total_psnr = 0\n",
398
+ " num_batches = len(dataloader)\n",
399
+ " \n",
400
+ " with torch.no_grad():\n",
401
+ " for images, targets in dataloader:\n",
402
+ " images, targets = images.to(device), targets.to(device)\n",
403
+ " outputs = model(images)\n",
404
+ " \n",
405
+ " # Calculate MSE loss\n",
406
+ " loss = criterion(outputs, targets)\n",
407
+ " total_loss += loss.item()\n",
408
+ " \n",
409
+ " # Calculate PSNR\n",
410
+ " psnr = calculate_psnr(outputs, targets)\n",
411
+ " total_psnr += psnr\n",
412
+ " \n",
413
+ " avg_loss = total_loss / num_batches\n",
414
+ " avg_psnr = total_psnr / num_batches\n",
415
+ " \n",
416
+ " return avg_loss, avg_psnr\n",
417
+ "\n",
421
+ "def train_unet(dicom_dir, val_dicom_dir, epochs=50, batch_size=4, grad_accumulation_steps=2):\n",
422
+ " # Dataset and DataLoader\n",
423
+ " dataset = MedicalImageDataset(dicom_dir)\n",
424
+ " train_dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)\n",
425
+ " val_dataset = MedicalImageDataset(val_dicom_dir)\n",
426
+ " val_dataloader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)\n",
427
+ " \n",
428
+ " # Model, Loss, Optimizer\n",
429
+ " model = UNet().to(device)\n",
430
+ " criterion = nn.MSELoss()\n",
431
+ " optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)\n",
432
+ " \n",
433
+ " # Training loop with tqdm\n",
434
+ " for epoch in range(epochs):\n",
435
+ " model.train()\n",
436
+ " total_loss = 0\n",
437
+ " optimizer.zero_grad()\n",
438
+ " \n",
439
+ " with tqdm(train_dataloader, unit=\"batch\", desc=f\"Epoch {epoch+1}/{epochs}\") as tepoch:\n",
440
+ " for i, (images, targets) in enumerate(tepoch):\n",
441
+ " images, targets = images.to(device), targets.to(device)\n",
442
+ " \n",
443
+ " # Forward pass\n",
444
+ " outputs = model(images)\n",
445
+ " loss = criterion(outputs, targets)\n",
446
+ " loss.backward()\n",
447
+ " \n",
448
+ " # Gradient accumulation\n",
449
+ " if (i + 1) % grad_accumulation_steps == 0 or (i + 1) == len(tepoch):\n",
450
+ " optimizer.step()\n",
451
+ " optimizer.zero_grad()\n",
452
+ " \n",
453
+ " total_loss += loss.item()\n",
454
+ " \n",
455
+ " # Update the tqdm progress bar with the current loss\n",
456
+ " tepoch.set_postfix(loss=total_loss / ((i + 1) * batch_size))\n",
457
+ " \n",
458
+ " avg_train_loss = total_loss / len(train_dataloader)\n",
459
+ " avg_val_loss, avg_val_psnr = calculate_loss_and_psnr(model, val_dataloader, criterion)\n",
460
+ " \n",
461
+ " print(f\"Epoch [{epoch+1}/{epochs}] - Train Loss: {avg_train_loss:.4f}, Validation Loss: {avg_val_loss:.4f}, Validation PSNR: {avg_val_psnr:.4f}\")\n",
462
+ "\n",
463
+ " if avg_val_loss < best_val_loss:\n",
464
+ " best_val_loss = avg_val_loss\n",
465
+ " torch.save(model.state_dict(), best_model_path)\n",
466
+ " print(f\"Model saved with improved validation loss: {avg_val_loss:.4f}\")\n",
467
+ " \n",
468
+ " return model\n",
469
+ "\n",
470
+ "# Example usage with train and validation directories\n",
471
+ "model = train_unet(r\"D:\\TCIA_Split\\train\", r\"D:\\TCIA_Split\\val\", epochs=50, batch_size=4, grad_accumulation_steps=8)"
472
+ ]
473
+ },
474
+ {
475
+ "cell_type": "markdown",
476
+ "metadata": {},
477
+ "source": [
478
+ "### U-Net Inference"
479
+ ]
480
+ },
481
+ {
482
+ "cell_type": "code",
483
+ "execution_count": null,
484
+ "metadata": {},
485
+ "outputs": [],
486
+ "source": [
487
+ "import torch\n",
488
+ "import torch.nn as nn\n",
489
+ "import pydicom\n",
490
+ "import numpy as np\n",
491
+ "import matplotlib.pyplot as plt\n",
492
+ "import os\n",
493
+ "\n",
494
+ "# Import the UNet and related classes from the previous script\n",
495
+ "# Replace with the actual import method\n",
496
+ "\n",
497
+ "def load_dicom_image(dicom_path):\n",
498
+ " \"\"\"\n",
499
+ " Load and normalize a DICOM image\n",
500
+ " \n",
501
+ " Args:\n",
502
+ " dicom_path (str): Path to the DICOM file\n",
503
+ " \n",
504
+ " Returns:\n",
505
+ " torch.Tensor: Normalized image tensor\n",
506
+ " \"\"\"\n",
507
+ " # Read DICOM file\n",
508
+ " dcm = pydicom.dcmread(dicom_path)\n",
509
+ " image = dcm.pixel_array.astype(float)\n",
510
+ " \n",
511
+ " # Normalize image\n",
512
+ " image = (image - image.min()) / (image.max() - image.min())\n",
513
+ " \n",
514
+ " # Convert to tensor\n",
515
+ " image_tensor = torch.from_numpy(image).float().unsqueeze(0).unsqueeze(0)\n",
516
+ " return image_tensor\n",
517
+ "\n",
518
+ "def calculate_psnr(output, target, max_pixel=1.0):\n",
519
+ " \"\"\"\n",
520
+ " Calculate Peak Signal-to-Noise Ratio (PSNR)\n",
521
+ " \n",
522
+ " Args:\n",
523
+ " output (torch.Tensor): Reconstructed image\n",
524
+ " target (torch.Tensor): Original image\n",
525
+ " max_pixel (float): Maximum pixel value\n",
526
+ " \n",
527
+ " Returns:\n",
528
+ " float: PSNR value\n",
529
+ " \"\"\"\n",
530
+ " # Ensure the values are in the correct range\n",
531
+ " mse = torch.nn.functional.mse_loss(output, target)\n",
532
+ " psnr = 20 * torch.log10(max_pixel / torch.sqrt(mse))\n",
533
+ " return psnr.item()\n",
534
+ "\n",
535
+ "def visualize_reconstruction(original_image, reconstructed_image, psnr):\n",
536
+ " \"\"\"\n",
537
+ " Visualize original and reconstructed images\n",
538
+ " \n",
539
+ " Args:\n",
540
+ " original_image (torch.Tensor): Original image tensor\n",
541
+ " reconstructed_image (torch.Tensor): Reconstructed image tensor\n",
542
+ " psnr (float): Peak Signal-to-Noise Ratio\n",
543
+ " \"\"\"\n",
544
+ " # Convert tensors to numpy for visualization\n",
545
+ " original = original_image.squeeze().cpu().numpy()\n",
546
+ " reconstructed = reconstructed_image.squeeze().cpu().numpy()\n",
547
+ " \n",
548
+ " # Create subplot\n",
549
+ " fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))\n",
550
+ " \n",
551
+ " # Plot original image\n",
552
+ " im1 = ax1.imshow(original, cmap='gray')\n",
553
+ " ax1.set_title('Original Image')\n",
554
+ " plt.colorbar(im1, ax=ax1)\n",
555
+ " \n",
556
+ " # Plot reconstructed image\n",
557
+ " im2 = ax2.imshow(reconstructed, cmap='gray')\n",
558
+ " ax2.set_title(f'Reconstructed Image\\nPSNR: {psnr:.2f} dB')\n",
559
+ " plt.colorbar(im2, ax=ax2)\n",
560
+ " \n",
561
+ " plt.tight_layout()\n",
562
+ " plt.show()\n",
563
+ "\n",
564
+ "def inference_single_image(model_path, test_dicom_path):\n",
565
+ " \"\"\"\n",
566
+ " Perform inference on a single DICOM image\n",
567
+ " \n",
568
+ " Args:\n",
569
+ " model_path (str): Path to the saved model weights\n",
570
+ " test_dicom_path (str): Path to the test DICOM file\n",
571
+ " \"\"\"\n",
572
+ " # Set device\n",
573
+ " device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
574
+ " \n",
575
+ " # Initialize model\n",
576
+ " model = UNet().to(device)\n",
577
+ " \n",
578
+ " # Load saved model weights\n",
579
+ " model.load_state_dict(torch.load(model_path))\n",
580
+ " model.eval()\n",
581
+ " \n",
582
+ " # Load and preprocess test image\n",
583
+ " with torch.no_grad():\n",
584
+ " test_image = load_dicom_image(test_dicom_path).to(device)\n",
585
+ " \n",
586
+ " # Perform reconstruction\n",
587
+ " reconstructed_image = model(test_image)\n",
588
+ " \n",
589
+ " # Calculate PSNR\n",
590
+ " psnr = calculate_psnr(reconstructed_image, test_image)\n",
591
+ "\n",
592
+ " print(f\"PSNR: {psnr:.2f} dB\")\n",
593
+ " \n",
594
+ " # Visualize results\n",
595
+ " visualize_reconstruction(test_image, reconstructed_image, psnr)\n",
596
+ "\n",
597
+ "# Example usage\n",
598
+ "if __name__ == \"__main__\":\n",
599
+ " # Paths to model and test image\n",
600
+ " MODEL_PATH = r\"D:\\VSCODE\\PreSense\\best_model.pth\" # Path to your saved model\n",
601
+ " TEST_DICOM_PATH = r\"D:\\VSCODE\\PreSense\\test.dcm\" # Replace with actual path to test DICOM\n",
602
+ " \n",
603
+ " # Run inference\n",
604
+ " inference_single_image(MODEL_PATH, TEST_DICOM_PATH)"
605
+ ]
606
+ },
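+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "PSNR tracks pixel-wise error only; SSIM is a common complementary metric for reconstruction quality. A minimal sketch, assuming scikit-image is installed; `calculate_ssim` is a helper introduced here, not part of the cells above."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from skimage.metrics import structural_similarity as ssim\n",
+ "\n",
+ "def calculate_ssim(output, target):\n",
+ " # Tensors -> 2-D numpy arrays; inputs were normalized to [0, 1] above\n",
+ " out = output.squeeze().cpu().numpy()\n",
+ " tgt = target.squeeze().cpu().numpy()\n",
+ " return ssim(tgt, out, data_range=1.0)\n",
+ "\n",
+ "# Usage after inference (hypothetical tensors from the cell above):\n",
+ "# print(f\"SSIM: {calculate_ssim(reconstructed_image, test_image):.4f}\")"
+ ]
+ },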
607
+ {
608
+ "cell_type": "markdown",
609
+ "metadata": {},
610
+ "source": [
611
+ "### U-Net Inference for Complete Scan"
612
+ ]
613
+ },
614
+ {
615
+ "cell_type": "code",
616
+ "execution_count": null,
617
+ "metadata": {},
618
+ "outputs": [],
619
+ "source": [
620
+ "import torch\n",
621
+ "import torch.nn as nn\n",
622
+ "import pydicom\n",
623
+ "import numpy as np\n",
624
+ "import os\n",
625
+ "from tqdm import tqdm\n",
626
+ "\n",
627
+ "# Import the UNet and related classes from the previous script\n",
628
+ "\n",
629
+ "def load_dicom_image(dicom_path):\n",
630
+ " \"\"\"\n",
631
+ " Load and normalize a DICOM image\n",
632
+ " \n",
633
+ " Args:\n",
634
+ " dicom_path (str): Path to the DICOM file\n",
635
+ " \n",
636
+ " Returns:\n",
637
+ " torch.Tensor: Normalized image tensor\n",
638
+ " \"\"\"\n",
639
+ " # Read DICOM file\n",
640
+ " dcm = pydicom.dcmread(dicom_path)\n",
641
+ " image = dcm.pixel_array.astype(float)\n",
642
+ " \n",
643
+ " # Normalize image\n",
644
+ " image = (image - image.min()) / (image.max() - image.min())\n",
645
+ " \n",
646
+ " # Convert to tensor\n",
647
+ " image_tensor = torch.from_numpy(image).float().unsqueeze(0).unsqueeze(0)\n",
648
+ " return image_tensor, dcm\n",
649
+ "\n",
650
+ "def save_reconstructed_dicom(image_tensor, original_dcm, output_path):\n",
651
+ " \"\"\"\n",
652
+ " Save reconstructed image as a DICOM file\n",
653
+ " \n",
654
+ " Args:\n",
655
+ " image_tensor (torch.Tensor): Reconstructed image tensor\n",
656
+ " original_dcm (pydicom.Dataset): Original DICOM dataset\n",
657
+ " output_path (str): Path to save the reconstructed image\n",
658
+ " \"\"\"\n",
659
+ " # Convert tensor to numpy and scale back to original pixel range\n",
660
+ " reconstructed_image = image_tensor.squeeze().cpu().numpy()\n",
661
+ " \n",
662
+ " # Scale to original pixel array range\n",
663
+ " min_val = original_dcm.pixel_array.min()\n",
664
+ " max_val = original_dcm.pixel_array.max()\n",
665
+ " reconstructed_image = reconstructed_image * (max_val - min_val) + min_val\n",
666
+ " \n",
667
+ " # Create a copy of the original DICOM dataset\n",
668
+ " ds = pydicom.Dataset()\n",
669
+ " ds.update(original_dcm)\n",
670
+ " \n",
671
+ " # Set the new pixel data\n",
672
+ " ds.PixelData = reconstructed_image.astype(original_dcm.pixel_array.dtype).tobytes()\n",
673
+ " \n",
674
+ " # Set transfer syntax to explicit VR little endian (common default)\n",
675
+ " ds.file_meta = pydicom.Dataset()\n",
676
+ " ds.file_meta.TransferSyntaxUID = pydicom.uid.ExplicitVRLittleEndian\n",
677
+ " \n",
678
+ " # Write the DICOM file\n",
679
+ " pydicom.dcmwrite(output_path, ds)\n",
680
+ "\n",
681
+ "def calculate_psnr(output, target, max_pixel=1.0):\n",
682
+ " \"\"\"\n",
683
+ " Calculate Peak Signal-to-Noise Ratio (PSNR)\n",
684
+ " \n",
685
+ " Args:\n",
686
+ " output (torch.Tensor): Reconstructed image\n",
687
+ " target (torch.Tensor): Original image\n",
688
+ " max_pixel (float): Maximum pixel value\n",
689
+ " \n",
690
+ " Returns:\n",
691
+ " float: PSNR value\n",
692
+ " \"\"\"\n",
693
+ " # Ensure the values are in the correct range\n",
694
+ " mse = torch.nn.functional.mse_loss(output, target)\n",
695
+ " psnr = 20 * torch.log10(max_pixel / torch.sqrt(mse))\n",
696
+ " return psnr.item()\n",
697
+ "\n",
698
+ "def batch_inference(model_path, input_dir, output_dir):\n",
699
+ " \"\"\"\n",
700
+ " Perform batch inference on all DICOM files in a directory\n",
701
+ " \n",
702
+ " Args:\n",
703
+ " model_path (str): Path to the saved model weights\n",
704
+ " input_dir (str): Directory containing input DICOM files\n",
705
+ " output_dir (str): Directory to save reconstructed DICOM files\n",
706
+ " \"\"\"\n",
707
+ " # Create output directory if it doesn't exist\n",
708
+ " os.makedirs(output_dir, exist_ok=True)\n",
709
+ " \n",
710
+ " # Set device\n",
711
+ " device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
712
+ " \n",
713
+ " # Initialize model\n",
714
+ " model = UNet().to(device)\n",
715
+ " \n",
716
+ " # Load saved model weights\n",
717
+ " model.load_state_dict(torch.load(model_path))\n",
718
+ " model.eval()\n",
719
+ " \n",
720
+ " # Get list of DICOM files\n",
721
+ " dcm_files = [f for f in os.listdir(input_dir) if f.endswith('.dcm')]\n",
722
+ " \n",
723
+ " # Prepare for inference\n",
724
+ " print(f\"Starting batch inference on {len(dcm_files)} DICOM files...\")\n",
725
+ " \n",
726
+ " # Store PSNR values\n",
727
+ " psnr_values = {}\n",
728
+ " \n",
729
+ " # Perform inference\n",
730
+ " with torch.no_grad():\n",
731
+ " for dcm_file in tqdm(dcm_files, desc=\"Reconstructing Images\"):\n",
732
+ " # Full paths\n",
733
+ " input_path = os.path.join(input_dir, dcm_file)\n",
734
+ " output_path = os.path.join(output_dir, dcm_file)\n",
735
+ " \n",
736
+ " # Load image\n",
737
+ " test_image, original_dcm = load_dicom_image(input_path)\n",
738
+ " test_image = test_image.to(device)\n",
739
+ " \n",
740
+ " # Perform reconstruction\n",
741
+ " reconstructed_image = model(test_image)\n",
742
+ " \n",
743
+ " # Calculate PSNR\n",
744
+ " psnr = calculate_psnr(reconstructed_image, test_image)\n",
745
+ " psnr_values[dcm_file] = psnr\n",
746
+ " \n",
747
+ " # Save reconstructed image\n",
748
+ " save_reconstructed_dicom(reconstructed_image, original_dcm, output_path)\n",
749
+ " \n",
750
+ " # Print PSNR values\n",
751
+ " print(\"\\nPSNR Values:\")\n",
752
+ " for filename, psnr in psnr_values.items():\n",
753
+ " print(f\"{filename}: {psnr:.2f} dB\")\n",
754
+ " \n",
755
+ " # Calculate and print overall statistics\n",
756
+ " psnr_list = list(psnr_values.values())\n",
757
+ " print(f\"\\nPSNR Statistics:\")\n",
758
+ " print(f\"Average PSNR: {np.mean(psnr_list):.2f} dB\")\n",
759
+ " print(f\"Minimum PSNR: {np.min(psnr_list):.2f} dB\")\n",
760
+ " print(f\"Maximum PSNR: {np.max(psnr_list):.2f} dB\")\n",
761
+ "\n",
762
+ "# Example usage\n",
763
+ "if __name__ == \"__main__\":\n",
764
+ " # Paths to model, input, and output directories\n",
765
+ " MODEL_PATH = r\"D:\\VSCODE\\PreSense\\best_model.pth\" # Path to your saved model\n",
766
+ " INPUT_DICOM_DIR = r\"D:\\Pancreatic Neuroendocrine\\manifest-1662644254281\\CTpred-Sunitinib-panNET\\PAN_01\\04-11-2001-NA-NA-29221\\3.000000-CEFC07AIDR 3D STD-16260\" # Directory with input DICOM files\n",
767
+ " OUTPUT_DICOM_DIR = r\"D:\\VSCODE\\PreSense\\reconstructed_dicom\" # Directory to save reconstructed DICOM files\n",
768
+ " \n",
769
+ " # Run batch inference\n",
770
+ " batch_inference(MODEL_PATH, INPUT_DICOM_DIR, OUTPUT_DICOM_DIR)"
771
+ ]
772
+ },
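+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "A round-trip check for the saved series (a sketch; reuses the `OUTPUT_DICOM_DIR` above): re-reading a reconstructed file should give a pixel array with the same shape and dtype as the inputs."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "import pydicom\n",
+ "\n",
+ "out_dir = r\"D:\\VSCODE\\PreSense\\reconstructed_dicom\"\n",
+ "first = sorted(f for f in os.listdir(out_dir) if f.endswith('.dcm'))[0]\n",
+ "ds = pydicom.dcmread(os.path.join(out_dir, first))\n",
+ "print(first, ds.pixel_array.shape, ds.pixel_array.dtype)"
+ ]
+ },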
773
+ {
774
+ "cell_type": "code",
775
+ "execution_count": null,
776
+ "metadata": {},
777
+ "outputs": [],
778
+ "source": [
779
+ "import torch\n",
780
+ "import torch.nn as nn\n",
781
+ "import pydicom\n",
782
+ "import numpy as np\n",
783
+ "import os\n",
784
+ "from tqdm import tqdm\n",
785
+ "from PIL import Image\n",
786
+ "\n",
787
+ "# Import the UNet and related classes from the previous script\n",
788
+ "\n",
789
+ "def load_dicom_image(dicom_path):\n",
790
+ " \"\"\"\n",
791
+ " Load and normalize a DICOM image\n",
792
+ " \n",
793
+ " Args:\n",
794
+ " dicom_path (str): Path to the DICOM file\n",
795
+ " \n",
796
+ " Returns:\n",
797
+ " torch.Tensor: Normalized image tensor\n",
798
+ " \"\"\"\n",
799
+ " # Read DICOM file\n",
800
+ " dcm = pydicom.dcmread(dicom_path)\n",
801
+ " image = dcm.pixel_array.astype(float)\n",
802
+ " \n",
803
+ " # Normalize image\n",
804
+ " image = (image - image.min()) / (image.max() - image.min())\n",
805
+ " \n",
806
+ " # Convert to tensor\n",
807
+ " image_tensor = torch.from_numpy(image).float().unsqueeze(0).unsqueeze(0)\n",
808
+ " return image_tensor, dcm\n",
809
+ "\n",
810
+ "def save_reconstructed_image(image_tensor, output_path):\n",
811
+ " \"\"\"\n",
812
+ " Save reconstructed image as a JPEG file\n",
813
+ " \n",
814
+ " Args:\n",
815
+ " image_tensor (torch.Tensor): Reconstructed image tensor\n",
816
+ " output_path (str): Path to save the reconstructed JPEG image\n",
817
+ " \"\"\"\n",
818
+ " # Convert tensor to numpy array\n",
819
+ " reconstructed_image = image_tensor.squeeze().cpu().numpy()\n",
820
+ " \n",
821
+ " # Scale back to the original pixel range (assuming input was normalized to [0, 1])\n",
822
+ " reconstructed_image = np.uint8(reconstructed_image * 255)\n",
823
+ " \n",
824
+ " # Convert to PIL Image\n",
825
+ " pil_image = Image.fromarray(reconstructed_image)\n",
826
+ " \n",
827
+ " # Save as JPEG\n",
828
+ " pil_image.save(output_path, 'JPEG')\n",
829
+ "\n",
830
+ "def calculate_psnr(output, target, max_pixel=1.0):\n",
831
+ " \"\"\"\n",
832
+ " Calculate Peak Signal-to-Noise Ratio (PSNR)\n",
833
+ " \n",
834
+ " Args:\n",
835
+ " output (torch.Tensor): Reconstructed image\n",
836
+ " target (torch.Tensor): Original image\n",
837
+ " max_pixel (float): Maximum pixel value\n",
838
+ " \n",
839
+ " Returns:\n",
840
+ " float: PSNR value\n",
841
+ " \"\"\"\n",
842
+ " # Ensure the values are in the correct range\n",
843
+ " mse = torch.nn.functional.mse_loss(output, target)\n",
844
+ " psnr = 20 * torch.log10(max_pixel / torch.sqrt(mse))\n",
845
+ " return psnr.item()\n",
846
+ "\n",
847
+ "def batch_inference(model_path, input_dir, output_dir):\n",
848
+ " \"\"\"\n",
849
+ " Perform batch inference on all DICOM files in a directory\n",
850
+ " \n",
851
+ " Args:\n",
852
+ " model_path (str): Path to the saved model weights\n",
853
+ " input_dir (str): Directory containing input DICOM files\n",
854
+ " output_dir (str): Directory to save reconstructed JPEG images\n",
855
+ " \"\"\"\n",
856
+ " # Create output directory if it doesn't exist\n",
857
+ " os.makedirs(output_dir, exist_ok=True)\n",
858
+ " \n",
859
+ " # Set device\n",
860
+ " device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
861
+ " \n",
862
+ " # Initialize model\n",
863
+ " model = UNet().to(device)\n",
864
+ " \n",
865
+ " # Load saved model weights\n",
866
+ " model.load_state_dict(torch.load(model_path))\n",
867
+ " model.eval()\n",
868
+ " \n",
869
+ " # Get list of DICOM files\n",
870
+ " dcm_files = [f for f in os.listdir(input_dir) if f.endswith('.dcm')]\n",
871
+ " \n",
872
+ " # Prepare for inference\n",
873
+ " print(f\"Starting batch inference on {len(dcm_files)} DICOM files...\")\n",
874
+ " \n",
875
+ " # Store PSNR values\n",
876
+ " psnr_values = {}\n",
877
+ " \n",
878
+ " # Perform inference\n",
879
+ " with torch.no_grad():\n",
880
+ " for dcm_file in tqdm(dcm_files, desc=\"Reconstructing Images\"):\n",
881
+ " # Full paths\n",
882
+ " input_path = os.path.join(input_dir, dcm_file)\n",
883
+ " output_path = os.path.join(output_dir, f\"{os.path.splitext(dcm_file)[0]}.jpg\") # Save as .jpg\n",
884
+ " \n",
885
+ " # Load image\n",
886
+ " test_image, original_dcm = load_dicom_image(input_path)\n",
887
+ " test_image = test_image.to(device)\n",
888
+ " \n",
889
+ " # Perform reconstruction\n",
890
+ " reconstructed_image = model(test_image)\n",
891
+ " \n",
892
+ " # Calculate PSNR\n",
893
+ " psnr = calculate_psnr(reconstructed_image, test_image)\n",
894
+ " psnr_values[dcm_file] = psnr\n",
895
+ " \n",
896
+ " # Save reconstructed image as JPEG\n",
897
+ " save_reconstructed_image(reconstructed_image, output_path)\n",
898
+ " \n",
899
+ " # Print PSNR values\n",
900
+ " print(\"\\nPSNR Values:\")\n",
901
+ " for filename, psnr in psnr_values.items():\n",
902
+ " print(f\"{filename}: {psnr:.2f} dB\")\n",
903
+ " \n",
904
+ " # Calculate and print overall statistics\n",
905
+ " psnr_list = list(psnr_values.values())\n",
906
+ " print(f\"\\nPSNR Statistics:\")\n",
907
+ " print(f\"Average PSNR: {np.mean(psnr_list):.2f} dB\")\n",
908
+ " print(f\"Minimum PSNR: {np.min(psnr_list):.2f} dB\")\n",
909
+ " print(f\"Maximum PSNR: {np.max(psnr_list):.2f} dB\")\n",
910
+ "\n",
911
+ "# Example usage\n",
912
+ "if __name__ == \"__main__\":\n",
913
+ " # Paths to model, input, and output directories\n",
914
+ " MODEL_PATH = r\"D:\\VSCODE\\PreSense\\best_model.pth\" # Path to your saved model\n",
915
+ " INPUT_DICOM_DIR = r\"D:\\Pancreatic Neuroendocrine\\manifest-1662644254281\\CTpred-Sunitinib-panNET\\PAN_01\\04-11-2001-NA-NA-29221\\3.000000-CEFC07AIDR 3D STD-16260\" # Directory with input DICOM files\n",
916
+ " OUTPUT_JPEG_DIR = r\"D:\\VSCODE\\PreSense\\reconstructed_images\" # Directory to save reconstructed JPEG images\n",
917
+ " \n",
918
+ " # Run batch inference\n",
919
+ " batch_inference(MODEL_PATH, INPUT_DICOM_DIR, OUTPUT_JPEG_DIR)"
920
+ ]
921
+ },
922
+ {
923
+ "cell_type": "markdown",
924
+ "metadata": {},
925
+ "source": [
926
+ "### Small Reconstructor and Denoiser U-Net (smallRD)"
927
+ ]
928
+ },
929
+ {
930
+ "cell_type": "code",
931
+ "execution_count": null,
932
+ "metadata": {},
933
+ "outputs": [],
934
+ "source": [
935
+ "import torch\n",
936
+ "import torch.nn as nn\n",
937
+ "import torch.nn.functional as F\n",
938
+ "import pydicom\n",
939
+ "import numpy as np\n",
940
+ "from torch.utils.data import Dataset, DataLoader\n",
941
+ "import os\n",
942
+ "from torch.utils.checkpoint import checkpoint\n",
943
+ "from tqdm import tqdm # Import tqdm for progress bar\n",
944
+ "\n",
945
+ "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
946
+ "\n",
947
+ "class MedicalImageDataset(Dataset):\n",
948
+ " def __init__(self, dicom_dir):\n",
949
+ " self.dicom_files = [os.path.join(dicom_dir, f) for f in os.listdir(dicom_dir) if f.endswith('.dcm')]\n",
950
+ " \n",
951
+ " def __len__(self):\n",
952
+ " return len(self.dicom_files)\n",
953
+ " \n",
954
+ " def __getitem__(self, idx):\n",
955
+ " # Read DICOM file and normalize\n",
956
+ " dcm = pydicom.dcmread(self.dicom_files[idx])\n",
957
+ " image = dcm.pixel_array.astype(float)\n",
958
+ " image = (image - image.min()) / (image.max() - image.min())\n",
959
+ " \n",
960
+ " # Convert to tensor\n",
961
+ " image_tensor = torch.from_numpy(image).float().unsqueeze(0)\n",
962
+ " return image_tensor, image_tensor\n",
963
+ "\n",
964
+ "class UNetBlock(nn.Module):\n",
965
+ " def __init__(self, in_channels, out_channels):\n",
966
+ " super().__init__()\n",
967
+ " self.conv1 = nn.Conv2d(in_channels, out_channels, 3, padding=1)\n",
968
+ " self.bn1 = nn.BatchNorm2d(out_channels)\n",
969
+ " self.conv2 = nn.Conv2d(out_channels, out_channels, 3, padding=1)\n",
970
+ " self.bn2 = nn.BatchNorm2d(out_channels)\n",
971
+ " \n",
972
+ " def forward(self, x):\n",
973
+ " x = F.relu(self.bn1(self.conv1(x)))\n",
974
+ " x = F.relu(self.bn2(self.conv2(x)))\n",
975
+ " return x\n",
976
+ "\n",
977
+ "class UNet(nn.Module):\n",
978
+ " def __init__(self, in_channels=1, out_channels=1):\n",
979
+ " super().__init__()\n",
980
+ " # Encoder\n",
981
+ " self.enc1 = UNetBlock(in_channels, 64)\n",
982
+ " self.enc2 = UNetBlock(64, 128)\n",
983
+ " self.enc3 = UNetBlock(128, 256)\n",
984
+ " \n",
985
+ " # Decoder\n",
986
+ " self.dec3 = UNetBlock(256 + 128, 128) # Adjust for concatenation with skip connection\n",
987
+ " self.dec2 = UNetBlock(128 + 64, 64) # Adjust for concatenation with skip connection\n",
988
+ " self.dec1 = UNetBlock(64, out_channels)\n",
989
+ " \n",
990
+ " # Pooling and upsampling\n",
991
+ " self.pool = nn.MaxPool2d(2, 2)\n",
992
+ " self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)\n",
993
+ " \n",
994
+ " def forward(self, x):\n",
995
+ " # Encoder path\n",
996
+ " e1 = checkpoint(self.enc1, x)\n",
997
+ " e2 = checkpoint(self.enc2, self.pool(e1))\n",
998
+ " e3 = checkpoint(self.enc3, self.pool(e2))\n",
999
+ " \n",
1000
+ " # Decoder path with skip connections\n",
1001
+ " d3 = self.upsample(e3)\n",
1002
+ " d3 = torch.cat([d3, e2], dim=1) # Concatenate along channels\n",
1003
+ " d3 = checkpoint(self.dec3, d3)\n",
1004
+ " \n",
1005
+ " d2 = self.upsample(d3)\n",
1006
+ " d2 = torch.cat([d2, e1], dim=1) # Concatenate along channels\n",
1007
+ " d2 = checkpoint(self.dec2, d2)\n",
1008
+ " \n",
1009
+ " d1 = self.dec1(d2) # No checkpointing for final output layer\n",
1010
+ " \n",
1011
+ " return d1\n",
1012
+ "\n",
1013
+ "def calculate_loss(model, dataloader, criterion):\n",
1014
+ " model.eval()\n",
1015
+ " total_loss = 0\n",
1016
+ " with torch.no_grad():\n",
1017
+ " for images, targets in dataloader:\n",
1018
+ " images, targets = images.to(device), targets.to(device)\n",
1019
+ " outputs = model(images)\n",
1020
+ " loss = criterion(outputs, targets)\n",
1021
+ " total_loss += loss.item()\n",
1022
+ " return total_loss / len(dataloader)\n",
1023
+ "\n",
1024
+ "def calculate_psnr(output, target, max_pixel=1.0):\n",
1025
+ " # Ensure the values are in the correct range\n",
1026
+ " mse = F.mse_loss(output, target)\n",
1027
+ " psnr = 20 * torch.log10(max_pixel / torch.sqrt(mse))\n",
1028
+ " return psnr.item()\n",
1029
+ "\n",
1030
+ "def calculate_loss_and_psnr(model, dataloader, criterion):\n",
1031
+ " model.eval()\n",
1032
+ " total_loss = 0\n",
1033
+ " total_psnr = 0\n",
1034
+ " num_batches = len(dataloader)\n",
1035
+ " \n",
1036
+ " with torch.no_grad():\n",
1037
+ " for images, targets in dataloader:\n",
1038
+ " images, targets = images.to(device), targets.to(device)\n",
1039
+ " outputs = model(images)\n",
1040
+ " \n",
1041
+ " # Calculate MSE loss\n",
1042
+ " loss = criterion(outputs, targets)\n",
1043
+ " total_loss += loss.item()\n",
1044
+ " \n",
1045
+ " # Calculate PSNR\n",
1046
+ " psnr = calculate_psnr(outputs, targets)\n",
1047
+ " total_psnr += psnr\n",
1048
+ " \n",
1049
+ " avg_loss = total_loss / num_batches\n",
1050
+ " avg_psnr = total_psnr / num_batches\n",
1051
+ " \n",
1052
+ " return avg_loss, avg_psnr\n",
1053
+ "\n",
1090
+ "class Reconstructor(nn.Module):\n",
1091
+ " def __init__(self, in_channels=1, out_channels=1):\n",
1092
+ " super().__init__()\n",
1093
+ " # Same UNet architecture for reconstruction\n",
1094
+ " self.unet = UNet(in_channels=in_channels, out_channels=out_channels)\n",
1095
+ " \n",
1096
+ " def forward(self, x):\n",
1097
+ " return self.unet(x)\n",
1098
+ "\n",
1099
+ "\n",
1100
+ "class Denoiser(nn.Module):\n",
1101
+ " def __init__(self, in_channels=1, out_channels=1):\n",
1102
+ " super().__init__()\n",
1103
+ " # Same UNet architecture for denoising\n",
1104
+ " self.unet = UNet(in_channels=in_channels, out_channels=out_channels)\n",
1105
+ " \n",
1106
+ " def forward(self, x):\n",
1107
+ " return self.unet(x)\n",
1108
+ " \n",
1109
+ "def train_reconstructor_and_denoiser(dicom_dir, val_dicom_dir, epochs=50, batch_size=4, grad_accumulation_steps=2):\n",
1110
+ " # Dataset and DataLoader\n",
1111
+ " dataset = MedicalImageDataset(dicom_dir)\n",
1112
+ " train_dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)\n",
1113
+ " val_dataset = MedicalImageDataset(val_dicom_dir)\n",
1114
+ " val_dataloader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)\n",
1115
+ " \n",
1116
+ " # Initialize both models\n",
1117
+ " reconstructor = Reconstructor().to(device)\n",
1118
+ " denoiser = Denoiser().to(device)\n",
1119
+ " \n",
1120
+ " # Loss functions for both models\n",
1121
+ " reconstructor_criterion = nn.MSELoss()\n",
1122
+ " denoiser_criterion = nn.MSELoss()\n",
1123
+ " \n",
1124
+ " # Optimizers for both models\n",
1125
+ " reconstructor_optimizer = torch.optim.Adam(reconstructor.parameters(), lr=0.0001)\n",
1126
+ " denoiser_optimizer = torch.optim.Adam(denoiser.parameters(), lr=0.0001)\n",
1127
+ " \n",
1128
+ " # Best validation loss initialization\n",
1129
+ " best_reconstructor_val_loss = float('inf')\n",
1130
+ " best_denoiser_val_loss = float('inf')\n",
1131
+ " best_reconstructor_model_path = 'best_reconstructor_model.pth'\n",
1132
+ " best_denoiser_model_path = 'best_denoiser_model.pth'\n",
1133
+ "\n",
1134
+ " # Training loop with tqdm\n",
1135
+ " for epoch in range(epochs):\n",
1136
+ " reconstructor.train()\n",
1137
+ " denoiser.train()\n",
1138
+ " \n",
1139
+ " reconstructor_total_loss = 0\n",
1140
+ " denoiser_total_loss = 0\n",
1141
+ " \n",
1142
+ " reconstructor_optimizer.zero_grad()\n",
1143
+ " denoiser_optimizer.zero_grad()\n",
1144
+ "\n",
1145
+ " with tqdm(train_dataloader, unit=\"batch\", desc=f\"Epoch {epoch+1}/{epochs}\") as tepoch:\n",
1146
+ " for i, (images, targets) in enumerate(tepoch):\n",
1147
+ " images, targets = images.to(device), targets.to(device)\n",
1148
+ " \n",
1149
+ " # Training Reconstructor\n",
1150
+ " reconstructor_outputs = reconstructor(images)\n",
1151
+ " reconstructor_loss = reconstructor_criterion(reconstructor_outputs, targets)\n",
1152
+ " reconstructor_loss.backward(retain_graph=True)\n",
1153
+ "\n",
1154
+ " # Gradient accumulation for reconstructor\n",
1155
+ " if (i + 1) % grad_accumulation_steps == 0 or (i + 1) == len(tepoch):\n",
1156
+ " reconstructor_optimizer.step()\n",
1157
+ " reconstructor_optimizer.zero_grad()\n",
1158
+ "\n",
1159
+ " reconstructor_total_loss += reconstructor_loss.item()\n",
1160
+ "\n",
1161
+ " # Training Denoiser (using output from Reconstructor as noisy input)\n",
1162
+ " noisy_images = reconstructor_outputs.detach() # Detach from the computation graph to avoid in-place error\n",
1163
+ " denoiser_outputs = denoiser(noisy_images)\n",
1164
+ " denoiser_loss = denoiser_criterion(denoiser_outputs, targets)\n",
1165
+ " denoiser_loss.backward()\n",
1166
+ "\n",
1167
+ " # Gradient accumulation for denoiser\n",
1168
+ " if (i + 1) % grad_accumulation_steps == 0 or (i + 1) == len(tepoch):\n",
1169
+ " denoiser_optimizer.step()\n",
1170
+ " denoiser_optimizer.zero_grad()\n",
1171
+ "\n",
1172
+ " denoiser_total_loss += denoiser_loss.item()\n",
1173
+ "\n",
1174
+ " # Update the tqdm progress bar with current loss\n",
1175
+ " tepoch.set_postfix(\n",
1176
+ " reconstructor_loss=reconstructor_total_loss / ((i + 1) * batch_size),\n",
1177
+ " denoiser_loss=denoiser_total_loss / ((i + 1) * batch_size)\n",
1178
+ " )\n",
1179
+ " \n",
1180
+ " # Calculate validation loss for both models\n",
1181
+ " avg_reconstructor_train_loss = reconstructor_total_loss / len(train_dataloader)\n",
1182
+ " avg_denoiser_train_loss = denoiser_total_loss / len(train_dataloader)\n",
1183
+ " \n",
1184
+ " avg_reconstructor_val_loss, avg_reconstructor_val_psnr = calculate_loss_and_psnr(reconstructor, val_dataloader, reconstructor_criterion)\n",
1185
+ " avg_denoiser_val_loss, avg_denoiser_val_psnr = calculate_loss_and_psnr(denoiser, val_dataloader, denoiser_criterion)\n",
1186
+ " \n",
1187
+ " print(f\"Epoch [{epoch+1}/{epochs}] - \"\n",
1188
+ " f\"Reconstructor Train Loss: {avg_reconstructor_train_loss:.4f}, \"\n",
1189
+ " f\"Denoiser Train Loss: {avg_denoiser_train_loss:.4f}, \"\n",
1190
+ " f\"Reconstructor Val Loss: {avg_reconstructor_val_loss:.4f}, \"\n",
1191
+ " f\"Denoiser Val Loss: {avg_denoiser_val_loss:.4f}, \"\n",
1192
+ " f\"Reconstructor Validation PSNR: {avg_reconstructor_val_psnr:.4f}, \"\n",
1193
+ " f\"Denoiser Validation PSNR: {avg_denoiser_val_psnr:.4f}\")\n",
1194
+ " \n",
1195
+ " # Save models if validation loss is improved\n",
1196
+ " if avg_reconstructor_val_loss < best_reconstructor_val_loss:\n",
1197
+ " best_reconstructor_val_loss = avg_reconstructor_val_loss\n",
1198
+ " torch.save(reconstructor.state_dict(), best_reconstructor_model_path)\n",
1199
+ " print(f\"Reconstructor model saved with improved validation loss: {avg_reconstructor_val_loss:.4f}\")\n",
1200
+ " \n",
1201
+ " if avg_denoiser_val_loss < best_denoiser_val_loss:\n",
1202
+ " best_denoiser_val_loss = avg_denoiser_val_loss\n",
1203
+ " torch.save(denoiser.state_dict(), best_denoiser_model_path)\n",
1204
+ " print(f\"Denoiser model saved with improved validation loss: {avg_denoiser_val_loss:.4f}\")\n",
1205
+ " \n",
1206
+ " return reconstructor, denoiser\n",
1207
+ "\n",
1208
+ "# Example usage with train and validation directories\n",
1209
+ "reconstructor_model, denoiser_model = train_reconstructor_and_denoiser(\n",
1210
+ " r\"D:/PN_Split/train\", r\"D:/PN_Split/val\", epochs=50, batch_size=20, grad_accumulation_steps=2\n",
1211
+ ")"
1212
+ ]
1213
+ },
1214
+ {
1215
+ "cell_type": "markdown",
1216
+ "metadata": {},
1217
+ "source": [
1218
+ "### smallRD Single Image Inference"
1219
+ ]
1220
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import torch\n",
+ "import pydicom\n",
+ "import numpy as np\n",
+ "import matplotlib.pyplot as plt\n",
+ "\n",
+ "# The Reconstructor and Denoiser classes from the smallRD training cell\n",
+ "# above are assumed to be defined in this session.\n",
+ "\n",
+ "def load_dicom_image(dicom_path):\n",
+ "    \"\"\"\n",
+ "    Load and normalize a DICOM image.\n",
+ "\n",
+ "    Args:\n",
+ "        dicom_path (str): Path to the DICOM file\n",
+ "\n",
+ "    Returns:\n",
+ "        torch.Tensor: Normalized image tensor\n",
+ "    \"\"\"\n",
+ "    # Read DICOM file\n",
+ "    dcm = pydicom.dcmread(dicom_path)\n",
+ "    image = dcm.pixel_array.astype(float)\n",
+ "\n",
+ "    # Normalize image to [0, 1]\n",
+ "    image = (image - image.min()) / (image.max() - image.min())\n",
+ "\n",
+ "    # Convert to tensor and add batch and channel dimensions\n",
+ "    image_tensor = torch.from_numpy(image).float().unsqueeze(0).unsqueeze(0)\n",
+ "    return image_tensor\n",
+ "\n",
+ "def calculate_psnr(output, target, max_pixel=1.0):\n",
+ "    \"\"\"\n",
+ "    Calculate Peak Signal-to-Noise Ratio (PSNR).\n",
+ "\n",
+ "    Args:\n",
+ "        output (torch.Tensor): Reconstructed image\n",
+ "        target (torch.Tensor): Original image\n",
+ "        max_pixel (float): Maximum pixel value\n",
+ "\n",
+ "    Returns:\n",
+ "        float: PSNR value in dB\n",
+ "    \"\"\"\n",
+ "    mse = torch.nn.functional.mse_loss(output, target)\n",
+ "    psnr = 20 * torch.log10(max_pixel / torch.sqrt(mse))\n",
+ "    return psnr.item()\n",
+ "\n",
+ "def visualize_reconstruction(original_image, reconstructed_image, psnr):\n",
+ "    \"\"\"\n",
+ "    Visualize original and reconstructed images side by side.\n",
+ "\n",
+ "    Args:\n",
+ "        original_image (torch.Tensor): Original image tensor\n",
+ "        reconstructed_image (torch.Tensor): Reconstructed image tensor\n",
+ "        psnr (float): Peak Signal-to-Noise Ratio\n",
+ "    \"\"\"\n",
+ "    # Convert tensors to numpy for visualization\n",
+ "    original = original_image.squeeze().cpu().numpy()\n",
+ "    reconstructed = reconstructed_image.squeeze().cpu().numpy()\n",
+ "\n",
+ "    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))\n",
+ "\n",
+ "    # Plot original image\n",
+ "    im1 = ax1.imshow(original, cmap='gray')\n",
+ "    ax1.set_title('Original Image')\n",
+ "    plt.colorbar(im1, ax=ax1)\n",
+ "\n",
+ "    # Plot reconstructed image\n",
+ "    im2 = ax2.imshow(reconstructed, cmap='gray')\n",
+ "    ax2.set_title(f'Reconstructed Image\\nPSNR: {psnr:.2f} dB')\n",
+ "    plt.colorbar(im2, ax=ax2)\n",
+ "\n",
+ "    plt.tight_layout()\n",
+ "    plt.show()\n",
+ "\n",
+ "def inference_single_image(reconstructor_model_path, denoiser_model_path, test_dicom_path):\n",
+ "    \"\"\"\n",
+ "    Run a single DICOM image through the Reconstructor and Denoiser models.\n",
+ "\n",
+ "    Args:\n",
+ "        reconstructor_model_path (str): Path to the saved Reconstructor weights\n",
+ "        denoiser_model_path (str): Path to the saved Denoiser weights\n",
+ "        test_dicom_path (str): Path to the test DICOM file\n",
+ "    \"\"\"\n",
+ "    # Set device\n",
+ "    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
+ "\n",
+ "    # Initialize models\n",
+ "    reconstructor = Reconstructor().to(device)\n",
+ "    denoiser = Denoiser().to(device)\n",
+ "\n",
+ "    # Load saved weights (map_location keeps this working on CPU-only hosts)\n",
+ "    reconstructor.load_state_dict(torch.load(reconstructor_model_path, map_location=device))\n",
+ "    denoiser.load_state_dict(torch.load(denoiser_model_path, map_location=device))\n",
+ "\n",
+ "    reconstructor.eval()\n",
+ "    denoiser.eval()\n",
+ "\n",
+ "    # Load and preprocess test image, then run both stages\n",
+ "    with torch.no_grad():\n",
+ "        test_image = load_dicom_image(test_dicom_path).to(device)\n",
+ "\n",
+ "        # Perform reconstruction\n",
+ "        reconstructed_image = reconstructor(test_image)\n",
+ "\n",
+ "        # Denoise the reconstructed image\n",
+ "        denoised_image = denoiser(reconstructed_image)\n",
+ "\n",
+ "        # Calculate PSNR for both outputs against the input\n",
+ "        psnr_reconstructed = calculate_psnr(reconstructed_image, test_image)\n",
+ "        psnr_denoised = calculate_psnr(denoised_image, test_image)\n",
+ "\n",
+ "    print(f\"PSNR (Reconstructed): {psnr_reconstructed:.2f} dB\")\n",
+ "    print(f\"PSNR (Denoised): {psnr_denoised:.2f} dB\")\n",
+ "\n",
+ "    # Visualize results\n",
+ "    visualize_reconstruction(test_image, reconstructed_image, psnr_reconstructed)\n",
+ "    visualize_reconstruction(test_image, denoised_image, psnr_denoised)\n",
+ "\n",
+ "# Example usage\n",
+ "if __name__ == \"__main__\":\n",
+ "    # Paths to the saved models and a test image\n",
+ "    RECONSTRUCTOR_MODEL_PATH = r\"D:/VSCODE/PreSense/small_reconstructor.pth\"\n",
+ "    DENOISER_MODEL_PATH = r\"D:/VSCODE/PreSense/small_denoiser.pth\"\n",
+ "    TEST_DICOM_PATH = r\"D:/VSCODE/PreSense/test2.dcm\"  # Replace with an actual test DICOM\n",
+ "\n",
+ "    # Run inference\n",
+ "    inference_single_image(RECONSTRUCTOR_MODEL_PATH, DENOISER_MODEL_PATH, TEST_DICOM_PATH)"
+ ]
+ },
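+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "A quick way to compare the model variants is by trainable parameter count. The cell below is a minimal sketch, assuming the `Reconstructor` and `Denoiser` classes of whichever variant was run last are in scope."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Minimal sketch: count trainable parameters of the currently defined models.\n",
+ "def count_parameters(model):\n",
+ "    return sum(p.numel() for p in model.parameters() if p.requires_grad)\n",
+ "\n",
+ "print(f\"Reconstructor parameters: {count_parameters(Reconstructor()):,}\")\n",
+ "print(f\"Denoiser parameters: {count_parameters(Denoiser()):,}\")"
+ ]
+ },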
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Medium Reconstructor and Denoiser U-Net (mediumRD)"
+ ]
+ },
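+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The medium variant stacks four encoder stages (96 → 192 → 384 → 768 channels) and uses gradient checkpointing to trade recomputation for memory. Optimizer steps are taken every `grad_accumulation_steps` batches, so the effective batch size is `batch_size * grad_accumulation_steps` (6 × 16 = 96 in the example call below)."
+ ]
+ },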
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import torch\n",
+ "import torch.nn as nn\n",
+ "import torch.nn.functional as F\n",
+ "import pydicom\n",
+ "import numpy as np\n",
+ "from torch.utils.data import Dataset, DataLoader\n",
+ "import os\n",
+ "from torch.utils.checkpoint import checkpoint\n",
+ "from tqdm import tqdm  # Progress bar\n",
+ "\n",
+ "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
+ "\n",
+ "class MedicalImageDataset(Dataset):\n",
+ "    def __init__(self, dicom_dir):\n",
+ "        self.dicom_files = [os.path.join(dicom_dir, f) for f in os.listdir(dicom_dir) if f.endswith('.dcm')]\n",
+ "\n",
+ "    def __len__(self):\n",
+ "        return len(self.dicom_files)\n",
+ "\n",
+ "    def __getitem__(self, idx):\n",
+ "        # Read DICOM file and normalize to [0, 1]\n",
+ "        dcm = pydicom.dcmread(self.dicom_files[idx])\n",
+ "        image = dcm.pixel_array.astype(float)\n",
+ "        image = (image - image.min()) / (image.max() - image.min())\n",
+ "\n",
+ "        # Convert to tensor with a channel dimension\n",
+ "        image_tensor = torch.from_numpy(image).float().unsqueeze(0)\n",
+ "        return image_tensor, image_tensor\n",
+ "\n",
+ "class UNetBlock(nn.Module):\n",
+ "    def __init__(self, in_channels, out_channels):\n",
+ "        super().__init__()\n",
+ "        self.conv1 = nn.Conv2d(in_channels, out_channels, 3, padding=1)\n",
+ "        self.bn1 = nn.BatchNorm2d(out_channels)\n",
+ "        self.conv2 = nn.Conv2d(out_channels, out_channels, 3, padding=1)\n",
+ "        self.bn2 = nn.BatchNorm2d(out_channels)\n",
+ "\n",
+ "    def forward(self, x):\n",
+ "        x = F.relu(self.bn1(self.conv1(x)))\n",
+ "        x = F.relu(self.bn2(self.conv2(x)))\n",
+ "        return x\n",
+ "\n",
+ "class UNet(nn.Module):\n",
+ "    def __init__(self, in_channels=1, out_channels=1):\n",
+ "        super().__init__()\n",
+ "        # Encoder (channel widths double at each stage)\n",
+ "        self.enc1 = UNetBlock(in_channels, 96)\n",
+ "        self.enc2 = UNetBlock(96, 192)\n",
+ "        self.enc3 = UNetBlock(192, 384)\n",
+ "        self.enc4 = UNetBlock(384, 768)\n",
+ "\n",
+ "        # Decoder with learned upsampling (transposed convolutions)\n",
+ "        self.upconv4 = nn.ConvTranspose2d(768, 384, kernel_size=2, stride=2)\n",
+ "        self.dec4 = UNetBlock(384 + 384, 384)  # Input channels doubled by skip concatenation\n",
+ "\n",
+ "        self.upconv3 = nn.ConvTranspose2d(384, 192, kernel_size=2, stride=2)\n",
+ "        self.dec3 = UNetBlock(192 + 192, 192)\n",
+ "\n",
+ "        self.upconv2 = nn.ConvTranspose2d(192, 96, kernel_size=2, stride=2)\n",
+ "        self.dec2 = UNetBlock(96 + 96, 96)\n",
+ "\n",
+ "        self.dec1 = UNetBlock(96, out_channels)  # Final output block\n",
+ "\n",
+ "        self.pool = nn.MaxPool2d(2, 2)\n",
+ "\n",
+ "    def forward(self, x):\n",
+ "        # Encoder path (gradient checkpointing trades compute for memory;\n",
+ "        # use_reentrant=False lets parameter gradients flow even though the\n",
+ "        # network input itself does not require grad)\n",
+ "        e1 = checkpoint(self.enc1, x, use_reentrant=False)\n",
+ "        e2 = checkpoint(self.enc2, self.pool(e1), use_reentrant=False)\n",
+ "        e3 = checkpoint(self.enc3, self.pool(e2), use_reentrant=False)\n",
+ "        e4 = checkpoint(self.enc4, self.pool(e3), use_reentrant=False)\n",
+ "\n",
+ "        # Decoder path with learned upsampling and skip connections\n",
+ "        d4 = self.upconv4(e4)\n",
+ "        d4 = torch.cat([d4, e3], dim=1)  # Concatenate with encoder features\n",
+ "        d4 = checkpoint(self.dec4, d4, use_reentrant=False)\n",
+ "\n",
+ "        d3 = self.upconv3(d4)\n",
+ "        d3 = torch.cat([d3, e2], dim=1)\n",
+ "        d3 = checkpoint(self.dec3, d3, use_reentrant=False)\n",
+ "\n",
+ "        d2 = self.upconv2(d3)\n",
+ "        d2 = torch.cat([d2, e1], dim=1)\n",
+ "        d2 = checkpoint(self.dec2, d2, use_reentrant=False)\n",
+ "\n",
+ "        d1 = self.dec1(d2)  # No checkpointing for the final output layer\n",
+ "\n",
+ "        return d1\n",
+ "\n",
+ "def calculate_loss(model, dataloader, criterion):\n",
+ "    model.eval()\n",
+ "    total_loss = 0\n",
+ "    with torch.no_grad():\n",
+ "        for images, targets in dataloader:\n",
+ "            images, targets = images.to(device), targets.to(device)\n",
+ "            outputs = model(images)\n",
+ "            loss = criterion(outputs, targets)\n",
+ "            total_loss += loss.item()\n",
+ "    return total_loss / len(dataloader)\n",
+ "\n",
+ "def calculate_psnr(output, target, max_pixel=1.0):\n",
+ "    # PSNR = 20 * log10(MAX / sqrt(MSE)); assumes values in [0, max_pixel]\n",
+ "    mse = F.mse_loss(output, target)\n",
+ "    psnr = 20 * torch.log10(max_pixel / torch.sqrt(mse))\n",
+ "    return psnr.item()\n",
+ "\n",
+ "def calculate_loss_and_psnr(model, dataloader, criterion):\n",
+ "    model.eval()\n",
+ "    total_loss = 0\n",
+ "    total_psnr = 0\n",
+ "    num_batches = len(dataloader)\n",
+ "\n",
+ "    with torch.no_grad():\n",
+ "        for images, targets in dataloader:\n",
+ "            images, targets = images.to(device), targets.to(device)\n",
+ "            outputs = model(images)\n",
+ "\n",
+ "            # Calculate MSE loss\n",
+ "            loss = criterion(outputs, targets)\n",
+ "            total_loss += loss.item()\n",
+ "\n",
+ "            # Calculate PSNR\n",
+ "            psnr = calculate_psnr(outputs, targets)\n",
+ "            total_psnr += psnr\n",
+ "\n",
+ "    avg_loss = total_loss / num_batches\n",
+ "    avg_psnr = total_psnr / num_batches\n",
+ "\n",
+ "    return avg_loss, avg_psnr\n",
+ "\n",
+ "class Reconstructor(nn.Module):\n",
+ "    def __init__(self, in_channels=1, out_channels=1):\n",
+ "        super().__init__()\n",
+ "        # Same UNet architecture for reconstruction\n",
+ "        self.unet = UNet(in_channels=in_channels, out_channels=out_channels)\n",
+ "\n",
+ "    def forward(self, x):\n",
+ "        return self.unet(x)\n",
+ "\n",
+ "\n",
+ "class Denoiser(nn.Module):\n",
+ "    def __init__(self, in_channels=1, out_channels=1):\n",
+ "        super().__init__()\n",
+ "        # Same UNet architecture for denoising\n",
+ "        self.unet = UNet(in_channels=in_channels, out_channels=out_channels)\n",
+ "\n",
+ "    def forward(self, x):\n",
+ "        return self.unet(x)\n",
+ "\n",
+ "def train_reconstructor_and_denoiser(dicom_dir, val_dicom_dir, epochs=50, batch_size=4, grad_accumulation_steps=2):\n",
+ "    # Dataset and DataLoader\n",
+ "    dataset = MedicalImageDataset(dicom_dir)\n",
+ "    train_dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)\n",
+ "    val_dataset = MedicalImageDataset(val_dicom_dir)\n",
+ "    val_dataloader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)\n",
+ "\n",
+ "    # Initialize both models\n",
+ "    reconstructor = Reconstructor().to(device)\n",
+ "    denoiser = Denoiser().to(device)\n",
+ "\n",
+ "    # Check if pre-trained models exist\n",
+ "    reconstructor_model_path = 'medium_reconstructor.pth'\n",
+ "    denoiser_model_path = 'medium_denoiser.pth'\n",
+ "\n",
+ "    # Resume from existing checkpoints if they exist\n",
+ "    if os.path.exists(reconstructor_model_path):\n",
+ "        reconstructor.load_state_dict(torch.load(reconstructor_model_path, map_location=device))\n",
+ "        print(f\"Resumed training from {reconstructor_model_path}\")\n",
+ "    else:\n",
+ "        print(\"No pre-trained reconstructor model found, starting from scratch.\")\n",
+ "\n",
+ "    if os.path.exists(denoiser_model_path):\n",
+ "        denoiser.load_state_dict(torch.load(denoiser_model_path, map_location=device))\n",
+ "        print(f\"Resumed training from {denoiser_model_path}\")\n",
+ "    else:\n",
+ "        print(\"No pre-trained denoiser model found, starting from scratch.\")\n",
+ "\n",
+ "    # Loss functions for both models\n",
+ "    reconstructor_criterion = nn.MSELoss()\n",
+ "    denoiser_criterion = nn.MSELoss()\n",
+ "\n",
+ "    # Optimizers for both models\n",
+ "    reconstructor_optimizer = torch.optim.Adam(reconstructor.parameters(), lr=0.0001)\n",
+ "    denoiser_optimizer = torch.optim.Adam(denoiser.parameters(), lr=0.0001)\n",
+ "\n",
+ "    # Best validation loss initialization\n",
+ "    best_reconstructor_val_loss = float('inf')\n",
+ "    best_denoiser_val_loss = float('inf')\n",
+ "\n",
+ "    # Training loop with tqdm\n",
+ "    for epoch in range(epochs):\n",
+ "        reconstructor.train()\n",
+ "        denoiser.train()\n",
+ "\n",
+ "        reconstructor_total_loss = 0\n",
+ "        denoiser_total_loss = 0\n",
+ "\n",
+ "        reconstructor_optimizer.zero_grad()\n",
+ "        denoiser_optimizer.zero_grad()\n",
+ "\n",
+ "        with tqdm(train_dataloader, unit=\"batch\", desc=f\"Epoch {epoch+1}/{epochs}\") as tepoch:\n",
+ "            for i, (images, targets) in enumerate(tepoch):\n",
+ "                images, targets = images.to(device), targets.to(device)\n",
+ "\n",
+ "                # Train Reconstructor; scale the loss so accumulated\n",
+ "                # gradients match a single large-batch step\n",
+ "                reconstructor_outputs = reconstructor(images)\n",
+ "                reconstructor_loss = reconstructor_criterion(reconstructor_outputs, targets)\n",
+ "                (reconstructor_loss / grad_accumulation_steps).backward()\n",
+ "\n",
+ "                # Gradient accumulation for reconstructor\n",
+ "                if (i + 1) % grad_accumulation_steps == 0 or (i + 1) == len(train_dataloader):\n",
+ "                    reconstructor_optimizer.step()\n",
+ "                    reconstructor_optimizer.zero_grad()\n",
+ "\n",
+ "                reconstructor_total_loss += reconstructor_loss.item()\n",
+ "\n",
+ "                # Train Denoiser on the Reconstructor's output as noisy input;\n",
+ "                # detach so denoiser gradients do not flow into the reconstructor\n",
+ "                noisy_images = reconstructor_outputs.detach()\n",
+ "                denoiser_outputs = denoiser(noisy_images)\n",
+ "                denoiser_loss = denoiser_criterion(denoiser_outputs, targets)\n",
+ "                (denoiser_loss / grad_accumulation_steps).backward()\n",
+ "\n",
+ "                # Gradient accumulation for denoiser\n",
+ "                if (i + 1) % grad_accumulation_steps == 0 or (i + 1) == len(train_dataloader):\n",
+ "                    denoiser_optimizer.step()\n",
+ "                    denoiser_optimizer.zero_grad()\n",
+ "\n",
+ "                denoiser_total_loss += denoiser_loss.item()\n",
+ "\n",
+ "                # Show the running average per-batch loss in the progress bar\n",
+ "                tepoch.set_postfix(\n",
+ "                    reconstructor_loss=reconstructor_total_loss / (i + 1),\n",
+ "                    denoiser_loss=denoiser_total_loss / (i + 1)\n",
+ "                )\n",
+ "\n",
+ "        # Average training loss and validation metrics for both models\n",
+ "        avg_reconstructor_train_loss = reconstructor_total_loss / len(train_dataloader)\n",
+ "        avg_denoiser_train_loss = denoiser_total_loss / len(train_dataloader)\n",
+ "\n",
+ "        avg_reconstructor_val_loss, _ = calculate_loss_and_psnr(reconstructor, val_dataloader, reconstructor_criterion)\n",
+ "        avg_denoiser_val_loss, _ = calculate_loss_and_psnr(denoiser, val_dataloader, denoiser_criterion)\n",
+ "\n",
+ "        print(f\"Epoch [{epoch+1}/{epochs}] - \"\n",
+ "              f\"Reconstructor Train Loss: {avg_reconstructor_train_loss:.4f}, \"\n",
+ "              f\"Denoiser Train Loss: {avg_denoiser_train_loss:.4f}, \"\n",
+ "              f\"Reconstructor Val Loss: {avg_reconstructor_val_loss:.4f}, \"\n",
+ "              f\"Denoiser Val Loss: {avg_denoiser_val_loss:.4f}\")\n",
+ "\n",
+ "        # Save models whenever validation loss improves\n",
+ "        if avg_reconstructor_val_loss < best_reconstructor_val_loss:\n",
+ "            best_reconstructor_val_loss = avg_reconstructor_val_loss\n",
+ "            torch.save(reconstructor.state_dict(), reconstructor_model_path)\n",
+ "            print(f\"Reconstructor model saved with improved validation loss: {avg_reconstructor_val_loss:.4f}\")\n",
+ "\n",
+ "        if avg_denoiser_val_loss < best_denoiser_val_loss:\n",
+ "            best_denoiser_val_loss = avg_denoiser_val_loss\n",
+ "            torch.save(denoiser.state_dict(), denoiser_model_path)\n",
+ "            print(f\"Denoiser model saved with improved validation loss: {avg_denoiser_val_loss:.4f}\")\n",
+ "\n",
+ "    return reconstructor, denoiser\n",
+ "\n",
+ "# Example usage with train and validation directories\n",
+ "reconstructor_model, denoiser_model = train_reconstructor_and_denoiser(\n",
+ "    \"./TCIA_Split/train\", \"./TCIA_Split/val\", epochs=50, batch_size=6, grad_accumulation_steps=16\n",
+ ")"
+ ]
+ },
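+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "After training, the same helpers can score a held-out split. The cell below is a minimal sketch; `./TCIA_Split/test` is an assumed path and should point at a directory of `.dcm` files."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Minimal sketch (assumed path): score the trained models on a test split.\n",
+ "test_loader = DataLoader(MedicalImageDataset(\"./TCIA_Split/test\"), batch_size=4, shuffle=False)\n",
+ "\n",
+ "for name, model in [(\"Reconstructor\", reconstructor_model), (\"Denoiser\", denoiser_model)]:\n",
+ "    test_loss, test_psnr = calculate_loss_and_psnr(model, test_loader, nn.MSELoss())\n",
+ "    print(f\"{name} - test MSE: {test_loss:.4f}, test PSNR: {test_psnr:.2f} dB\")"
+ ]
+ },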
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Larger Reconstructor and Denoiser U-Net (largeRD)"
+ ]
+ },
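+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The large variant adds a fifth encoder stage (up to 1536 channels) and a third conv layer per block, so inputs pass through four 2×2 poolings and their height and width must be divisible by 16 for the skip connections to align. The extra memory cost is offset by training with `batch_size=1` and 64 accumulation steps, for an effective batch size of 64."
+ ]
+ },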
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import torch\n",
+ "import torch.nn as nn\n",
+ "import torch.nn.functional as F\n",
+ "import pydicom\n",
+ "import numpy as np\n",
+ "from torch.utils.data import Dataset, DataLoader\n",
+ "import os\n",
+ "from torch.utils.checkpoint import checkpoint\n",
+ "from tqdm import tqdm  # Progress bar\n",
+ "\n",
+ "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
+ "\n",
+ "class MedicalImageDataset(Dataset):\n",
+ "    def __init__(self, dicom_dir):\n",
+ "        self.dicom_files = [os.path.join(dicom_dir, f) for f in os.listdir(dicom_dir) if f.endswith('.dcm')]\n",
+ "\n",
+ "    def __len__(self):\n",
+ "        return len(self.dicom_files)\n",
+ "\n",
+ "    def __getitem__(self, idx):\n",
+ "        # Read DICOM file and normalize to [0, 1]\n",
+ "        dcm = pydicom.dcmread(self.dicom_files[idx])\n",
+ "        image = dcm.pixel_array.astype(float)\n",
+ "        image = (image - image.min()) / (image.max() - image.min())\n",
+ "\n",
+ "        # Convert to tensor with a channel dimension\n",
+ "        image_tensor = torch.from_numpy(image).float().unsqueeze(0)\n",
+ "        return image_tensor, image_tensor\n",
+ "\n",
+ "class UNetBlock(nn.Module):\n",
+ "    # Three conv-BN-ReLU layers per block (one more than mediumRD)\n",
+ "    def __init__(self, in_channels, out_channels):\n",
+ "        super().__init__()\n",
+ "        self.conv1 = nn.Conv2d(in_channels, out_channels, 3, padding=1)\n",
+ "        self.bn1 = nn.BatchNorm2d(out_channels)\n",
+ "        self.conv2 = nn.Conv2d(out_channels, out_channels, 3, padding=1)\n",
+ "        self.bn2 = nn.BatchNorm2d(out_channels)\n",
+ "        self.conv3 = nn.Conv2d(out_channels, out_channels, 3, padding=1)\n",
+ "        self.bn3 = nn.BatchNorm2d(out_channels)\n",
+ "\n",
+ "    def forward(self, x):\n",
+ "        x = F.relu(self.bn1(self.conv1(x)))\n",
+ "        x = F.relu(self.bn2(self.conv2(x)))\n",
+ "        x = F.relu(self.bn3(self.conv3(x)))\n",
+ "        return x\n",
+ "\n",
+ "class UNet(nn.Module):\n",
+ "    def __init__(self, in_channels=1, out_channels=1):\n",
+ "        super().__init__()\n",
+ "        # Encoder (five stages; channel widths double at each stage)\n",
+ "        self.enc1 = UNetBlock(in_channels, 96)\n",
+ "        self.enc2 = UNetBlock(96, 192)\n",
+ "        self.enc3 = UNetBlock(192, 384)\n",
+ "        self.enc4 = UNetBlock(384, 768)\n",
+ "        self.enc5 = UNetBlock(768, 1536)\n",
+ "\n",
+ "        # Decoder with learned upsampling (transposed convolutions)\n",
+ "        self.upconv5 = nn.ConvTranspose2d(1536, 768, kernel_size=2, stride=2)\n",
+ "        self.dec5 = UNetBlock(768 + 768, 768)  # Input channels doubled by skip concatenation\n",
+ "\n",
+ "        self.upconv4 = nn.ConvTranspose2d(768, 384, kernel_size=2, stride=2)\n",
+ "        self.dec4 = UNetBlock(384 + 384, 384)\n",
+ "\n",
+ "        self.upconv3 = nn.ConvTranspose2d(384, 192, kernel_size=2, stride=2)\n",
+ "        self.dec3 = UNetBlock(192 + 192, 192)\n",
+ "\n",
+ "        self.upconv2 = nn.ConvTranspose2d(192, 96, kernel_size=2, stride=2)\n",
+ "        self.dec2 = UNetBlock(96 + 96, 96)\n",
+ "\n",
+ "        self.dec1 = UNetBlock(96, out_channels)  # Final output block\n",
+ "\n",
+ "        self.pool = nn.MaxPool2d(2, 2)\n",
+ "\n",
+ "    def forward(self, x):\n",
+ "        # Encoder path (gradient checkpointing trades compute for memory;\n",
+ "        # use_reentrant=False lets parameter gradients flow even though the\n",
+ "        # network input itself does not require grad)\n",
+ "        e1 = checkpoint(self.enc1, x, use_reentrant=False)\n",
+ "        e2 = checkpoint(self.enc2, self.pool(e1), use_reentrant=False)\n",
+ "        e3 = checkpoint(self.enc3, self.pool(e2), use_reentrant=False)\n",
+ "        e4 = checkpoint(self.enc4, self.pool(e3), use_reentrant=False)\n",
+ "        e5 = checkpoint(self.enc5, self.pool(e4), use_reentrant=False)\n",
+ "\n",
+ "        # Decoder path with learned upsampling and skip connections.\n",
+ "        # Skip shapes align as long as the input height and width are\n",
+ "        # divisible by 16 (four 2x2 poolings).\n",
+ "        d5 = self.upconv5(e5)\n",
+ "        d5 = torch.cat([d5, e4], dim=1)  # Concatenate with encoder features\n",
+ "        d5 = checkpoint(self.dec5, d5, use_reentrant=False)\n",
+ "\n",
+ "        d4 = self.upconv4(d5)\n",
+ "        d4 = torch.cat([d4, e3], dim=1)\n",
+ "        d4 = checkpoint(self.dec4, d4, use_reentrant=False)\n",
+ "\n",
+ "        d3 = self.upconv3(d4)\n",
+ "        d3 = torch.cat([d3, e2], dim=1)\n",
+ "        d3 = checkpoint(self.dec3, d3, use_reentrant=False)\n",
+ "\n",
+ "        d2 = self.upconv2(d3)\n",
+ "        d2 = torch.cat([d2, e1], dim=1)\n",
+ "        d2 = checkpoint(self.dec2, d2, use_reentrant=False)\n",
+ "\n",
+ "        d1 = self.dec1(d2)  # No checkpointing for the final output layer\n",
+ "\n",
+ "        return d1\n",
+ "\n",
+ "def calculate_loss(model, dataloader, criterion):\n",
+ "    model.eval()\n",
+ "    total_loss = 0\n",
+ "    with torch.no_grad():\n",
+ "        for images, targets in dataloader:\n",
+ "            images, targets = images.to(device), targets.to(device)\n",
+ "            outputs = model(images)\n",
+ "            loss = criterion(outputs, targets)\n",
+ "            total_loss += loss.item()\n",
+ "    return total_loss / len(dataloader)\n",
+ "\n",
+ "def calculate_psnr(output, target, max_pixel=1.0):\n",
+ "    # PSNR = 20 * log10(MAX / sqrt(MSE)); assumes values in [0, max_pixel]\n",
+ "    mse = F.mse_loss(output, target)\n",
+ "    psnr = 20 * torch.log10(max_pixel / torch.sqrt(mse))\n",
+ "    return psnr.item()\n",
+ "\n",
+ "def calculate_loss_and_psnr(model, dataloader, criterion):\n",
+ "    model.eval()\n",
+ "    total_loss = 0\n",
+ "    total_psnr = 0\n",
+ "    num_batches = len(dataloader)\n",
+ "\n",
+ "    with torch.no_grad():\n",
+ "        for images, targets in dataloader:\n",
+ "            images, targets = images.to(device), targets.to(device)\n",
+ "            outputs = model(images)\n",
+ "\n",
+ "            # Calculate MSE loss\n",
+ "            loss = criterion(outputs, targets)\n",
+ "            total_loss += loss.item()\n",
+ "\n",
+ "            # Calculate PSNR\n",
+ "            psnr = calculate_psnr(outputs, targets)\n",
+ "            total_psnr += psnr\n",
+ "\n",
+ "    avg_loss = total_loss / num_batches\n",
+ "    avg_psnr = total_psnr / num_batches\n",
+ "\n",
+ "    return avg_loss, avg_psnr\n",
+ "\n",
+ "class Reconstructor(nn.Module):\n",
+ "    def __init__(self, in_channels=1, out_channels=1):\n",
+ "        super().__init__()\n",
+ "        # Same UNet architecture for reconstruction\n",
+ "        self.unet = UNet(in_channels=in_channels, out_channels=out_channels)\n",
+ "\n",
+ "    def forward(self, x):\n",
+ "        return self.unet(x)\n",
+ "\n",
+ "\n",
+ "class Denoiser(nn.Module):\n",
+ "    def __init__(self, in_channels=1, out_channels=1):\n",
+ "        super().__init__()\n",
+ "        # Same UNet architecture for denoising\n",
+ "        self.unet = UNet(in_channels=in_channels, out_channels=out_channels)\n",
+ "\n",
+ "    def forward(self, x):\n",
+ "        return self.unet(x)\n",
+ "\n",
+ "def train_reconstructor_and_denoiser(dicom_dir, val_dicom_dir, epochs=50, batch_size=4, grad_accumulation_steps=2):\n",
+ "    # Dataset and DataLoader\n",
+ "    dataset = MedicalImageDataset(dicom_dir)\n",
+ "    train_dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)\n",
+ "    val_dataset = MedicalImageDataset(val_dicom_dir)\n",
+ "    val_dataloader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)\n",
+ "\n",
+ "    # Initialize both models\n",
+ "    reconstructor = Reconstructor().to(device)\n",
+ "    denoiser = Denoiser().to(device)\n",
+ "\n",
+ "    # Loss functions for both models\n",
+ "    reconstructor_criterion = nn.MSELoss()\n",
+ "    denoiser_criterion = nn.MSELoss()\n",
+ "\n",
+ "    # Optimizers for both models\n",
+ "    reconstructor_optimizer = torch.optim.Adam(reconstructor.parameters(), lr=0.0001)\n",
+ "    denoiser_optimizer = torch.optim.Adam(denoiser.parameters(), lr=0.0001)\n",
+ "\n",
+ "    # Best validation loss initialization\n",
+ "    best_reconstructor_val_loss = float('inf')\n",
+ "    best_denoiser_val_loss = float('inf')\n",
+ "    best_reconstructor_model_path = 'largeR.pth'\n",
+ "    best_denoiser_model_path = 'largeD.pth'\n",
+ "\n",
+ "    # Training loop with tqdm\n",
+ "    for epoch in range(epochs):\n",
+ "        reconstructor.train()\n",
+ "        denoiser.train()\n",
+ "\n",
+ "        reconstructor_total_loss = 0\n",
+ "        denoiser_total_loss = 0\n",
+ "\n",
+ "        reconstructor_optimizer.zero_grad()\n",
+ "        denoiser_optimizer.zero_grad()\n",
+ "\n",
+ "        with tqdm(train_dataloader, unit=\"batch\", desc=f\"Epoch {epoch+1}/{epochs}\") as tepoch:\n",
+ "            for i, (images, targets) in enumerate(tepoch):\n",
+ "                images, targets = images.to(device), targets.to(device)\n",
+ "\n",
+ "                # Train Reconstructor; scale the loss so accumulated\n",
+ "                # gradients match a single large-batch step\n",
+ "                reconstructor_outputs = reconstructor(images)\n",
+ "                reconstructor_loss = reconstructor_criterion(reconstructor_outputs, targets)\n",
+ "                (reconstructor_loss / grad_accumulation_steps).backward()\n",
+ "\n",
+ "                # Gradient accumulation for reconstructor\n",
+ "                if (i + 1) % grad_accumulation_steps == 0 or (i + 1) == len(train_dataloader):\n",
+ "                    reconstructor_optimizer.step()\n",
+ "                    reconstructor_optimizer.zero_grad()\n",
+ "\n",
+ "                reconstructor_total_loss += reconstructor_loss.item()\n",
+ "\n",
+ "                # Train Denoiser on the Reconstructor's output as noisy input;\n",
+ "                # detach so denoiser gradients do not flow into the reconstructor\n",
+ "                noisy_images = reconstructor_outputs.detach()\n",
+ "                denoiser_outputs = denoiser(noisy_images)\n",
+ "                denoiser_loss = denoiser_criterion(denoiser_outputs, targets)\n",
+ "                (denoiser_loss / grad_accumulation_steps).backward()\n",
+ "\n",
+ "                # Gradient accumulation for denoiser\n",
+ "                if (i + 1) % grad_accumulation_steps == 0 or (i + 1) == len(train_dataloader):\n",
+ "                    denoiser_optimizer.step()\n",
+ "                    denoiser_optimizer.zero_grad()\n",
+ "\n",
+ "                denoiser_total_loss += denoiser_loss.item()\n",
+ "\n",
+ "                # Show the running average per-batch loss in the progress bar\n",
+ "                tepoch.set_postfix(\n",
+ "                    reconstructor_loss=reconstructor_total_loss / (i + 1),\n",
+ "                    denoiser_loss=denoiser_total_loss / (i + 1)\n",
+ "                )\n",
+ "\n",
+ "        # Average training loss and validation metrics for both models\n",
+ "        avg_reconstructor_train_loss = reconstructor_total_loss / len(train_dataloader)\n",
+ "        avg_denoiser_train_loss = denoiser_total_loss / len(train_dataloader)\n",
+ "\n",
+ "        avg_reconstructor_val_loss, _ = calculate_loss_and_psnr(reconstructor, val_dataloader, reconstructor_criterion)\n",
+ "        avg_denoiser_val_loss, _ = calculate_loss_and_psnr(denoiser, val_dataloader, denoiser_criterion)\n",
+ "\n",
+ "        print(f\"Epoch [{epoch+1}/{epochs}] - \"\n",
+ "              f\"Reconstructor Train Loss: {avg_reconstructor_train_loss:.4f}, \"\n",
+ "              f\"Denoiser Train Loss: {avg_denoiser_train_loss:.4f}, \"\n",
+ "              f\"Reconstructor Val Loss: {avg_reconstructor_val_loss:.4f}, \"\n",
+ "              f\"Denoiser Val Loss: {avg_denoiser_val_loss:.4f}\")\n",
+ "\n",
+ "        # Save models whenever validation loss improves\n",
+ "        if avg_reconstructor_val_loss < best_reconstructor_val_loss:\n",
+ "            best_reconstructor_val_loss = avg_reconstructor_val_loss\n",
+ "            torch.save(reconstructor.state_dict(), best_reconstructor_model_path)\n",
+ "            print(f\"Reconstructor model saved with improved validation loss: {avg_reconstructor_val_loss:.4f}\")\n",
+ "\n",
+ "        if avg_denoiser_val_loss < best_denoiser_val_loss:\n",
+ "            best_denoiser_val_loss = avg_denoiser_val_loss\n",
+ "            torch.save(denoiser.state_dict(), best_denoiser_model_path)\n",
+ "            print(f\"Denoiser model saved with improved validation loss: {avg_denoiser_val_loss:.4f}\")\n",
+ "\n",
+ "    return reconstructor, denoiser\n",
+ "\n",
+ "# Example usage with train and validation directories\n",
+ "reconstructor_model, denoiser_model = train_reconstructor_and_denoiser(\n",
+ "    r\"D:\\\\TCIA_Split\\\\train\", r\"D:\\\\TCIA_Split\\\\val\", epochs=50, batch_size=1, grad_accumulation_steps=64\n",
+ ")"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "tf",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.11"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+ }