PranayChamala committed on
Commit 40a8a2a · 1 Parent(s): bbf4d25

Removed some unnecessary files

41598_2023_41576_Fig1_HTML.jpg DELETED
Binary file (54.7 kB)
 
app.py DELETED
@@ -1,392 +0,0 @@
- # app.py
- import os, io, base64, cv2, torch, numpy as np
- from PIL import Image
- from flask import Flask, request, render_template, jsonify
- import torch.nn as nn
- import torch.nn.functional as F
- import torchvision.models as models
- import torchvision.transforms as transforms
- from monai.transforms import EnsureChannelFirst, ScaleIntensity, Resize, ToTensor
-
- # Enable debug logging
- import logging
- logging.basicConfig(level=logging.DEBUG)
-
- # -------------------------------
- # Global Setup
- # -------------------------------
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
- def pil_to_base64(pil_img):
-     buff = io.BytesIO()
-     pil_img.save(buff, format="JPEG")
-     return base64.b64encode(buff.getvalue()).decode("utf-8")
-
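The base64 helper exists so the front end can inline results; a minimal sketch of how such a string is typically consumed, assuming the usual data-URI pattern in the template (which is not shown in this commit):

img = Image.new("RGB", (8, 8))                 # placeholder image
b64 = pil_to_base64(img)
data_uri = f"data:image/jpeg;base64,{b64}"     # what an <img src=...> expects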
- # -------------------------------
- # 1. CLASSIFIER MODULE (DenseNet121 via MONAI)
- # -------------------------------
- CLASS_NAMES = ['AbdomenCT', 'BreastMRI', 'Chest Xray', 'ChestCT',
-                'Endoscopy', 'Hand Xray', 'HeadCT', 'HeadMRI']
- from monai.networks.nets import DenseNet121
-
- def load_classifier_model(model_path):
-     model = DenseNet121(
-         spatial_dims=2,
-         in_channels=3,
-         out_channels=len(CLASS_NAMES)
-     ).to(device)
-     state_dict = torch.load(model_path, map_location=device)
-     if isinstance(state_dict, dict) and "state_dict" in state_dict:
-         state_dict = state_dict["state_dict"]
-     model.load_state_dict(state_dict, strict=False)
-     model.eval()
-     return model
-
- def load_and_preprocess_image_classifier(image_path):
-     image_path = image_path.strip()
-     if image_path.lower().endswith((".jpg", ".jpeg", ".png")):
-         image = Image.open(image_path).convert("RGB")
-         image = np.array(image)
-     elif image_path.lower().endswith((".nii", ".nii.gz")):
-         import nibabel as nib
-         image = nib.load(image_path).get_fdata()
-         image = np.squeeze(image)
-         if len(image.shape) == 4:
-             image = image[..., 0]
-         if len(image.shape) == 3:
-             image = image[:, :, image.shape[2] // 2]
-         if len(image.shape) == 2:
-             image = np.stack([image]*3, axis=-1)
-     elif image_path.lower().endswith(".dcm"):
-         import pydicom
-         dicom_data = pydicom.dcmread(image_path)
-         image = dicom_data.pixel_array
-         if len(image.shape) == 2:
-             image = np.stack([image]*3, axis=-1)
-     else:
-         raise ValueError("Unsupported file format!")
-     if len(image.shape) == 3 and image.shape[-1] == 3:
-         image = np.transpose(image, (2, 0, 1))
-     else:
-         raise ValueError(f"Unexpected image shape: {image.shape}")
-     image = torch.tensor(image, dtype=torch.float32)
-     image = ScaleIntensity()(image)
-     image = Resize((224, 224))(image)
-     image = image.unsqueeze(0)
-     return image.to(device)
-
- def classify_medical_image(image_path, classifier_model):
-     image_tensor = load_and_preprocess_image_classifier(image_path)
-     with torch.no_grad():
-         output = classifier_model(image_tensor)
-     pred_class = torch.argmax(output, dim=1).item()
-     return CLASS_NAMES[pred_class]
-
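A minimal sketch of exercising the classifier stage on its own; the checkpoint path matches the one used in complete_pipeline below, while the sample file name is a placeholder:

clf = load_classifier_model("models/best_metric_model (4).pth")
modality = classify_medical_image("sample_chest_xray.png", clf)  # placeholder input
print(modality)  # one of CLASS_NAMES, e.g. 'Chest Xray'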
- # -------------------------------
- # 2. BRAIN TUMOR SEGMENTATION MODULE (UNetMulti)
- # -------------------------------
- class DoubleConvUNet(nn.Module):
-     def __init__(self, in_channels, out_channels):
-         super(DoubleConvUNet, self).__init__()
-         self.conv = nn.Sequential(
-             nn.Conv2d(in_channels, out_channels, 3, padding=1),
-             nn.BatchNorm2d(out_channels),
-             nn.ReLU(inplace=True),
-             nn.Conv2d(out_channels, out_channels, 3, padding=1),
-             nn.BatchNorm2d(out_channels),
-             nn.ReLU(inplace=True)
-         )
-
-     def forward(self, x):
-         return self.conv(x)
-
- class UNetMulti(nn.Module):
-     def __init__(self, in_channels=3, out_channels=4):
-         super(UNetMulti, self).__init__()
-         self.down1 = DoubleConvUNet(in_channels, 64)
-         self.pool1 = nn.MaxPool2d(2)
-         self.down2 = DoubleConvUNet(64, 128)
-         self.pool2 = nn.MaxPool2d(2)
-         self.down3 = DoubleConvUNet(128, 256)
-         self.pool3 = nn.MaxPool2d(2)
-         self.down4 = DoubleConvUNet(256, 512)
-         self.pool4 = nn.MaxPool2d(2)
-         self.bottleneck = DoubleConvUNet(512, 1024)
-         self.up4 = nn.ConvTranspose2d(1024, 512, 2, stride=2)
-         self.conv4 = DoubleConvUNet(1024, 512)
-         self.up3 = nn.ConvTranspose2d(512, 256, 2, stride=2)
-         self.conv3 = DoubleConvUNet(512, 256)
-         self.up2 = nn.ConvTranspose2d(256, 128, 2, stride=2)
-         self.conv2 = DoubleConvUNet(256, 128)
-         self.up1 = nn.ConvTranspose2d(128, 64, 2, stride=2)
-         self.conv1 = DoubleConvUNet(128, 64)
-         self.final_conv = nn.Conv2d(64, out_channels, 1)
-
-     def forward(self, x):
-         c1 = self.down1(x)
-         p1 = self.pool1(c1)
-         c2 = self.down2(p1)
-         p2 = self.pool2(c2)
-         c3 = self.down3(p2)
-         p3 = self.pool3(c3)
-         c4 = self.down4(p3)
-         p4 = self.pool4(c4)
-         bn = self.bottleneck(p4)
-         u4 = self.up4(bn)
-         merge4 = torch.cat([u4, c4], dim=1)
-         c5 = self.conv4(merge4)
-         u3 = self.up3(c5)
-         merge3 = torch.cat([u3, c3], dim=1)
-         c6 = self.conv3(merge3)
-         u2 = self.up2(c6)
-         merge2 = torch.cat([u2, c2], dim=1)
-         c7 = self.conv2(merge2)
-         u1 = self.up1(c7)
-         merge1 = torch.cat([u1, c1], dim=1)
-         c8 = self.conv1(merge1)
-         return self.final_conv(c8)
-
- def process_brain_tumor_return(image, model_path="models/brain_tumor_unet_multiclass.pth"):
-     logging.debug("Processing brain tumor segmentation")
-     model = UNetMulti(in_channels=3, out_channels=4).to(device)
-     model.load_state_dict(torch.load(model_path, map_location=device))
-     model.eval()
-     transform_img = transforms.Compose([
-         transforms.Resize((256, 256)),
-         transforms.ToTensor()
-     ])
-     input_tensor = transform_img(image).unsqueeze(0).to(device)
-     with torch.no_grad():
-         output = model(input_tensor)
-     preds = torch.argmax(output, dim=1).squeeze().cpu().numpy()
-     image_np = transform_img(image).permute(1, 2, 0).cpu().numpy()
-     overlay = cv2.applyColorMap(np.uint8(255 * preds / np.max(preds + 1e-8)), cv2.COLORMAP_JET)
-     overlay = cv2.cvtColor(overlay, cv2.COLOR_BGR2RGB)
-     blended = cv2.addWeighted(np.uint8(image_np * 255), 0.6, overlay, 0.4, 0)
-     orig_pil = Image.fromarray((image_np * 255).astype(np.uint8))
-     mask_pil = Image.fromarray(overlay)
-     overlay_pil = Image.fromarray(blended)
-     return {
-         "original": pil_to_base64(orig_pil),
-         "mask": pil_to_base64(mask_pil),
-         "overlay": pil_to_base64(overlay_pil)
-     }
-
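A quick shape sanity check for the U-Net above (a sketch; the random input only verifies the tensor plumbing, and H and W must be divisible by 16 to survive the four pooling stages):

x = torch.randn(1, 3, 256, 256)                # H, W divisible by 2**4
logits = UNetMulti(in_channels=3, out_channels=4)(x)
assert logits.shape == (1, 4, 256, 256)        # per-pixel logits, 4 classes
mask = logits.argmax(dim=1)                    # (1, 256, 256) class-index map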
- # -------------------------------
- # 3. ENDOSCOPY POLYP DETECTION MODULE (Binary UNet)
- # -------------------------------
- class UNetBinary(nn.Module):
-     def __init__(self, in_channels=3, out_channels=1):
-         super(UNetBinary, self).__init__()
-         self.down1 = DoubleConvUNet(in_channels, 64)
-         self.pool1 = nn.MaxPool2d(2)
-         self.down2 = DoubleConvUNet(64, 128)
-         self.pool2 = nn.MaxPool2d(2)
-         self.down3 = DoubleConvUNet(128, 256)
-         self.pool3 = nn.MaxPool2d(2)
-         self.down4 = DoubleConvUNet(256, 512)
-         self.pool4 = nn.MaxPool2d(2)
-         self.bottleneck = DoubleConvUNet(512, 1024)
-         self.up4 = nn.ConvTranspose2d(1024, 512, 2, stride=2)
-         self.conv4 = DoubleConvUNet(1024, 512)
-         self.up3 = nn.ConvTranspose2d(512, 256, 2, stride=2)
-         self.conv3 = DoubleConvUNet(512, 256)
-         self.up2 = nn.ConvTranspose2d(256, 128, 2, stride=2)
-         self.conv2 = DoubleConvUNet(256, 128)
-         self.up1 = nn.ConvTranspose2d(128, 64, 2, stride=2)
-         self.conv1 = DoubleConvUNet(128, 64)
-         self.final_conv = nn.Conv2d(64, out_channels, 1)
-
-     def forward(self, x):
-         c1 = self.down1(x)
-         p1 = self.pool1(c1)
-         c2 = self.down2(p1)
-         p2 = self.pool2(c2)
-         c3 = self.down3(p2)
-         p3 = self.pool3(c3)
-         c4 = self.down4(p3)
-         p4 = self.pool4(c4)
-         bn = self.bottleneck(p4)
-         u4 = self.up4(bn)
-         merge4 = torch.cat([u4, c4], dim=1)
-         c5 = self.conv4(merge4)
-         u3 = self.up3(c5)
-         merge3 = torch.cat([u3, c3], dim=1)
-         c6 = self.conv3(merge3)
-         u2 = self.up2(c6)
-         merge2 = torch.cat([u2, c2], dim=1)
-         c7 = self.conv2(merge2)
-         u1 = self.up1(c7)
-         merge1 = torch.cat([u1, c1], dim=1)
-         c8 = self.conv1(merge1)
-         return self.final_conv(c8)
-
- def process_endoscopy_return(image, model_path="models/endoscopy_unet.pth"):
-     model = UNetBinary(in_channels=3, out_channels=1).to(device)
-     model.load_state_dict(torch.load(model_path, map_location=device))
-     model.eval()
-     transform_img = transforms.Compose([
-         transforms.Resize((256, 256)),
-         transforms.ToTensor()
-     ])
-     input_tensor = transform_img(image).unsqueeze(0).to(device)
-     with torch.no_grad():
-         output = model(input_tensor)
-     prob = torch.sigmoid(output)
-     mask = (prob > 0.5).float().squeeze().cpu().numpy()
-     image_np = transform_img(image).permute(1, 2, 0).cpu().numpy()
-     overlay = cv2.applyColorMap(np.uint8(255 * mask), cv2.COLORMAP_JET)
-     overlay = cv2.cvtColor(overlay, cv2.COLOR_BGR2RGB)
-     blended = cv2.addWeighted(np.uint8(image_np * 255), 0.6, overlay, 0.4, 0)
-     orig_pil = Image.fromarray((image_np * 255).astype(np.uint8))
-     mask_pil = Image.fromarray(overlay)
-     overlay_pil = Image.fromarray(blended)
-     return {
-         "original": pil_to_base64(orig_pil),
-         "mask": pil_to_base64(mask_pil),
-         "overlay": pil_to_base64(overlay_pil)
-     }
-
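UNetBinary duplicates the UNetMulti topology with a single output channel, so the sigmoid/threshold step yields a plain 0/1 mask; a small follow-on sketch, assuming output is the raw logit tensor produced inside process_endoscopy_return:

prob = torch.sigmoid(output)                   # output: (1, 1, 256, 256) logits
mask = (prob > 0.5).float()
coverage = mask.mean().item()                  # fraction of pixels flagged as polyp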
- # -------------------------------
- # 4. PNEUMONIA DETECTION MODULE (Grad-CAM on ResNet18)
- # -------------------------------
- class GradCAM_Pneumonia:
-     def __init__(self, model, target_layer):
-         self.model = model
-         self.target_layer = target_layer
-         self.gradients = None
-         self.activations = None
-         self.hook_handles = []
-         self._register_hooks()
-
-     def _register_hooks(self):
-         def forward_hook(module, input, output):
-             self.activations = output.detach()
-         def backward_hook(module, grad_in, grad_out):
-             self.gradients = grad_out[0].detach()
-         handle1 = self.target_layer.register_forward_hook(forward_hook)
-         handle2 = self.target_layer.register_backward_hook(backward_hook)
-         self.hook_handles.extend([handle1, handle2])
-
-     def remove_hooks(self):
-         for handle in self.hook_handles:
-             handle.remove()
-
-     def generate(self, input_image, target_class=None):
-         output = self.model(input_image)
-         if target_class is None:
-             target_class = output.argmax(dim=1).item()
-         self.model.zero_grad()
-         one_hot = torch.zeros_like(output)
-         one_hot[0, target_class] = 1
-         with torch.enable_grad():
-             output.backward(gradient=one_hot, retain_graph=True)
-         weights = self.gradients.mean(dim=(2, 3), keepdim=True)
-         cam = (weights * self.activations).sum(dim=1, keepdim=True)
-         cam = F.relu(cam)
-         cam = cam.squeeze().cpu().numpy()
-         _, _, H, W = input_image.shape
-         cam = cv2.resize(cam, (W, H))
-         cam = (cam - np.min(cam)) / (np.max(cam) - np.min(cam) + 1e-8)
-         return cam, output
-
- def process_pneumonia_return(image, model_path="models/pneumonia_resnet18.pth"):
-     model = models.resnet18(pretrained=False)
-     num_ftrs = model.fc.in_features
-     model.fc = nn.Linear(num_ftrs, 2)  # 2 classes: normal and pneumonia
-     model.load_state_dict(torch.load(model_path, map_location=device))
-     model.to(device)
-     model.eval()
-     grad_cam = GradCAM_Pneumonia(model, model.layer4)
-
-     transform_img = transforms.Compose([
-         transforms.Resize((224, 224)),
-         transforms.ToTensor(),
-         transforms.Normalize(mean=[0.485, 0.456, 0.406],
-                              std=[0.229, 0.224, 0.225])
-     ])
-     input_tensor = transform_img(image).unsqueeze(0).to(device)
-     # Enable gradient tracking for the input tensor
-     input_tensor.requires_grad_()
-     # Do NOT wrap the following call with torch.no_grad()
-     cam, output = grad_cam.generate(input_tensor)
-     predicted_class = output.argmax(dim=1).item()
-
-     label_text = "Pneumonia" if predicted_class == 1 else "Normal"
-
-     def get_bounding_box(heatmap, thresh=0.5, min_area=100):
-         heat_uint8 = np.uint8(255 * heatmap)
-         ret, binary = cv2.threshold(heat_uint8, int(thresh * 255), 255, cv2.THRESH_BINARY)
-         contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
-         if len(contours) == 0:
-             return None
-         largest = max(contours, key=cv2.contourArea)
-         if cv2.contourArea(largest) < min_area:
-             return None
-         x, y, w, h = cv2.boundingRect(largest)
-         return (x, y, w, h)
-
-     bbox = None
-     if predicted_class == 1:
-         bbox = get_bounding_box(cam, thresh=0.5, min_area=100)
-
-     resized_image = image.resize((224, 224))
-     image_np = np.array(resized_image)
-     overlay = image_np.copy()
-     if bbox is not None:
-         x, y, w, h = bbox
-         cv2.rectangle(overlay, (x, y), (x + w, y + h), (255, 0, 0), 2)
-     cv2.putText(overlay, label_text, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 0), 2)
-
-     heatmap_color = cv2.applyColorMap(np.uint8(255 * cam), cv2.COLORMAP_JET)
-     heatmap_color = cv2.cvtColor(heatmap_color, cv2.COLOR_BGR2RGB)
-
-     orig_pil = Image.fromarray(image_np)
-     heatmap_pil = Image.fromarray(heatmap_color)
-     overlay_pil = Image.fromarray(overlay)
-     grad_cam.remove_hooks()
-     return {
-         "original": pil_to_base64(orig_pil),
-         "mask": pil_to_base64(heatmap_pil),
-         "overlay": pil_to_base64(overlay_pil)
-     }
-
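A sketch of driving the pneumonia branch directly (file names are placeholders). Note that register_backward_hook is deprecated in recent PyTorch releases; register_full_backward_hook is the usual drop-in replacement for a single-output layer like layer4:

img = Image.open("sample_chest_xray.jpg").convert("RGB")   # placeholder path
result = process_pneumonia_return(img)                     # Grad-CAM + bbox overlay
overlay = Image.open(io.BytesIO(base64.b64decode(result["overlay"])))
overlay.save("pneumonia_overlay.jpg")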
- # -------------------------------
- # 5. COMPLETE PIPELINE FUNCTION
- # -------------------------------
- def complete_pipeline(image_path):
-     classifier_model = load_classifier_model("models/best_metric_model (4).pth")
-     predicted_modality = classify_medical_image(image_path, classifier_model)
-     print(f"Detected modality: {predicted_modality}")
-     original_image = Image.open(image_path).convert("RGB")
-     results = {"predicted_modality": predicted_modality}
-     if predicted_modality in ["HeadCT", "HeadMRI"]:
-         results["specialized"] = process_brain_tumor_return(original_image, "models/brain_tumor_unet_multiclass.pth")
-     elif predicted_modality == "Endoscopy":
-         results["specialized"] = process_endoscopy_return(original_image, "models/endoscopy_unet.pth")
-     elif predicted_modality == "Chest Xray":
-         results["specialized"] = process_pneumonia_return(original_image, "models/pneumonia_resnet18.pth")
-     else:
-         results["message"] = f"No specialized processing for modality: {predicted_modality}"
-     return results
-
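Callers see one of two result shapes from the routing above; a usage sketch (the input path is a placeholder):

result = complete_pipeline("scan.png")          # placeholder path
if "specialized" in result:                     # segmentation / Grad-CAM branch ran
    print(result["predicted_modality"], list(result["specialized"].keys()))
    # e.g. HeadMRI ['original', 'mask', 'overlay']
else:
    print(result["message"])                    # unsupported-modality fallback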
- # -------------------------------
- # 6. FLASK API SETUP
- # -------------------------------
- app = Flask(__name__)
-
- @app.route('/', methods=['GET'])
- def index():
-     return render_template("index.html", result=None)
-
- @app.route('/predict', methods=['POST'])
- def predict():
-     if 'file' not in request.files:
-         return render_template("index.html", result={"error": "No file part in the request."})
-     file = request.files['file']
-     if file.filename == '':
-         return render_template("index.html", result={"error": "No file selected."})
-     temp_path = "temp_input.jpg"
-     file.save(temp_path)
-     try:
-         result = complete_pipeline(temp_path)
-     except Exception as e:
-         result = {"error": str(e)}
-     os.remove(temp_path)
-     return render_template("index.html", result=result)
-
- if __name__ == '__main__':
-     app.run(host='0.0.0.0', port=5000, debug=True)
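A minimal client-side sketch of the /predict contract, assuming the app is running locally (requests is not imported by the app itself):

import requests                                 # client-side only

with open("scan.png", "rb") as f:               # placeholder file
    resp = requests.post("http://localhost:5000/predict", files={"file": f})
print(resp.status_code)                         # route re-renders index.html with results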
 
images.jpg DELETED
Binary file (5.97 kB)
 
oligodendroglioma-banner.jpg DELETED
Binary file (64.8 kB)
 
symptom_assessment.py DELETED
@@ -1,31 +0,0 @@
- from sklearn.feature_extraction.text import TfidfVectorizer
-
- class SymptomAssessment:
-     def __init__(self):
-         # Example disease-symptom mapping dictionary.
-         # In practice, replace this with a robust dataset.
-         self.disease_symptoms = {
-             "Flu": ["fever", "cough", "sore throat", "fatigue"],
-             "Migraine": ["headache", "nausea", "sensitivity to light"],
-             "COVID-19": ["fever", "cough", "shortness of breath", "loss of taste"]
-         }
-         # Prepare vector space for diseases
-         self.vectorizer = TfidfVectorizer()
-         self.diseases = list(self.disease_symptoms.keys())
-         symptom_texts = [" ".join(self.disease_symptoms[d]) for d in self.diseases]
-         self.vectors = self.vectorizer.fit_transform(symptom_texts)
-
-     def assess(self, symptoms_list):
-         """
-         Given a list of reported symptoms, determine the best matching disease
-         and identify which expected symptoms are missing.
-         """
-         input_text = " ".join(symptoms_list)
-         input_vector = self.vectorizer.transform([input_text])
-         similarities = (self.vectors * input_vector.T).toarray().flatten()
-         best_match_index = similarities.argmax()
-         best_disease = self.diseases[best_match_index]
-         missing_symptoms = list(set(self.disease_symptoms[best_disease]) - set(symptoms_list))
-         assessment = (f"Based on the input symptoms, {best_disease} is suspected. "
-                       f"Missing symptoms for improved diagnosis: {missing_symptoms}")
-         return missing_symptoms, assessment
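A minimal usage sketch. TfidfVectorizer L2-normalizes rows by default, so the sparse dot product in assess is effectively a cosine similarity; reported symptoms should use the same wording as disease_symptoms, since unseen tokens drop out of the transform:

sa = SymptomAssessment()
missing, summary = sa.assess(["fever", "cough"])
print(summary)   # names the best cosine match among the three toy diseases
print(missing)   # that disease's expected symptoms not yet reported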