Laishram Pongthangamba Meitei committed on
Commit 8b37d8f · verified · 1 Parent(s): f2b1dca

Update app.py

Files changed (1)
  1. app.py +265 -4
app.py CHANGED
@@ -1,7 +1,268 @@
  import gradio as gr
- 
- def greet(name):
-     return "Hello " + name + "!!"
- 
- iface = gr.Interface(fn=greet, inputs="text", outputs="text")
- iface.launch()
+ import torch
+ from tqdm import tqdm
+ from monai.utils import set_determinism
+ from torch.cuda.amp import autocast
+ # from generative.inferers import DiffusionInferer
+ from generative.networks.nets import DiffusionModelUNet, AutoencoderKL
+ from generative.networks.schedulers import DDPMScheduler
+ from generative.networks.schedulers.ddim import DDIMScheduler
+ import cv2
+ from lib_image_processing.contrast_brightness_lib import controller
+ from lib_image_processing.removebg_lib import get_mask
+ import matplotlib.pyplot as plt
+ import numpy as np
+ 
+ set_determinism(42)
+ torch.cuda.empty_cache()
+ 
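+ # `generative` is the MONAI Generative Models package
+ # (https://github.com/Project-MONAI/GenerativeModels); `lib_image_processing`
+ # appears to be a local helper package bundled with this Space.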
+ ## Load autoencoder
+ 
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+ 
+ autoencoderkl = AutoencoderKL(
+     spatial_dims=2,
+     in_channels=1,
+     out_channels=1,
+     num_channels=(128, 128, 256),
+     latent_channels=3,
+     num_res_blocks=2,
+     attention_levels=(False, False, False),
+     with_encoder_nonlocal_attn=False,
+     with_decoder_nonlocal_attn=False,
+ )
+ root_dir = "models"
+ PATH_auto = f'{root_dir}/auto_encoder_model.pt'
+ 
+ autoencoderkl.load_state_dict(torch.load(PATH_auto))
+ autoencoderkl = autoencoderkl.to(device)
+ 
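+ # Assumption from the config above: with num_channels=(128, 128, 256) the
+ # encoder downsamples twice, so a 112x112 input should map to a 3x28x28
+ # latent -- matching the (1, 3, 28, 28) noise tensor sampled further below.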
+ #### Load UNet and embeddings
+ 
+ embedding_dimension = 64
+ unet = DiffusionModelUNet(
+     spatial_dims=2,
+     in_channels=3,
+     out_channels=3,
+     num_res_blocks=2,
+     num_channels=(128, 256, 512),
+     attention_levels=(False, True, True),
+     num_head_channels=(0, 256, 512),
+     with_conditioning=True,
+     cross_attention_dim=embedding_dimension,
+ )
+ 
+ embed = torch.nn.Embedding(num_embeddings=6, embedding_dim=embedding_dimension, padding_idx=0)
+ 
+ #### Load the model here ##########################################################
+ # PATH_check_point = 'checkpoints/275.pth'
+ # checkpoint = torch.load(PATH_check_point)
+ 
+ PATH_unet_condition = f'{root_dir}/unet_latent_space_model_condition.pt'
+ PATH_embed_condition = f'{root_dir}/embed_latent_space_model_condition.pt'
+ 
+ unet.load_state_dict(torch.load(PATH_unet_condition))
+ embed.load_state_dict(torch.load(PATH_embed_condition))
+ 
+ # unet.load_state_dict(checkpoint['model_state_dict'])
+ # embed.load_state_dict(checkpoint['embed_state_dict'])
+ ####################################################################
+ 
+ unet.to(device)
+ embed.to(device)
+ 
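+ # padding_idx=0 keeps embedding index 0 as an all-zero "null" token, which the
+ # sampling code below uses as the unconditional branch for classifier-free
+ # guidance; indices 1-5 map to the severity grades Normal..Worse (see get_value).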
+ ###---------------> Global variables for anomaly detection <------------------##
+ 
+ input_unhealthy = None   # grayscale 112x112 input, set by get_healthy()
+ output_healthy = None    # generated healthy counterpart, set by get_healthy()
+ 
+ ### ------------------------> Anomaly detection <-----------------------###########
+ 
+ scheduler_ddims = DDIMScheduler(num_train_timesteps=1000, schedule="linear_beta", beta_start=0.0015, beta_end=0.0195)
+ 
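+ # How get_healthy() below works: the unhealthy image is encoded into the KL
+ # latent space, pushed part-way into the diffusion process with DDIM
+ # reversed_step (a deterministic encode), then denoised conditioned on the
+ # healthy class with classifier-free guidance. The output is a pseudo-healthy
+ # version of the same knee; update() turns its difference from the input into
+ # an anomaly map.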
+ def get_healthy(un_img):  # un_img is in range 0-255 but the model takes 0-1, so conversion is needed
+     global input_unhealthy
+     global output_healthy
+ 
+     img = cv2.resize(un_img, (112, 112))  # resize to the model's input resolution
+     gray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+     input_unhealthy = gray_image.copy()
+     gray_image.resize(112, 112, 1)  # add a trailing channel axis in place
+     img_tensor = torch.from_numpy(gray_image * 1.0)
+     img_tensor = img_tensor.permute(2, 0, 1)
+     img_tensor /= 255.
+     img_tensor = img_tensor.float()
+     input_tensor = img_tensor.reshape((1, 1, 112, 112))
+     z_mu, z_sigma = autoencoderkl.encode(input_tensor.to(device))
+     z = autoencoderkl.sampling(z_mu, z_sigma)
+ 
+     unet.eval()
+     guidance_scale = 3.0
+     total_timesteps = 1000
+     latent_space_depth = int(total_timesteps * 0.5)
+     current_img = z
+     current_img = current_img.float()
+     scheduler_ddims.set_timesteps(num_inference_steps=total_timesteps)
+     ## Encoding
+     scheduler_ddims.clip_sample = False
+     class_embedding = embed(torch.zeros(1).long().to(device)).unsqueeze(1)
+     progress_bar = tqdm(range(30))
+     for i in progress_bar:  # go through the noising process
+         t = i
+         with torch.no_grad():
+             model_output = unet(current_img, timesteps=torch.Tensor((t,)).to(current_img.device), context=class_embedding)
+         current_img, _ = scheduler_ddims.reversed_step(model_output, t, current_img)
+         progress_bar.set_postfix({"timestep input": t})
+ 
+     latent_img = current_img
+     ## Decoding
+     conditioning = torch.cat([torch.zeros(1).long(), torch.ones(1).long()], dim=0).to(device)
+     class_embedding = embed(conditioning).unsqueeze(1)
+ 
+     progress_bar = tqdm(range(500))
+     for i in progress_bar:  # go through the denoising process
+         t = latent_space_depth - i
+         current_img_double = torch.cat([current_img] * 2)
+         with torch.no_grad():
+             model_output = unet(
+                 current_img_double, timesteps=torch.Tensor([t, t]).to(current_img.device), context=class_embedding
+             )
+         noise_pred_uncond, noise_pred_text = model_output.chunk(2)
+         noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+         current_img, _ = scheduler_ddims.step(noise_pred, t, current_img)
+         progress_bar.set_postfix({"timestep input": t})
+         # torch.cuda.empty_cache()
+     current_img_decode = autoencoderkl.decode(current_img)
+ 
+     out_image = current_img_decode[0][0].to('cpu').detach().numpy()
+     out_image = 255 * np.clip(out_image, 0, 1)  # clip before the uint8 cast to avoid wrap-around
+     out_image = out_image.astype('uint8')
+     output_healthy = out_image.copy()
+     return cv2.resize(out_image, (448, 448))
+ 
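+ # A minimal usage sketch outside the UI (the file path is hypothetical):
+ #   un_img = cv2.imread('examples/knee.png')   # BGR uint8 image of any size
+ #   healthy = get_healthy(un_img)              # 448x448 uint8 pseudo-healthy knee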
+ ##------------------> Anomaly detection, contrast and background removal <-------------------##
+ 
+ def update(brightness, contrast):  ## def update(brightness, contrast, thr1, thr2):
+     unhealthy_c = controller(input_unhealthy, brightness, contrast)
+     healthy_c = controller(output_healthy, brightness, contrast)
+     # unhealthy_remove_bg = get_mask(unhealthy_c, thr1, thr2)
+     # healthy_remove_bg = get_mask(healthy_c, thr1, thr2)
+     # diff_img = unhealthy_remove_bg - healthy_remove_bg
+     diff_img = unhealthy_c - healthy_c
+     cmap = plt.get_cmap('inferno')
+     diff_img_a = cmap(diff_img)
+     diff_img = np.delete(diff_img_a, 3, 2)  # drop the alpha channel from the RGBA colormap output
+     return cv2.resize(healthy_c, (448, 448)), cv2.resize(diff_img, (448, 448))
+ 
+ 
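+ # Note on the subtraction above: unhealthy_c and healthy_c are uint8 arrays,
+ # so `unhealthy_c - healthy_c` wraps modulo 256 instead of saturating at 0.
+ # If a magnitude-only anomaly map is preferred, cv2.absdiff(unhealthy_c,
+ # healthy_c) would avoid the wrap; that swap is a suggestion, not what the
+ # app currently does.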
+ ### --------------> Image generation <----------------------------##############
+ 
+ scheduler = DDPMScheduler(num_train_timesteps=1000, schedule="linear_beta", beta_start=0.0015, beta_end=0.0195)
+ # scale_factor = 0.943597137928009
+ # inferer = LatentDiffusionInferer(scheduler, scale_factor=scale_factor)
+ 
+ 
+ def get_value(grad):
+     info_dict = {"Normal": 1, "Level_1": 2, "Level_2": 3, "Level_3": 4, "Worse": 5}
+     return info_dict[grad]
+ 
+ def generate_condition_bone_images(grad="Normal"):
+     grad_value = get_value(grad)
+     unet.eval()
+     scheduler.clip_sample = True
+     guidance_scale = 3
+     conditioning = torch.cat([torch.zeros(1).long(), grad_value * torch.ones(1).long()], dim=0).to(
+         device
+     )  # index 0 is the unconditional token; grad_value selects the severity class
+     class_embedding = embed(conditioning).unsqueeze(
+         1
+     )  # cross attention expects shape [batch size, sequence length, channels]
+     scheduler.set_timesteps(num_inference_steps=1000)
+     noise = torch.randn((1, 3, 28, 28))
+     noise = noise.to(device)
+ 
+     progress_bar = tqdm(scheduler.timesteps)
+     for t in progress_bar:
+         with autocast(enabled=True):
+             with torch.no_grad():
+                 noise_input = torch.cat([noise] * 2)
+                 model_output = unet(noise_input, timesteps=torch.Tensor((t,)).to(noise.device), context=class_embedding)
+                 noise_pred_uncond, noise_pred_text = model_output.chunk(2)
+                 noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+ 
+         noise, _ = scheduler.step(noise_pred, t, noise)
+     with torch.no_grad():
+         noise = autoencoderkl.decode(noise)
+     img = noise[0][0].to('cpu').numpy()
+     return cv2.resize(img, (448, 448))
+ 
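+ # Classifier-free guidance, as used in both sampling loops:
+ #   noise_pred = uncond + s * (cond - uncond), here with s = 3, i.e.
+ #   noise_pred = 3*cond - 2*uncond: the conditional prediction is extrapolated
+ # away from the unconditional one to strengthen the severity conditioning.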
+ ##--------------------------------> UI <-----------------------------##
+ 
+ my_theme = 'YenLai/Superhuman'
+ 
+ 
+ with gr.Blocks(theme=my_theme, title="Knee Predict") as demo:
+     gr.Markdown(""" # Knee Predict
+     ## Generative AI for Anomaly Detection and Analysis of Bone Diseases - Knee Osteoarthritis """)
+ 
+     with gr.Tab("Generate Image on conditions"):
+         gr.Markdown("#### Generate knee X-ray images on condition: select the level of osteoarthritis and click Generate, and the AI will generate a knee X-ray image of the chosen severity.")
+         with gr.Row():
+             output = gr.Image(height=450, width=450)
+             gr.Image(value="images/doc_bone.png", label="AI-Assisted Healthcare")
+             # output = gr.Textbox(label="Output Box")
+         gr.Markdown(" ### Select the level of disease severity you want to generate!")
+         severity_input = gr.Radio(["Normal", "Level_1", "Level_2", "Level_3", "Worse"], label="Knee Osteoarthritis Disease Severity Levels", scale=1)
+         with gr.Row():
+             generate_btn = gr.Button("Generate", size="lg", scale=1, interactive=True)
+             gr.Markdown()
+             gr.Markdown()
+ 
+ 
+     with gr.Tab("Anomaly Detection"):
+         gr.Markdown("### From a given unhealthy X-ray image, generate a healthy image that keeps the size and other important features")
+         with gr.Row():
+             image_input = gr.Image(height=450, width=450, label="Upload your knee x-ray here")
+             img_out_heal = gr.Image(height=450, width=450, label="Healthy image")
+         with gr.Row():
+             gr.Markdown()
+             generate_healthy_button = gr.Button("Generate", size="lg")
+             gr.Markdown()
+ 
+         gr.Markdown("""### Generate the anomaly by comparing the healthy and unhealthy knee X-rays
+         #### Click the Update button to refresh the anomaly map after changing the contrast and brightness.
+         """)
+         with gr.Row():
+             # image_input = gr.Image()
+             image_output = [gr.Image(height=450, width=450, label="Contrasted"), gr.Image(height=450, width=450, label="Anomaly map")]  # contrast and anomaly
+         with gr.Row():
+             gr.Markdown()
+             update_anomaly_button = gr.Button("Update", size="lg")
+             gr.Markdown()
+         inputs_values = [gr.Slider(0, 510, value=284, label="Brightness", info="Choose between 0 and 510"),
+                          gr.Slider(0, 254, value=234, label="Contrast", info="Choose between 0 and 254"),
+                          # gr.Slider(0, 50, value=7, label="Canny Threshold 1", info="Choose between 0 and 50"),
+                          # gr.Slider(0, 50, value=20, label="Canny Threshold 2", info="Choose between 0 and 50"),
+                          ]
+ 
+     # inputs_values.append(image_input)
+     gr.Examples(examples='examples', fn=get_healthy, cache_examples=True, inputs=image_input, outputs=img_out_heal)
+     generate_btn.click(fn=generate_condition_bone_images, inputs=severity_input, outputs=output, api_name="generate_bone")
+     generate_healthy_button.click(get_healthy, inputs=image_input, outputs=img_out_heal)
+     update_anomaly_button.click(update, inputs=inputs_values, outputs=image_output)
+ 
+ 
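+ # share=True asks Gradio for a temporary public link in addition to the local
+ # server, and server_name='0.0.0.0' binds all interfaces so the app is
+ # reachable when run inside a container.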
+ if __name__ == "__main__":
+     demo.launch(share=True, server_name='0.0.0.0')