TharunSiva committed on
Commit
2865872
·
verified ·
1 Parent(s): 8d79a3b

util files

Files changed (3)
  1. app.py +302 -0
  2. eff_b3.py +24 -0
  3. efficientnetb3.h5 +3 -0
app.py ADDED
@@ -0,0 +1,302 @@
+ import gradio as gr
+ import numpy as np
+ import cv2
+ import matplotlib.pyplot as plt
+
+ import torch
+ from torchvision import transforms
+
+ import tensorflow as tf
+
+ from ResUNet import *
+ from eff import *
+ from vit import *
+ from eff_b3 import *
+
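+ # The star imports above are assumed (from how they are used below) to provide:
+ # load_model() from ResUNet; the PyTorch models efficientnet_model and vit_model,
+ # plus CFG (with CFG.DEVICE) and class_names, from eff/vit; and b3_model from eff_b3.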
+ # Define the image transformation for the PyTorch models
+ transform = transforms.Compose([
+     transforms.ToTensor(),
+     transforms.Resize((224, 224)),
+ ])
+
+ examples1 = [
+     ["examples/Classification/0.jpg", "EfficientNet-B3"],
+     ["examples/Classification/3.jpg", "EfficientNet-B3"],
+     ["examples/Classification/1.jpg", "EfficientNet-V2"],
+     ["examples/Classification/4.jpg", "EfficientNet-V2"],
+     ["examples/Classification/2.jpg", "ViT"],
+     ["examples/Classification/5.jpg", "ViT"],
+ ]
+
+ def classification(image, model="EfficientNet-B3"):
+     if model == "ViT":
+         # Add batch dimension and move to the configured device
+         input_batch = transform(image).unsqueeze(0).to(CFG.DEVICE)
+         with torch.no_grad():
+             output = vit_model(input_batch)
+         res = torch.softmax(output, dim=1)
+         return {class_names[i]: float(res[0][i]) for i in range(len(class_names))}
+
+     elif model == "EfficientNet-V2":
+         input_batch = transform(image).unsqueeze(0).to(CFG.DEVICE)
+         with torch.no_grad():
+             output = efficientnet_model(input_batch)
+         res = torch.softmax(output, dim=1)
+         return {class_names[i]: float(res[0][i]) for i in range(len(class_names))}
+
+     else:
+         # EfficientNet-B3 (Keras): its final Dense layer is already softmax
+         # (see eff_b3.py), so predict() returns probabilities directly
+         b3_img = cv2.resize(image, (256, 256))
+         b3_img = np.reshape(b3_img, (1, 256, 256, 3))
+         res = b3_model.predict(b3_img)
+         return {class_names[i]: float(res[0][i]) for i in range(len(class_names))}
+
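+ # For illustration (hypothetical direct call, outside Gradio): given an RGB
+ # numpy array img, classification(img, "ViT") returns a dict mapping each
+ # entry of class_names to its predicted probability, ready for gr.Label.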
+ classify = gr.Interface(
+     fn=classification,
+     inputs=[
+         gr.Image(label="Image"),
+         gr.Radio(["EfficientNet-B3", "EfficientNet-V2", "ViT"], value="EfficientNet-B3", label="Model")
+     ],
+     outputs=[
+         gr.Label(num_top_classes=3, label="Result"),
+     ],
+     examples=examples1,
+     cache_examples=True
+ )
+
+ # ---------------------------------------------------------
+
+ # Load the ResUNet segmentation model and its trained weights
+ seg_model = load_model()
+ seg_model.load_weights("ResUNet-segModel-weights.hdf5")
+
+
+ examples2 = [
+     f"examples/ResUNet/{i}.jpg" for i in range(5)
+ ]
+
+ def detection(img):
+     org_img = img
+
+     # Scale pixel values to [0, 1] and resize to the model's input size
+     img = img * 1. / 255.
+     img = cv2.resize(img, (256, 256))
+     img = np.array(img, dtype=np.float64)
+
+     # Standardise the image (zero mean, unit variance)
+     img -= img.mean()
+     img /= img.std()
+
+     # Place the image in a batch of shape (1, 256, 256, 3)
+     X = np.empty((1, 256, 256, 3))
+     X[0,] = img
+
+     # Predict the mask and round it to a binary mask
+     predict = seg_model.predict(X)
+     pred = np.array(predict[0]).squeeze().round()
+
+     # Overlay the predicted mask on the original image
+     img_ = cv2.resize(org_img, (256, 256))
+     img_ = cv2.cvtColor(img_, cv2.COLOR_BGR2RGB)
+     img_[pred == 1] = (0, 255, 150)
+
+     plt.figure()
+     plt.imshow(img_)
+     plt.axis("off")
+     image_path = "plot.png"
+     plt.savefig(image_path)
+     plt.close()
+
+     return gr.update(value=image_path, visible=True)
+
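+ # Note (assumption): ResUNet's final activation is taken to be sigmoid, so
+ # predict() yields per-pixel values in [0, 1] and .round() above effectively
+ # thresholds the mask at 0.5 before the overlay is drawn.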
+
+ detect = gr.Interface(
+     fn=detection,
+     inputs=[
+         gr.Image(label="Image")
+     ],
+     outputs=[
+         gr.Image(label="Output")
+     ],
+     examples=examples2,
+     cache_examples=True
+ )
+
+ # ##########################################
+
+ def data_viewer(label="Pituitary", count=10):
+     """Return paths of sample images for the chosen category."""
+     results = []
+
+     if label == "Segmentation":
+         # Segmentation samples come in pairs: original image and masked image
+         for i in range((count // 2) + 1):
+             results.append(f"Images/{label}/original_image_{i}.png")
+             results.append(f"Images/{label}/image_with_mask_{i}.png")
+     else:
+         for i in range(count):
+             results.append(f"Images/{label}/{i}.jpg")
+
+     return results
+
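+ # For example, data_viewer("Glioma", 4) returns ["Images/Glioma/0.jpg", ...,
+ # "Images/Glioma/3.jpg"], while data_viewer("Segmentation", 4) returns
+ # original_image_i / image_with_mask_i pairs for the two-column gallery.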
+ view_data = gr.Interface(
+     fn=data_viewer,
+     inputs=[
+         gr.Dropdown(
+             ["Glioma", "Meningioma", "Pituitary", "Segmentation"], label="Category"
+         ),
+         gr.Slider(0, 12, value=4, step=2, label="Count")
+     ],
+     outputs=[
+         gr.Gallery(columns=2),
+     ]
+ )
+
+ # ##########################
+
+ from huggingface_hub import InferenceClient
+
+ client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
+
+ def format_prompt(message, history):
+     # Build a Mixtral-Instruct style prompt from the chat history
+     prompt = "<s>"
+     for user_prompt, bot_response in history:
+         prompt += f"[INST] {user_prompt} [/INST]"
+         prompt += f" {bot_response}</s> "
+     prompt += f"[INST] {message} [/INST]"
+     return prompt
+
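+ # For illustration (hypothetical history): with history
+ # [("What is a glioma?", "A glioma is a tumour of the glial cells.")]
+ # and message "Is it treatable?", format_prompt returns
+ # "<s>[INST] What is a glioma? [/INST] A glioma is a tumour of the glial cells.</s> [INST] Is it treatable? [/INST]"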
+ def generate(
+     prompt, history, temperature=0.2, max_new_tokens=1024, top_p=0.95, repetition_penalty=1.0,
+ ):
+     # Keep temperature strictly positive
+     temperature = float(temperature)
+     if temperature < 1e-2:
+         temperature = 1e-2
+     top_p = float(top_p)
+
+     generate_kwargs = dict(
+         temperature=temperature,
+         max_new_tokens=max_new_tokens,
+         top_p=top_p,
+         repetition_penalty=repetition_penalty,
+         do_sample=True,
+         seed=42,
+     )
+
+     formatted_prompt = format_prompt(prompt, history)
+
+     # Stream tokens from the endpoint, yielding the partial response so the
+     # Gradio chat window updates as the answer is generated
+     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
+     output = ""
+
+     for response in stream:
+         output += response.token.text
+         yield output
+
+
+ mychatbot = gr.Chatbot(
+     avatar_images=["Chatbot/user.png", "Chatbot/botm.png"],
+     bubble_full_width=False,
+     show_label=False,
+     show_copy_button=True,
+     likeable=True,
+ )
+
+ chatbot = gr.ChatInterface(
+     fn=generate,
+     chatbot=mychatbot,
+     examples=[
+         "What is a brain tumor and what are its types?",
+         "What is a tumor's grade? What does this mean?",
+         "What are some of the treatment options for brain tumors?",
+         "What causes brain tumors?",
+         "If I have a brain tumor, can I pass it on to my children?"
+     ],
+ )
+
+
+ demo = gr.TabbedInterface([classify, detect, view_data, chatbot], ["Classification", "Detection", "Visualization", "ChatBot"])
+
+ demo.launch()
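+ # Runtime assets assumed by this app (all referenced above): the weight files
+ # ResUNet-segModel-weights.hdf5 and efficientnetb3.h5, sample images under
+ # examples/Classification and examples/ResUNet, gallery images under
+ # Images/<Category>, and the avatars Chatbot/user.png and Chatbot/botm.png.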
eff_b3.py ADDED
@@ -0,0 +1,24 @@
+ # B3 ------------
+
+ import tensorflow as tf
+ from tensorflow.keras.models import Sequential
+ from tensorflow.keras.layers import Dense, Dropout, BatchNormalization
+ from tensorflow.keras import regularizers
+
+ # Create model structure: EfficientNet-B3 backbone with a small classification head
+ cnn_img_size = (256, 256)
+ channels = 3
+ img_shape = (cnn_img_size[0], cnn_img_size[1], channels)
+
+ base_model = tf.keras.applications.efficientnet.EfficientNetB3(include_top=False, weights="imagenet", input_shape=img_shape, pooling='max')
+
+ b3_model = Sequential([
+     base_model,
+     BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001),
+     Dense(256, kernel_regularizer=regularizers.l2(0.016), activity_regularizer=regularizers.l1(0.006),
+           bias_regularizer=regularizers.l1(0.006), activation='relu'),
+     Dropout(rate=0.45, seed=123),
+     Dense(4, activation='softmax')
+ ])
+
+ b3_model.load_weights("efficientnetb3.h5")
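+
+ # A minimal usage sketch (mirrors app.py; names hypothetical): given an RGB
+ # numpy array rgb_img, resize to 256x256 and add a batch dimension; because
+ # the final Dense layer is softmax, predict() already returns probabilities.
+ #   batch = np.reshape(cv2.resize(rgb_img, (256, 256)), (1, 256, 256, 3))
+ #   probs = b3_model.predict(batch)[0]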
efficientnetb3.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:29cd365f53a18ebfab11ea708b64fbe168bfc59b2e525b0394d2e5f282d157a5
+ size 134950368