Spaces:
Runtime error
update app.py
app.py CHANGED
@@ -80,14 +80,18 @@ import torchvision
 def load_sample(index):
     #sample_index = index
 
-    sample = torch.load(f"samples/val{index-1}.pt")
-    imgs = []
+    image_filenames = []
     for i in range(4):
-        imgs.append(sample["image"][i, :, :, 70])
+        image_filenames[i] = f"thumbnails/image{index-1}_{j}.png"
 
-    pil_images = []
-    for i in range(4):
-        pil_images.append(torchvision.transforms.functional.to_pil_image(imgs[i]))
+    sample = torch.load(f"samples/val{index-1}.pt")
+    #imgs = []
+    #for i in range(4):
+    #    imgs.append(sample["image"][i, :, :, 70])
+
+    #pil_images = []
+    #for i in range(4):
+    #    pil_images.append(torchvision.transforms.functional.to_pil_image(imgs[i]))
 
     imgs_label = []
     for i in range(3):
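As committed, the new filename loop cannot run: evaluating the f-string raises NameError because `j` is undefined in this scope, and even with that fixed, `image_filenames[i] = ...` index-assigns into an empty list and raises IndexError. That is consistent with the "Runtime error" badge on this Space. A minimal corrected sketch, assuming the loop variable `i` was the intended filename suffix:

    image_filenames = []
    for i in range(4):
        # append instead of index-assigning into an empty list;
        # use the loop variable i (j is undefined in the committed code)
        image_filenames.append(f"thumbnails/image{index-1}_{i}.png")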
@@ -97,9 +101,12 @@ def load_sample(index):
     for i in range(3):
         pil_images_label.append(torchvision.transforms.functional.to_pil_image(imgs_label[i]))
 
-    return [index, pil_images[0], pil_images[1], pil_images[2], pil_images[3],
+    return [index, image_filenames[0], image_filenames[1], image_filenames[2], image_filenames[3],
         pil_images_label[0], pil_images_label[1], pil_images_label[2]]
 
+    #return [index, pil_images[0], pil_images[1], pil_images[2], pil_images[3],
+    #    pil_images_label[0], pil_images_label[1], pil_images_label[2]]
+
 
 def predict(sample_index):
     sample = torch.load(f"samples/val{sample_index-1}.pt")
@@ -132,10 +139,16 @@ with gr.Blocks(title="Brain tumor 3D segmentation with MONAIMNIST - ClassCat",
     gr.HTML("""<h4 style="color:navy;">1. Select an example, which includes input images and label images, by clicking "Example x" button.</h4>""")
 
     with gr.Row():
+        input_image0 = gr.Image(label="image channel 0", type="filepath", shape=(240, 240))
+        input_image1 = gr.Image(label="image channel 1", type="filepath", shape=(240, 240))
+        input_image2 = gr.Image(label="image channel 2", type="filepath", shape=(240, 240))
+        input_image3 = gr.Image(label="image channel 3", type="filepath", shape=(240, 240))
+        """
         input_image0 = gr.Image(label="image channel 0", type="pil", shape=(240, 240))
         input_image1 = gr.Image(label="image channel 1", type="pil", shape=(240, 240))
         input_image2 = gr.Image(label="image channel 2", type="pil", shape=(240, 240))
         input_image3 = gr.Image(label="image channel 3", type="pil", shape=(240, 240))
+        """
 
     with gr.Row():
         label_image0 = gr.Image(label="label channel 0", type="pil")
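Switching the four input_image components from type="pil" to type="filepath" matches the new return value of load_sample: the function now hands back paths to pre-rendered thumbnails instead of building PIL images from the tensor. Two side notes, hedged: in Gradio, the type argument chiefly controls what an input component passes into an event handler, while an output component generally accepts a NumPy array, a PIL image, or a path string either way; and the old type="pil" lines are not deleted but wrapped in a bare triple-quoted string, which Python evaluates as an unused expression, a quick way to disable a block. A minimal self-contained sketch of the output-side pattern (file names are illustrative):

    import gradio as gr

    def show_thumbnail(index):
        # return a path string; the Image component loads the file itself
        return f"thumbnails/image{int(index)}_0.png"

    with gr.Blocks() as demo:
        idx = gr.Number(value=1, precision=0, label="sample index")
        img = gr.Image(label="image channel 0", type="filepath")
        idx.submit(show_thumbnail, inputs=idx, outputs=img)

    demo.launch()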