ClassCat committed
Commit: 38fec76
Parent: a2bc7ea

update app.py

Files changed (1): app.py (+31, -3)
app.py CHANGED
@@ -63,6 +63,18 @@ def load_sample3():
 def load_sample4():
     return load_sample(4)
 
+def load_sample5():
+    return load_sample(5)
+
+def load_sample6():
+    return load_sample(6)
+
+def load_sample7():
+    return load_sample(7)
+
+def load_sample8():
+    return load_sample(8)
+
 import torchvision
 
 def load_sample(index):
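Note: the twelve added lines are four identical one-line wrappers (load_sample5 through load_sample8) that differ only in the index they forward to load_sample. A hypothetical alternative, not part of this commit, would be to build the loaders programmatically with functools.partial; a minimal self-contained sketch (load_sample is stubbed here):

```python
from functools import partial

def load_sample(index):
    # Stub standing in for the real load_sample defined later in app.py.
    return index

# Build one loader per sample index instead of hand-writing load_sample1..8.
sample_loaders = {i: partial(load_sample, i) for i in range(1, 9)}

assert sample_loaders[5]() == load_sample(5)
```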
@@ -90,7 +102,6 @@ def load_sample(index):
 
 
 def predict(sample_index):
-    print(sample_index)
     sample = torch.load(f"samples/val{sample_index-1}.pt")
     model.eval()
     with torch.no_grad():
@@ -111,11 +122,12 @@ def predict(sample_index):
 
     return [pil_images_output[0], pil_images_output[1], pil_images_output[2]]
 
-with gr.Blocks(css=".gradio-container {background:lightyellow;color:red;}", title="テスト"
+with gr.Blocks( title="Brain tumor 3D segmentation with MONAIMNIST - ClassCat"
+    css=".gradio-container {background:azure;}",
 ) as demo:
     sample_index = gr.State([])
 
-    gr.HTML('<div style="font-size:12pt; text-align:center; color:yellow;">MNIST 分類器</div>')
+    gr.HTML("""<div style="font-family:'Times New Roman', 'Serif'; font-size:16pt; font-weight:bold; text-align:center; color:royalblue;">Brain tumor 3D segmentation with MONAI</div>""")
 
     with gr.Row():
         input_image0 = gr.Image(label="image channel 0", type="pil", shape=(240, 240))
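Note: the rewritten gr.Blocks call sets the browser tab title and swaps the test CSS for an azure background, and the page header becomes an English HTML banner. As rendered above, the added line appears to lack a comma between the title and css keyword arguments, which may only be an artifact of the diff view; a minimal sketch of the same call with the comma in place (gr.Blocks accepts both title and css keyword arguments, and the title string is kept exactly as committed):

```python
import gradio as gr

with gr.Blocks(
    title="Brain tumor 3D segmentation with MONAIMNIST - ClassCat",  # string as committed
    css=".gradio-container {background:azure;}",
) as demo:
    gr.HTML('<div style="text-align:center; color:royalblue;">Brain tumor 3D segmentation with MONAI</div>')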
@@ -135,6 +147,10 @@ with gr.Blocks(css=".gradio-container {background:lightyellow;color:red;}", titl
         example2_btn = gr.Button("Example 2")
         example3_btn = gr.Button("Example 3")
         example4_btn = gr.Button("Example 4")
+        example5_btn = gr.Button("Example 5")
+        example6_btn = gr.Button("Example 6")
+        example7_btn = gr.Button("Example 7")
+        example8_btn = gr.Button("Example 8")
 
     example1_btn.click(fn=load_sample1, inputs=None,
         outputs=[sample_index, input_image0, input_image1, input_image2, input_image3,
@@ -148,6 +164,18 @@ with gr.Blocks(css=".gradio-container {background:lightyellow;color:red;}", titl
     example4_btn.click(fn=load_sample4, inputs=None,
         outputs=[sample_index, input_image0, input_image1, input_image2, input_image3,
         label_image0, label_image1, label_image2])
+    example5_btn.click(fn=load_sample5, inputs=None,
+        outputs=[sample_index, input_image0, input_image1, input_image2, input_image3,
+        label_image0, label_image1, label_image2])
+    example6_btn.click(fn=load_sample6, inputs=None,
+        outputs=[sample_index, input_image0, input_image1, input_image2, input_image3,
+        label_image0, label_image1, label_image2])
+    example7_btn.click(fn=load_sample7, inputs=None,
+        outputs=[sample_index, input_image0, input_image1, input_image2, input_image3,
+        label_image0, label_image1, label_image2])
+    example8_btn.click(fn=load_sample8, inputs=None,
+        outputs=[sample_index, input_image0, input_image1, input_image2, input_image3,
+        label_image0, label_image1, label_image2])
 
     with gr.Row():
         output_image0 = gr.Image(label="output channel 0", type="pil")
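Note: the four new click bindings repeat the same outputs list verbatim. A hypothetical refactor, not what this commit does, would create the example buttons and their handlers in a loop over a shared outputs list; a self-contained sketch in which load_sample is stubbed and the image labels are placeholders:

```python
import gradio as gr
from functools import partial

def load_sample(index):
    # Stub for the real load_sample; it returns the state plus seven images.
    return [index] + [None] * 7

with gr.Blocks() as demo:
    sample_index = gr.State([])
    with gr.Row():
        input_images = [gr.Image(label=f"image channel {i}", type="pil") for i in range(4)]
    with gr.Row():
        label_images = [gr.Image(label=f"label channel {i}", type="pil") for i in range(3)]

    shared_outputs = [sample_index, *input_images, *label_images]
    with gr.Row():
        for i in range(1, 9):
            # One button and one click handler per example, wired in a loop.
            btn = gr.Button(f"Example {i}")
            btn.click(fn=partial(load_sample, i), inputs=None, outputs=shared_outputs)
```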