Update app.py
app.py
CHANGED
@@ -343,141 +343,6 @@ def decoder(inputs, input_tensor):
 
 
 
-
-def autoencoder(n_classes=2, height=size, width=size, channels=3):
-    inputs = Input((height, width, channels))
-    #Contraction path
-    conv_1 = Conv2D(32, (3, 3), activation='relu', padding='same')(inputs)
-    conv_1 = BatchNormalization()(conv_1)
-    conv_1 = Dropout(0.2)(conv_1)
-    conv_1 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv_1)
-    conv_1 = BatchNormalization()(conv_1)
-    pool_1 = MaxPooling2D((2, 2))(conv_1)
-
-    conv_2 = Conv2D(64, (3, 3), activation='relu', padding='same')(pool_1)
-    conv_2 = BatchNormalization()(conv_2)
-    conv_2 = Dropout(0.2)(conv_2)
-    conv_2 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv_2)
-    conv_2 = BatchNormalization()(conv_2)
-    pool_2 = MaxPooling2D((2, 2))(conv_2)
-
-    conv_3 = Conv2D(128, (3, 3), activation='relu', padding='same')(pool_2)
-    conv_3 = BatchNormalization()(conv_3)
-    conv_3 = Dropout(0.2)(conv_3)
-    conv_3 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv_3)
-    conv_3 = BatchNormalization()(conv_3)
-    pool_3 = MaxPooling2D((2, 2))(conv_3)
-
-    conv_4 = Conv2D(256, (3, 3), activation='relu', padding='same')(pool_3)
-    conv_4 = BatchNormalization()(conv_4)
-    conv_4 = Dropout(0.2)(conv_4)
-    conv_4 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv_4)
-    conv_4 = BatchNormalization()(conv_4)
-    pool_4 = MaxPooling2D(pool_size=(2, 2))(conv_4)
-
-
-    #conv_5 = Conv2D(512, (3, 3), activation='relu', padding='same')(pool_4)
-    #conv_5 = BatchNormalization()(conv_5)
-    conv_5 = Dropout(0.1)(pool_4)
-
-    #Expansive path
-
-    u6 = UpSampling2D((2, 2))(conv_5)
-    #u6 = concatenate([att_5, u6])
-    conv_6 = Conv2D(256, (3, 3), activation='relu', padding='same')(u6)
-    conv_6 = BatchNormalization()(conv_6)
-    conv_6 = Dropout(0.2)(conv_6)
-    #conv_6 = Conv2D(512, (3, 3), activation='relu', padding='same')(conv_6)
-    #conv_6 = Dropout(0.2)(conv_6)
-    #conv_6 = Conv2D(512, (3, 3), activation='relu', padding='same')(conv_6)
-    #conv_6 = BatchNormalization()(conv_6)
-    #conv_6 = Dropout(0.2)(conv_6)
-    conv_6 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv_6)
-    conv_6 = BatchNormalization()(conv_6)
-
-    """
-    u66 = UpSampling2D((2, 2))(conv_6)
-    conv_66 = Conv2D(128, (3, 3), activation='relu', padding='same')(u66)
-    conv_66 = BatchNormalization()(conv_66)
-    conv_66 = Conv2D(128, (3, 3), activation='relu', padding='same')(u66)
-    conv_66 = Conv2D(128, (3, 3), activation='relu', padding='same')(u66)
-    conv_66 = BatchNormalization()(conv_66)
-    conv_66 = Dropout(0.2)(conv_66)
-    conv_66 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv_66)
-    """
-
-    u7 = UpSampling2D((2, 2))(conv_6)
-    conv_7 = Conv2D(128, (3, 3), activation='relu', padding='same')(u7)
-    conv_7 = BatchNormalization()(conv_7)
-    conv_7 = Dropout(0.2)(conv_7)
-    #conv_7 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv_7)
-    #conv_7 = Dropout(0.1)(conv_7)
-    #conv_7 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv_7)
-    #conv_7 = BatchNormalization()(conv_7)
-    #conv_7 = Dropout(0.1)(conv_7)
-    conv_7 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv_7)
-    conv_7 = BatchNormalization()(conv_7)
-
-    u8 = UpSampling2D((2, 2))(conv_7)
-    conv_8 = Conv2D(64, (3, 3), activation='relu', padding='same')(u8)
-    conv_8 = BatchNormalization()(conv_8)
-    conv_8 = Dropout(0.2)(conv_8)
-    #conv_8 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv_8)
-    #conv_8 = Dropout(0.2)(conv_8)
-    #conv_8 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv_8)
-    #conv_8 = BatchNormalization()(conv_8)
-    #conv_8 = Dropout(0.2)(conv_8)
-    conv_8 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv_8)
-    conv_8 = BatchNormalization()(conv_8)
-
-    u9 = UpSampling2D((2, 2))(conv_8)
-    conv_9 = Conv2D(32, (3, 3), activation='relu', padding='same')(u9)
-    conv_9 = BatchNormalization()(conv_9)
-    conv_9 = Dropout(0.2)(conv_9)
-    #conv_9 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv_9)
-    #conv_9 = Dropout(0.1)(conv_9)
-    #conv_9 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv_9)
-    #conv_9 = BatchNormalization()(conv_9)
-    #conv_9 = Dropout(0.1)(conv_9)
-    conv_9 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv_9)
-    conv_9 = BatchNormalization()(conv_9)
-
-    outputs = Conv2D(n_classes, (1, 1), activation='softmax')(conv_9)
-
-    model = Model(inputs=[inputs], outputs=[outputs])
-    return model
-
-    """
-    gating_16 = gating_signal(stage_5, 8*FILTER_NUM, True)
-    att_16 = attention_block(stage_4, stage_5, 8*FILTER_NUM)
-    up_stage_1 = upsample(stage_5,stage_4)
-    up_16 = layers.concatenate([up_stage_1, att_16], axis=axis)
-
-
-    gating_32 = gating_signal(up_repeat_elem1, 4*FILTER_NUM, True)
-    att_32 = attention_block(stage_3, gating_32, 4*FILTER_NUM)
-    up_stage_2 = upsample(up_repeat_elem1,stage_3)
-    up_32 = layers.concatenate([up_stage_2, att_32], axis=axis)
-
-
-    gating_64 = gating_signal(up_repeat_elem2, 2*FILTER_NUM, True)
-    att_64 = attention_block(stage_2, gating_64, 2*FILTER_NUM)
-    up_stage_3 = upsample(up_repeat_elem2,stage_2)
-    up_64 = layers.concatenate([up_stage_3, att_64], axis=axis)
-
-
-    gating_128 = gating_signal(up_repeat_elem3, FILTER_NUM, True)
-    att_128 = attention_block(stage_1, gating_128, FILTER_NUM)
-    up_stage_4 = upsample(up_repeat_elem3,stage_1)
-    up_128 = layers.concatenate([up_stage_4, att_128], axis=axis)
-
-
-    gating_256 = gating_signal(up_repeat_elem4, FILTER_NUM, True)
-    att_256 = attention_block(conv_1, gating_256, FILTER_NUM)
-    up_stage_5 = upsample(up_repeat_elem4,conv_1)
-    up_256 = layers.concatenate([up_stage_5, att_256], axis=axis)
-    """
-
 def unet_2( n_classes=2, height=size, width=size, channels=3, metrics = ['accuracy']):
     inputs = Input((height, width, channels))
 
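
This commit deletes an unused plain autoencoder (no skip connections) together with two large commented-out blocks, leaving unet_2 as the model the Space actually builds. For reference, the removed architecture condenses to the sketch below. This is a minimal, self-contained reconstruction, not app.py's literal code: the loop form, the imports, and the size = 128 constant are assumptions (app.py defines its own size elsewhere).

    # Condensed reconstruction of the removed autoencoder (assumptions noted above).
    from tensorflow.keras.layers import (Input, Conv2D, BatchNormalization,
                                         Dropout, MaxPooling2D, UpSampling2D)
    from tensorflow.keras.models import Model

    size = 128  # assumed; app.py defines its own module-level constant

    def autoencoder(n_classes=2, height=size, width=size, channels=3):
        inputs = Input((height, width, channels))
        x = inputs
        stage_filters = [32, 64, 128, 256]
        # Contraction path: two 3x3 convs with BN (dropout in between), then 2x2 pool.
        for f in stage_filters:
            x = Conv2D(f, (3, 3), activation='relu', padding='same')(x)
            x = BatchNormalization()(x)
            x = Dropout(0.2)(x)
            x = Conv2D(f, (3, 3), activation='relu', padding='same')(x)
            x = BatchNormalization()(x)
            x = MaxPooling2D((2, 2))(x)
        x = Dropout(0.1)(x)  # bottleneck
        # Expansive path: 2x2 upsample, then two 3x3 convs with BN and dropout.
        for f in reversed(stage_filters):
            x = UpSampling2D((2, 2))(x)
            x = Conv2D(f, (3, 3), activation='relu', padding='same')(x)
            x = BatchNormalization()(x)
            x = Dropout(0.2)(x)
            x = Conv2D(f, (3, 3), activation='relu', padding='same')(x)
            x = BatchNormalization()(x)
        # Per-pixel class probabilities at the input resolution.
        outputs = Conv2D(n_classes, (1, 1), activation='softmax')(x)
        return Model(inputs=inputs, outputs=outputs)

Because the expansive path consumes only the bottleneck and never concatenates encoder features, this is an ordinary encoder-decoder rather than a U-Net, which plausibly is why it was dropped in favor of unet_2.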
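The deleted triple-quoted block at the end references gating_signal, attention_block, and upsample helpers plus up_repeat_elem* tensors, i.e. an Attention U-Net style decoder (Oktay et al., 2018) that was never wired into the model. Below is a minimal sketch of what such an additive attention gate conventionally looks like in Keras; the helper signatures mirror the deleted call sites, but the bodies are illustrative assumptions, not app.py's actual implementations.

    # Illustrative Attention U-Net gate (assumed helpers, not from app.py).
    import tensorflow as tf
    from tensorflow.keras import layers

    def gating_signal(x, out_filters, batch_norm=True):
        # 1x1 conv projecting coarse decoder features to out_filters channels.
        g = layers.Conv2D(out_filters, (1, 1), padding='same')(x)
        if batch_norm:
            g = layers.BatchNormalization()(g)
        return layers.Activation('relu')(g)

    def attention_block(skip, gating, inter_filters):
        # Additive attention: project the skip connection and the gating signal
        # (assumed to sit at half the skip's spatial resolution) to a common
        # space, add, and squash to a single-channel coefficient map in [0, 1].
        theta_x = layers.Conv2D(inter_filters, (2, 2), strides=(2, 2))(skip)
        phi_g = layers.Conv2D(inter_filters, (1, 1))(gating)
        act = layers.Activation('relu')(layers.add([theta_x, phi_g]))
        alpha = layers.Activation('sigmoid')(layers.Conv2D(1, (1, 1))(act))
        alpha = layers.UpSampling2D((2, 2))(alpha)  # back to skip resolution
        # Broadcast the 1-channel map across the skip's channels; the
        # up_repeat_elem names in the deleted block hint at the same trick.
        alpha = layers.Lambda(lambda t: tf.repeat(t, skip.shape[-1], axis=-1))(alpha)
        return layers.multiply([skip, alpha])

With helpers like these, the deleted line att_32 = attention_block(stage_3, gating_32, 4*FILTER_NUM) would reweight the stage-3 skip features with a coefficient map computed from the coarser gating signal before the concatenate.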