import gradio as gr
import tensorflow as tf
import numpy as np

from PIL import Image
from tensorflow import keras
from tensorflow.keras import backend as K


# Model input shape and optional down-sampling stride applied to incoming images
resized_shape = (768, 768, 3)
IMG_SCALING = (1, 1)


# # Download the model file
# def download_model():
#     url = "https://drive.google.com/uc?id=1FhICkeGn6GcNXWTDn1s83ctC-6Mo1UXk"
#     output = "seg_unet_model.h5"
#     gdown.download(url, output, quiet=False)
#     return output

model_file = "./seg_unet_model.h5"

# Custom objects required to deserialize the model.
# ALPHA and CE_RATIO are not defined in the original file; 0.5 / 0.5 are assumed
# defaults (equal class weighting, even CE/Dice mix) -- adjust to match training.
ALPHA = 0.5      # weight of the positive-class term in the weighted cross-entropy
CE_RATIO = 0.5   # weighted-CE contribution vs. Dice contribution

def Combo_loss(y_true, y_pred, eps=1e-9, smooth=1):
    targets = tf.dtypes.cast(K.flatten(y_true), tf.float32)
    inputs = tf.dtypes.cast(K.flatten(y_pred), tf.float32)
    intersection = K.sum(targets * inputs)
    dice = (2. * intersection + smooth) / (K.sum(targets) + K.sum(inputs) + smooth)
    inputs = K.clip(inputs, eps, 1.0 - eps)
    out = - ((ALPHA * targets * K.log(inputs)) + ((1 - ALPHA) * (1.0 - targets) * K.log(1.0 - inputs)))
    weighted_ce = K.mean(out, axis=-1)
    combo = (CE_RATIO * weighted_ce) - ((1 - CE_RATIO) * dice)
    return combo

# Dice coefficient metric used during training; required as a custom object when loading the model
def dice_coef(y_true, y_pred, smooth=1):
    y_pred = tf.dtypes.cast(y_pred, tf.int32)
    y_true = tf.dtypes.cast(y_true, tf.int32)
    intersection = K.sum(y_true * y_pred, axis=[1,2,3])                     
    union = K.sum(y_true, axis=[1,2,3]) + K.sum(y_pred, axis=[1,2,3])           
    return K.mean((2 * intersection + smooth) / (union + smooth), axis=0)
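
# Optional sanity check (a sketch, not part of the app): exercise the custom loss and
# metric on dummy tensors to confirm they evaluate before the model is loaded.
# Uncomment to run locally.
# _y_true = tf.ones((1, 768, 768, 1))
# _y_pred = tf.fill((1, 768, 768, 1), 0.9)
# print("Combo_loss:", float(Combo_loss(_y_true, _y_pred)))
# print("dice_coef:", float(dice_coef(_y_true, _y_pred)))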

# Load the trained U-Net; the custom loss/metric must be passed so Keras can deserialize it
seg_model = keras.models.load_model(model_file, custom_objects={'Combo_loss': Combo_loss, 'dice_coef': dice_coef})

# Input/output components are created inline in gr.Interface below; the legacy
# gr.inputs / gr.outputs API has been removed from current Gradio releases.
# outputs = gr.HTML()  # uncomment for a single text/HTML output instead of an image


def gen_pred(img, model=seg_model):
    # Gradio supplies the image as an RGB numpy array, so no BGR->RGB conversion is needed
    img = img[::IMG_SCALING[0], ::IMG_SCALING[1]]
    img = img / 255.0
    img = tf.expand_dims(img, axis=0)
    pred = model.predict(img)
    pred = np.squeeze(pred)  # (H, W) probability mask
    # Scale probabilities to 0-255 and return a grayscale PIL image for display
    pil_img = Image.fromarray((pred * 255).astype(np.uint8))
    return pil_img
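
# Local smoke test (a sketch using one of the bundled example images): run the
# prediction pipeline without starting the Gradio UI. Uncomment to try it.
# sample = np.array(Image.open("003e2c95d.jpg").convert("RGB"))
# gen_pred(sample).save("003e2c95d_mask.png")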

title = "Semantic Segmentation"
description = "Upload an image and get the predicted segmentation mask."
# css_code = 'body{background-image:url("file=wave.mp4");}'

gr.Interface(fn=gen_pred,
             inputs=gr.Image(type='numpy', label="Upload an image"),
             outputs=gr.Image(type='pil', label="Predicted mask"),
             title=title,
             description=description,
             examples=[["003e2c95d.jpg"], ["003b50a15.jpg"], ["003b48a9e.jpg"], ["0038cbe45.jpg"], ["00371aa92.jpg"]]).queue().launch()