from huggingface_hub import from_pretrained_fastai
import gradio as gr
from fastai.vision.all import *
import PIL
import torchvision.transforms as transforms
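# Gradio demo for grapevine image segmentation: a fastai U-Net (resnet18 backbone)
# predicts a per-pixel mask with the classes grape, leaves, conductors and wood,
# and the app returns that mask re-encoded with the original grayscale values.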
# from albumentations import (
# Compose,
# OneOf,
# ElasticTransform,
# GridDistortion,
# OpticalDistortion,
# HorizontalFlip,
# Rotate,
# Transpose,
# CLAHE,
# ShiftScaleRotate
# )
# def get_y_fn(x):
#     return Path(str(x).replace("Images", "Labels").replace("color", "gt").replace(".jpg", ".png"))
# def ParentSplitter(x):
#     return Path(x).parent.name == test_name
# class SegmentationAlbumentationsTransform(ItemTransform):
#     split_idx = 0
#     def __init__(self, aug):
#         self.aug = aug
#     def encodes(self, x):
#         img, mask = x
#         aug = self.aug(image=np.array(img), mask=np.array(mask))
#         return PILImage.create(aug["image"]), PILMask.create(aug["mask"])
# transforms = Compose([HorizontalFlip(p=0.5),
#                       Rotate(p=0.40, limit=10),
#                       GridDistortion()], p=1)
# transformPipeline = SegmentationAlbumentationsTransform(transforms)
# class TargetMaskConvertTransform(ItemTransform):
#     def __init__(self):
#         pass
#     def encodes(self, x):
#         img, mask = x
#         # Convert to array
#         mask = np.array(mask)
#         # Here we define each class in the mask
#         # grape:
#         mask[mask == 255] = 1
#         # leaves:
#         mask[mask == 150] = 2
#         # conductors:
#         mask[mask == 76] = 3
#         mask[mask == 74] = 3
#         # wood:
#         mask[mask == 29] = 4
#         mask[mask == 25] = 4
#         # Back to PILMask
#         mask = PILMask.create(mask)
#         return img, mask
# repo_id = "paascorb/practica3_Segmentation"
# learner = from_pretrained_fastai(repo_id)
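# The commented-out block above is the training-time setup kept for reference:
# albumentations-based augmentation and the transform that maps the grayscale
# values of the ground-truth masks (255, 150, 76/74, 29/25) to class indices 1-4.
# from_pretrained_fastai would instead pull the exported learner from the Hub.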
# unet_learner expects a DataLoaders as its first argument; None is kept from the
# original script. learn.load('model') restores the weights from models/model.pth.
learn = unet_learner(None, resnet18, cbs=None, metrics=[DiceMulti()]).to_fp16()
learn.load('model')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def transform_image(image):
    # Normalize with ImageNet statistics and add a batch dimension
    my_transforms = transforms.Compose([transforms.ToTensor(),
                                        transforms.Normalize(
                                            [0.485, 0.456, 0.406],
                                            [0.229, 0.224, 0.225])])
    return my_transforms(image).unsqueeze(0).to(device)
def predict(img):
    # Gradio hands the input over as a numpy array
    img = PIL.Image.fromarray(img, "RGB")
    image = transforms.Resize((480, 640))(img)
    tensor = transform_image(image=image)
    learn.model.to(device)
    with torch.no_grad():
        outputs = learn.model(tensor)
        outputs = torch.argmax(outputs, 1)
    # Map predicted class indices back to the grayscale values of the ground-truth masks
    mask = np.array(outputs.cpu())
    mask[mask == 1] = 255
    mask[mask == 2] = 150
    mask[mask == 3] = 76
    mask[mask == 4] = 29
    mask = np.reshape(mask, (480, 640))
    return PIL.Image.fromarray(mask.astype('uint8'))
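# Note: the example images referenced below ('color_155.jpg', 'color_154.jpg') are
# expected to live next to app.py for the Gradio examples gallery to load them.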
gr.Interface(fn=predict,
             inputs=gr.inputs.Image(shape=(128, 128)),
             outputs=[gr.outputs.Image(type="pil", label="Prediction")],
             examples=['color_155.jpg', 'color_154.jpg']).launch(share=False)