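# Gradio demo for semantic segmentation with a fastai U-Net (resnet18 encoder).
# The model labels each pixel as grape (1), leaves (2), conductors (3) or wood (4);
# the commented-out blocks below are the training-time code, kept for reference.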
from huggingface_hub import from_pretrained_fastai
import gradio as gr
from fastai.vision.all import *
import numpy as np
import PIL
import torch
import torchvision.transforms as transforms
# from albumentations import (
#     Compose,
#     OneOf,
#     ElasticTransform,
#     GridDistortion,
#     OpticalDistortion,
#     HorizontalFlip,
#     Rotate,
#     Transpose,
#     CLAHE,
#     ShiftScaleRotate
# )
# def get_y_fn(x):
#     return Path(str(x).replace("Images", "Labels").replace("color", "gt").replace(".jpg", ".png"))
# def ParentSplitter(x):
#     return Path(x).parent.name == test_name
# class SegmentationAlbumentationsTransform(ItemTransform):
#     split_idx = 0
#     def __init__(self, aug):
#         self.aug = aug
#     def encodes(self, x):
#         img, mask = x
#         aug = self.aug(image=np.array(img), mask=np.array(mask))
#         return PILImage.create(aug["image"]), PILMask.create(aug["mask"])
# transforms = Compose([HorizontalFlip(p=0.5),
#                       Rotate(p=0.40, limit=10),
#                       GridDistortion()], p=1)
# transformPipeline = SegmentationAlbumentationsTransform(transforms)
# class TargetMaskConvertTransform(ItemTransform):
#     def __init__(self):
#         pass
#     def encodes(self, x):
#         img, mask = x
#         # Convert to array
#         mask = np.array(mask)
#         # Here we map each grayscale value in the mask to a class index
#         # grape:
#         mask[mask == 255] = 1
#         # leaves:
#         mask[mask == 150] = 2
#         # conductors:
#         mask[mask == 76] = 3
#         mask[mask == 74] = 3
#         # wood:
#         mask[mask == 29] = 4
#         mask[mask == 25] = 4
#         # Back to PILMask
#         mask = PILMask.create(mask)
#         return img, mask
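# Note: predict() below applies the inverse mapping (class index -> grayscale
# value), so returned masks use the same encoding as the original label images.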
# Load the trained learner from the Hugging Face Hub. (Rebuilding it locally
# with unet_learner(...) + learn.load('model') would need the training DataLoaders.)
repo_id = "paascorb/practica3_Segmentation"
learner = from_pretrained_fastai(repo_id)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
learner.model.to(device)
learner.model.eval()
def transform_image(image):
    # ImageNet normalization, matching the pretrained encoder's statistics.
    my_transforms = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406],
                             [0.229, 0.224, 0.225])])
    return my_transforms(image).unsqueeze(0).to(device)
def predict(img):
    img = PIL.Image.fromarray(img, "RGB")
    image = transforms.Resize((480, 640))(img)
    tensor = transform_image(image=image)
    with torch.no_grad():
        outputs = learner.model(tensor)
    outputs = torch.argmax(outputs, 1)
    # Map class indices back to the grayscale values of the label images.
    mask = np.array(outputs.cpu())
    mask[mask == 1] = 255  # grape
    mask[mask == 2] = 150  # leaves
    mask[mask == 3] = 76   # conductors
    mask[mask == 4] = 29   # wood
    mask = np.reshape(mask, (480, 640))
    return PIL.Image.fromarray(mask.astype('uint8'))
gr.Interface(fn=predict,
             inputs=gr.Image(),
             outputs=gr.Image(type="pil", label="Prediction"),
             examples=['color_155.jpg', 'color_154.jpg']).launch(share=False)
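# Quick local sanity check (assumes an example image such as color_155.jpg
# is present next to this script):
# out = predict(np.array(PIL.Image.open('color_155.jpg').convert('RGB')))
# out.save('predicted_mask.png')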