from huggingface_hub import from_pretrained_fastai
import gradio as gr
from fastai.vision.all import *
from PIL import Image

# Map an image path to the path of its ground-truth mask.
def get_y_fn(x):
    return Path(str(x).replace("Images", "Labels").replace("color", "gt").replace(".jpg", ".png"))
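# For example (hypothetical paths, following the replacements above):
#   Images/color_158.jpg -> Labels/gt_158.png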
from albumentations import (
    Compose,
    OneOf,
    ElasticTransform,
    GridDistortion,
    OpticalDistortion,
    HorizontalFlip,
    Rotate,
    Transpose,
    CLAHE,
    ShiftScaleRotate,
    RandomBrightnessContrast,
    GaussNoise
)
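# Note: the transform classes below are most likely defined here so that the
# pickled fastai Learner, which references them by name, can be deserialized;
# they are not applied when serving predictions.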
# Applies an albumentations pipeline to an (image, mask) pair.
# split_idx = 0 restricts the augmentation to the training split.
class SegmentationAlbumentationsTransform(ItemTransform):
    split_idx = 0
    def __init__(self, aug):
        self.aug = aug
    def encodes(self, x):
        img, mask = x
        aug = self.aug(image=np.array(img), mask=np.array(mask))
        return PILImage.create(aug["image"]), PILMask.create(aug["mask"])
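# A minimal sketch (commented out, not used at inference) of the kind of
# augmentation pipeline this transform could wrap:
#
#   aug_pipeline = Compose([HorizontalFlip(p=0.5),
#                           ShiftScaleRotate(p=0.5),
#                           RandomBrightnessContrast(p=0.3)])
#   train_tfm = SegmentationAlbumentationsTransform(aug_pipeline)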
# Remap the raw grayscale values of the ground-truth masks to consecutive
# class indices (0 = background).
class TargetMaskConvertTransform(ItemTransform):
    def __init__(self):
        pass
    def encodes(self, x):
        img, mask = x
        # Convert to array
        mask = np.array(mask)
        # Grapes
        mask[mask == 255] = 1
        # Leaves
        mask[mask == 150] = 2
        # Post
        mask[mask == 76] = 3
        mask[mask == 74] = 3
        # Wood
        mask[mask == 29] = 4
        mask[mask == 25] = 4
        # Back to PILMask
        mask = PILMask.create(mask)
        return img, mask
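# Resulting class indices: 0 = background, 1 = grapes, 2 = leaves, 3 = post,
# 4 = wood. A sketch (commented out) of how both transforms could feed a
# training DataBlock; `codes` and `path` are hypothetical names:
#
#   codes = ["background", "grapes", "leaves", "post", "wood"]
#   dblock = DataBlock(blocks=(ImageBlock, MaskBlock(codes)),
#                      get_items=get_image_files,
#                      get_y=get_y_fn,
#                      item_tfms=[TargetMaskConvertTransform(),
#                                 SegmentationAlbumentationsTransform(aug_pipeline)])
#   dls = dblock.dataloaders(path)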
# Load the fastai Learner from the Hugging Face Hub and extract the
# underlying PyTorch model on CPU for tracing.
repo_id = "ancebuc/grapes-segmentation"
learner = from_pretrained_fastai(repo_id)
aux = learner.model.cpu()
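# The Learner itself is not used for prediction below; instead the raw model
# is traced with TorchScript so inference does not go through learner.predict.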
import torchvision.transforms as transforms

# Trace the model with an example input so inference runs as TorchScript.
img = PILImage.create('color_158.jpg')
transformer = transforms.Compose([transforms.Resize((480, 640)),
                                  transforms.ToTensor(),
                                  transforms.Normalize([0.485, 0.456, 0.406],
                                                       [0.229, 0.224, 0.225])])
img = transformer(img).unsqueeze(0).cpu()
model = torch.jit.trace(aux, (img,))

# Move the traced model to GPU if one is available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.eval()
model.to(device)
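# If re-tracing on every startup were undesirable, the traced model could be
# persisted with TorchScript (a sketch, with a hypothetical filename):
#
#   torch.jit.save(model, "grapes_segmentation_traced.pt")
#   model = torch.jit.load("grapes_segmentation_traced.pt", map_location=device)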
# Convert an already-resized PIL image into a normalized batch tensor.
def transform_image(image):
    my_transforms = transforms.Compose([transforms.ToTensor(),
                                        transforms.Normalize([0.485, 0.456, 0.406],
                                                             [0.229, 0.224, 0.225])])
    return my_transforms(image).unsqueeze(0).to(device)
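# The mean/std values above are the standard ImageNet statistics, matching
# the normalization applied when the model was traced.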
# Run the traced model on an input image and return a color-coded mask.
def predict(img):
    img = PILImage.create(img)
    image = transforms.Resize((480, 640))(img)
    tensor = transform_image(image=image)
    with torch.no_grad():
        outputs = model(tensor)
    outputs = torch.argmax(outputs, 1)
    mask = np.array(outputs.cpu())
    mask = np.reshape(mask, (480, 640))
    # Add a channel dimension so colors can be assigned...
    mask = np.expand_dims(mask, axis=2)
    # ...and replicate it across the three RGB channels.
    mask = np.repeat(mask, 3, axis=2)
    # Boolean masks, one per class
    uvas = np.all(mask == [1, 1, 1], axis=2)
    hojas = np.all(mask == [2, 2, 2], axis=2)
    poste = np.all(mask == [3, 3, 3], axis=2)
    madera = np.all(mask == [4, 4, 4], axis=2)
    # Grapes -> white
    mask[uvas] = [255, 255, 255]
    # Leaves -> green
    mask[hojas] = [0, 255, 0]
    # Post -> blue
    mask[poste] = [0, 0, 255]
    # Wood -> red
    mask[madera] = [255, 0, 0]
    return Image.fromarray(mask.astype('uint8'))
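# Quick local check (a sketch, assuming the example image ships with the app):
#
#   out = predict('color_158.jpg')   # returns a PIL.Image
#   out.save('prediction_158.png')   # hypothetical output path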
# Build the Gradio interface and launch it.
gr.Interface(fn=predict,
             inputs="image",
             outputs="image",
             examples=['color_158.jpg', 'color_157.jpg']).launch(share=False)