# Gradio app that segments images into grapes, leaves, wires and wood using a
# fastai learner hosted on the Hugging Face Hub.
from huggingface_hub import from_pretrained_fastai
import gradio as gr
from fastai.vision.all import *

from pathlib import Path
import numpy as np
import PIL
import torch
import torchvision.transforms as tv_transforms
# albumentations' Compose/Rotate intentionally shadow the fastai names here.
from albumentations import Compose, HorizontalFlip, Rotate, GridDistortion

# Training-time helper: map an image path to its ground-truth mask path.
def get_y_fn(x):
    return Path(str(x).replace("Images", "Labels").replace("color", "gt").replace(".jpg", ".png"))
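# e.g. get_y_fn turns Images/color_155.jpg into Labels/gt_155.png.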

def ParentSplitter(x):
    # Train/test split by parent directory name; `test_name` only existed at
    # training time and is deliberately undefined in this inference app.
    return Path(x).parent.name == test_name

class SegmentationAlbumentationsTransform(ItemTransform):
    # Apply an albumentations pipeline to an (image, mask) pair;
    # split_idx = 0 restricts the augmentation to the training set.
    split_idx = 0

    def __init__(self, aug):
        self.aug = aug

    def encodes(self, x):
        img, mask = x
        aug = self.aug(image=np.array(img), mask=np.array(mask))
        return PILImage.create(aug["image"]), PILMask.create(aug["mask"])

transforms = Compose([HorizontalFlip(p=0.5),
                      Rotate(p=0.40, limit=10),
                      GridDistortion()], p=1)
transformPipeline = SegmentationAlbumentationsTransform(transforms)
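# The pipeline above reproduces the training-time augmentation; inference in
# this app never invokes it.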

class TargetMaskConvertTransform(ItemTransform):
    # Remap the grayscale values of a ground-truth mask to class indices.
    def __init__(self):
        pass

    def encodes(self, x):
        img, mask = x

        # Convert to array
        mask = np.array(mask)

        # Here we define each class in the mask
        # grapes:
        mask[mask == 255] = 1
        # leaves:
        mask[mask == 150] = 2
        # wires:
        mask[mask == 76] = 3
        mask[mask == 74] = 3
        # wood:
        mask[mask == 29] = 4
        mask[mask == 25] = 4

        # Back to PILMask
        mask = PILMask.create(mask)
        return img, mask

repo_id = "paascorb/practica3_Segmentation"

learner = from_pretrained_fastai(repo_id)
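# from_pretrained_fastai downloads the exported learner from the Hub and
# unpickles it, which is why the custom transforms and helpers above must be
# defined in this file.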

def transform_image(image):
    # Convert a PIL image into a normalized batch tensor (ImageNet statistics).
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    my_transforms = tv_transforms.Compose([
        tv_transforms.ToTensor(),
        tv_transforms.Normalize([0.485, 0.456, 0.406],
                                [0.229, 0.224, 0.225])])
    return my_transforms(image).unsqueeze(0).to(device)
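# Given the 480x640 resize in predict() below, this returns a tensor of shape
# [1, 3, 480, 640].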

def predict(img):
    img = PIL.Image.fromarray(img, "RGB")
    image = tv_transforms.Resize((480, 640))(img)
    tensor = transform_image(image=image)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    learner.model.to(device)
    learner.model.eval()
    with torch.no_grad():
        # A fastai Learner is not callable, so run the underlying model directly.
        outputs = learner.model(tensor)

    # Most likely class per pixel, then back to the grayscale values used in
    # the ground-truth masks.
    outputs = torch.argmax(outputs, 1)
    mask = np.array(outputs.cpu())
    mask[mask == 1] = 255  # grapes
    mask[mask == 2] = 150  # leaves
    mask[mask == 3] = 76   # wires
    mask[mask == 4] = 29   # wood
    mask = np.reshape(mask, (480, 640))
    return PIL.Image.fromarray(mask.astype('uint8'))

gr.Interface(fn=predict,
             inputs=gr.inputs.Image(shape=(128, 128)),
             outputs=[gr.outputs.Image(type="pil", label="Prediction")],
             examples=['color_155.jpg', 'color_154.jpg']).launch(share=False)
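
# Hypothetical local check (commented out; launch() above blocks until the
# interface is closed):
#   out = predict(np.array(PIL.Image.open("color_155.jpg")))
#   out.save("prediction.png")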