Update app.py
app.py CHANGED
@@ -3,69 +3,72 @@ import gradio as gr
 from fastai.vision.all import *
 import PIL
 import torchvision.transforms as transforms
-from albumentations import (
-    Compose,
-    OneOf,
-    ElasticTransform,
-    GridDistortion,
-    OpticalDistortion,
-    HorizontalFlip,
-    Rotate,
-    Transpose,
-    CLAHE,
-    ShiftScaleRotate
-)
+# from albumentations import (
+#     Compose,
+#     OneOf,
+#     ElasticTransform,
+#     GridDistortion,
+#     OpticalDistortion,
+#     HorizontalFlip,
+#     Rotate,
+#     Transpose,
+#     CLAHE,
+#     ShiftScaleRotate
+# )

-def get_y_fn (x):
-    return Path(str(x).replace("Images","Labels").replace("color","gt").replace(".jpg",".png"))
+# def get_y_fn (x):
+#     return Path(str(x).replace("Images","Labels").replace("color","gt").replace(".jpg",".png"))

-def ParentSplitter(x):
-    return Path(x).parent.name==test_name
+# def ParentSplitter(x):
+#     return Path(x).parent.name==test_name

-class SegmentationAlbumentationsTransform(ItemTransform):
-    split_idx = 0
+# class SegmentationAlbumentationsTransform(ItemTransform):
+#     split_idx = 0

-    def __init__(self, aug):
-        self.aug = aug
+#     def __init__(self, aug):
+#         self.aug = aug

-    def encodes(self, x):
-        img,mask = x
-        aug = self.aug(image=np.array(img), mask=np.array(mask))
-        return PILImage.create(aug["image"]), PILMask.create(aug["mask"])
+#     def encodes(self, x):
+#         img,mask = x
+#         aug = self.aug(image=np.array(img), mask=np.array(mask))
+#         return PILImage.create(aug["image"]), PILMask.create(aug["mask"])

-transforms=Compose([HorizontalFlip(p=0.5),
-                    Rotate(p=0.40,limit=10),GridDistortion()
-                    ],p=1)
-transformPipeline=SegmentationAlbumentationsTransform(transforms)
+# transforms=Compose([HorizontalFlip(p=0.5),
+#                     Rotate(p=0.40,limit=10),GridDistortion()
+#                     ],p=1)
+# transformPipeline=SegmentationAlbumentationsTransform(transforms)

-class TargetMaskConvertTransform(ItemTransform):
-    def __init__(self):
-        pass
-    def encodes(self, x):
-        img,mask = x
+# class TargetMaskConvertTransform(ItemTransform):
+#     def __init__(self):
+#         pass
+#     def encodes(self, x):
+#         img,mask = x

-        #Convert to array
-        mask = np.array(mask)
+#         #Convert to array
+#         mask = np.array(mask)

-        # Here we define each class in the mask
-        # grape:
-        mask[mask==255]=1
-        # leaves:
-        mask[mask==150]=2
-        # wires:
-        mask[mask==76]=3
-        mask[mask==74]=3
-        # wood:
-        mask[mask==29]=4
-        mask[mask==25]=4
+#         # Here we define each class in the mask
+#         # grape:
+#         mask[mask==255]=1
+#         # leaves:
+#         mask[mask==150]=2
+#         # wires:
+#         mask[mask==76]=3
+#         mask[mask==74]=3
+#         # wood:
+#         mask[mask==29]=4
+#         mask[mask==25]=4

-        # Back to PILMask
-        mask = PILMask.create(mask)
-        return img, mask
+#         # Back to PILMask
+#         mask = PILMask.create(mask)
+#         return img, mask

-repo_id = "paascorb/practica3_Segmentation"
+# repo_id = "paascorb/practica3_Segmentation"

-learner = from_pretrained_fastai(repo_id)
+# learner = from_pretrained_fastai(repo_id)
+
+learn = unet_learner(trainDLS,resnet18,cbs=callbacks,metrics=[DiceMulti()]).to_fp16()
+learn.load('model')

 def transform_image(image):
     my_transforms = transforms.Compose([transforms.ToTensor(),
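Note: the loading path being commented out relied on the fastai integration shipped with the huggingface_hub package, whose import is not visible in this hunk. A minimal sketch of that approach, assuming only that huggingface_hub is installed; the repo id and the last two lines are taken straight from the diff:

    from huggingface_hub import from_pretrained_fastai

    repo_id = "paascorb/practica3_Segmentation"
    # downloads the exported fastai Learner from the Hub and loads it
    learner = from_pretrained_fastai(repo_id)

The replacement path assumes instead that trainDLS (the segmentation DataLoaders) and callbacks are defined earlier in app.py, and that the exported weights are available locally, since learn.load('model') resolves to models/model.pth under the learner's path by default.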