from huggingface_hub import from_pretrained_fastai
import gradio as gr
from fastai.vision.all import *
import PIL
import torchvision.transforms as transforms
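
# ItemTransform that binarizes the target mask: pixels with value 255 become
# class 1 and every other pixel becomes class 0 (background).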
class TargetMaskConvertTransform(ItemTransform):
    def __init__(self):
        pass

    def encodes(self, x):
        img, mask = x
        # Convert to array
        mask = np.array(mask)
        mask[mask != 255] = 0
        # Change 255 to 1
        mask[mask == 255] = 1
        # Back to PILMask
        mask = PILMask.create(mask)
        return img, mask
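
# Convert a PIL image to a normalized tensor batch (ImageNet mean/std)
# and move it to the selected device.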
def transform_image(image):
    my_transforms = transforms.Compose([transforms.ToTensor(),
                                        transforms.Normalize(
                                            [0.485, 0.456, 0.406],
                                            [0.229, 0.224, 0.225])])
    image_aux = image
    return my_transforms(image_aux).unsqueeze(0).to(device)
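
# Download the fastai learner from the Hugging Face Hub and keep a reference
# to its underlying PyTorch model.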
repo_id = "Ignaciobfp/segmentacion-dron-marras"
learner = from_pretrained_fastai(repo_id)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = learner.model
model = model.cpu()

# Define the function that performs the predictions.
def predict(img):
    #img = PILImage.create(img)
    image = transforms.Resize((400, 400))(img)
    tensor = transform_image(image=image)
    model.to(device)
    with torch.no_grad():
        outputs = model(tensor)
        outputs = torch.argmax(outputs, 1)
    mask = np.array(outputs.cpu())
    mask[mask == 1] = 255
    mask = np.reshape(mask, (400, 400))
    return mask

# Create the interface and launch it.
gr.Interface(fn=predict,
             inputs=gr.inputs.Image(shape=(400, 400)),
             outputs="image",
             examples=['examples/1CA SUR_1200_800.png',
                       'examples/1CA SUR_4000_1200.png',
                       'examples/1CA SUR_4800_2000.png']).launch(share=False)