from huggingface_hub import from_pretrained_fastai
import gradio as gr
from fastai.vision.all import *
import PIL
import numpy as np
import torch
import torchvision.transforms as transforms

# Alternative: load the fastai learner directly from the Hugging Face Hub.
# repo_id = "Ignaciobfp/segmentacion-dron-marras"
# learner = from_pretrained_fastai(repo_id)

# Run inference on CPU with the exported TorchScript model.
device = torch.device("cpu")
# model = learner.model
model = torch.jit.load("pr3.pth")
model = model.cpu()
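# Assumption (not in the original script): explicitly switch the TorchScript module
# to eval mode so dropout/batch-norm layers behave deterministically at inference.
model.eval()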
def transform_image(image):
    """Convert a PIL image into a normalized, batched tensor on the target device."""
    # The mean/std values below are the standard ImageNet normalization statistics.
    my_transforms = transforms.Compose([transforms.ToTensor(),
                                        transforms.Normalize(
                                            [0.485, 0.456, 0.406],
                                            [0.229, 0.224, 0.225])])
    image_aux = image
    return my_transforms(image_aux).unsqueeze(0).to(device)
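# Optional sanity check (illustrative, not part of the original app): a 400x400 RGB
# image should become a batched (1, 3, 400, 400) float tensor on the target device.
_dummy = PIL.Image.new("RGB", (400, 400))
assert transform_image(_dummy).shape == (1, 3, 400, 400)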
# Define the function that carries out the predictions.
def predict(img):
    # Gradio passes the input image as a NumPy array; convert it to PIL and
    # resize to the 400x400 resolution the model expects.
    img_pil = PIL.Image.fromarray(img, 'RGB')
    image = transforms.Resize((400, 400))(img_pil)
    tensor = transform_image(image=image)
    model.to(device)
    with torch.no_grad():
        outputs = model(tensor)
    # Per-pixel class prediction; map pixels predicted as class 1 to white (255).
    outputs = torch.argmax(outputs, 1)
    mask = np.array(outputs.cpu())
    mask[mask == 1] = 255
    mask = np.reshape(mask, (400, 400))
    return PIL.Image.fromarray(mask.astype('uint8'))
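# Optional smoke test (assumption, not in the original app): run one prediction on a
# bundled example image, if present, and save the resulting mask next to the script.
import os
if os.path.exists("examplesB/color_180.jpg"):
    _example = np.array(PIL.Image.open("examplesB/color_180.jpg").convert("RGB"))
    predict(_example).save("mask_180.png")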
# Create the interface and launch it.
gr.Interface(fn=predict,
             inputs=gr.Image(),
             outputs=gr.Image(type="pil"),
             examples=['examplesB/color_180.jpg', 'examplesB/color_179.jpg', 'examplesB/color_156.jpg',
                       'examplesB/color_155.jpg', 'examplesB/color_154.jpg']).launch(share=False)