# Duplicated from the muellerzr/deployment-no-fastai Space
import torch
import gradio as gr
from PIL import Image
from model import get_model, apply_weights, copy_weight
from transform import crop, pad, gpu_crop
from torchvision.transforms import Normalize, ToTensor
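
# get_model/apply_weights/copy_weight and crop/pad/gpu_crop come from the
# model.py and transform.py files that accompany this Space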
# Class labels: the 37 Oxford-IIIT Pet breeds (12 cat breeds, 25 dog breeds)
vocab = [
    'Abyssinian', 'Bengal', 'Birman',
    'Bombay', 'British_Shorthair',
    'Egyptian_Mau', 'Maine_Coon',
    'Persian', 'Ragdoll', 'Russian_Blue',
    'Siamese', 'Sphynx', 'american_bulldog',
    'american_pit_bull_terrier', 'basset_hound',
    'beagle', 'boxer', 'chihuahua', 'english_cocker_spaniel',
    'english_setter', 'german_shorthaired',
    'great_pyrenees', 'havanese',
    'japanese_chin', 'keeshond',
    'leonberger', 'miniature_pinscher', 'newfoundland',
    'pomeranian', 'pug', 'saint_bernard', 'samoyed',
    'scottish_terrier', 'shiba_inu', 'staffordshire_bull_terrier',
    'wheaten_terrier', 'yorkshire_terrier'
]
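
# Recreate the network architecture and load the exported weights onto the CPU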
model = get_model()
state = torch.load('exported_model.pth', map_location="cpu")
apply_weights(model, state, copy_weight)
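
# Convert PIL images to tensors and normalize with the standard ImageNet mean/std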
to_tensor = ToTensor()
norm = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
def classify_image(inp):
    # Gradio hands the uploaded image over as a numpy array; convert back to PIL
    inp = Image.fromarray(inp)
    # Preprocess: crop/pad to 460x460, build a batch of one, crop to 224x224,
    # then apply the ImageNet normalization
    transformed_input = pad(crop(inp, (460, 460)), (460, 460))
    transformed_input = to_tensor(transformed_input).unsqueeze(0)
    transformed_input = gpu_crop(transformed_input, (224, 224))
    transformed_input = norm(transformed_input)
    # Run inference and map the highest-scoring class index back to its label
    model.eval()
    with torch.no_grad():
        pred = model(transformed_input)
    pred = torch.argmax(pred, dim=1)
    return vocab[pred.item()]
# Gradio UI: take an uploaded image and return the predicted breed as text.
# gr.inputs.Image() is the pre-3.0 Gradio API; gr.Image is the current component.
iface = gr.Interface(
    fn=classify_image,
    inputs=gr.Image(type="numpy"),
    outputs="text",
    title="NO Fastai Classifier",
    description="An example of not using Fastai in Gradio.",
).launch()
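
# Optional local smoke test (a sketch, not part of the original app): assuming a
# sample image such as "sample.jpg" sits next to this script, the pipeline can be
# exercised directly, bypassing the UI (run it before .launch(), which blocks):
#
#   import numpy as np
#   print(classify_image(np.array(Image.open("sample.jpg").convert("RGB"))))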