import mxnet as mx
import matplotlib.pyplot as plt
import numpy as np
from collections import namedtuple
from mxnet.gluon.data.vision import transforms
from mxnet.contrib.onnx.onnx2mx.import_model import import_model
import os
import gradio as gr
|
# Download a sample image and the ImageNet class labels (synset.txt).
mx.test_utils.download('https://s3.amazonaws.com/model-server/inputs/kitten.jpg')
mx.test_utils.download('https://s3.amazonaws.com/onnx-model-zoo/synset.txt')
with open('synset.txt', 'r') as f:
    labels = [l.rstrip() for l in f]
|
os.system("wget https://github.com/onnx/models/raw/main/vision/classification/inception_and_googlenet/googlenet/model/googlenet-3.onnx") |
|
|
|
|
|
|
|
sym, arg_params, aux_params = import_model('googlenet-3.onnx') |
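# import_model returns the network as an MXNet Symbol plus two parameter dicts:
# arg_params (the learned weights) and aux_params (auxiliary states such as
# BatchNorm running means/variances).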
|
|
|
# The MXNet Module API expects input wrapped in a namedtuple with a 'data' field.
Batch = namedtuple('Batch', ['data'])
|
def get_image(path, show=False):
    # Read the image from disk as an HWC uint8 NDArray; optionally display it.
    img = mx.image.imread(path)
    if img is None:
        return None
    if show:
        plt.imshow(img.asnumpy())
        plt.axis('off')
    return img
|
def preprocess(img):
    # Standard ImageNet preprocessing: resize, center-crop to 224x224, convert to
    # a CHW float tensor, normalize with ImageNet statistics, and add a batch axis.
    transform_fn = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    img = transform_fn(img)
    img = img.expand_dims(axis=0)
    return img
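
# Lightweight sanity check (a minimal sketch; assumes the 'kitten.jpg' downloaded
# above is present): the preprocessed batch must match the (1, 3, 224, 224)
# data shape that the Module is bound with below.
assert preprocess(get_image('kitten.jpg')).shape == (1, 3, 224, 224)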
|
|
|
def predict(path):
    img = get_image(path, show=True)
    img = preprocess(img)
    mod.forward(Batch([img]))
    # Convert the raw logits to class probabilities.
    scores = mx.ndarray.softmax(mod.get_outputs()[0]).asnumpy()
    scores = np.squeeze(scores)
    # Collect the top-5 classes for the Gradio Label output.
    a = np.argsort(scores)[::-1]
    results = {}
    for i in a[0:5]:
        results[labels[i]] = float(scores[i])
    return results
|
# Run on a GPU if one is available, otherwise fall back to CPU.
if len(mx.test_utils.list_gpus()) == 0:
    ctx = mx.cpu()
else:
    ctx = mx.gpu(0)

# Bind the imported symbol into an MXNet Module for inference only.
mod = mx.mod.Module(symbol=sym, context=ctx, label_names=None)
mod.bind(for_training=False, data_shapes=[('data', (1, 3, 224, 224))],
         label_shapes=mod._label_shapes)
mod.set_params(arg_params, aux_params, allow_missing=True, allow_extra=True)
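
# Quick local smoke test (a minimal sketch; assumes the 'kitten.jpg' image
# downloaded above is still present): classify one image before launching the
# UI and print the top-5 ImageNet classes with their probabilities.
for label, prob in predict('kitten.jpg').items():
    print(f'{label}: {prob:.4f}')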
|
|
|
title="MobileNet" |
|
description="MobileNet improves the state-of-the-art performance of mobile models on multiple tasks and benchmarks as well as across a spectrum of different model sizes. MobileNet is based on an inverted residual structure where the shortcut connections are between the thin bottleneck layers. The intermediate expansion layer uses lightweight depthwise convolutions to filter features as a source of non-linearity. Additionally, it removes non-linearities in the narrow layers in order to maintain representational power." |
|
|
|
# Use the sample image downloaded above as the Gradio example input.
examples = [['kitten.jpg']]
|
gr.Interface(predict, gr.inputs.Image(type='filepath'), "label",
             title=title, description=description, examples=examples).launch(enable_queue=True)