import onnx
import numpy as np
import onnxruntime as ort
from PIL import Image
import cv2
import os
import gradio as gr

os.system("wget https://s3.amazonaws.com/onnx-model-zoo/synset.txt")


with open('synset.txt', 'r') as f:
    labels = [l.rstrip() for l in f]
    
os.system("wget https://github.com/AK391/models/raw/main/vision/classification/inception_and_googlenet/inception_v2/model/inception-v2-9.onnx")

os.system("wget https://s3.amazonaws.com/model-server/inputs/kitten.jpg")



model_path = 'inception-v2-9.onnx'
model = onnx.load(model_path)
session = ort.InferenceSession(model.SerializeToString())
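
# Optional sanity check (a sketch, not part of the original app): the Inception v2
# model from the ONNX model zoo is expected to take a single float32 NCHW input of
# shape (1, 3, 224, 224); uncomment to print what the session actually reports.
# print(session.get_inputs()[0].name, session.get_inputs()[0].shape)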

def get_image(path):
    '''Load an image from disk and return it as an RGB numpy array.'''
    with Image.open(path) as img:
        img = np.array(img.convert('RGB'))
    return img
    


def preprocess(img):
    '''
    Preprocessing expected by the MXNet/Gluon-exported Inception v2 model:
    resize to 224x224, subtract the per-channel ImageNet mean, reorder the
    channels from RGB to BGR, and return an NCHW float32 tensor.
    '''
    img = np.array(Image.fromarray(img).resize((224, 224))).astype(np.float32)
    # Subtract the ImageNet per-channel mean (R, G, B)
    img[:, :, 0] -= 123.68
    img[:, :, 1] -= 116.779
    img[:, :, 2] -= 103.939
    # Swap RGB -> BGR
    img[:, :, [0, 1, 2]] = img[:, :, [2, 1, 0]]
    # HWC -> CHW, then add the batch dimension
    img = img.transpose((2, 0, 1))
    img = np.expand_dims(img, axis=0)

    return img
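
# Example of the expected result (a sketch, assuming kitten.jpg was downloaded above):
#   x = preprocess(get_image('kitten.jpg'))
#   x.shape -> (1, 3, 224, 224), x.dtype -> float32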


def predict(path):
    img = get_image(path)
    img = preprocess(img)
    ort_inputs = {session.get_inputs()[0].name: img}
    preds = session.run(None, ort_inputs)[0]
    preds = np.squeeze(preds)
    # Class indices sorted by descending score
    top = np.argsort(preds)[::-1]
    # Map the top-5 class labels to their scores
    results = {labels[i]: float(preds[i]) for i in top[:5]}
    return results
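
# Quick local test (a sketch, assuming kitten.jpg was downloaded above);
# prints a dict mapping the top-5 class labels to their raw scores:
# print(predict('kitten.jpg'))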

       

title="Inception v2"
description="Inception v2 is a deep convolutional networks for classification."

examples=[['kitten.jpg']]
gr.Interface(predict,gr.inputs.Image(type='filepath'),"label",title=title,description=description,examples=examples).launch(enable_queue=True,debug=True)