import os
from timeit import default_timer as timer
from typing import Dict, Tuple

import gradio as gr
import torch

from model import create_effnetb2_model

# Class names in the order the model was trained on
class_names = ['pizza', 'steak', 'sushi']

# Create the EfficientNetB2 model and its matching image transforms
model, transforms = create_effnetb2_model(num_classes=len(class_names))
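# `create_effnetb2_model` lives in model.py (not included here). A minimal sketch of
# what it is assumed to do -- build a torchvision EfficientNetB2 feature extractor,
# swap the classifier head for `num_classes` outputs, and return the model together
# with its pretrained transforms -- would look roughly like:
#
#     import torchvision
#     from torch import nn
#
#     def create_effnetb2_model(num_classes: int = 3):
#         weights = torchvision.models.EfficientNet_B2_Weights.DEFAULT
#         transforms = weights.transforms()
#         model = torchvision.models.efficientnet_b2(weights=weights)
#         for param in model.parameters():
#             param.requires_grad = False  # freeze the backbone
#         model.classifier = nn.Sequential(
#             nn.Dropout(p=0.3, inplace=True),
#             nn.Linear(in_features=1408, out_features=num_classes),  # 1408 = EffNetB2 feature dim
#         )
#         return model, transforms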

# Load the trained weights from the saved checkpoint onto the CPU
ckpt = torch.load('effnet_ckpt.tar', map_location='cpu')
model.load_state_dict(ckpt['model_state_dict'])
model.to('cpu')
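# Note: the checkpoint is assumed to have been written during training with
# something along the lines of:
#
#     torch.save({'model_state_dict': model.state_dict()}, 'effnet_ckpt.tar')
#
# so only the 'model_state_dict' key is read here.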


def predict(img) -> Tuple[Dict, float]:
    """Classify a single PIL image and return class probabilities plus prediction time."""
    start = timer()

    # Preprocess: apply the EfficientNetB2 transforms and add a batch dimension
    img = transforms(img).unsqueeze(0)

    # Forward pass with gradients disabled
    model.eval()
    with torch.inference_mode():
        pred_logits = model(img)
        pred_probs = torch.softmax(pred_logits, dim=1).squeeze(0)

    # Map each class name to its predicted probability (the format gr.Label expects)
    pred_probs_dict = {class_names[i]: pred_probs[i].item() for i in range(len(class_names))}

    end = timer()
    return pred_probs_dict, round(end - start, 4)
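
# Quick local sanity check (hypothetical path -- assumes a sample image exists there):
#
#     from PIL import Image
#     probs, secs = predict(Image.open('examples/sample.jpg'))
#     print(probs, secs)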


# Build the Gradio example gallery from every image in the examples/ directory
examples_dir = 'examples'
examples = [[os.path.join(examples_dir, f)] for f in os.listdir(examples_dir)]

# Gradio interface metadata
title = "Pizza, Steak, Sushi Classifier 🍕🥩🍣"
description = "This EfficientNetB2 model classifies images of pizza, steak, and sushi."
article = "Created for practice using [Gradio](https://www.gradio.app/)."

# Build the Gradio interface: an image input, label + prediction-time outputs
demo = gr.Interface(fn=predict,
                    inputs=gr.Image(type="pil", label="Image of Pizza, Steak, or Sushi"),
                    outputs=[gr.Label(label="Predictions", num_top_classes=len(class_names)),
                             gr.Number(label="Prediction Time (s)")],
                    examples=examples,
                    title=title,
                    description=description,
                    article=article)

# share=True creates a temporary public link when launching locally;
# it is typically unnecessary when the app is hosted (e.g. on Hugging Face Spaces)
demo.launch(share=True)