import torch
import gradio as gr
from typing import Dict

from model import get_transforms, create_effnetb2_model

# Create the EfficientNet-B2 feature extractor with a 3-class head.
# Note: trained weights should be loaded (e.g. via model.load_state_dict)
# before the predictions are meaningful.
model = create_effnetb2_model(num_classes=3)
model.eval()

# Class names in the order the model outputs them.
class_names = ['negative', 'neutral', 'positive']

def predict(img) -> Dict[str, float]:
    """Transform the input image and return a class-name -> probability mapping."""
    transform = get_transforms()
    img = transform(img).unsqueeze(0)  # add a batch dimension

    with torch.inference_mode():
        pred_probs = torch.softmax(model(img), dim=1)

    # Map each class name to its predicted probability.
    pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))}

    return pred_labels_and_probs

title = "EffNetB2 Sentiment Analysis"
description = "An EfficientNetB2 feature-extractor computer vision model that analyses image sentiment."

demo = gr.Interface(fn=predict,
                    inputs=gr.Image(type="pil"),
                    outputs=[gr.Label(num_top_classes=3, label="Predictions")],
                    title=title,
                    description=description)

if __name__ == "__main__":
    demo.launch()
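
The app imports get_transforms and create_effnetb2_model from a local model.py that is not shown on this page. Below is a minimal sketch of what that module likely contains, assuming the common torchvision EfficientNet-B2 feature-extractor recipe (pretrained backbone frozen, classifier head replaced); the dropout value and layer sizes here are assumptions, not the original code.

# model.py (hypothetical sketch, not the original file)
import torch
import torch.nn as nn
from torchvision import models

def get_transforms():
    # Preprocessing pipeline bundled with the pretrained EfficientNet-B2 weights
    # (resize, centre-crop, tensor conversion, ImageNet normalisation).
    return models.EfficientNet_B2_Weights.DEFAULT.transforms()

def create_effnetb2_model(num_classes: int = 3, seed: int = 42):
    # Start from ImageNet-pretrained EfficientNet-B2 and freeze the backbone.
    weights = models.EfficientNet_B2_Weights.DEFAULT
    model = models.efficientnet_b2(weights=weights)
    for param in model.features.parameters():
        param.requires_grad = False

    # Replace the classifier head with one sized for the sentiment classes.
    torch.manual_seed(seed)
    model.classifier = nn.Sequential(
        nn.Dropout(p=0.3, inplace=True),
        nn.Linear(in_features=1408, out_features=num_classes),
    )
    return model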