### 1. Imports and class names setup ###
import os
from timeit import default_timer as timer
from typing import Tuple, Dict

import gradio as gr
import torch
from torch import nn
from torchvision import transforms

from model import TinyCNN
# Setup class names
with open("class_names.txt", "r") as f:  # reading them in from class_names.txt
    class_names = [emotion_name.strip() for emotion_name in f.readlines()]
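# Optional sanity check, a minimal sketch: it assumes class_names.txt lists the seven
# emotions below in alphabetical order (the same order used for the emoji lookup in
# predict()); adjust the list if your file differs.
expected_classes = ["angry", "disgust", "fear", "happy", "neutral", "sad", "surprise"]
if class_names != expected_classes:
    print(f"Warning: got class names {class_names}, expected {expected_classes}")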
### 2. Model and transforms preparation ###
# Create model
TinyCNN_model = TinyCNN(input_shape=3,  # number of color channels (3 for RGB)
                        hidden_units=64,
                        output_shape=len(class_names))

# Loss function and optimizer (not needed for inference; kept from the training setup)
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(params=TinyCNN_model.parameters(), lr=0.001)
# Transform inputs to match the training setup: 128x128, 3-channel grayscale tensors
transform = transforms.Compose([
    transforms.Resize((128, 128)),
    transforms.Grayscale(num_output_channels=3),
    transforms.ToTensor()
])
# Load saved weights
TinyCNN_model.load_state_dict(
    torch.load(
        f="TinyCNN_3.pth",
        map_location=torch.device("cpu"),  # load to CPU
    )
)
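# Optional smoke test, a minimal sketch assuming TinyCNN accepts the 3x128x128 inputs
# produced by the transform above; it checks that the loaded weights yield one logit
# per class on a dummy batch.
TinyCNN_model.eval()
with torch.inference_mode():
    dummy_logits = TinyCNN_model(torch.zeros(1, 3, 128, 128))
    assert dummy_logits.shape[-1] == len(class_names), "Model output size does not match class_names"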
### 3. Predict function ###
# Create predict function
def predict(img) -> Tuple[Dict, str]:
    """Transforms img, runs it through the model and returns the per-class
    prediction probabilities along with the path of the matching emoji image."""
    # Start the timer (prediction time is measured but not shown in the UI)
    start_time = timer()

    # Transform the target image and add a batch dimension
    img = transform(img).unsqueeze(dim=0)

    # Put model into evaluation mode and turn on inference mode
    TinyCNN_model.eval()
    with torch.inference_mode():
        # Pass the transformed image through the model and turn the prediction logits into prediction probabilities
        pred_probs = torch.softmax(TinyCNN_model(img), dim=1)

    # Create a prediction label and prediction probability dictionary for each prediction class
    # (this is the required format for Gradio's Label output)
    pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))}

    # Calculate the prediction time
    pred_time = round(timer() - start_time, 5)

    # Pick the emoji image that matches the predicted emotion
    # (assumes "emojis/" holds one image per class, listed in the order below)
    emoji_files = ["emojis/" + example for example in os.listdir("emojis")]
    emotion_order = ["angry", "disgust", "fear", "happy", "neutral", "sad", "surprise"]
    emoji_for_emotion = dict(zip(emotion_order, emoji_files))

    predicted_emotion = class_names[int(torch.argmax(pred_probs))]

    # Return the prediction dictionary and the matching emoji image path
    return pred_labels_and_probs, emoji_for_emotion[predicted_emotion]
### 4. Gradio app ###
# Create title, description and article strings
title = "Expression Detection"
description = "An app that predicts one of seven emotions: Angry, Disgust, Fear, Happy, Neutral, Sad or Surprise. The model can only predict on a single face, so upload an image containing just one face."
article = "Created as a college project."
# Create examples list from "examples/" directory
example_list = [["examples/" + example] for example in os.listdir("examples")]
# Create Gradio interface
demo = gr.Interface(
    fn=predict,
    inputs=gr.Image(sources=["upload"], type="pil"),
    outputs=[
        gr.Label(num_top_classes=5, label="Predictions"),
        gr.Image(label="Emotion"),
    ],
    examples=example_list,
    title=title,
    description=description,
    article=article,
)
# Launch the app!
demo.launch()
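# (When running locally rather than on Hugging Face Spaces, demo.launch(share=True)
# can be used to generate a temporary public link.)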