File size: 1,451 Bytes
abea447
 
009dbb2
bfa9948
 
 
009dbb2
abea447
bfa9948
 
009dbb2
bfa9948
 
 
 
 
 
 
8bbe152
bfa9948
 
 
 
 
 
 
 
 
 
 
2f73a94
 
 
 
 
5ebc75d
 
7d19038
009dbb2
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
from pathlib import Path

import gradio as gr
import torch
from transformers import AutoTokenizer
from transformers import AutoModelForSequenceClassification


# Specify the path of the model
# (a local directory containing a fine-tuned DistilBERT checkpoint;
# from_pretrained below will fail if the directory is missing)
model_ckpt = Path("./distilbert-base-uncased-finetuned-emotion")

# Load the fine-tuned tokenizer and model
# Prefer GPU when available; the model is moved once, at import time.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tokenizer = AutoTokenizer.from_pretrained(model_ckpt)
model = AutoModelForSequenceClassification.from_pretrained(model_ckpt).to(device)

# NOTE(review): class_names is never used below — inference() reads labels
# from model.config.id2label instead. Kept for reference; consider removing
# or asserting it matches the model config.
class_names = ['sadness', 'joy', 'love', 'anger', 'fear', 'surprise']

# main function
def inference(text: str) -> str:
    """Classify the emotion expressed in *text*.

    Tokenizes the input, runs the fine-tuned sequence-classification model,
    and returns the human-readable label of the highest-scoring class.

    Args:
        text: The sentence to classify.

    Returns:
        The predicted label string taken from ``model.config.id2label``
        (e.g. ``"joy"``).
    """
    inputs = tokenizer(text, return_tensors="pt")

    # Move every input tensor to the same device as the model.
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)

    # argmax over the logits yields the predicted class id directly;
    # softmax is monotonic, so applying it first (as the original did,
    # via a misspelled `max_vale` + list.index scan) cannot change the
    # winning index.
    pred_id = outputs.logits.argmax(dim=-1).item()
    return model.config.id2label[pred_id]
    
# UI text shown in the Gradio demo.
app_title = "Classify the feeling of your sentence"
app_description = """
<p style="text-align:center">The model has been trained to classify the feeling of the texts, between sadness, joy, love, anger, fear or surprise. Test it!</p>
"""
example_inputs = [
    'Tomorrow I will celebrate my birthday!',
    'I was shocked when I saw the movie',
]

# Build the text-in / text-out interface around inference() and serve it.
gr.Interface(
    fn=inference,
    inputs="text",
    outputs="text",
    title=app_title,
    description=app_description,
    examples=example_inputs,
).launch()