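# Streamlit demo: classify input text as neutral or toxic
# using the s-nlp/roberta_toxicity_classifier model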
import streamlit as st
import torch
from torch import nn
from transformers import AutoTokenizer, AutoModelForSequenceClassification
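# default example input; intentionally toxic so the demo produces a high toxicity score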
deftxt = "I hate you cancerous insects so much"
txt = st.text_area('Text to analyze', deftxt)
# load tokenizer and model weights
tokenizer = AutoTokenizer.from_pretrained("s-nlp/roberta_toxicity_classifier")
model = AutoModelForSequenceClassification.from_pretrained("s-nlp/roberta_toxicity_classifier")
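# note: from_pretrained downloads the weights from the Hugging Face Hub on the first run;
# since Streamlit re-executes the script on every interaction, this load could be wrapped
# in a function decorated with @st.cache_resource to avoid reloading the model each time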
batch = tokenizer.encode(txt, return_tensors='pt')
# run the model; result.logits holds the raw scores,
# e.g. tensor([[ 4.8982, -5.1952]], grad_fn=<AddmmBackward0>) for a neutral input
result = model(batch)
# softmax over the logits gives the class probabilities,
# e.g. tensor([[9.9996e-01, 4.2627e-05]], grad_fn=<SoftmaxBackward0>)
# the first index is the neutral probability, the second is the toxic probability
prediction = nn.functional.softmax(result.logits, dim=-1)
neutralProb = prediction.data[0][0]
toxicProb = prediction.data[0][1]
neutralProb = torch.round(neutralProb, decimals=4)
toxicProb = torch.round(toxicProb, decimals=4)
# the default text above should produce approximately:
# Neutral: 0.0052
# Toxic: 0.9948
st.write("Classification Probabilities")
st.write(f"{neutralProb:.4} - NEUTRAL")
st.write(f"{toxicProb:.4} - TOXIC")