Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -3,19 +3,9 @@ import pandas as pd
 import torch
 from transformers import AutoTokenizer, AutoModelForSequenceClassification
 
-# Define the available models to choose from
-models = {
-    'BERT': 'bert-base-uncased',
-    'RoBERTa': 'roberta-base',
-    'DistilBERT': 'distilbert-base-uncased'
-}
-
-# Create a drop-down menu to select the model
-model_name = st.sidebar.selectbox('Select Model', list(models.keys()))
 
-
-
-model = AutoModelForSequenceClassification.from_pretrained(models[model_name])
+tokenizer = AutoTokenizer.from_pretrained("APJ23/MultiHeaded_Sentiment_Analysis_Model")
+model = AutoModelForSequenceClassification.from_pretrained("APJ23/MultiHeaded_Sentiment_Analysis_Model")
 
 # Define the classes and their corresponding labels
 classes = {
@@ -30,17 +20,6 @@ classes = {
 
 # Create a function to generate the toxicity predictions
 @st.cache(allow_output_mutation=True)
-def predict_toxicity(tweet, model, tokenizer):
-    # Preprocess the text
-    inputs = tokenizer(tweet, padding=True, truncation=True, return_tensors='pt')
-    # Get the predictions from the model
-    outputs = model(**inputs)
-    predictions = torch.nn.functional.softmax(outputs.logits, dim=1).detach().numpy()
-    # Get the class with the highest probability
-    predicted_class = int(predictions.argmax())
-    predicted_class_label = classes[predicted_class]
-    predicted_prob = predictions[0][predicted_class]
-    return predicted_class_label, predicted_prob
 
 # Create a table to display the toxicity predictions
 def create_table(predictions):
@@ -52,11 +31,9 @@ def create_table(predictions):
     df = pd.DataFrame(data)
     return df
 
-# Create the user interface
 st.title('Toxicity Prediction App')
 tweet_input = st.text_input('Enter a tweet:')
 if st.button('Predict'):
-    # Generate the toxicity prediction for the tweet using the selected model
     predicted_class_label, predicted_prob = predict_toxicity(tweet_input, model, tokenizer)
     prediction_text = f'Prediction: {predicted_class_label} ({predicted_prob:.2f})'
     st.write(prediction_text)
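Note: the "Runtime error" status above is consistent with what this commit leaves in app.py. The deleted predict_toxicity function is still called in the Predict button handler, and its @st.cache(allow_output_mutation=True) decorator remains with no function of its own (Python attaches it to the next def, here create_table, since blank lines and comments may sit between a decorator and its function), so clicking Predict raises a NameError. Below is a minimal sketch of the missing helper, reconstructed from the deleted lines; the classes dict and the st/torch imports come from the parts of app.py not shown in this diff, and the torch.no_grad() context is an added assumption for inference, not part of the original code.

@st.cache(allow_output_mutation=True)
def predict_toxicity(tweet, model, tokenizer):
    # Tokenize the tweet into PyTorch tensors
    inputs = tokenizer(tweet, padding=True, truncation=True, return_tensors='pt')
    # Run the model without tracking gradients (inference only)
    with torch.no_grad():
        outputs = model(**inputs)
    # Softmax over the logits to get per-class probabilities
    predictions = torch.nn.functional.softmax(outputs.logits, dim=1).numpy()
    # Map the highest-probability class index to its label from `classes`
    predicted_class = int(predictions.argmax())
    return classes[predicted_class], float(predictions[0][predicted_class])

Two caveats, both depending on the Streamlit version in use: st.cache hashes the model and tokenizer arguments and can fail on unhashable objects without custom hash_funcs, and st.cache is deprecated in newer Streamlit releases in favor of st.cache_resource, which is the more natural fit for caching the model and tokenizer loaders at the top of the file.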