import torch
from transformers import BertTokenizer, BertForSequenceClassification
from transformers import pipeline
import matplotlib.pyplot as plt
import streamlit as st

# Load pre-trained model and tokenizer
model_name = "nlptown/bert-base-multilingual-uncased-sentiment"
model = BertForSequenceClassification.from_pretrained(model_name)
tokenizer = BertTokenizer.from_pretrained(model_name)
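# Note: from_pretrained() downloads the weights and vocabulary from the Hugging
# Face Hub on the first call and caches them locally, so the first run needs
# network access. This checkpoint predicts a 1-5 star rating (5 classes).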
# Function to classify text: returns softmax probabilities over the 5 star ratings
def classify_text(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True)
    outputs = model(**inputs)
    # Convert raw logits into a probability distribution over the classes
    scores = torch.nn.functional.softmax(outputs.logits, dim=1)
    return scores
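# Example of calling classify_text() directly, outside the Streamlit UI
# (a sketch; the sample sentence is purely illustrative):
#   probs = classify_text("The film was wonderful!")  # tensor of shape (1, 5)
#   probs.argmax(dim=1)                               # index of the top rating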
# Streamlit interface
st.title("NLP Transformer with PyTorch and Hugging Face")
prompt_text = "Create an NLP transformer example using PyTorch with Hugging Face, add a Streamlit interface for inputs and outputs, include multiple graphs if necessary. Code should be easy to cut and paste"
st.write(f"**Prompt:** {prompt_text}")

st.header("Sentiment Analysis")
text = st.text_area("Enter text for sentiment analysis:")

if st.button("Classify"):
    scores = classify_text(text).detach().numpy()[0]
    labels = ["1 star", "2 stars", "3 stars", "4 stars", "5 stars"]

    st.write("Classification Scores:")
    for label, score in zip(labels, scores):
        st.write(f"{label}: {score:.4f}")

    # Bar chart of the class probabilities
    fig, ax = plt.subplots()
    ax.bar(labels, scores, color='blue')
    ax.set_xlabel('Sentiment')
    ax.set_ylabel('Score')
    ax.set_title('Sentiment Analysis Scores')
    st.pyplot(fig)
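# To launch the app (assuming this script is saved as app.py; the filename is
# only an illustration), install the dependencies and run Streamlit from a shell:
#   pip install torch transformers streamlit matplotlib
#   streamlit run app.py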