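# Streamlit app: classify text with a Hugging Face sequence-classification model.
# To run locally (assuming this file is saved as app.py): streamlit run app.py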
import streamlit as st
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
# Sidebar for user input
st.sidebar.header("Model Configuration")
# Default to a small sentiment-classification checkpoint so the app works out of the box
model_name = st.sidebar.text_input("Enter model name", "distilbert-base-uncased-finetuned-sst-2-english")
# Load model and tokenizer on demand
@st.cache_resource
def load_model(model_name):
    try:
        # Load the model and tokenizer
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = AutoModelForSequenceClassification.from_pretrained(model_name)
        return tokenizer, model
    except Exception as e:
        st.error(f"Error loading model: {e}")
        return None, None
# Load the model and tokenizer
tokenizer, model = load_model(model_name)
# Input text box in the main panel
st.title("Text Classification with Hugging Face Models")
user_input = st.text_area("Enter text for classification:")
# Make prediction if user input is provided
if user_input and model and tokenizer:
    inputs = tokenizer(user_input, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)

    # Display results (e.g., classification logits)
    logits = outputs.logits
    predicted_class = torch.argmax(logits, dim=-1).item()
    st.write(f"Predicted Class: {predicted_class}")
    st.write(f"Logits: {logits}")
else:
    st.info("Please enter some text to classify.")
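# Note: when deployed as a Streamlit Space, the dependencies used here
# (streamlit, transformers, torch) are expected to be listed in a
# requirements.txt alongside this file.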