# Setup / run commands:
#   pip install --upgrade pip
#   curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh   # Rust toolchain (may be needed to build tokenizer wheels)
#   streamlit clean
#   streamlit run app.py
import streamlit as st
import torch
from PIL import Image
#from transformers import ViTForImageClassification, ViTImageProcessor
from transformers import AutoImageProcessor, AutoModelForImageClassification
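# A likely requirements.txt for this Space, inferred from the imports above
# (package names only; exact versions are not specified here):
#   streamlit
#   torch
#   transformers
#   Pillow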
# Load the pretrained face-expression model and its image processor
model_name = "trpakov/vit-face-expression"
image_processor = AutoImageProcessor.from_pretrained(model_name)
model = AutoModelForImageClassification.from_pretrained(model_name)
# Equivalent ViT-specific classes, kept for reference:
#model = ViTForImageClassification.from_pretrained(model_name)
#image_processor = ViTImageProcessor.from_pretrained(model_name)
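# Streamlit reruns this whole script on every interaction, so the model above is
# re-instantiated on each run. A sketch of a cached loader (assuming a Streamlit
# version that provides st.cache_resource, i.e. >= 1.18) could look like this:
#
# @st.cache_resource
# def load_model(name):
#     processor = AutoImageProcessor.from_pretrained(name)
#     mdl = AutoModelForImageClassification.from_pretrained(name)
#     return processor, mdl
#
# image_processor, model = load_model(model_name)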
# Streamlit app
st.title("Emotion Recognition with vit-face-expression")
# Slider example (starter-demo widget, not used for the prediction below)
x = st.slider('Select a value')
st.write(f"{x} squared is {x * x}")
# Upload image
uploaded_image = st.file_uploader("Upload an image", type=["jpg", "png"])

if uploaded_image:
    # Convert to RGB so grayscale/RGBA uploads don't break the processor
    image = Image.open(uploaded_image).convert("RGB")
    inputs = image_processor(images=image, return_tensors="pt")
    pixel_values = inputs.pixel_values

    # Predict emotion
    with torch.no_grad():
        outputs = model(pixel_values)
    predicted_class = torch.argmax(outputs.logits, dim=1).item()

    # Take the labels from the model config so they always match the model's
    # output order (a hard-coded list can silently mismatch)
    emotion_labels = [model.config.id2label[i] for i in range(len(model.config.id2label))]
    predicted_emotion = emotion_labels[predicted_class]

    st.image(image, caption=f"Predicted emotion: {predicted_emotion}", use_column_width=True)

    # Display raw logit scores for each category
    st.write("Emotion Scores:")
    for label, score in zip(emotion_labels, outputs.logits[0]):
        st.write(f"{label}: {score.item():.4f}")