import streamlit as st
from PIL import Image
from transformers import pipeline

# Create an image classification pipeline that returns scores for all classes
pipe = pipeline("image-classification", model="trpakov/vit-face-expression", top_k=None)

# Streamlit app
st.title("Emotion Recognition with vit-face-expression")

# Slider example
x = st.slider('Select a value')
st.write(f"{x} squared is {x * x}")

# Upload images
uploaded_images = st.file_uploader("Upload images", type=["jpg", "png"], accept_multiple_files=True)

# Display thumbnail images alongside file names and sizes in the sidebar
selected_images = []
selected_names = []  # Keep file names aligned with the selected images
if uploaded_images:
    for idx, img in enumerate(uploaded_images):
        image = Image.open(img)
        checkbox_key = f"{img.name}_checkbox_{idx}"  # Unique key for each checkbox
        # Display thumbnail image and checkbox in sidebar (img.size is the file size in bytes)
        st.sidebar.image(image, caption=f"{img.name} {img.size / 1024.0:.1f} KB", width=40)
        selected = st.sidebar.checkbox(f"Select {img.name}", value=False, key=checkbox_key)
        if selected:
            selected_images.append(image)
            selected_names.append(img.name)

if st.button("Predict Emotions") and selected_images:
    if len(selected_images) == 2:
        # Predict emotion for each selected image using the pipeline
        results = [pipe(image) for image in selected_images]

        # Display images and predicted emotions side by side
        col1, col2 = st.columns(2)
        for i in range(2):
            predicted_class = results[i][0]["label"]
            predicted_emotion = predicted_class.split("_")[-1].capitalize()
            col = col1 if i == 0 else col2
            col.image(selected_images[i], caption=f"Predicted emotion: {predicted_emotion}", use_column_width=True)
            col.write(f"Emotion Scores: {predicted_emotion}: {results[i][0]['score']:.4f}")
            # Use the index to get the corresponding file name of the selected image
            col.write(f"Original File Name: {selected_names[i]}")

        # Display the keys and values of all results
        st.write("Keys and Values of all results:")
        col1, col2 = st.columns(2)
        for i, result in enumerate(results):
            col = col1 if i == 0 else col2
            col.write(f"Keys and Values of results[{i}]:")
            for res in result:
                label = res["label"]
                score = res["score"]
                col.write(f"{label}: {score:.4f}")
    else:
        # Predict emotion for each selected image using the pipeline
        results = [pipe(image) for image in selected_images]

        # Display images and predicted emotions
        for i, (image, result) in enumerate(zip(selected_images, results)):
            predicted_class = result[0]["label"]
            predicted_emotion = predicted_class.split("_")[-1].capitalize()
            st.image(image, caption=f"Predicted emotion: {predicted_emotion}", use_column_width=True)
            st.write(f"Emotion Scores for #{i+1} Image")
            st.write(f"{predicted_emotion}: {result[0]['score']:.4f}")
            # Use the index to get the corresponding file name of the selected image
            st.write(f"Original File Name: {selected_names[i] if i < len(selected_names) else 'Unknown'}")
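
To try the app locally, save the script (for example as app.py, a hypothetical filename) with streamlit, transformers, torch, and Pillow installed, then launch it; the trpakov/vit-face-expression weights are downloaded from the Hugging Face Hub on first run:

streamlit run app.py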