File size: 3,371 Bytes
68ebbad
 
b2639c4
cfa8b24
3dbcc1d
da3ed73
68ebbad
 
 
 
 
 
 
 
b0b6588
 
 
d766871
 
b84fe3f
d766871
34951bf
 
 
 
b84fe3f
d7dd53f
 
 
b84fe3f
 
34951bf
b84fe3f
d5af778
 
a24e27f
 
 
 
 
 
 
 
 
 
 
 
 
 
b800ba6
a24e27f
 
 
 
 
fb4f062
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b800ba6
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
import streamlit as st
from PIL import Image
from transformers import pipeline

# Create an image classification pipeline with scores
# top_k=None makes the pipeline return a {"label", "score"} dict for EVERY
# emotion class (sorted by descending score) instead of only the top class.
# NOTE(review): this runs at module level, so the model is (re)loaded on
# every Streamlit rerun unless cached — consider @st.cache_resource
# (confirm the deployed Streamlit version supports it).
pipe = pipeline("image-classification", model="trpakov/vit-face-expression", top_k=None)

# --- Page header and basic widgets ---
st.title("Emotion Recognition with vit-face-expression")

# Simple demo slider: echo the chosen value and its square.
slider_value = st.slider('Select a value')
st.write(f"{slider_value} squared is {slider_value * slider_value}")

# Let the user upload any number of JPG/PNG images at once.
# Returns None until the first upload, then a list of UploadedFile objects.
uploaded_images = st.file_uploader("Upload images", type=["jpg", "png"], accept_multiple_files=True)

# Show a thumbnail with the file name and size (KB) for each upload in the
# sidebar. Two fixes versus the original:
#  * st.sidebar.image() is not a widget and does not accept a `key`
#    keyword — passing one raised TypeError.
#  * The checkbox created here reused the exact widget key of the checkbox
#    created again in the selection loop below, which makes Streamlit raise
#    DuplicateWidgetID; the selection loop is the single owner of the
#    checkboxes now.
if uploaded_images:
    for img in uploaded_images:
        thumbnail = Image.open(img)
        # UploadedFile.size is the raw upload size in bytes.
        st.sidebar.image(
            thumbnail,
            caption=f"{img.name} {img.size / 1024.0:.1f} KB",
            width=40,
        )

# Collect the images the user ticked in the sidebar.
# Fixes versus the original:
#  * `uploaded_images or []` — st.file_uploader returns None before any
#    upload, and iterating None raised TypeError.
#  * Each selected image keeps its own file name in `selected_names`; the
#    original displayed `uploaded_images[i].name` using the position inside
#    `selected_images`, which showed the wrong name whenever the selection
#    was not a prefix of the upload list.
selected_images = []  # PIL images, in selection order
selected_names = []   # original file names, parallel to selected_images
for idx, img in enumerate(uploaded_images or []):
    # Key combines name and position so duplicate file names stay unique.
    checkbox_key = f"{img.name}_checkbox_{idx}"
    if st.sidebar.checkbox(f"Select {img.name}", value=False, key=checkbox_key):
        selected_images.append(Image.open(img))
        selected_names.append(img.name)

if st.button("Predict Emotions") and selected_images:
    # Classify every selected image once (the original duplicated this call
    # in both branches). Each result is a list of {"label", "score"} dicts
    # sorted by descending score because the pipeline uses top_k=None.
    results = [pipe(image) for image in selected_images]

    if len(selected_images) == 2:
        # Exactly two selections: render them side by side.
        col1, col2 = st.columns(2)
        for i in range(2):
            predicted_class = results[i][0]["label"]
            predicted_emotion = predicted_class.split("_")[-1].capitalize()
            col = col1 if i == 0 else col2
            col.image(selected_images[i], caption=f"Predicted emotion: {predicted_emotion}", use_column_width=True)
            col.write(f"Emotion Scores: {predicted_emotion}: {results[i][0]['score']:.4f}")
            col.write(f"Original File Name: {selected_names[i]}")  # Display original file name

        # Dump the full label/score distribution for both images.
        st.write("Keys and Values of all results:")
        col1, col2 = st.columns(2)
        for i, result in enumerate(results):
            col = col1 if i == 0 else col2
            col.write(f"Keys and Values of results[{i}]:")
            for res in result:
                col.write(f"{res['label']}: {res['score']:.4f}")
    else:
        # Any other count: stack each image with its top prediction.
        for i, result in enumerate(results):
            predicted_class = result[0]["label"]
            predicted_emotion = predicted_class.split("_")[-1].capitalize()
            st.image(selected_images[i], caption=f"Predicted emotion: {predicted_emotion}", use_column_width=True)
            st.write(f"Emotion Scores for #{i+1} Image")
            st.write(f"{predicted_emotion}: {result[0]['score']:.4f}")
            st.write(f"Original File Name: {selected_names[i]}")  # Display original file name