import streamlit as st
from PIL import Image
from transformers import pipeline
import datetime
import pandas as pd
import matplotlib.pyplot as plt
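# Assumed dependencies: streamlit, transformers (with a torch backend), pillow,
# pandas, matplotlib. Typical launch (script name assumed): streamlit run app.py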

# Disable PyplotGlobalUseWarning
st.set_option('deprecation.showPyplotGlobalUse', False)
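# Note: this deprecation option has been removed in newer Streamlit releases;
# if set_option raises an error on your version, the line above can be dropped.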

# Create an image classification pipeline with scores
pipe = pipeline("image-classification", model="trpakov/vit-face-expression", top_k=None)
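# The pipeline is expected to return, per image, a list of dicts sorted by score,
# e.g. [{'label': 'happy', 'score': 0.93}, {'label': 'neutral', 'score': 0.04}, ...]
# (illustrative values); with top_k=None every emotion class is included.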

# Streamlit app
st.title("Emotion Recognition with vit-face-expression")

# Upload images
uploaded_images = st.file_uploader("Upload images", type=["jpg", "png"], accept_multiple_files=True)

# Store selected file names
selected_file_names = []

# Display thumbnail images alongside file names and sizes in the sidebar
selected_images = []
if uploaded_images:

    # Add a "Select All" checkbox in the sidebar
    select_all = st.sidebar.checkbox("Select All", False)
    
    for idx, img in enumerate(uploaded_images):
        image = Image.open(img)
        checkbox_key = f"{img.name}_checkbox_{idx}"  # Unique key for each checkbox
        # Display thumbnail image and checkbox in sidebar
        st.sidebar.image(image, caption=f"{img.name} {img.size / 1024.0:.1f} KB", width=40)
        #selected = st.sidebar.checkbox(f"Select {img.name}", value=False, key=checkbox_key)
        # If "Select All" is checked, all individual checkboxes are selected
        selected = st.sidebar.checkbox(f"Select {img.name}", value=select_all, key=checkbox_key)         
        
        
        if selected:
            selected_images.append(image)
            selected_file_names.append(img.name)

# Prediction results are kept in st.session_state so they survive Streamlit's
# rerun between the "Predict Emotions" and "Generate DataFrame" button clicks
results = st.session_state.get("results", [])


if st.button("Predict Emotions") and selected_images:
    emotions = []
    #results = []  # Define results list to store prediction results, add for DataFrame button

    if len(selected_images) == 2:
        # Predict emotion for each selected image using the pipeline
        results = [pipe(image) for image in selected_images]
        st.session_state["results"] = results  # Persist so "Generate DataFrame" can reuse them after a rerun

        # Display images and predicted emotions side by side
        col1, col2 = st.columns(2)
        for i in range(2):
            predicted_class = results[i][0]["label"]
            predicted_emotion = predicted_class.split("_")[-1].capitalize()
            emotions.append(predicted_emotion)
            col = col1 if i == 0 else col2
            col.image(selected_images[i], caption=f"Predicted emotion: {predicted_emotion}", use_column_width=True)
            col.write(f"Emotion Scores: {predicted_emotion}: {results[i][0]['score']:.4f}")
            # Use the index to get the corresponding filename
            col.write(f"Original File Name: {selected_file_names[i]}")  

        # Display the keys and values of all results
        st.write("Keys and Values of all results:")
        col1, col2 = st.columns(2)
        for i, result in enumerate(results):
            col = col1 if i == 0 else col2
            col.write(f"Keys and Values of results[{i}]:")
            for res in result:
                label = res["label"]
                score = res["score"]
                col.write(f"{label}: {score:.4f}")
    else:
        # Predict emotion for each selected image using the pipeline
        results = [pipe(image) for image in selected_images]
        st.session_state["results"] = results  # Persist so "Generate DataFrame" can reuse them after a rerun

        # Display images and predicted emotions
        for i, (image, result) in enumerate(zip(selected_images, results)):
            predicted_class = result[0]["label"]
            predicted_emotion = predicted_class.split("_")[-1].capitalize()
            emotions.append(predicted_emotion)
            st.image(image, caption=f"Predicted emotion: {predicted_emotion}", use_column_width=True)
            st.write(f"Emotion Scores for #{i+1} Image")
            st.write(f"{predicted_emotion}: {result[0]['score']:.4f}")
            # Use the index to get the corresponding filename
            st.write(f"Original File Name: {selected_file_names[i] if i < len(selected_file_names) else 'Unknown'}")

    # Calculate emotion statistics
    emotion_counts = pd.Series(emotions).value_counts()
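    # emotion_counts is a Series of counts indexed by emotion label,
    # e.g. Happy: 2, Neutral: 1 (illustrative values)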

    # Define a color map that matches the emotions to specific colors
    color_map = {
        'Neutral': '#B38B6D',  # Taupe
        'Happy': '#FFFF00',    # Yellow
        'Sad': '#0000FF',      # Blue
        'Angry': '#FF0000',    # Red
        'Disgust': '#008000',  # Green
        'Surprise': '#FFA500', # Orange (Bright)
        'Fear': '#000000'      # Black
        # Add more emotions and their corresponding colors here
    }

    # Calculate the total number of faces analyzed
    total_faces = len(selected_images)

    # Use the color map to assign colors to the pie chart
    pie_colors = [color_map.get(emotion, '#999999') for emotion in emotion_counts.index]  # Default to grey if not found

    # Plot pie chart with total faces in the title
    st.write("Emotion Distribution (Pie Chart):")
    fig_pie, ax_pie = plt.subplots()
    # White, bold wedge labels so they stay readable on the darker colors
    ax_pie.pie(emotion_counts, labels=emotion_counts.index, autopct='%1.1f%%', startangle=140, colors=pie_colors, textprops={'color': 'white', 'weight': 'bold'})
    ax_pie.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.
    # Add total faces to the title
    ax_pie.set_title(f"Total Faces Analyzed: {total_faces}")
    st.pyplot(fig_pie)

    # Use the same color map for the bar chart
    bar_colors = [color_map.get(emotion, '#999999') for emotion in emotion_counts.index]  # Default to grey if not found

    # Plot bar chart with total faces in the title
    st.write("Emotion Distribution (Bar Chart):")
    fig_bar, ax_bar = plt.subplots()
    emotion_counts.plot(kind='bar', color=bar_colors, ax=ax_bar)
    ax_bar.set_xlabel('Emotion')
    ax_bar.set_ylabel('Count')
    # Add total faces to the title
    ax_bar.set_title(f"Emotion Distribution - Total Faces Analyzed: {total_faces}")
    ax_bar.yaxis.set_major_locator(plt.MaxNLocator(integer=True))  # Ensure integer ticks on Y-axis
    # Display bar values as integers
    for i in ax_bar.patches:
        ax_bar.text(i.get_x() + i.get_width() / 2, i.get_height() + 0.1, int(i.get_height()), ha='center', va='bottom')

    st.pyplot(fig_bar)


# Debug statement to print the contents of the results list
if results:
    st.write("Results list is populated:", results)
else:
    st.error("Results list is empty.")




st.write("selected_images inner loop:", selected_images)
st.write("selected_file_names inner loop:", selected_file_names)
st.write("results inner loop:", results)
        

    
# Generate DataFrame button
if st.button("Generate DataFrame") and selected_images:
    # Create a list to store data for DataFrame
    df_data = []

    # Iterate through selected images to gather data
    for image, file_name, result in zip(selected_images, selected_file_names, results):
        # Extract image metadata

        #st.write("selected_images inner loop:", selected_images)
        #st.write("selected_file_names inner loop:", selected_file_names)
        #st.write("results inner loop:", results)
        
        size_kb = image.size[0] * image.size[1] / 1024.0  # Approximation from pixel area (width * height / 1024); not the original file size
        timestamp = datetime.datetime.now()  # Current timestamp
        color_type = "Color" if image.mode == 'RGB' else "Grayscale"
        
        # Extract predicted emotions and scores
        emotion_scores = {res["label"].split("_")[-1].capitalize(): res["score"] for res in result}

        # Append image metadata and emotion scores to the list
        df_data.append({
            "Neutral": f"{emotion_scores.get('neutral', 0.0):.4f}",
            "Happy": f"{emotion_scores.get('happy', 0.0):.4f}",
            "Sad": f"{emotion_scores.get('sad', 0.0):.4f}",
            "Angry": f"{emotion_scores.get('angry', 0.0):.4f}",
            "Disgust": f"{emotion_scores.get('disgust', 0.0):.4f}",
            "Surprise": f"{emotion_scores.get('surprise', 0.0):.4f}",
            "Fear": f"{emotion_scores.get('fear', 0.0):.4f}",  # Add this line if 'Fear' is a possible label
            "File Name": file_name,
            "Size (KB)": size_kb,
            "Timestamp": timestamp.strftime('%Y-%m-%d %H:%M:%S'),  # Format timestamp
            "Color Type": color_type
        })

    # Create DataFrame
    df = pd.DataFrame(df_data)

    # Display DataFrame
    st.write(df)