Update app.py
app.py
CHANGED
@@ -1,17 +1,16 @@
 import streamlit as st
 from PIL import Image
 from transformers import pipeline
-import datetime
 import pandas as pd
 import matplotlib.pyplot as plt
 
-# Disable PyplotGlobalUseWarning
-st.set_option('deprecation.showPyplotGlobalUse', False)
-
 # Initialize session state for results if not already present
 if 'results' not in st.session_state:
     st.session_state['results'] = []
 
+# Disable PyplotGlobalUseWarning
+st.set_option('deprecation.showPyplotGlobalUse', False)
+
 # Create an image classification pipeline with scores
 pipe = pipeline("image-classification", model="trpakov/vit-face-expression", top_k=None)
 
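Note on the pipeline call above: with top_k=None, the transformers image-classification pipeline returns a score for every label rather than only the top prediction, which is what the later display code relies on when it reads result[0]['label'] and result[0]['score']. A minimal sketch of that output shape (the input file name is illustrative, not from the app):

from PIL import Image
from transformers import pipeline

# Sketch only: the pipeline returns one {'label', 'score'} dict per emotion,
# sorted by score, e.g. [{'label': 'happy', 'score': 0.93}, {'label': 'neutral', 'score': 0.04}, ...]
pipe = pipeline("image-classification", model="trpakov/vit-face-expression", top_k=None)
scores = pipe(Image.open("face.jpg"))  # "face.jpg" is a hypothetical input image
print(f"{scores[0]['label']}: {scores[0]['score']:.4f}")  # highest-scoring emotion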
@@ -27,7 +26,6 @@ selected_file_names = []
 # Display thumbnail images alongside file names and sizes in the sidebar
 selected_images = []
 if uploaded_images:
-
     # Add a "Select All" checkbox in the sidebar
     select_all = st.sidebar.checkbox("Select All", False)
 
@@ -36,130 +34,24 @@ if uploaded_images:
         checkbox_key = f"{img.name}_checkbox_{idx}"  # Unique key for each checkbox
         # Display thumbnail image and checkbox in sidebar
         st.sidebar.image(image, caption=f"{img.name} {img.size / 1024.0:.1f} KB", width=40)
-        #selected = st.sidebar.checkbox(f"Select {img.name}", value=False, key=checkbox_key)
-        # If "Select All" is checked, all individual checkboxes are selected
         selected = st.sidebar.checkbox(f"Select {img.name}", value=select_all, key=checkbox_key)
 
-
         if selected:
             selected_images.append(image)
             selected_file_names.append(img.name)
 
-# Define results list to store prediction results
-#results = []
-
-
 if st.button("Predict Emotions") and selected_images:
-
-
-
-
-
-
-
-
-
-
-
-        predicted_emotion = predicted_class.split("_")[-1].capitalize()
-        emotions.append(predicted_emotion)
-        col = col1 if i == 0 else col2
-        col.image(selected_images[i], caption=f"Predicted emotion: {predicted_emotion}", use_column_width=True)
-        col.write(f"Emotion Scores: {predicted_emotion}: {results[i][0]['score']:.4f}")
-        # Use the index to get the corresponding filename
-        col.write(f"Original File Name: {selected_file_names[i]}")
-
-    # Display the keys and values of all results
-    st.write("Keys and Values of all results:")
-    col1, col2 = st.columns(2)
-    for i, result in enumerate(results):
-        col = col1 if i == 0 else col2
-        col.write(f"Keys and Values of results[{i}]:")
-        for res in result:
-            label = res["label"]
-            score = res["score"]
-            col.write(f"{label}: {score:.4f}")
-else:
-    # Predict emotion for each selected image using the pipeline
-    results = [pipe(image) for image in selected_images]
-
-    # Display images and predicted emotions
-    for i, (image, result) in enumerate(zip(selected_images, results)):
-        predicted_class = result[0]["label"]
-        predicted_emotion = predicted_class.split("_")[-1].capitalize()
-        emotions.append(predicted_emotion)
-        st.image(image, caption=f"Predicted emotion: {predicted_emotion}", use_column_width=True)
-        st.write(f"Emotion Scores for #{i+1} Image")
-        st.write(f"{predicted_emotion}: {result[0]['score']:.4f}")
-        # Use the index to get the corresponding filename
-        st.write(f"Original File Name: {selected_file_names[i] if i < len(selected_file_names) else 'Unknown'}")
-
-    # Calculate emotion statistics
-    emotion_counts = pd.Series(emotions).value_counts()
-
-    # Define a color map that matches the emotions to specific colors
-    color_map = {
-        'Neutral': '#B38B6D',  # Taupe
-        'Happy': '#FFFF00',  # Yellow
-        'Sad': '#0000FF',  # Blue
-        'Angry': '#FF0000',  # Red
-        'Disgust': '#008000',  # Green
-        'Surprise': '#FFA500',  # Orange (Bright)
-        'Fear': '#000000'  # Black
-        # Add more emotions and their corresponding colors here
-    }
-
-    # Calculate the total number of faces analyzed
-    total_faces = len(selected_images)
-
-    # Use the color map to assign colors to the pie chart
-    pie_colors = [color_map.get(emotion, '#999999') for emotion in emotion_counts.index]  # Default to grey if not found
-
-    # Plot pie chart with total faces in the title
-    st.write("Emotion Distribution (Pie Chart):")
-    fig_pie, ax_pie = plt.subplots()
-    #font color
-    ax_pie.pie(emotion_counts, labels=emotion_counts.index, autopct='%1.1f%%', startangle=140, colors=pie_colors, textprops={'color': 'white', 'weight': 'bold'})
-
-    ax_pie.pie(emotion_counts, labels=emotion_counts.index, autopct='%1.1f%%', startangle=140, colors=pie_colors)
-    ax_pie.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.
-    # Add total faces to the title
-    ax_pie.set_title(f"Total Faces Analyzed: {total_faces}")
-    st.pyplot(fig_pie)
-
-    # Use the same color map for the bar chart
-    bar_colors = [color_map.get(emotion, '#999999') for emotion in emotion_counts.index]  # Default to grey if not found
-
-    # Plot bar chart with total faces in the title
-    st.write("Emotion Distribution (Bar Chart):")
-    fig_bar, ax_bar = plt.subplots()
-    emotion_counts.plot(kind='bar', color=bar_colors, ax=ax_bar)
-    ax_bar.set_xlabel('Emotion')
-    ax_bar.set_ylabel('Count')
-    # Add total faces to the title
-    ax_bar.set_title(f"Emotion Distribution - Total Faces Analyzed: {total_faces}")
-    ax_bar.yaxis.set_major_locator(plt.MaxNLocator(integer=True))  # Ensure integer ticks on Y-axis
-    # Display bar values as integers
-    for i in ax_bar.patches:
-        ax_bar.text(i.get_x() + i.get_width() / 2, i.get_height() + 0.1, int(i.get_height()), ha='center', va='bottom')
-
-    st.pyplot(fig_bar)
-
-
-    # Debug statement to print the contents of the results list
-    if results:
-        st.write("Results list is populated:", results)
-    else:
-        st.error("Results list is empty.")
-
-
-
-
-    st.write("selected_images info:", selected_images)
-    st.write("selected_file_names info:", selected_file_names)
-    st.write("results info:", results)
-
-
+    # Predict emotion for each selected image using the pipeline
+    st.session_state['results'] = [pipe(image) for image in selected_images]
+    # Display images and predicted emotions
+    for i, (image, result) in enumerate(zip(selected_images, st.session_state['results'])):
+        predicted_class = result[0]["label"]
+        predicted_emotion = predicted_class.split("_")[-1].capitalize()
+        st.image(image, caption=f"Predicted emotion: {predicted_emotion}", use_column_width=True)
+        st.write(f"Emotion Scores for #{i+1} Image")
+        st.write(f"{predicted_emotion}: {result[0]['score']:.4f}")
+        # Use the index to get the corresponding filename
+        st.write(f"Original File Name: {selected_file_names[i] if i < len(selected_file_names) else 'Unknown'}")
 
 # Generate DataFrame from results
 if st.button("Generate DataFrame"):
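The main change in this hunk is that predictions now go into st.session_state['results'] instead of a local results list. Streamlit re-executes the whole script on every widget interaction, so a plain local list built under one button is gone by the time another button (such as "Generate DataFrame") is pressed; session state survives the rerun. A minimal, self-contained sketch of that pattern (button labels here are illustrative, not from the app):

import streamlit as st

# Initialize once; st.session_state persists across script reruns.
if 'results' not in st.session_state:
    st.session_state['results'] = []

if st.button("Compute"):
    # Runs only on the rerun triggered by this button...
    st.session_state['results'] = [n * n for n in range(5)]

if st.button("Show"):
    # ...but the stored values are still available on a later rerun.
    st.write(st.session_state['results'])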
@@ -175,8 +67,6 @@ if st.button("Generate DataFrame"):
         'Disgust': [],
         'Angry': [],
         # Add other emotions if necessary
-        'Sad': [],  # Add this if you have 'sad' scores in your results
-        'Fear': []  # Add this if you have 'fear' scores in your results
     }
 
     # Iterate over the results and populate the dictionary