# facefeature/app.py
import streamlit as st
from PIL import Image
from transformers import pipeline
import datetime
import pandas as pd
import matplotlib.pyplot as plt
# Disable PyplotGlobalUseWarning
st.set_option('deprecation.showPyplotGlobalUse', False)
# Create an image classification pipeline with scores
pipe = pipeline("image-classification", model="trpakov/vit-face-expression", top_k=None)
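# With top_k=None the pipeline returns, for each image, a list of dicts covering
# every class, sorted by descending score, e.g.:
#   [{'label': 'happy', 'score': 0.93}, {'label': 'neutral', 'score': 0.04}, ...]
# (the values above are illustrative; the exact label set comes from the model's config)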
# Streamlit app
st.title("Emotion Recognition with vit-face-expression")
# Upload images
uploaded_images = st.file_uploader("Upload images", type=["jpg", "png"], accept_multiple_files=True)
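# Each element of uploaded_images is a Streamlit UploadedFile: .name holds the original
# file name and .size the file size in bytes (both are used in the sidebar below).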
# Store selected file names
selected_file_names = []
# Display thumbnail images alongside file names and sizes in the sidebar
selected_images = []
if uploaded_images:
    # Add a "Select All" checkbox in the sidebar
    select_all = st.sidebar.checkbox("Select All", False)
    for idx, img in enumerate(uploaded_images):
        image = Image.open(img)
        checkbox_key = f"{img.name}_checkbox_{idx}"  # Unique key for each checkbox
        # Display the thumbnail image and file info in the sidebar
        st.sidebar.image(image, caption=f"{img.name} {img.size / 1024.0:.1f} KB", width=40)
        # If "Select All" is checked, all individual checkboxes default to selected
        selected = st.sidebar.checkbox(f"Select {img.name}", value=select_all, key=checkbox_key)
        if selected:
            selected_images.append(image)
            selected_file_names.append(img.name)
# Persist prediction results in session state so the "Generate DataFrame" button
# can reuse them after the rerun triggered by its click.
if "results" not in st.session_state:
    st.session_state.results = []
results = st.session_state.results
if st.button("Predict Emotions") and selected_images:
    emotions = []
    if len(selected_images) == 2:
        # Predict emotion for each selected image using the pipeline
        results = [pipe(image) for image in selected_images]
        # Display images and predicted emotions side by side
        col1, col2 = st.columns(2)
        for i in range(2):
            predicted_class = results[i][0]["label"]
            predicted_emotion = predicted_class.split("_")[-1].capitalize()
            emotions.append(predicted_emotion)
            col = col1 if i == 0 else col2
            col.image(selected_images[i], caption=f"Predicted emotion: {predicted_emotion}", use_column_width=True)
            col.write(f"Emotion Scores: {predicted_emotion}: {results[i][0]['score']:.4f}")
            # Use the index to get the corresponding filename
            col.write(f"Original File Name: {selected_file_names[i]}")
        # Display the labels and scores of all results
        st.write("Keys and Values of all results:")
        col1, col2 = st.columns(2)
        for i, result in enumerate(results):
            col = col1 if i == 0 else col2
            col.write(f"Keys and Values of results[{i}]:")
            for res in result:
                label = res["label"]
                score = res["score"]
                col.write(f"{label}: {score:.4f}")
    else:
        # Predict emotion for each selected image using the pipeline
        results = [pipe(image) for image in selected_images]
        # Display images and predicted emotions
        for i, (image, result) in enumerate(zip(selected_images, results)):
            predicted_class = result[0]["label"]
            predicted_emotion = predicted_class.split("_")[-1].capitalize()
            emotions.append(predicted_emotion)
            st.image(image, caption=f"Predicted emotion: {predicted_emotion}", use_column_width=True)
            st.write(f"Emotion Scores for Image #{i + 1}")
            st.write(f"{predicted_emotion}: {result[0]['score']:.4f}")
            # Use the index to get the corresponding filename
            st.write(f"Original File Name: {selected_file_names[i] if i < len(selected_file_names) else 'Unknown'}")
    # Calculate emotion statistics
    emotion_counts = pd.Series(emotions).value_counts()
    # Define a color map that matches the emotions to specific colors
    color_map = {
        'Neutral': '#B38B6D',   # Taupe
        'Happy': '#FFFF00',     # Yellow
        'Sad': '#0000FF',       # Blue
        'Angry': '#FF0000',     # Red
        'Disgust': '#008000',   # Green
        'Surprise': '#FFA500',  # Orange (bright)
        'Fear': '#000000'       # Black
        # Add more emotions and their corresponding colors here
    }
    # Calculate the total number of faces analyzed
    total_faces = len(selected_images)
    # Use the color map to assign colors to the pie chart (default to grey if an emotion is not found)
    pie_colors = [color_map.get(emotion, '#999999') for emotion in emotion_counts.index]
    # Plot pie chart with total faces in the title
    st.write("Emotion Distribution (Pie Chart):")
    fig_pie, ax_pie = plt.subplots()
    # Bold white percentage labels for readability on the coloured wedges
    ax_pie.pie(emotion_counts, labels=emotion_counts.index, autopct='%1.1f%%', startangle=140,
               colors=pie_colors, textprops={'color': 'white', 'weight': 'bold'})
    ax_pie.axis('equal')  # Equal aspect ratio ensures that the pie is drawn as a circle
    # Add total faces to the title
    ax_pie.set_title(f"Total Faces Analyzed: {total_faces}")
    st.pyplot(fig_pie)
    # Use the same color map for the bar chart (default to grey if an emotion is not found)
    bar_colors = [color_map.get(emotion, '#999999') for emotion in emotion_counts.index]
    # Plot bar chart with total faces in the title
    st.write("Emotion Distribution (Bar Chart):")
    fig_bar, ax_bar = plt.subplots()
    emotion_counts.plot(kind='bar', color=bar_colors, ax=ax_bar)
    ax_bar.set_xlabel('Emotion')
    ax_bar.set_ylabel('Count')
    # Add total faces to the title
    ax_bar.set_title(f"Emotion Distribution - Total Faces Analyzed: {total_faces}")
    ax_bar.yaxis.set_major_locator(plt.MaxNLocator(integer=True))  # Ensure integer ticks on the Y-axis
    # Display bar values as integers above each bar
    for patch in ax_bar.patches:
        ax_bar.text(patch.get_x() + patch.get_width() / 2, patch.get_height() + 0.1,
                    int(patch.get_height()), ha='center', va='bottom')
    st.pyplot(fig_bar)
    # Persist the predictions so the "Generate DataFrame" button can reuse them
    st.session_state.results = results
    # Confirm that the results list is populated
    if results:
        st.write("Results list is populated:", results)
    else:
        st.error("Results list is empty.")
# Generate DataFrame button
if st.button("Generate DataFrame") and selected_images:
    # Reuse the predictions stored during the last "Predict Emotions" run
    results = st.session_state.results
    # Create a list to store data for the DataFrame
    df_data = []
    # Iterate through the selected images to gather data
    for image, file_name, result in zip(selected_images, selected_file_names, results):
        # Extract image metadata
        size_kb = image.size[0] * image.size[1] / 1024.0  # Approximation from pixel count (width * height / 1024), not the file size on disk
        timestamp = datetime.datetime.now()  # Current timestamp
        color_type = "Color" if image.mode == 'RGB' else "Grayscale"
        # Extract predicted emotions and scores; keys are capitalized, e.g. 'Happy'
        emotion_scores = {res["label"].split("_")[-1].capitalize(): res["score"] for res in result}
        # Append image metadata and emotion scores to the list
        # (look up the capitalized keys produced above so the scores are actually found)
        df_data.append({
            "Neutral": f"{emotion_scores.get('Neutral', 0.0):.4f}",
            "Happy": f"{emotion_scores.get('Happy', 0.0):.4f}",
            "Sad": f"{emotion_scores.get('Sad', 0.0):.4f}",
            "Angry": f"{emotion_scores.get('Angry', 0.0):.4f}",
            "Disgust": f"{emotion_scores.get('Disgust', 0.0):.4f}",
            "Surprise": f"{emotion_scores.get('Surprise', 0.0):.4f}",
            "Fear": f"{emotion_scores.get('Fear', 0.0):.4f}",  # Included in case 'fear' is among the model's labels
            "File Name": file_name,
            "Size (KB)": size_kb,
            "Timestamp": timestamp.strftime('%Y-%m-%d %H:%M:%S'),  # Format timestamp
            "Color Type": color_type
        })
    # Create and display the DataFrame
    df = pd.DataFrame(df_data)
    st.write(df)
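# Run the app locally with: streamlit run app.py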