felix.wf committed · 5290f9e · 1 parent: a7df0e5
add video process

app.py CHANGED
@@ -4,6 +4,7 @@ import streamlit as st
 from PIL import Image
 import pandas as pd
 import numpy as np
+import cv2
 
 
 
@@ -15,108 +16,154 @@ pipe_emotions_refined = pipeline("image-classification", model="felixwf/fine_tun
 
 st.title("Online Teaching Effect Monitor")
 
-file_name = st.file_uploader("Upload
+file_name = st.file_uploader("Upload an image or a video")
 
 if file_name is not None:
-    [... old lines 21-122 removed; their content is not captured in this view ...]
+    if file_name.type.startswith('image'):
+        # Process image
+        face_image = Image.open(file_name)
+        st.image(face_image)
+        output = pipe_yolos(face_image)
+
+        data = output
+        # Keep only the items labeled "person"
+        persons = [item for item in data if item['label'] == 'person']
+
+        # Print the results
+        print(persons)
+        st.text(persons)
+
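+        # Each detection from the object-detection pipeline is expected to look
+        # like {'score': 0.99, 'label': 'person', 'box': {'xmin': ..., 'ymin': ...,
+        # 'xmax': ..., 'ymax': ...}}; that shape is what the "person" filter
+        # above and the cropping below rely on.
+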
+        # Take the original image and crop out each "person" region
+        original_image = face_image
+        persons_image_list = []
+
+        # Crop each "person" region and save it
+        for idx, person in enumerate(persons):
+            box = person['box']
+            cropped_image = original_image.crop((box['xmin'], box['ymin'], box['xmax'], box['ymax']))
+            cropped_image.save(f'person_{idx}.jpg')
+            cropped_image.show()
+            persons_image_list.append(cropped_image)
+
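+        # Note: cropped_image.show() opens an external viewer on the machine
+        # running the app, so it shows nothing on the Streamlit page and may
+        # fail on a headless server; st.image(cropped_image) would be the
+        # Streamlit-native alternative.
+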
+        # Calculate the number of rows needed for 3 columns
+        num_images = len(persons)
+        num_cols = 3
+        num_rows = (num_images + num_cols - 1) // num_cols  # Ceiling division
+
+        # Create a new canvas to stitch all person images in a grid with 3 columns
+        fig, axes = plt.subplots(num_rows, num_cols, figsize=(15, 5 * num_rows))
+
+        # Flatten the axes array for easy iteration
+        axes = axes.flatten()
+
+        # Crop each "person" part and plot it on the grid
+        for idx, person in enumerate(persons):
+            box = person['box']
+            cropped_image = original_image.crop((box['xmin'], box['ymin'], box['xmax'], box['ymax']))
+            axes[idx].imshow(cropped_image)
+            axes[idx].axis('off')
+            axes[idx].set_title(f'Person {idx}')
+
+        # Turn off any unused subplots
+        for ax in axes[num_images:]:
+            ax.axis('off')
+
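+        # Note: the grid loop above re-crops regions that are already stored in
+        # persons_image_list; enumerating that list directly would avoid the
+        # duplicated cropping work.
+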
+        # Recognize each person's facial emotion
+        output_list_emotions = []
+        output_list_emotions_refined = []
+
+        for idx, face in enumerate(persons_image_list):
+            print(f"processing {idx}")
+            output = pipe_emotions(face)
+            output_list_emotions.append(output[0])
+            output = pipe_emotions_refined(face)
+            output_list_emotions_refined.append(output[0])
+
+        print(output_list_emotions)
+        st.subheader("Emotions by model: dima806/facial_emotions_image_detection")
+        st.text(output_list_emotions)
+        print(output_list_emotions_refined)
+        st.subheader("Emotions by model: felixwf/fine_tuned_face_emotion_model")
+        st.text(output_list_emotions_refined)
+
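+        # Note: classification pipelines return labels sorted by score, so the
+        # lists above keep only the top-1 emotion for each detected person.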
+
+        # Count how many times each label occurs
+        label_counts = {}
+        for item in output_list_emotions:
+            label = item['label']
+            if label in label_counts:
+                label_counts[label] += 1
+            else:
+                label_counts[label] = 1
+        for item in output_list_emotions_refined:
+            label = item['label']
+            if label in label_counts:
+                label_counts[label] += 1
+            else:
+                label_counts[label] = 1
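+        # Note: this tally mixes both models' top-1 predictions into a single
+        # histogram; collections.Counter would express the same counting more
+        # compactly.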
+
+        # Draw a pie chart
+        labels = list(label_counts.keys())
+        sizes = list(label_counts.values())
+
+        pie_fig, ax = plt.subplots()
+        ax.pie(sizes, labels=labels, autopct='%1.1f%%', startangle=140)
+        ax.set_title('Distribution of Emotions')
+        ax.axis('equal')  # Ensure the pie chart is circular
+        # plt.show()
+        # Use Streamlit columns to display the images and pie chart side by side
+        col1, col2 = st.columns(2)
+
+        with col1:
+            st.pyplot(fig)  # Display the stitched person images
+
+        with col2:
+            st.pyplot(pie_fig)  # Display the pie chart
+
+    elif file_name.type.startswith('video'):
+        # Process video. cv2.VideoCapture needs a file path rather than a
+        # Streamlit UploadedFile, so persist the upload to a temporary file first.
+        import tempfile
+        temp_video = tempfile.NamedTemporaryFile(delete=False)
+        temp_video.write(file_name.read())
+        temp_video.close()
+        video = cv2.VideoCapture(temp_video.name)
+        frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
+        frame_rate = int(video.get(cv2.CAP_PROP_FPS))
+        frame_interval = max(frame_rate, 1)  # Process roughly one frame per second
+
+        frame_emotions = []
+
+        for frame_idx in range(0, frame_count, frame_interval):
+            video.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
+            ret, frame = video.read()
+            if not ret:
+                break
+
+            # Convert frame to PIL Image
+            frame_image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
+            output = pipe_yolos(frame_image)
+
+            data = output
+            persons = [item for item in data if item['label'] == 'person']
+            persons_image_list = []
+
+            for person in persons:
+                box = person['box']
+                cropped_image = frame_image.crop((box['xmin'], box['ymin'], box['xmax'], box['ymax']))
+                persons_image_list.append(cropped_image)
+
+            # Recognize emotions for each person in the frame
+            frame_emotion = []
+            for face in persons_image_list:
+                output = pipe_emotions(face)
+                frame_emotion.append(output[0]['label'])
+            frame_emotions.append(frame_emotion)
+
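+        # Note: seeking with CAP_PROP_POS_FRAMES (as in the loop above) can be
+        # slow or imprecise for some codecs; reading frames sequentially and
+        # skipping all but every frame_interval-th one is a common alternative.
+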
+        video.release()
+
+        # Plot the number of persons detected in each sampled frame
+        fig, ax = plt.subplots(figsize=(10, 5))
+        ax.plot(range(len(frame_emotions)), [len(emotions) for emotions in frame_emotions], label='Number of Persons Detected')
+        ax.set_xlabel('Frame')
+        ax.set_ylabel('Number of Persons')
+        ax.set_title('Number of Persons Detected Over Frames')
+        ax.legend()
+
+        st.pyplot(fig)
+
+    else:
+        st.error("Unsupported file type. Please upload an image or a video.")
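
Context not shown in this diff: the second hunk header only captures a truncated tail of the pipeline setup. Below is a minimal sketch of the definitions the new code relies on. The two emotion checkpoints are taken from the st.subheader strings in the diff; the object-detection checkpoint (hustvl/yolos-tiny) is only an assumption inferred from the name pipe_yolos, and matplotlib is presumed imported in the unshown header since the new code uses plt:

    import streamlit as st
    import matplotlib.pyplot as plt
    from transformers import pipeline

    # Assumed checkpoint, inferred from the name "pipe_yolos";
    # the real one is not visible in this diff.
    pipe_yolos = pipeline("object-detection", model="hustvl/yolos-tiny")

    # Emotion classifiers, as named in the st.subheader calls above.
    pipe_emotions = pipeline("image-classification", model="dima806/facial_emotions_image_detection")
    pipe_emotions_refined = pipeline("image-classification", model="felixwf/fine_tuned_face_emotion_model")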