Update app.py
app.py CHANGED
@@ -7,15 +7,13 @@ import mediapipe as mp
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.collections import LineCollection
import os

-# --- MediaPipe Initialization
+# --- MediaPipe Initialization ---
mp_face_mesh = mp.solutions.face_mesh
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles

-# Create Face Mesh instance globally (or manage creation/closing if resource intensive)
-# Using try-except block for safer initialization if needed in complex setups
try:
    face_mesh = mp_face_mesh.FaceMesh(
        max_num_faces=1,
@@ -24,323 +22,219 @@ try:
        min_tracking_confidence=0.5)
except Exception as e:
    print(f"Error initializing MediaPipe Face Mesh: {e}")
    face_mesh = None

-# --- Metrics Definition
+# --- Metrics Definition ---
metrics = [
    "valence", "arousal", "dominance", "cognitive_load",
    "emotional_stability", "openness", "agreeableness",
    "neuroticism", "conscientiousness", "extraversion",
    "stress_index", "engagement_level"
]
-# Initial DataFrame structure for the state
initial_metrics_df = pd.DataFrame(columns=['timestamp'] + metrics)

-# --- Analysis Functions (Keep exactly as you provided) ---
-# Ensure these functions handle None input for landmarks gracefully
+# --- Analysis Functions (Keep exactly as before) ---
def extract_face_landmarks(image, face_mesh_instance):
-    if image is None or face_mesh_instance is None:
-        return None
-    # Process the image
+    if image is None or face_mesh_instance is None: return None
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image_rgb.flags.writeable = False
    results = face_mesh_instance.process(image_rgb)
    image_rgb.flags.writeable = True
-
-    if results.multi_face_landmarks:
-        return results.multi_face_landmarks[0]
+    if results.multi_face_landmarks: return results.multi_face_landmarks[0]
    return None
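Editor's aside, not part of the commit: the writeable toggle in extract_face_landmarks is a standard MediaPipe performance hint — marking the RGB array read-only allows process() to work on the frame by reference instead of copying it. A minimal sketch of the same pattern in isolation (the dummy frame is illustrative):

```python
import cv2
import numpy as np

frame_bgr = np.zeros((480, 640, 3), dtype=np.uint8)     # stand-in for a webcam frame
frame_rgb = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB)  # MediaPipe expects RGB
frame_rgb.flags.writeable = False   # read-only hint lets MediaPipe skip a copy
# results = face_mesh.process(frame_rgb)  # as in extract_face_landmarks above
frame_rgb.flags.writeable = True
```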

def calculate_ear(landmarks):
    if not landmarks: return 0.0
-    LEFT_EYE = [33, 160, 158, 133, 153, 144]
-    RIGHT_EYE = [362, 385, 387, 263, 373, 380]
-    def get_landmark_coords(landmark_indices):
-        return np.array([(landmarks.landmark[idx].x, landmarks.landmark[idx].y) for idx in landmark_indices])
-    left_eye_points = get_landmark_coords(LEFT_EYE)
-    right_eye_points = get_landmark_coords(RIGHT_EYE)
+    LEFT_EYE = [33, 160, 158, 133, 153, 144]; RIGHT_EYE = [362, 385, 387, 263, 373, 380]
+    def get_landmark_coords(landmark_indices): return np.array([(landmarks.landmark[idx].x, landmarks.landmark[idx].y) for idx in landmark_indices])
+    left_eye_points = get_landmark_coords(LEFT_EYE); right_eye_points = get_landmark_coords(RIGHT_EYE)
    def eye_aspect_ratio(eye_points):
-        v1 = np.linalg.norm(eye_points[1] - eye_points[5])
-        v2 = np.linalg.norm(eye_points[2] - eye_points[4])
-        h = np.linalg.norm(eye_points[0] - eye_points[3])
-        return (v1 + v2) / (2.0 * h) if h > 0 else 0.0
-    left_ear = eye_aspect_ratio(left_eye_points)
-    right_ear = eye_aspect_ratio(right_eye_points)
+        v1 = np.linalg.norm(eye_points[1] - eye_points[5]); v2 = np.linalg.norm(eye_points[2] - eye_points[4])
+        h = np.linalg.norm(eye_points[0] - eye_points[3]); return (v1 + v2) / (2.0 * h) if h > 0 else 0.0
+    left_ear = eye_aspect_ratio(left_eye_points); right_ear = eye_aspect_ratio(right_eye_points)
    return (left_ear + right_ear) / 2.0
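For intuition: the eye aspect ratio sums the two vertical eyelid distances and divides by twice the horizontal eye width, so it falls toward zero as the eye closes. A worked sketch on synthetic points (made-up coordinates, same six-point ordering as calculate_ear above):

```python
import numpy as np

def eye_aspect_ratio(eye_points):
    v1 = np.linalg.norm(eye_points[1] - eye_points[5])
    v2 = np.linalg.norm(eye_points[2] - eye_points[4])
    h = np.linalg.norm(eye_points[0] - eye_points[3])
    return (v1 + v2) / (2.0 * h) if h > 0 else 0.0

open_eye = np.array([[0.0, 0.0], [0.3, 0.12], [0.6, 0.12], [1.0, 0.0], [0.6, -0.12], [0.3, -0.12]])
closed_eye = np.array([[0.0, 0.0], [0.3, 0.01], [0.6, 0.01], [1.0, 0.0], [0.6, -0.01], [0.3, -0.01]])
print(eye_aspect_ratio(open_eye))    # 0.24 -- eye open
print(eye_aspect_ratio(closed_eye))  # 0.02 -- eye nearly shut
```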

def calculate_mar(landmarks):
    if not landmarks: return 0.0
    MOUTH_OUTLINE = [61, 291, 39, 181, 0, 17, 269, 405]
    mouth_points = np.array([(landmarks.landmark[idx].x, landmarks.landmark[idx].y) for idx in MOUTH_OUTLINE])
-    height = np.mean([
-        np.linalg.norm(mouth_points[1] - mouth_points[5]),
-        np.linalg.norm(mouth_points[2] - mouth_points[6]),
-        np.linalg.norm(mouth_points[3] - mouth_points[7])
-    ])
-    width = np.linalg.norm(mouth_points[0] - mouth_points[4])
-    return height / width if width > 0 else 0.0
+    height = np.mean([np.linalg.norm(mouth_points[1] - mouth_points[5]), np.linalg.norm(mouth_points[2] - mouth_points[6]), np.linalg.norm(mouth_points[3] - mouth_points[7])])
+    width = np.linalg.norm(mouth_points[0] - mouth_points[4]); return height / width if width > 0 else 0.0
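calculate_mar applies the same idea to the mouth: outline points 1-3 pair with 5-7 as top/bottom samples, and the distance between points 0 and 4 serves as the width. A worked sketch with synthetic coordinates (illustrative only):

```python
import numpy as np

# Synthetic outline: points 0 and 4 span the mouth; 1-3 (top) pair with 5-7 (bottom).
mouth_points = np.array([[0.0, 0.0], [0.2, 0.1], [0.5, 0.12], [0.8, 0.1],
                         [1.0, 0.0], [0.2, -0.1], [0.5, -0.12], [0.8, -0.1]])
height = np.mean([np.linalg.norm(mouth_points[1] - mouth_points[5]),
                  np.linalg.norm(mouth_points[2] - mouth_points[6]),
                  np.linalg.norm(mouth_points[3] - mouth_points[7])])
width = np.linalg.norm(mouth_points[0] - mouth_points[4])
print(height / width)  # ~0.21 for this open-mouth sketch
```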

def calculate_eyebrow_position(landmarks):
    if not landmarks: return 0.0
-    LEFT_EYEBROW = 107; RIGHT_EYEBROW = 336
-    LEFT_EYE = 159; RIGHT_EYE = 386
-    left_eyebrow_y = landmarks.landmark[LEFT_EYEBROW].y
-    right_eyebrow_y = landmarks.landmark[RIGHT_EYEBROW].y
-    left_eye_y = landmarks.landmark[LEFT_EYE].y
-    right_eye_y = landmarks.landmark[RIGHT_EYE].y
-    left_distance = left_eye_y - left_eyebrow_y
-    right_distance = right_eye_y - right_eyebrow_y
-    avg_distance = (left_distance + right_distance) / 2.0
-    normalized = (avg_distance - 0.02) / 0.06 # Approximate normalization
+    LEFT_EYEBROW = 107; RIGHT_EYEBROW = 336; LEFT_EYE = 159; RIGHT_EYE = 386
+    left_eyebrow_y = landmarks.landmark[LEFT_EYEBROW].y; right_eyebrow_y = landmarks.landmark[RIGHT_EYEBROW].y
+    left_eye_y = landmarks.landmark[LEFT_EYE].y; right_eye_y = landmarks.landmark[RIGHT_EYE].y
+    left_distance = left_eye_y - left_eyebrow_y; right_distance = right_eye_y - right_eyebrow_y
+    avg_distance = (left_distance + right_distance) / 2.0; normalized = (avg_distance - 0.02) / 0.06
    return max(0.0, min(1.0, normalized))
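The constants in the normalization are the author's approximation: they map an eye-to-eyebrow gap of roughly 0.02-0.08 (in MediaPipe's normalized image coordinates) onto [0, 1]. Worked arithmetic:

```python
# (avg_distance - 0.02) / 0.06 maps 0.02 -> 0.0 and 0.08 -> 1.0
avg_distance = 0.05
normalized = (avg_distance - 0.02) / 0.06
print(max(0.0, min(1.0, normalized)))  # ~0.5
```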

def estimate_head_pose(landmarks):
    if not landmarks: return 0.0, 0.0
    NOSE_TIP = 4; LEFT_EYE = 159; RIGHT_EYE = 386
    nose = np.array([landmarks.landmark[NOSE_TIP].x, landmarks.landmark[NOSE_TIP].y, landmarks.landmark[NOSE_TIP].z])
    left_eye = np.array([landmarks.landmark[LEFT_EYE].x, landmarks.landmark[LEFT_EYE].y, landmarks.landmark[LEFT_EYE].z])
    right_eye = np.array([landmarks.landmark[RIGHT_EYE].x, landmarks.landmark[RIGHT_EYE].y, landmarks.landmark[RIGHT_EYE].z])
-    eye_level = (left_eye[1] + right_eye[1]) / 2.0
-    vertical_tilt = nose[1] - eye_level
-    horizontal_mid = (left_eye[0] + right_eye[0]) / 2.0
-    horizontal_tilt = nose[0] - horizontal_mid
-    vertical_tilt = max(-1.0, min(1.0, vertical_tilt * 10)) # Normalize approx
-    horizontal_tilt = max(-1.0, min(1.0, horizontal_tilt * 10)) # Normalize approx
+    eye_level = (left_eye[1] + right_eye[1]) / 2.0; vertical_tilt = nose[1] - eye_level
+    horizontal_mid = (left_eye[0] + right_eye[0]) / 2.0; horizontal_tilt = nose[0] - horizontal_mid
+    vertical_tilt = max(-1.0, min(1.0, vertical_tilt * 10)); horizontal_tilt = max(-1.0, min(1.0, horizontal_tilt * 10))
    return vertical_tilt, horizontal_tilt
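estimate_head_pose reduces pose to two clamped offsets of the nose tip: below the eye line (vertical) and off the eye mid-point (horizontal), each scaled by 10 and clamped to [-1, 1]. A sketch with made-up coordinates:

```python
import numpy as np

left_eye  = np.array([0.40, 0.45, 0.0])  # normalized image coordinates
right_eye = np.array([0.60, 0.45, 0.0])
nose      = np.array([0.53, 0.55, 0.0])  # slightly right of centre, below the eye line

eye_level = (left_eye[1] + right_eye[1]) / 2.0                          # 0.45
vertical_tilt = max(-1.0, min(1.0, (nose[1] - eye_level) * 10))         # 1.0 (clamped)
horizontal_mid = (left_eye[0] + right_eye[0]) / 2.0                     # 0.50
horizontal_tilt = max(-1.0, min(1.0, (nose[0] - horizontal_mid) * 10))  # ~0.3
print(vertical_tilt, horizontal_tilt)
```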

def calculate_metrics(landmarks):
-    if not landmarks:
-        return {metric: 0.5 for metric in metrics}
-    ear = calculate_ear(landmarks)
-    mar = calculate_mar(landmarks)
-    eyebrow_position = calculate_eyebrow_position(landmarks)
-    vertical_tilt, horizontal_tilt = estimate_head_pose(landmarks)
-    cognitive_load = max(0, min(1, 1.0 - ear * 2.5))
-    valence = max(0, min(1, mar * 2.0 * (1.0 - eyebrow_position)))
-    arousal = max(0, min(1, (mar + (1.0 - ear) + eyebrow_position) / 3.0))
-    dominance = max(0, min(1, 0.5 + vertical_tilt))
-    neuroticism = max(0, min(1, (cognitive_load * 0.6) + ((1.0 - valence) * 0.4)))
-    emotional_stability = 1.0 - neuroticism
-    extraversion = max(0, min(1, (arousal * 0.5) + (valence * 0.5)))
-    openness = max(0, min(1, 0.5 + ((mar - 0.5) * 0.5)))
+    if not landmarks: return {metric: 0.5 for metric in metrics}
+    ear = calculate_ear(landmarks); mar = calculate_mar(landmarks)
+    eyebrow_position = calculate_eyebrow_position(landmarks); vertical_tilt, horizontal_tilt = estimate_head_pose(landmarks)
+    cognitive_load = max(0, min(1, 1.0 - ear * 2.5)); valence = max(0, min(1, mar * 2.0 * (1.0 - eyebrow_position)))
+    arousal = max(0, min(1, (mar + (1.0 - ear) + eyebrow_position) / 3.0)); dominance = max(0, min(1, 0.5 + vertical_tilt))
+    neuroticism = max(0, min(1, (cognitive_load * 0.6) + ((1.0 - valence) * 0.4))); emotional_stability = 1.0 - neuroticism
+    extraversion = max(0, min(1, (arousal * 0.5) + (valence * 0.5))); openness = max(0, min(1, 0.5 + ((mar - 0.5) * 0.5)))
    agreeableness = max(0, min(1, (valence * 0.7) + ((1.0 - arousal) * 0.3)))
    conscientiousness = max(0, min(1, (1.0 - abs(arousal - 0.5)) * 0.7 + (emotional_stability * 0.3)))
    stress_index = max(0, min(1, (cognitive_load * 0.5) + (eyebrow_position * 0.3) + ((1.0 - valence) * 0.2)))
    engagement_level = max(0, min(1, (arousal * 0.7) + ((1.0 - abs(horizontal_tilt)) * 0.3)))
-    return {
-        'valence': valence, 'arousal': arousal, 'dominance': dominance,
-        'cognitive_load': cognitive_load, 'emotional_stability': emotional_stability,
-        'openness': openness, 'agreeableness': agreeableness, 'neuroticism': neuroticism,
-        'conscientiousness': conscientiousness, 'extraversion': extraversion,
-        'stress_index': stress_index, 'engagement_level': engagement_level
-    }
+    return {'valence': valence, 'arousal': arousal, 'dominance': dominance, 'cognitive_load': cognitive_load, 'emotional_stability': emotional_stability,
+            'openness': openness, 'agreeableness': agreeableness, 'neuroticism': neuroticism, 'conscientiousness': conscientiousness, 'extraversion': extraversion,
+            'stress_index': stress_index, 'engagement_level': engagement_level}
-# --- Visualization Function
+# --- Visualization Function ---
def update_metrics_visualization(metrics_values):
-    # Create a blank figure if no metrics are available
    if not metrics_values:
-        fig, ax = plt.subplots(figsize=(10, 8))
-        ax.text(0.5, 0.5, "Waiting for analysis...", ha='center', va='center')
-        ax.axis('off')
-        fig.patch.set_facecolor('#FFFFFF')
-        ax.set_facecolor('#FFFFFF')
-        return fig
-
-    # Calculate grid size
-    num_metrics = len([k for k in metrics_values if k != 'timestamp'])
-    nrows = (num_metrics + 2) // 3
-    fig, axs = plt.subplots(nrows, 3, figsize=(10, nrows * 2.5), facecolor='#FFFFFF') # Match background
-    axs = axs.flatten()
-
-    # Colormap and normalization
-    colors = [(0.1, 0.1, 0.9), (0.9, 0.9, 0.1), (0.9, 0.1, 0.1)] # Blue to Yellow to Red
-    cmap = LinearSegmentedColormap.from_list("custom_cmap", colors, N=100)
-    norm = plt.Normalize(0, 1)
-
-    metric_idx = 0
+        fig, ax = plt.subplots(figsize=(10, 8)); ax.text(0.5, 0.5, "Waiting for analysis...", ha='center', va='center')
+        ax.axis('off'); fig.patch.set_facecolor('#FFFFFF'); ax.set_facecolor('#FFFFFF'); return fig
+    num_metrics = len([k for k in metrics_values if k != 'timestamp']); nrows = (num_metrics + 2) // 3
+    fig, axs = plt.subplots(nrows, 3, figsize=(10, nrows * 2.5), facecolor='#FFFFFF'); axs = axs.flatten()
+    colors = [(0.1, 0.1, 0.9), (0.9, 0.9, 0.1), (0.9, 0.1, 0.1)]; cmap = LinearSegmentedColormap.from_list("custom_cmap", colors, N=100)
+    norm = plt.Normalize(0, 1); metric_idx = 0
    for key, value in metrics_values.items():
        if key == 'timestamp': continue
-        ax = axs[metric_idx]
-        ax.set_title(key.replace('_', ' ').title(), fontsize=10)
-        ax.set_xlim(0, 1); ax.set_ylim(0, 0.5)
-        ax.set_aspect('equal'); ax.axis('off'); ax.set_facecolor('#FFFFFF')
-        r = 0.4 # radius
-        theta = np.linspace(np.pi, 0, 100) # Flipped for gauge direction
-        x_bg = 0.5 + r * np.cos(theta); y_bg = 0.1 + r * np.sin(theta)
-        ax.plot(x_bg, y_bg, 'k-', linewidth=3, alpha=0.2) # Background arc
-
-        # Value arc calculation
-        value_angle = np.pi * (1 - value) # Map value [0,1] to angle [pi, 0]
-        # Ensure there are at least 2 points for the line segment, even for value=0
-        num_points = max(2, int(100 * value))
-        value_theta = np.linspace(np.pi, value_angle, num_points)
+        ax = axs[metric_idx]; ax.set_title(key.replace('_', ' ').title(), fontsize=10)
+        ax.set_xlim(0, 1); ax.set_ylim(0, 0.5); ax.set_aspect('equal'); ax.axis('off'); ax.set_facecolor('#FFFFFF')
+        r = 0.4; theta = np.linspace(np.pi, 0, 100); x_bg = 0.5 + r * np.cos(theta); y_bg = 0.1 + r * np.sin(theta)
+        ax.plot(x_bg, y_bg, 'k-', linewidth=3, alpha=0.2)
+        value_angle = np.pi * (1 - value); num_points = max(2, int(100 * value)); value_theta = np.linspace(np.pi, value_angle, num_points)
        x_val = 0.5 + r * np.cos(value_theta); y_val = 0.1 + r * np.sin(value_theta)
-
-        # Create line segments for coloring if there are points to draw
        if len(x_val) > 1:
-            points = np.array([x_val, y_val]).T.reshape(-1, 1, 2)
-            segments = np.concatenate([points[:-1], points[1:]], axis=1)
-            segment_values = np.linspace(0, value, len(segments))
-            lc = LineCollection(segments, cmap=cmap, norm=norm)
-            lc.set_array(segment_values); lc.set_linewidth(5)
-            ax.add_collection(lc)
-
-        # Add value text
-        ax.text(0.5, 0.15, f"{value:.2f}", ha='center', va='center', fontsize=11,
-                fontweight='bold', bbox=dict(facecolor='white', alpha=0.7, boxstyle='round,pad=0.2'))
+            points = np.array([x_val, y_val]).T.reshape(-1, 1, 2); segments = np.concatenate([points[:-1], points[1:]], axis=1)
+            segment_values = np.linspace(0, value, len(segments)); lc = LineCollection(segments, cmap=cmap, norm=norm)
+            lc.set_array(segment_values); lc.set_linewidth(5); ax.add_collection(lc)
+        ax.text(0.5, 0.15, f"{value:.2f}", ha='center', va='center', fontsize=11, fontweight='bold', bbox=dict(facecolor='white', alpha=0.7, boxstyle='round,pad=0.2'))
        metric_idx += 1
-
-    for i in range(metric_idx, len(axs)):
-        axs[i].axis('off')
-
-    plt.tight_layout(pad=0.5)
-    return fig
+    for i in range(metric_idx, len(axs)): axs[i].axis('off')
+    plt.tight_layout(pad=0.5); return fig

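Since update_metrics_visualization only needs a dict of values, the gauge grid can be previewed without the webcam loop. A quick sketch (the output filename is arbitrary):

```python
# Assumes the definitions above (metrics, update_metrics_visualization) are in scope.
dummy_metrics = {m: 0.5 for m in metrics}
fig = update_metrics_visualization(dummy_metrics)
fig.savefig("gauges_preview.png", dpi=100)  # hypothetical output path
```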
# --- Gradio Processing Function ---
app_start_time = time.time()

def process_frame(
-    frame,
-    analysis_freq,
-    analyze_flag,
-    # --- State variables ---
-    metrics_data_state,
-    last_analysis_time_state,
-    latest_metrics_state,
-    latest_landmarks_state
+    frame, analysis_freq, analyze_flag,
+    metrics_data_state, last_analysis_time_state, latest_metrics_state, latest_landmarks_state
):
+    # (This function remains the same as the previous working version)
    if frame is None:
-        # Return default/empty outputs if no frame
        default_plot = update_metrics_visualization(latest_metrics_state)
        return frame, default_plot, metrics_data_state, \
               metrics_data_state, last_analysis_time_state, \
               latest_metrics_state, latest_landmarks_state

-    annotated_frame = frame.copy()
-    current_time = time.time()
-    perform_analysis = False
-    current_landmarks = None # Landmarks detected in *this* frame run
+    annotated_frame = frame.copy(); current_time = time.time()
+    perform_analysis = False; current_landmarks = None

-    # --- Decide whether to perform analysis ---
    if analyze_flag and face_mesh and (current_time - last_analysis_time_state >= analysis_freq):
-        perform_analysis = True
-        last_analysis_time_state = current_time # Update time immediately
+        perform_analysis = True; last_analysis_time_state = current_time

-    # --- Perform Analysis (if flag is set and frequency met) ---
    if perform_analysis:
        current_landmarks = extract_face_landmarks(frame, face_mesh)
        calculated_metrics = calculate_metrics(current_landmarks)
-        # Update state variables
-        latest_landmarks_state = current_landmarks # Store landmarks from this run
-        latest_metrics_state = calculated_metrics
-        # Log data only if a face was detected
+        latest_landmarks_state = current_landmarks; latest_metrics_state = calculated_metrics
        if current_landmarks:
-            elapsed_time = current_time - app_start_time
-            new_row = {'timestamp': elapsed_time, **calculated_metrics}
+            elapsed_time = current_time - app_start_time; new_row = {'timestamp': elapsed_time, **calculated_metrics}
            new_row_df = pd.DataFrame([new_row])
+            if not isinstance(metrics_data_state, pd.DataFrame): metrics_data_state = initial_metrics_df.copy()
            metrics_data_state = pd.concat([metrics_data_state, new_row_df], ignore_index=True)

-    # --- Drawing ---
-    # Always try to draw the latest known landmarks stored in state
    landmarks_to_draw = latest_landmarks_state
    if landmarks_to_draw:
-        mp_drawing.draw_landmarks(
-            image=annotated_frame,
-            landmark_list=landmarks_to_draw,
-            connections=mp_face_mesh.FACEMESH_TESSELATION,
-            landmark_drawing_spec=None,
-            connection_drawing_spec=mp_drawing_styles.get_default_face_mesh_tesselation_style())
-        mp_drawing.draw_landmarks(
-            image=annotated_frame,
-            landmark_list=landmarks_to_draw,
-            connections=mp_face_mesh.FACEMESH_CONTOURS,
-            landmark_drawing_spec=None,
-            connection_drawing_spec=mp_drawing_styles.get_default_face_mesh_contours_style())
+        mp_drawing.draw_landmarks(image=annotated_frame, landmark_list=landmarks_to_draw, connections=mp_face_mesh.FACEMESH_TESSELATION, landmark_drawing_spec=None, connection_drawing_spec=mp_drawing_styles.get_default_face_mesh_tesselation_style())
+        mp_drawing.draw_landmarks(image=annotated_frame, landmark_list=landmarks_to_draw, connections=mp_face_mesh.FACEMESH_CONTOURS, landmark_drawing_spec=None, connection_drawing_spec=mp_drawing_styles.get_default_face_mesh_contours_style())

-    # --- Generate Metrics Plot ---
    metrics_plot = update_metrics_visualization(latest_metrics_state)
-    # --- Return updated values for outputs AND state ---
    return annotated_frame, metrics_plot, metrics_data_state, \
           metrics_data_state, last_analysis_time_state, \
           latest_metrics_state, latest_landmarks_state
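process_frame threads four gr.State values through every call: whatever arrives via inputs must be returned again via outputs, in the same order, or the update is lost on the next frame. A stripped-down sketch of that pattern (component names are illustrative, not from this commit):

```python
import gradio as gr

def step(frame, counter_state):
    counter_state = (counter_state or 0) + 1  # update the state...
    return frame, counter_state               # ...and return it alongside the output

with gr.Blocks() as demo:
    cam = gr.Image(sources="webcam", streaming=True)
    out = gr.Image()
    counter = gr.State(value=0)
    # State appears in BOTH lists; Gradio feeds it back in on the next frame.
    cam.stream(fn=step, inputs=[cam, counter], outputs=[out, counter])
```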

+# --- Function to Export DataFrame to CSV ---
+def export_csv(data_df):
+    """Saves the DataFrame to a CSV file and returns the file path."""
+    if data_df is None or data_df.empty:
+        print("No data to export.")
+        # Return None or raise an error/warning for the UI? Gradio File handles None.
+        return None
+    # Define filename (consider making it unique if needed, e.g., with timestamp)
+    csv_filename = "facial_analysis_log.csv"
+    try:
+        data_df.to_csv(csv_filename, index=False)
+        print(f"Data exported successfully to {csv_filename}")
+        return csv_filename # Return the path for the File component
+    except Exception as e:
+        print(f"Error exporting data to CSV: {e}")
+        return None

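export_csv writes to a fixed filename, so repeated exports overwrite one another. If unique names are wanted — as the comment in the function suggests — a timestamp suffix is one option (a sketch, not part of the commit):

```python
import time

def export_csv_unique(data_df):  # hypothetical variant, not in app.py
    if data_df is None or data_df.empty:
        return None
    csv_filename = f"facial_analysis_log_{int(time.time())}.csv"
    data_df.to_csv(csv_filename, index=False)
    return csv_filename
```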
# --- Create Gradio Interface ---
with gr.Blocks(theme=gr.themes.Soft(), title="Gradio Facial Analysis") as iface:
    gr.Markdown("# Basic Facial Analysis (Gradio Version)")
    gr.Markdown("Analyzes webcam feed for facial landmarks and estimates metrics. *Estimations are for demonstration only.*")

-    # Define State Variables
-    # Need to initialize them properly
+    # --- Define State Variables ---
    metrics_data = gr.State(value=initial_metrics_df.copy())
    last_analysis_time = gr.State(value=time.time())
    latest_metrics = gr.State(value=None)
    latest_landmarks = gr.State(value=None)

    with gr.Row():
        with gr.Column(scale=1):
            webcam_input = gr.Image(sources="webcam", streaming=True, label="Webcam Input", type="numpy")
            analysis_freq_slider = gr.Slider(minimum=0.5, maximum=5.0, step=0.5, value=1.0, label="Analysis Frequency (s)")
            analyze_checkbox = gr.Checkbox(value=True, label="Enable Analysis Calculation")
-            status_text = gr.Markdown("Status: Analysis Enabled" if analyze_checkbox.value else "Status: Analysis Paused") #
-            # Update status text dynamically (though Gradio handles this implicitly via reruns)
-            # Might need a more complex setup with event listeners if precise text update is needed without full rerun
+            # status_text = gr.Markdown("Status: Analysis Enabled" if analyze_checkbox.value else "Status: Analysis Paused") # This won't update live easily
        with gr.Column(scale=1):
            processed_output = gr.Image(label="Processed Feed", type="numpy")
            metrics_plot_output = gr.Plot(label="Estimated Metrics")
-            dataframe_output = gr.Dataframe(label="Data Log", headers=['timestamp'] + metrics, wrap=True
+            dataframe_output = gr.Dataframe(label="Data Log", headers=['timestamp'] + metrics, wrap=True) # Removed height

+    # --- Add Export Button and File Output ---
+    with gr.Row():
+        with gr.Column(scale=1):
+            export_button = gr.Button("Export Data to CSV")
+        with gr.Column(scale=2):
+            download_file_output = gr.File(label="Download CSV Log")

-    # Define
+    # --- Define Stream Processing ---
    webcam_input.stream(
        fn=process_frame,
        inputs=[
-            webcam_input,
-            analysis_freq_slider,
-            analyze_checkbox,
-            # Pass state variables as inputs
-            metrics_data,
-            last_analysis_time,
-            latest_metrics,
-            latest_landmarks
+            webcam_input, analysis_freq_slider, analyze_checkbox,
+            metrics_data, last_analysis_time, latest_metrics, latest_landmarks
        ],
        outputs=[
-            processed_output,
-            metrics_plot_output,
-            dataframe_output,
-            metrics_data,
-            last_analysis_time,
-            latest_metrics,
-            latest_landmarks
-        ]
+            processed_output, metrics_plot_output, dataframe_output,
+            metrics_data, last_analysis_time, latest_metrics, latest_landmarks
+        ],
+        # api_name="stream_frames" # Optional: Add API endpoint name
    )

+    # --- Define Button Click Action ---
+    export_button.click(
+        fn=export_csv,
+        inputs=[metrics_data], # Pass the DataFrame state to the export function
+        outputs=[download_file_output], # Output the file path to the File component
+        # api_name="export_data" # Optional: Add API endpoint name
+    )

# --- Launch the App ---
if __name__ == "__main__":
    if face_mesh is None:
-        print("Face Mesh could not be initialized.
+        print("WARNING: MediaPipe Face Mesh could not be initialized. Facial analysis will not work.")
    iface.launch(debug=True)
-
-    # Optional: Add cleanup logic if needed, although launching blocks execution
-    # try:
-    #     iface.launch()
-    # finally:
-    #     if face_mesh:
-    #         face_mesh.close() # Close mediapipe resources if app is stopped
-    #         print("MediaPipe FaceMesh closed.")
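For completeness, the cleanup left commented out above would look like this if enabled (a sketch; launch() blocks, so the finally clause runs only on shutdown):

```python
if __name__ == "__main__":
    try:
        iface.launch(debug=True)
    finally:
        if face_mesh:
            face_mesh.close()  # release MediaPipe resources on shutdown
            print("MediaPipe FaceMesh closed.")
```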