clockclock committed
Commit aad4cfe · verified · 1 Parent(s): cac722b

Update app.py
Files changed (1)
  app.py  +11  −130
app.py CHANGED
@@ -1,8 +1,7 @@
-# app.py (Corrected and Ready to Run)
+# app.py
 import pandas as pd
 import numpy as np
 import matplotlib.pyplot as plt
-import matplotlib.patches as patches
 import seaborn as sns
 from scipy import stats
 from sklearn.preprocessing import StandardScaler
@@ -24,13 +23,11 @@ class EnhancedAIvsRealGazeAnalyzer:
         self.questions = ['Q1', 'Q2', 'Q3', 'Q4', 'Q5', 'Q6']
         self.correct_answers = {'Pair1': 'B', 'Pair2': 'B', 'Pair3': 'B', 'Pair4': 'B', 'Pair5': 'B', 'Pair6': 'B'}
         self.combined_data = None
-        self.fixation_data = {}
-        self.valid_playback_participants = []
-        self.valid_playback_trials = {}
         self.model = None
         self.scaler = None
         self.feature_names = []
-        self.time_metrics = [] # Initialize here
+        self.time_metrics = []
+        self.numeric_cols = []

     def _find_and_standardize_participant_col(self, df, filename):
         participant_col = next((c for c in df.columns if 'participant' in str(c).lower()), None)
@@ -57,25 +54,10 @@ class EnhancedAIvsRealGazeAnalyzer:
             file_path = f"{base_path}/Filtered_GenAI_Metrics_cleaned_{q}.xlsx"
             if os.path.exists(file_path):
                 print(f"Processing {file_path}...")
-                xls = pd.ExcelFile(file_path)
-                metrics_df = pd.read_excel(xls, sheet_name=0)
+                metrics_df = pd.read_excel(file_path, sheet_name=0)
                 metrics_df = self._find_and_standardize_participant_col(metrics_df, f"{q} Metrics")
                 metrics_df['Question'] = q
                 all_metrics_dfs.append(metrics_df)
-
-                if len(xls.sheet_names) > 1:
-                    try:
-                        fix_df = pd.read_excel(xls, sheet_name=1)
-                        fix_df = self._find_and_standardize_participant_col(fix_df, f"{q} Fixations")
-                        fix_df.dropna(subset=['Fixation point X', 'Fixation point Y', 'Gaze event duration (ms)'], inplace=True)
-                        for participant_id, group in fix_df.groupby('participant_id'):
-                            self.fixation_data[(participant_id, q)] = group.reset_index(drop=True)
-                            if participant_id not in self.valid_playback_trials:
-                                self.valid_playback_trials[participant_id] = []
-                            self.valid_playback_trials[participant_id].append(q)
-                        print(f" -> Successfully loaded {len(fix_df)} fixations for {q}.")
-                    except Exception as e:
-                        print(f" -> WARNING: Could not load fixation sheet for {q}. Error: {e}")

         if not all_metrics_dfs: raise ValueError("No aggregated metrics files were found.")
         self.combined_data = pd.concat(all_metrics_dfs, ignore_index=True)
@@ -85,12 +67,9 @@ class EnhancedAIvsRealGazeAnalyzer:
         self.combined_data['Answer_Correctness'] = self.combined_data['Correct'].map({True: 'Correct', False: 'Incorrect'})

         self.numeric_cols = self.combined_data.select_dtypes(include=np.number).columns.tolist()
-
-        # <<< FIX: Removed the space in the variable name here >>>
         self.time_metrics = [c for c in self.numeric_cols if any(k in c.lower() for k in ['time', 'duration', 'fixation'])]

-        self.valid_playback_participants = sorted(list(self.valid_playback_trials.keys()))
-        print(f"--- Data Loading Successful. Found {len(self.valid_playback_participants)} participants with fixation data. ---")
+        print(f"--- Data Loading Successful ---")
         return self

     def run_prediction_model(self, test_size, n_estimators):
@@ -111,62 +90,10 @@ class EnhancedAIvsRealGazeAnalyzer:
         report_df = pd.DataFrame(report).transpose().round(3)
         feature_importance = pd.DataFrame({'Feature': self.feature_names, 'Importance': self.model.feature_importances_}).sort_values('Importance', ascending=False).head(15)
         fig, ax = plt.subplots(figsize=(10, 8)); sns.barplot(data=feature_importance, x='Importance', y='Feature', ax=ax, palette='viridis'); ax.set_title(f'Top 15 Predictive Features (n_estimators={int(n_estimators)})', fontsize=14); plt.tight_layout()
-        return summary_md, report_df, fig, gr.Markdown("✅ **Model is ready!** You can now use the Gaze Playback tab.")
-
-    def _recalculate_features_from_fixations(self, fixations_df):
-        feature_vector = pd.Series(0.0, index=self.feature_names)
-        if fixations_df.empty: return feature_vector.fillna(0).values.reshape(1, -1)
-        if 'AOI name' in fixations_df.columns:
-            for aoi_name, group in fixations_df.groupby('AOI name'):
-                col_name = f'Total fixation duration on {aoi_name}'
-                if col_name in feature_vector.index:
-                    feature_vector[col_name] = group['Gaze event duration (ms)'].sum()
-        feature_vector['Total Recording Duration'] = fixations_df['Gaze event duration (ms)'].sum()
-        return feature_vector.fillna(0).values.reshape(1, -1)
-
-    def generate_gaze_playback(self, participant, question, fixation_num):
-        if self.model is None: return "Please train a model in Tab 2 first.", None, gr.Slider(interactive=False)
-        trial_key = (str(participant), question)
-        if not participant or not question or trial_key not in self.fixation_data:
-            return "Please select a valid trial.", None, gr.Slider(interactive=False, value=0)
-
-        all_fixations = self.fixation_data[trial_key]
-        fixation_num = int(fixation_num)
-        slider_max = len(all_fixations)
-        if fixation_num > slider_max: fixation_num = slider_max
-        current_fixations = all_fixations.iloc[:fixation_num]
-
-        partial_features = self._recalculate_features_from_fixations(current_fixations)
-        prediction_prob = self.model.predict_proba(self.scaler.transform(partial_features))[0]
-        prob_correct = prediction_prob[1]
-
-        fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(10, 8), gridspec_kw={'height_ratios': [4, 1]})
-        fig.suptitle(f"Gaze Playback for {participant} - {question}", fontsize=16, weight='bold')
-        ax1.set_title(f"Displaying Fixations 1 through {fixation_num}/{slider_max}")
-        ax1.set_xlim(0, 1920); ax1.set_ylim(1080, 0)
-        ax1.set_aspect('equal'); ax1.tick_params(left=False, right=False, bottom=False, top=False, labelleft=False, labelbottom=False)
-        ax1.add_patch(patches.Rectangle((0, 0), 1920/2, 1080, facecolor='#e0e0e0'))
-        ax1.add_patch(patches.Rectangle((1920/2, 0), 1920/2, 1080, facecolor='#f0f0f0'))
-        ax1.text(1920*0.25, 50, "Image A", ha='center', fontsize=14, alpha=0.7)
-        ax1.text(1920*0.75, 50, "Image B", ha='center', fontsize=14, alpha=0.7)
-        if not current_fixations.empty:
-            points = current_fixations[['Fixation point X', 'Fixation point Y']]
-            ax1.plot(points['Fixation point X'], points['Fixation point Y'], marker='o', color='grey', alpha=0.5, linestyle='-')
-            ax1.scatter(points.iloc[-1]['Fixation point X'], points.iloc[-1]['Fixation point Y'], s=200, c='red', zorder=10, edgecolors='black', lw=2)

-        ax2.set_xlim(0, 1); ax2.set_yticks([])
-        ax2.set_title("Live Prediction Confidence (Answer is 'Correct')")
-        bar_color = 'green' if prob_correct > 0.5 else 'red'
-        ax2.barh([0], [prob_correct], color=bar_color, height=0.5, edgecolor='black')
-        ax2.axvline(0.5, color='black', linestyle='--', linewidth=1)
-        ax2.text(prob_correct, 0, f" {prob_correct:.1%} ", va='center', ha='left' if prob_correct < 0.9 else 'right', color='white', weight='bold', fontsize=12)
-        plt.tight_layout(rect=[0, 0, 1, 0.95])
-
-        trial_info = self.combined_data[(self.combined_data['participant_id'] == str(participant)) & (self.combined_data['Question'] == question)].iloc[0]
-        summary_text = f"**Actual Answer:** `{trial_info['Answer_Correctness']}`"
-
-        return summary_text, fig, gr.Slider(maximum=slider_max, value=fixation_num, interactive=True, step=1, minimum=0)
+        # <<< FIX: Updated status message >>>
+        return summary_md, report_df, fig, gr.Markdown("✅ **Model trained successfully.**")
+
     def analyze_rq1_metric(self, metric):
         if not metric or metric not in self.combined_data.columns: return None, "Metric not found."
         correct = self.combined_data.loc[self.combined_data['Answer_Correctness'] == 'Correct', metric].dropna()
@@ -176,18 +103,6 @@ class EnhancedAIvsRealGazeAnalyzer:
         fig, ax = plt.subplots(figsize=(8, 6)); sns.boxplot(data=self.combined_data, x='Answer_Correctness', y=metric, ax=ax, palette=['#66b3ff','#ff9999']); ax.set_title(f'Comparison of "{metric}" by Answer Correctness', fontsize=14); ax.set_xlabel("Answer Correctness"); ax.set_ylabel(metric); plt.tight_layout()
         summary = f"""### Analysis for: **{metric}**\n- **Mean (Correct Answers):** {correct.mean():.4f}\n- **Mean (Incorrect Answers):** {incorrect.mean():.4f}\n- **T-test p-value:** {p_val:.4f}\n\n**Conclusion:**\n- {'There is a **statistically significant** difference (p < 0.05).' if p_val < 0.05 else 'There is **no statistically significant** difference (p >= 0.05).'}"""
         return fig, summary
-
-    def update_question_dropdown(self, participant):
-        """Dynamically updates the question dropdown based on the selected participant."""
-        valid_questions = self.valid_playback_trials.get(participant, [])
-        return gr.Dropdown(choices=sorted(valid_questions), interactive=True, value=None, label="2. Select a Question")
-
-    def handle_new_trial_selection(self, participant, question):
-        """Called when a new trial is selected. Resets the view to the first fixation."""
-        if not participant or not question:
-            return "Select a trial to begin.", None, gr.Slider(value=0, interactive=False)
-        initial_fixation_num = 1
-        return self.generate_gaze_playback(participant, question, initial_fixation_num)

 # --- DATA SETUP & GRADIO APP ---
 def setup_and_load_data():
@@ -222,60 +137,26 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
                 gr.Markdown("#### Tune Model Hyperparameters")
                 rq2_test_size_slider=gr.Slider(minimum=0.1, maximum=0.5, step=0.05, value=0.3, label="Test Set Size")
                 rq2_estimators_slider=gr.Slider(minimum=10, maximum=200, step=10, value=100, label="Number of Trees")
-                rq2_status = gr.Markdown("Train a model to enable the Gaze Playback tab.")
+                # <<< FIX: Updated initial status message >>>
+                rq2_status = gr.Markdown("Train a model to see performance metrics.")
             with gr.Column(scale=2):
                 rq2_summary_output=gr.Markdown(label="Model Performance Summary")
                 rq2_table_output=gr.Dataframe(label="Classification Report", interactive=False)
                 rq2_plot_output=gr.Plot(label="Feature Importance")

-    with gr.TabItem("👁️ Gaze Playback & Real-Time Prediction"):
-        gr.Markdown("### See the Prediction Evolve with Every Glance!")
-        with gr.Row():
-            with gr.Column(scale=1):
-                playback_participant=gr.Dropdown(choices=analyzer.valid_playback_participants, label="1. Select a Participant")
-                playback_question=gr.Dropdown(choices=[], label="2. Select a Question", interactive=False)
-                gr.Markdown("3. Use the slider to play back fixations one by one.")
-                playback_slider=gr.Slider(minimum=0, maximum=1, step=1, value=0, label="Fixation Number", interactive=False)
-                playback_summary=gr.Markdown(label="Trial Info")
-            with gr.Column(scale=2):
-                playback_plot=gr.Plot(label="Gaze Playback & Live Prediction")
-
     # --- WIRING FOR ALL TABS ---
     outputs_rq2 = [rq2_summary_output, rq2_table_output, rq2_plot_output, rq2_status]
-    outputs_playback = [playback_summary, playback_plot, playback_slider]

     rq1_metric_dropdown.change(fn=analyzer.analyze_rq1_metric, inputs=rq1_metric_dropdown, outputs=[rq1_plot_output, rq1_summary_output])

-    train_event = rq2_test_size_slider.release(fn=analyzer.run_prediction_model, inputs=[rq2_test_size_slider, rq2_estimators_slider], outputs=outputs_rq2)
+    rq2_test_size_slider.release(fn=analyzer.run_prediction_model, inputs=[rq2_test_size_slider, rq2_estimators_slider], outputs=outputs_rq2)
     rq2_estimators_slider.release(fn=analyzer.run_prediction_model, inputs=[rq2_test_size_slider, rq2_estimators_slider], outputs=outputs_rq2)
-
-    playback_participant.change(
-        fn=analyzer.update_question_dropdown,
-        inputs=playback_participant,
-        outputs=playback_question
-    )
-
-    playback_question.change(
-        fn=analyzer.handle_new_trial_selection,
-        inputs=[playback_participant, playback_question],
-        outputs=outputs_playback
-    )
-
-    playback_slider.release(
-        fn=analyzer.generate_gaze_playback,
-        inputs=[playback_participant, playback_question, playback_slider],
-        outputs=outputs_playback
-    )

     # Pre-load the initial state of the dashboard
     def initial_load():
-        # Load the first tab's content
        rq1_fig, rq1_summary = analyzer.analyze_rq1_metric(analyzer.time_metrics[0] if analyzer.time_metrics else None)
-
-        # Train the initial model for the second tab
        model_summary, report_df, feature_fig, status_md = analyzer.run_prediction_model(0.3, 100)

-        # Return all the values needed to populate the outputs on load
        return {
            rq1_plot_output: rq1_fig,
            rq1_summary_output: rq1_summary,
 