clockclock committed
Commit ab34e07 · verified · 1 Parent(s): cd63cff

Update app.py

Files changed (1): app.py (+57 -51)
app.py CHANGED

@@ -29,85 +29,80 @@ class EnhancedAIvsRealGazeAnalyzer:
        self.model = None
        self.scaler = None
        self.feature_names = []
-        self.et_id_col = 'Participant name'

-    def load_and_process_data(self, base_path, response_file):
-        print("Loading and processing aggregated and raw fixation data...")
-        self.response_data = pd.read_excel(response_file) if response_file.endswith('.xlsx') else pd.read_csv(response_file)
-        self.response_data.columns = self.response_data.columns.str.strip()
+    def _find_and_standardize_participant_col(self, df, filename):
+        """Finds, renames, and type-converts the participant ID column."""
+        participant_col = next((c for c in df.columns if 'participant' in str(c).lower()), None)
+        if not participant_col:
+            raise ValueError(f"Could not find a 'participant' column in the file: {filename}")
+        df.rename(columns={participant_col: 'participant_id'}, inplace=True)
+        df['participant_id'] = df['participant_id'].astype(str)
+        return df
+
+    def load_and_process_data(self, base_path, response_file_path):
+        print("--- Starting Robust Data Loading ---")
+        # 1. Load and Standardize Response Data
+        print("Loading response sheet...")
+        response_df = pd.read_excel(response_file_path)
+        response_df = self._find_and_standardize_participant_col(response_df, "GenAI Response.xlsx")
+        for pair, ans in self.correct_answers.items():
+            if pair in response_df.columns:
+                response_df[f'{pair}_Correct'] = (response_df[pair].astype(str).str.strip().str.upper() == ans)

+        response_long = response_df.melt(id_vars=['participant_id'], value_vars=self.correct_answers.keys(), var_name='Pair')
+        correctness_long = response_df.melt(id_vars=['participant_id'], value_vars=[f'{p}_Correct' for p in self.correct_answers.keys()], var_name='Pair_Correct_Col', value_name='Correct')
+        correctness_long['Pair'] = correctness_long['Pair_Correct_Col'].str.replace('_Correct', '')
+        response_long = response_long.merge(correctness_long[['participant_id', 'Pair', 'Correct']], on=['participant_id', 'Pair'])
+
+        # 2. Load and Standardize Metrics & Fixation Data
        all_metrics_dfs = []
-        # Load both aggregated metrics and raw fixations
        for q in self.questions:
            file_path = f"{base_path}/Filtered_GenAI_Metrics_cleaned_{q}.xlsx"
+            print(f"Processing {file_path}...")
            if os.path.exists(file_path):
                xls = pd.ExcelFile(file_path)
+
+                # Metrics Data
                metrics_df = pd.read_excel(xls, sheet_name=0)
+                metrics_df = self._find_and_standardize_participant_col(metrics_df, f"{q} Metrics")
                metrics_df['Question'] = q
                all_metrics_dfs.append(metrics_df)

+                # Fixation Data
                if 'Fixation-based AOI' in xls.sheet_names:
                    fix_df = pd.read_excel(xls, sheet_name='Fixation-based AOI')
-                    fix_df['Question'] = q
+                    fix_df = self._find_and_standardize_participant_col(fix_df, f"{q} Fixations")
                    fix_df.dropna(subset=['Fixation point X', 'Fixation point Y', 'Gaze event duration (ms)'], inplace=True)
-                    # Use a local variable here to avoid confusion
-                    fix_et_id_col = next((c for c in fix_df.columns if 'participant' in c.lower()), None)
-                    if fix_et_id_col:
-                        for participant, group in fix_df.groupby(fix_et_id_col):
-                            self.fixation_data[(str(participant), q)] = group.reset_index(drop=True)
-
+                    fix_df['Question'] = q
+                    for participant, group in fix_df.groupby('participant_id'):
+                        self.fixation_data[(participant, q)] = group.reset_index(drop=True)
+
        if not all_metrics_dfs: raise ValueError("No aggregated metrics files were found.")
        self.combined_data = pd.concat(all_metrics_dfs, ignore_index=True)
-        self.combined_data.columns = self.combined_data.columns.str.strip()
-
-        # --- THIS IS THE KEY FIX ---
-        # 1. Dynamically find the participant ID column in the COMBINED metrics data.
-        self.et_id_col = next((c for c in self.combined_data.columns if 'participant' in c.lower()), None)
-        if not self.et_id_col: raise KeyError("Could not find a 'participant' column in the aggregated metrics data.")
-
-        # 2. Dynamically find the participant ID column in the RESPONSE data.
-        resp_id_col = next((c for c in self.response_data.columns if 'participant' in c.lower()), None)
-        if not resp_id_col: raise KeyError("Could not find a 'participant' column in the response sheet.")
-        # --- END OF FIX ---
-
-        for pair, ans in self.correct_answers.items():
-            if pair in self.response_data.columns:
-                self.response_data[f'{pair}_Correct'] = (self.response_data[pair].astype(str).str.strip().str.upper() == ans)

-        response_long = self.response_data.melt(id_vars=[resp_id_col], value_vars=self.correct_answers.keys(), var_name='Pair')
-        correctness_long = self.response_data.melt(id_vars=[resp_id_col], value_vars=[f'{p}_Correct' for p in self.correct_answers.keys()], var_name='Pair_Correct_Col', value_name='Correct')
-        correctness_long['Pair'] = correctness_long['Pair_Correct_Col'].str.replace('_Correct', '')
-        response_long = response_long.merge(correctness_long[[resp_id_col, 'Pair', 'Correct']], on=[resp_id_col, 'Pair'])
+        # 3. Merge with Confidence
+        print("Merging all data sources...")
        q_to_pair = {f'Q{i+1}': f'Pair{i+1}' for i in range(6)}
        self.combined_data['Pair'] = self.combined_data['Question'].map(q_to_pair)
-
-        # 3. Perform the merge using the correctly identified column names.
-        self.combined_data = self.combined_data.merge(response_long, left_on=[self.et_id_col, 'Pair'], right_on=[resp_id_col, 'Pair'], how='left')
+        self.combined_data = self.combined_data.merge(response_long, on=['participant_id', 'Pair'], how='left')
        self.combined_data['Answer_Correctness'] = self.combined_data['Correct'].map({True: 'Correct', False: 'Incorrect'})

+        # 4. Finalize class attributes
        self.numeric_cols = self.combined_data.select_dtypes(include=np.number).columns.tolist()
        self.time_metrics = [c for c in self.numeric_cols if any(k in c.lower() for k in ['time', 'duration', 'fixation'])]
-        self.participant_list = sorted([str(p) for p in self.combined_data[self.et_id_col].unique()])
-        print("Data loading complete.")
+        self.participant_list = sorted(self.combined_data['participant_id'].unique().tolist())
+        print("--- Data Loading Successful ---")
        return self

-    def analyze_rq1_metric(self, metric):
-        if not metric: return None, "Metric not found."
-        correct = self.combined_data.loc[self.combined_data['Answer_Correctness'] == 'Correct', metric].dropna()
-        incorrect = self.combined_data.loc[self.combined_data['Answer_Correctness'] == 'Incorrect', metric].dropna()
-        t_stat, p_val = stats.ttest_ind(incorrect, correct, equal_var=False, nan_policy='omit')
-        fig, ax = plt.subplots(figsize=(8, 6)); sns.boxplot(data=self.combined_data, x='Answer_Correctness', y=metric, ax=ax, palette=['#66b3ff','#ff9999']); ax.set_title(f'Comparison of "{metric}" by Answer Correctness', fontsize=14); ax.set_xlabel("Answer Correctness"); ax.set_ylabel(metric); plt.tight_layout()
-        summary = f"""### Analysis for: **{metric}**\n- **Mean (Correct Answers):** {correct.mean():.4f}\n- **Mean (Incorrect Answers):** {incorrect.mean():.4f}\n- **T-test p-value:** {p_val:.4f}\n\n**Conclusion:**\n- {'There is a **statistically significant** difference (p < 0.05).' if p_val < 0.05 else 'There is **no statistically significant** difference (p >= 0.05).'}"""
-        return fig, summary
-
    def run_prediction_model(self, test_size, n_estimators):
-        leaky_features = ['Total_Correct', 'Overall_Accuracy', 'Correct', self.et_id_col]
+        leaky_features = ['Total_Correct', 'Overall_Accuracy', 'Correct', 'participant_id']
        self.feature_names = [col for col in self.numeric_cols if col not in leaky_features and col in self.combined_data.columns]
        features = self.combined_data[self.feature_names].copy()
        target = self.combined_data['Answer_Correctness'].map({'Correct': 1, 'Incorrect': 0})
        valid_indices = target.notna()
        features, target = features[valid_indices], target[valid_indices]
        features = features.fillna(features.median()).fillna(0)
+        if len(target.unique()) < 2: return "Not enough data to train model.", None, None
        X_train, X_test, y_train, y_test = train_test_split(features, target, test_size=test_size, random_state=42, stratify=target)
        self.scaler = StandardScaler().fit(X_train)
        X_train_scaled = self.scaler.transform(X_train)
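
Note: the new `_find_and_standardize_participant_col` helper is the heart of this hunk — every sheet (responses, aggregated metrics, fixations) now funnels its differently-named ID column into a single string-typed `participant_id` key before any merge. A minimal standalone sketch of the same idea; the function name, column names, and values below are invented for illustration:

    import pandas as pd

    def standardize_participant_col(df, filename):
        # Match the first column whose name mentions 'participant', any casing.
        col = next((c for c in df.columns if 'participant' in str(c).lower()), None)
        if col is None:
            raise ValueError(f"Could not find a 'participant' column in {filename}")
        df = df.rename(columns={col: 'participant_id'})
        # Cast to str so integer IDs in one sheet match string IDs in another.
        df['participant_id'] = df['participant_id'].astype(str)
        return df

    metrics = pd.DataFrame({'Participant name': [1, 2], 'Fixation count': [10, 12]})
    responses = pd.DataFrame({'participant': ['1', '2'], 'Pair1': ['A', 'B']})
    merged = standardize_participant_col(metrics, 'metrics.xlsx').merge(
        standardize_participant_col(responses, 'responses.xlsx'), on='participant_id')
    print(merged)  # two rows, joined on the normalized key

The str cast matters: the old code applied `str(...)` and `.astype(str)` ad hoc at each use site, which left room for type mismatches at merge time.
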
@@ -119,7 +114,7 @@ class EnhancedAIvsRealGazeAnalyzer:
        feature_importance = pd.DataFrame({'Feature': self.feature_names, 'Importance': self.model.feature_importances_}).sort_values('Importance', ascending=False).head(15)
        fig, ax = plt.subplots(figsize=(10, 8)); sns.barplot(data=feature_importance, x='Importance', y='Feature', ax=ax, palette='viridis'); ax.set_title(f'Top 15 Predictive Features (n_estimators={n_estimators})', fontsize=14); plt.tight_layout()
        return summary_md, report_df, fig
-
+
    def _recalculate_features_from_fixations(self, fixations_df):
        feature_vector = pd.Series(0.0, index=self.feature_names)
        if fixations_df.empty: return feature_vector.fillna(0).values.reshape(1, -1)
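
Note: the importance plot above only makes sense because `run_prediction_model` drops the label-derived columns (`Total_Correct`, `Overall_Accuracy`, `Correct`) plus `participant_id` from the feature set, and the new guard refuses to train on a single-class target. A stripped-down sketch of that leakage filter and the stratified split, on toy data:

    import pandas as pd
    from sklearn.model_selection import train_test_split

    # Toy stand-in for combined_data: one gaze metric plus the label itself.
    df = pd.DataFrame({'Fixation count': range(20), 'Correct': [True, False] * 10})
    leaky = ['Correct']  # anything derived from the label must not be a feature
    X = df.drop(columns=leaky)
    y = df['Correct'].astype(int)
    if y.nunique() < 2:
        raise SystemExit("Not enough data to train model.")
    # stratify=y keeps the Correct/Incorrect ratio identical in both splits
    X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.5,
                                              random_state=42, stratify=y)
    print(y_tr.mean(), y_te.mean())  # 0.5 and 0.5
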
@@ -163,9 +158,20 @@ class EnhancedAIvsRealGazeAnalyzer:
        ax2.axvline(0.5, color='black', linestyle='--', linewidth=1)
        ax2.text(prob_correct, 0, f" {prob_correct:.1%} ", va='center', ha='left' if prob_correct < 0.9 else 'right', color='white', weight='bold')
        plt.tight_layout(rect=[0, 0, 1, 0.95])
-        trial_info = self.combined_data[(self.combined_data[self.et_id_col].astype(str) == str(participant)) & (self.combined_data['Question'] == question)].iloc[0]
+        trial_info = self.combined_data[(self.combined_data['participant_id'] == str(participant)) & (self.combined_data['Question'] == question)].iloc[0]
        summary_text = f"**Actual Answer:** `{trial_info['Answer_Correctness']}`"
        return summary_text, fig, gr.Slider(maximum=slider_max, value=fixation_num, interactive=True)
+
+    def analyze_rq1_metric(self, metric):  # Added this back just in case
+        if not metric or metric not in self.combined_data.columns: return None, "Metric not found."
+        correct = self.combined_data.loc[self.combined_data['Answer_Correctness'] == 'Correct', metric].dropna()
+        incorrect = self.combined_data.loc[self.combined_data['Answer_Correctness'] == 'Incorrect', metric].dropna()
+        if len(correct) < 2 or len(incorrect) < 2: return None, "Not enough data for both groups to compare."
+        t_stat, p_val = stats.ttest_ind(incorrect, correct, equal_var=False, nan_policy='omit')
+        fig, ax = plt.subplots(figsize=(8, 6)); sns.boxplot(data=self.combined_data, x='Answer_Correctness', y=metric, ax=ax, palette=['#66b3ff','#ff9999']); ax.set_title(f'Comparison of "{metric}" by Answer Correctness', fontsize=14); ax.set_xlabel("Answer Correctness"); ax.set_ylabel(metric); plt.tight_layout()
+        summary = f"""### Analysis for: **{metric}**\n- **Mean (Correct Answers):** {correct.mean():.4f}\n- **Mean (Incorrect Answers):** {incorrect.mean():.4f}\n- **T-test p-value:** {p_val:.4f}\n\n**Conclusion:**\n- {'There is a **statistically significant** difference (p < 0.05).' if p_val < 0.05 else 'There is **no statistically significant** difference (p >= 0.05).'}"""
+        return fig, summary
+

# --- DATA SETUP & GRADIO APP ---
def setup_and_load_data():
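
Note: the restored `analyze_rq1_metric` runs Welch's t-test (`equal_var=False`), which does not assume the correct and incorrect groups share a variance, and now bails out when either group has fewer than two observations. A self-contained sketch with synthetic fixation durations (all values invented):

    import numpy as np
    from scipy import stats

    rng = np.random.default_rng(0)
    correct = rng.normal(loc=250, scale=40, size=30)    # durations in ms
    incorrect = rng.normal(loc=280, scale=60, size=20)
    if len(correct) < 2 or len(incorrect) < 2:
        raise SystemExit("Not enough data for both groups to compare.")
    t_stat, p_val = stats.ttest_ind(incorrect, correct,
                                    equal_var=False, nan_policy='omit')
    print(f"t = {t_stat:.3f}, p = {p_val:.4f}")  # p < 0.05 -> significant
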
@@ -174,8 +180,8 @@ def setup_and_load_data():
    if not os.path.exists(repo_dir): git.Repo.clone_from(repo_url, repo_dir)
    else: print("Data repository already exists.")
    base_path = repo_dir
-    response_file = os.path.join(repo_dir, "GenAI Response.xlsx")
-    analyzer = EnhancedAIvsRealGazeAnalyzer().load_and_process_data(base_path, response_file)
+    response_file_path = os.path.join(repo_dir, "GenAI Response.xlsx")
+    analyzer = EnhancedAIvsRealGazeAnalyzer().load_and_process_data(base_path, response_file_path)
    return analyzer

analyzer = setup_and_load_data()
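
Note: the loader invoked here reshapes the wide response sheet (one answer column per pair) into long format with two `melt` calls, then joins everything on `['participant_id', 'Pair']`. A toy version of that reshape, with a single made-up pair and answers:

    import pandas as pd

    resp = pd.DataFrame({'participant_id': ['1', '2'],
                         'Pair1': ['AI', 'REAL'],
                         'Pair1_Correct': [True, False]})
    answers = resp.melt(id_vars=['participant_id'], value_vars=['Pair1'],
                        var_name='Pair')
    correctness = resp.melt(id_vars=['participant_id'], value_vars=['Pair1_Correct'],
                            var_name='Pair_Correct_Col', value_name='Correct')
    correctness['Pair'] = correctness['Pair_Correct_Col'].str.replace('_Correct', '')
    long_df = answers.merge(correctness[['participant_id', 'Pair', 'Correct']],
                            on=['participant_id', 'Pair'])
    print(long_df)  # one row per participant x pair: answer value plus correctness
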
 
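Note: per-trial fixation traces are cached in `self.fixation_data` under a `(participant_id, question)` key, so the fixation slider can fetch one trial's rows without re-filtering the full sheet. A minimal sketch of that grouping, with invented rows:

    import pandas as pd

    fix = pd.DataFrame({'participant_id': ['1', '1', '2'],
                        'Fixation point X': [0.2, 0.5, 0.7],
                        'Fixation point Y': [0.3, 0.6, 0.1],
                        'Gaze event duration (ms)': [180, 220, 200]})
    fixation_data = {}
    for participant, group in fix.groupby('participant_id'):
        fixation_data[(participant, 'Q1')] = group.reset_index(drop=True)
    print(fixation_data[('1', 'Q1')])  # the two fixations for participant '1'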
 