hiyata committed · Commit 2ed8007 · verified · 1 Parent(s): af0534f

Update app.py

Files changed (1)
  1. app.py +8 -6
app.py CHANGED
@@ -93,12 +93,11 @@ def calculate_shap_values(model, x_tensor):
         background = torch.zeros((300, x_tensor.shape[1]), device=device)
         explainer = shap.DeepExplainer(model, background)
         shap_values_all = explainer.shap_values(x_tensor)
-        # Get SHAP values for human class (index 1)
+        # For binary classification, use the second output and then the first sample
        shap_values = shap_values_all[1][0]
     except Exception as e:
         print(f"DeepExplainer failed, falling back to KernelExplainer: {str(e)}")
 
-        # Define a wrapper that ensures input is a 2D tensor
         def model_predict(x):
             if not isinstance(x, np.ndarray):
                 x = np.array(x)
@@ -107,17 +106,19 @@ def calculate_shap_values(model, x_tensor):
             with torch.no_grad():
                 tensor_x = torch.tensor(x, dtype=torch.float, device=device)
                 output = model(tensor_x)
-                probs = torch.softmax(output, dim=1)[:, 1]  # Human probability
+                probs = torch.softmax(output, dim=1)[:, 1]
             return probs.cpu().numpy()
 
-        # Use a numpy background with 300 samples
+        # Use a numpy background for KernelExplainer
         background = np.zeros((300, x_tensor.shape[1]))
         explainer = shap.KernelExplainer(model_predict, background)
         x_numpy = x_tensor.cpu().numpy()
-        # Increase nsamples to 1000 to provide enough data for regression
         shap_values = explainer.shap_values(x_numpy, nsamples=1000)
+        # If KernelExplainer returns a list, take its first element.
+        if isinstance(shap_values, list):
+            shap_values = shap_values[0]
 
-    # Get human probability from the model prediction
+    # Get human probability from model prediction
     with torch.no_grad():
         output = model(x_tensor)
         probs = torch.softmax(output, dim=1)
@@ -125,6 +126,7 @@ def calculate_shap_values(model, x_tensor):
 
     return np.array(shap_values), prob_human
 
+
 ###############################################################################
 # 4. PER-BASE SHAP AGGREGATION
 ###############################################################################
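For context (not part of the commit): a minimal, self-contained sketch of why the new isinstance guard helps. Depending on the SHAP version and the number of model outputs, KernelExplainer.shap_values can return either a single array or a list of per-output arrays; the added check normalizes both cases. The toy predict function, feature width, and sample counts below are illustrative assumptions, not the app's real model.

    import numpy as np
    import shap

    # Toy single-output scoring function standing in for model_predict
    # (the real app returns the human-class probability).
    def toy_predict(x):
        return np.asarray(x).sum(axis=1)

    background = np.zeros((10, 4))  # all-zero background, as app.py uses
    explainer = shap.KernelExplainer(toy_predict, background)
    vals = explainer.shap_values(np.ones((1, 4)), nsamples=100)

    # Same normalization the commit introduces: some SHAP versions /
    # multi-output models return a list of arrays instead of one array.
    if isinstance(vals, list):
        vals = vals[0]
    print(np.asarray(vals).shape)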