Update app.py
app.py CHANGED
@@ -84,25 +84,21 @@ def sequence_to_kmer_vector(sequence: str, k: int = 4) -> np.ndarray:
 ###############################################################################
 # 3. SHAP-VALUE (ABLATION) CALCULATION
 ###############################################################################
-
 def calculate_shap_values(model, x_tensor):
     model.eval()
     device = next(model.parameters()).device
 
     try:
-        # Create background as a torch tensor
+        # Create background as a torch tensor
         background = torch.zeros((300, x_tensor.shape[1]), device=device)
-
-        # Use DeepExplainer with the torch tensor background
         explainer = shap.DeepExplainer(model, background)
         shap_values_all = explainer.shap_values(x_tensor)
-
         # Get SHAP values for human class (index 1)
         shap_values = shap_values_all[1][0]
     except Exception as e:
         print(f"DeepExplainer failed, falling back to KernelExplainer: {str(e)}")
 
-        # Define a wrapper that
+        # Define a wrapper that ensures input is a 2D tensor
         def model_predict(x):
             if not isinstance(x, np.ndarray):
                 x = np.array(x)
@@ -114,14 +110,14 @@ def calculate_shap_values(model, x_tensor):
             probs = torch.softmax(output, dim=1)[:, 1]  # Human probability
             return probs.cpu().numpy()
 
-        # Use a numpy background
+        # Use a numpy background with 300 samples
         background = np.zeros((300, x_tensor.shape[1]))
-
         explainer = shap.KernelExplainer(model_predict, background)
         x_numpy = x_tensor.cpu().numpy()
-
+        # Increase nsamples to 1000 to provide enough data for regression
+        shap_values = explainer.shap_values(x_numpy, nsamples=1000)
 
-    # Get human probability from model prediction
+    # Get human probability from the model prediction
     with torch.no_grad():
         output = model(x_tensor)
         probs = torch.softmax(output, dim=1)
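
For readers following the change, here is a minimal, self-contained sketch of the DeepExplainer-with-KernelExplainer-fallback pattern this commit adjusts. The 300-row all-zero background, the human class index 1, and `nsamples=1000` come from the diff; the `TinyClassifier` module, the feature size, and the demo call at the bottom are illustrative stand-ins, not code from app.py, and the real function goes on to compute the class probabilities shown at the end of the diff.

```python
# Illustrative sketch only -- the Space's real model and k-mer pipeline live in app.py.
import numpy as np
import shap
import torch
import torch.nn as nn


class TinyClassifier(nn.Module):
    """Stand-in two-class model (assumption, not the Space's architecture)."""

    def __init__(self, n_features: int = 64):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(n_features, 32), nn.ReLU(), nn.Linear(32, 2))

    def forward(self, x):
        return self.net(x)


def calculate_shap_values(model, x_tensor):
    model.eval()
    device = next(model.parameters()).device

    try:
        # DeepExplainer takes a torch-tensor background; 300 all-zero rows as in the diff
        background = torch.zeros((300, x_tensor.shape[1]), device=device)
        explainer = shap.DeepExplainer(model, background)
        shap_values_all = explainer.shap_values(x_tensor)
        # SHAP values for the "human" class (index 1), first sample
        shap_values = shap_values_all[1][0]
    except Exception as e:
        print(f"DeepExplainer failed, falling back to KernelExplainer: {e}")

        def model_predict(x):
            # KernelExplainer feeds numpy arrays; run them through the model on its device
            if not isinstance(x, np.ndarray):
                x = np.array(x)
            with torch.no_grad():
                out = model(torch.tensor(x, dtype=torch.float32, device=device))
            return torch.softmax(out, dim=1)[:, 1].cpu().numpy()  # human probability

        # KernelExplainer takes a numpy background; nsamples=1000 gives its
        # local regression enough perturbed samples, as in the diff
        background = np.zeros((300, x_tensor.shape[1]))
        explainer = shap.KernelExplainer(model_predict, background)
        shap_values = explainer.shap_values(x_tensor.cpu().numpy(), nsamples=1000)

    return shap_values


if __name__ == "__main__":
    model = TinyClassifier(n_features=64)
    x = torch.rand(1, 64)
    print(np.asarray(calculate_shap_values(model, x)).shape)
```

Note that the `shap_values_all[1]` index assumes the per-class list that older shap releases return from DeepExplainer; newer releases may return a single stacked array instead, in which case the indexing error on a single-row input simply drops the function into the KernelExplainer path.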