hiyata committed on
Commit 345980e · verified · 1 Parent(s): a2f7e81

Update app.py

Files changed (1)
  1. app.py +40 -102
app.py CHANGED
@@ -18,6 +18,8 @@ import tempfile
 import os
 from typing import List, Dict, Tuple, Optional, Any
 import seaborn as sns
+import shap
+
 
 ###############################################################################
 # 1. MODEL DEFINITION
@@ -80,101 +82,55 @@ def sequence_to_kmer_vector(sequence: str, k: int = 4) -> np.ndarray:
     return vec
 
 ###############################################################################
-# 3. SHAP-VALUE CALCULATION
+# 3. SHAP-VALUE (ABLATION) CALCULATION
 ###############################################################################
 
-import shap
-from sklearn.linear_model import Ridge
-
 def calculate_shap_values(model, x_tensor):
-    """
-    Calculate SHAP values with three possible methods:
-    1. Try SHAP's GradientExplainer (better for deep models with unsupported layers)
-    2. Fall back to SHAP's KernelExplainer with fixed parameters if #1 fails
-    3. Fall back to the original feature-ablation method if both SHAP methods fail
-    """
     model.eval()
     device = next(model.parameters()).device
 
-    # Get human probability for baseline
-    with torch.no_grad():
-        output = model(x_tensor)
-        probs = torch.softmax(output, dim=1)
-        prob_human = probs[0, 1].item()
+    # Create background dataset (baseline); the PyTorch DeepExplainer expects
+    # a tensor on the model's device, not a NumPy array
+    background = torch.zeros((300, x_tensor.shape[1]), device=device)
 
-    # Try GradientExplainer first (better for neural nets with unsupported ops)
     try:
-        # Create synthetic background data (more samples to avoid errors)
-        background = torch.zeros((20, x_tensor.shape[1]), device=device)
-        for i in range(20):
-            # Add small random noise to avoid singular matrices
-            background[i] = torch.randn_like(x_tensor[0]) * 0.01
-
-        explainer = shap.GradientExplainer(model, background)
+        # Try using DeepExplainer (efficient for neural networks)
+        explainer = shap.DeepExplainer(model, background)
+
+        # Calculate SHAP values
         shap_values_all = explainer.shap_values(x_tensor)
 
-        # For classification, shap_values is a list of arrays, one for each class;
-        # we want the values for the "human" class (index 1)
-        if isinstance(shap_values_all, list) and len(shap_values_all) > 1:
-            shap_values = shap_values_all[1][0].cpu().numpy()
-        else:
-            shap_values = shap_values_all[0].cpu().numpy()
-
-        print("Using GradientExplainer for SHAP values")
-        return np.array(shap_values), prob_human
+        # Get SHAP values for human class (index 1)
+        shap_values = shap_values_all[1][0]
 
     except Exception as e:
-        print(f"GradientExplainer failed: {str(e)}, trying KernelExplainer")
+        print(f"DeepExplainer failed, falling back to KernelExplainer: {str(e)}")
 
-        try:
-            # Create model wrapper function
-            def model_predict(x):
-                with torch.no_grad():
-                    tensor_x = torch.FloatTensor(x).to(device)
-                    output = model(tensor_x)
-                    probs = torch.softmax(output, dim=1)[:, 1]  # Human probability
-                    return probs.cpu().numpy()
-
-            # Create more background samples (50 samples with random noise)
-            background = np.zeros((50, x_tensor.shape[1]))
-            for i in range(50):
-                # Small random values to create a better background distribution
-                background[i] = np.random.normal(0, 0.01, x_tensor.shape[1])
-
-            # Force using Ridge regression instead of the default LassoLarsIC
-            explainer = shap.KernelExplainer(
-                model_predict,
-                background,
-                link="identity",  # Use raw output, not logit
-                l1_reg="num_features(10)",  # Simplified regularization
-                model_regressor=Ridge(alpha=0.01)  # Use Ridge instead of LassoLarsIC
-            )
-
-            # Calculate SHAP values with more samples
-            x_numpy = x_tensor.cpu().numpy()
-            shap_values = explainer.shap_values(x_numpy, nsamples=300)
-
-            print("Using KernelExplainer for SHAP values")
-            return np.array(shap_values), prob_human
-
-        except Exception as e:
-            print(f"KernelExplainer failed: {str(e)}, falling back to ablation method")
-
-    # Fall back to the original feature-ablation method
-    with torch.no_grad():
-        shap_values = []
-        x_zeroed = x_tensor.clone()
-        for i in range(x_tensor.shape[1]):
-            original_val = x_zeroed[0, i].item()
-            x_zeroed[0, i] = 0.0
-            output = model(x_zeroed)
-            probs = torch.softmax(output, dim=1)
-            prob = probs[0, 1].item()
-            shap_values.append(prob_human - prob)
-            x_zeroed[0, i] = original_val
-
-        print("Using ablation method for SHAP values")
-        return np.array(shap_values), prob_human
+        # Create model wrapper function returning the human-class probability
+        def model_predict(x):
+            with torch.no_grad():
+                tensor_x = torch.FloatTensor(x).to(device)
+                output = model(tensor_x)
+                probs = torch.softmax(output, dim=1)[:, 1]  # Human probability
+                return probs.cpu().numpy()
+
+        # Create baseline distribution (KernelExplainer works on NumPy arrays)
+        background = np.zeros((1, x_tensor.shape[1]))
+
+        # Use KernelExplainer as fallback
+        explainer = shap.KernelExplainer(model_predict, background)
+
+        # Calculate SHAP values; [0] keeps the shape (n_features,) consistent
+        # with the DeepExplainer branch
+        x_numpy = x_tensor.cpu().numpy()
+        shap_values = explainer.shap_values(x_numpy, nsamples=100)[0]
+
+    # Get human probability
+    with torch.no_grad():
+        output = model(x_tensor)
+        probs = torch.softmax(output, dim=1)
+        prob_human = probs[0, 1].item()
+
+    return np.array(shap_values), prob_human
+
 ###############################################################################
 # 4. PER-BASE SHAP AGGREGATION
 ###############################################################################
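A minimal sketch of how the updated helper might be exercised, assuming a trained classifier bound to `model` elsewhere in app.py and the `sequence_to_kmer_vector` helper from section 2; the sequence and shapes below are illustrative, not part of this commit:

import numpy as np
import torch

# Hypothetical smoke test for calculate_shap_values
seq = "ACGT" * 64                               # illustrative input sequence
vec = sequence_to_kmer_vector(seq, k=4)         # 4^4 = 256 k-mer features
x_tensor = torch.FloatTensor(vec).unsqueeze(0)  # shape (1, 256)

shap_values, prob_human = calculate_shap_values(model, x_tensor)
print(f"P(human) = {prob_human:.3f}")
print("strongest k-mer index:", int(np.argmax(np.abs(shap_values))))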
@@ -1061,28 +1017,10 @@ def prepare_csv_download(data, filename="analysis_results.csv"):
     else:
         raise ValueError("Unsupported data type for CSV download")
 
-###############################################################################
-# 13. EXAMPLE FASTA LOADER
-###############################################################################
-
-def load_example_fasta():
-    """Load the example.fasta file contents"""
-    try:
-        with open('example.fasta', 'r') as f:
-            example_text = f.read()
-        return example_text
-    except Exception as e:
-        return f">example_sequence\nACGTACGT...\n\n(Note: Could not load example.fasta: {str(e)})"
-
 ###############################################################################
 # 14. BUILD GRADIO INTERFACE
 ###############################################################################
 
-
-###############################################################################
-# 13. EXAMPLE FASTA LOADER
-###############################################################################
-
 def load_example_fasta():
     """Load the example.fasta file contents"""
     try:
@@ -1184,10 +1122,10 @@ with gr.Blocks(css=css) as iface:
     **Analyze Gene Features**
     Upload a FASTA file and corresponding gene features file to analyze SHAP values per gene.
     Gene features should be in the format:
-    ```
-    >gene_name [gene=X] [locus_tag=Y] [location=start..end] or [location=complement(start..end)]
+
+    >gene_name [gene=X] [locus_tag=Y] [location=start..end] or [location=complement(start..end)]
     SEQUENCE
-    ```
+
     The genome viewer will show genes color-coded by their contribution:
     - Red: Genes pushing toward human origin
    - Blue: Genes pushing toward non-human origin
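For illustration, the bracketed location tag in that header format could be parsed roughly as follows; `parse_location` is a hypothetical helper sketched here, not a function defined in app.py:

import re

def parse_location(header: str):
    """Extract (start, end, strand) from a [location=...] FASTA header tag."""
    m = re.search(r"\[location=(complement\()?(\d+)\.\.(\d+)\)?\]", header)
    if m is None:
        return None
    strand = "-" if m.group(1) else "+"
    return int(m.group(2)), int(m.group(3)), strand

# parse_location(">g1 [gene=X] [location=complement(120..480)]") -> (120, 480, '-')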
 