import gradio as gr
import torch
import joblib
import numpy as np
from itertools import product
import torch.nn as nn
import matplotlib.pyplot as plt
import io
from PIL import Image
class VirusClassifier(nn.Module):
    def __init__(self, input_shape: int):
        super(VirusClassifier, self).__init__()
        self.network = nn.Sequential(
            nn.Linear(input_shape, 64),
            nn.GELU(),
            nn.BatchNorm1d(64),
            nn.Dropout(0.3),
            nn.Linear(64, 32),
            nn.GELU(),
            nn.BatchNorm1d(32),
            nn.Dropout(0.3),
            nn.Linear(32, 32),
            nn.GELU(),
            nn.Linear(32, 2)
        )

    def forward(self, x):
        return self.network(x)
    def get_feature_importance(self, x):
        """Gradient-based feature importance for the human class (index 1).

        Note: assumes a single-sample batch; the implicit scalar backward()
        and the float() conversion below both rely on that.
        """
        x.requires_grad_(True)
        output = self.network(x)
        probs = torch.softmax(output, dim=1)
        # We focus on the human-class (index 1) probability
        human_prob = probs[..., 1]
        human_prob.backward()
        # The gradient shows how each feature affects the human probability
        importance = x.grad
        return importance, float(human_prob)
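
# Usage sketch for get_feature_importance (illustrative; assumes a trained
# model and a scaled 1 x 256 input tensor):
#   grads, p_human = model.get_feature_importance(x_scaled.clone())
# grads has shape (1, 256); positive entries push the prediction toward the
# human class, negative entries away from it (.clone() avoids flipping
# requires_grad on the caller's tensor).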
def sequence_to_kmer_vector(sequence: str, k: int = 4) -> np.ndarray:
    """Convert a sequence to a normalized k-mer frequency vector"""
    kmers = [''.join(p) for p in product("ACGT", repeat=k)]
    kmer_dict = {km: i for i, km in enumerate(kmers)}
    vec = np.zeros(len(kmers), dtype=np.float32)
    for i in range(len(sequence) - k + 1):
        kmer = sequence[i:i+k]
        if kmer in kmer_dict:
            vec[kmer_dict[kmer]] += 1
    total_kmers = len(sequence) - k + 1
    if total_kmers > 0:
        vec = vec / total_kmers
    return vec
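
# Quick sanity check (illustrative): "ACGTAC" with k=4 yields three
# overlapping k-mers (ACGT, CGTA, GTAC), each at frequency 1/3, so:
#   v = sequence_to_kmer_vector("ACGTAC")
#   assert np.isclose(v.sum(), 1.0) and np.count_nonzero(v) == 3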
def parse_fasta(text):
    sequences = []
    current_header = None
    current_sequence = []
    for line in text.split('\n'):
        line = line.strip()
        if not line:
            continue
        if line.startswith('>'):
            if current_header:
                sequences.append((current_header, ''.join(current_sequence)))
            current_header = line[1:]
            current_sequence = []
        else:
            current_sequence.append(line.upper())
    if current_header:
        sequences.append((current_header, ''.join(current_sequence)))
    return sequences
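
# Example (illustrative):
#   parse_fasta(">seq1\nACGT\nacgt\n>seq2\nTTTT")
# returns [("seq1", "ACGTACGT"), ("seq2", "TTTT")]: headers lose the ">",
# and multi-line records are concatenated and upper-cased.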
def predict(file_obj):
    if file_obj is None:
        return "Please upload a FASTA file", None

    try:
        if isinstance(file_obj, str):
            text = file_obj
        else:
            text = file_obj.decode('utf-8')
    except Exception as e:
        return f"Error reading file: {str(e)}", None

    k = 4
    kmers = [''.join(p) for p in product("ACGT", repeat=k)]  # 4^4 = 256 k-mers
    kmer_dict = {km: i for i, km in enumerate(kmers)}

    try:
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
        model = VirusClassifier(256).to(device)
        state_dict = torch.load('model.pt', map_location=device)
        model.load_state_dict(state_dict)
        scaler = joblib.load('scaler.pkl')
        model.eval()
    except Exception as e:
        return f"Error loading model: {str(e)}", None
results_text = "" | |
plot_image = None | |
try: | |
sequences = parse_fasta(text) | |
header, seq = sequences[0] | |
raw_freq_vector = sequence_to_kmer_vector(seq) | |
kmer_vector = scaler.transform(raw_freq_vector.reshape(1, -1)) | |
X_tensor = torch.FloatTensor(kmer_vector).to(device) | |
        # Compute the final probabilities once; they are reused for the
        # rescaling below and for the verdict at the end
        with torch.no_grad():
            output = model(X_tensor)
            probs = torch.softmax(output, dim=1)
            human_prob = float(probs[0][1])

        # Feature importance via integrated gradients: average the gradient
        # of the human-class probability along a straight-line path from a
        # zero baseline to the input, then multiply by (input - baseline)
        baseline = torch.zeros_like(X_tensor)
        n_steps = 50
        all_grads = []
        for i in range(n_steps + 1):
            alpha = i / n_steps
            interpolated = baseline + alpha * (X_tensor - baseline)
            interpolated.requires_grad_(True)
            interp_probs = torch.softmax(model(interpolated), dim=1)
            interp_probs[..., 1].backward()
            all_grads.append(interpolated.grad.cpu().numpy())
        avg_grads = np.mean(all_grads, axis=0)[0]
        kmer_importance = avg_grads * (X_tensor - baseline).cpu().numpy()[0]

        # Rescale so the contributions sum to the shift from a neutral 0.5
        # prediction; this keeps the step plot aligned with human_prob
        target_diff = human_prob - 0.5
        current_sum = np.sum(kmer_importance)
        if current_sum != 0:  # avoid division by zero
            kmer_importance = kmer_importance * (target_diff / current_sum)
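
        # For reference, the quantity approximated above is the
        # integrated-gradients attribution
        #   IG_j = (x_j - b_j) * mean_alpha[ d p_human(b + alpha*(x - b)) / dx_j ]
        # discretized with a 51-point Riemann sum over alpha in [0, 1]; the
        # extra rescaling is this app's own alignment heuristic, not part of
        # the original method.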
        # Get top k-mers by absolute importance
        top_k = 10
        top_indices = np.argsort(np.abs(kmer_importance))[-top_k:][::-1]
        important_kmers = [
            {
                'kmer': kmers[i],  # kmer_dict maps kmers[i] -> i
                'importance': float(kmer_importance[i]),
                'frequency': float(raw_freq_vector[i]),
                'scaled': float(kmer_vector[0][i])
            }
            for i in top_indices
        ]
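
        # important_kmers now looks like (values purely illustrative):
        #   [{'kmer': 'TCGA', 'importance': 0.012,
        #     'frequency': 0.0045, 'scaled': 1.31}, ...]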
        # Lump the impact of all features outside the top list into "Others"
        others_mask = np.ones_like(kmer_importance, dtype=bool)
        others_mask[top_indices] = False
        others_sum = float(np.sum(kmer_importance[others_mask]))
        # Calculate step-by-step probabilities, starting from neutral 0.5
        current_prob = 0.5
        steps = [('Start', current_prob, 0)]

        # Process each k-mer contribution
        for kmer in important_kmers:
            change = kmer['importance']
            current_prob += change
            steps.append((kmer['kmer'], current_prob, change))

        # Add the final "Others" contribution
        current_prob += others_sum
        steps.append(('Others', current_prob, others_sum))
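
        # Because the contributions were rescaled to sum to human_prob - 0.5,
        # current_prob lands exactly on the model's human probability here.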
        # Create step plot
        plt.figure(figsize=(12, 6))
        x = range(len(steps))
        y = [step[1] for step in steps]

        plt.step(x, y, 'b-', where='post', label='Probability', linewidth=2)
        plt.plot(x, y, 'b.', markersize=10)

        # Add reference line
        plt.axhline(y=0.5, color='r', linestyle='--', label='Neutral (0.5)')

        # Customize plot
        plt.grid(True, linestyle='--', alpha=0.7)
        plt.ylim(0, 1)
        plt.ylabel('Human Probability')
        plt.title(f'K-mer Contributions to Prediction (final prob: {human_prob:.3f})')

        # Add labels for each point
        for i, (kmer, prob, change) in enumerate(steps):
            # K-mer label, alternating above/below the point
            plt.annotate(kmer,
                         (i, prob),
                         xytext=(0, 10 if i % 2 == 0 else -20),
                         textcoords='offset points',
                         ha='center',
                         rotation=45 if len(kmer) > 5 else 0)
            # Signed change value (skip the 'Start' point)
            if i > 0:
                change_text = f'{change:+.3f}'
                color = 'green' if change > 0 else 'red'
                plt.annotate(change_text,
                             (i, prob),
                             xytext=(0, -20 if i % 2 == 0 else 10),
                             textcoords='offset points',
                             ha='center',
                             color=color)

        plt.legend()
        plt.tight_layout()

        # Render the figure into a PIL image for Gradio
        buf = io.BytesIO()
        plt.savefig(buf, format='png', bbox_inches='tight', dpi=300)
        buf.seek(0)
        plot_image = Image.open(buf)
        plt.close()
        # Final verdict, reusing the probabilities computed earlier
        pred_class = 1 if probs[0][1] > probs[0][0] else 0
        pred_label = 'human' if pred_class == 1 else 'non-human'
        # Generate results text
        results_text += f"""Sequence: {header}
Prediction: {pred_label}
Confidence: {float(max(probs[0])):0.4f}
Human probability: {float(probs[0][1]):0.4f}
Non-human probability: {float(probs[0][0]):0.4f}
Most influential k-mers (ranked by importance):"""

        for kmer in important_kmers:
            direction = "human" if kmer['importance'] > 0 else "non-human"
            results_text += f"\n  {kmer['kmer']}: "
            results_text += f"pushes toward {direction} (impact={abs(kmer['importance']):.4f}), "
            results_text += f"occurrence={kmer['frequency']*100:.2f}% of sequence "
            if kmer['scaled'] > 0:
                results_text += f"(appears {abs(kmer['scaled']):.2f}σ more than average)"
            else:
                results_text += f"(appears {abs(kmer['scaled']):.2f}σ less than average)"

    except Exception as e:
        return f"Error processing sequences: {str(e)}", None

    return results_text, plot_image
iface = gr.Interface(
    fn=predict,
    inputs=gr.File(label="Upload FASTA file", type="binary"),
    outputs=[gr.Textbox(label="Results"), gr.Image(label="K-mer Contribution Plot")],
    title="Virus Host Classifier"
)
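
# Local smoke test (sketch; requires model.pt and scaler.pkl in the working
# directory):
#   text, img = predict(b">test\n" + b"ACGT" * 100)
#   print(text)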
if __name__ == "__main__":
    iface.launch()  # share=True is unnecessary (and ignored) on Hugging Face Spaces