# HIM-self/src/core/sparse_activation.py
import torch
from typing import List


class PatternAnalyzer:
    """Placeholder for activation-pattern analysis utilities (currently unused)."""

    def __init__(self):
        pass


class SparseActivationManager:
    """Sparsifies activations by masking out low-importance elements."""

    def __init__(self, sparsity_threshold: float = 0.95):
        self.sparsity_threshold = sparsity_threshold
        self.activation_history: List[torch.Tensor] = []
        self.pattern_analyzer = PatternAnalyzer()

    def compute_pattern(self, input_tensor: torch.Tensor) -> torch.Tensor:
        """Score element importance, build a binary mask, and apply it to the input."""
        importance_scores = self._compute_importance_scores(input_tensor)
        activation_mask = self._generate_activation_mask(importance_scores)
        return self._apply_sparse_activation(input_tensor, activation_mask)

    def _compute_importance_scores(self, input_tensor: torch.Tensor) -> torch.Tensor:
        # Blend an attention-style weight with a magnitude-based signal.
        attention_weights = self._calculate_attention_weights(input_tensor)
        gradient_information = self._compute_gradient_information(input_tensor)
        return self._combine_importance_metrics(attention_weights, gradient_information)

    def _generate_activation_mask(self, importance_scores: torch.Tensor) -> torch.Tensor:
        # Binary mask: keep only elements whose importance exceeds the threshold.
        return (importance_scores > self.sparsity_threshold).float()
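
    # --- Illustrative sketch, not part of the original module -----------------
    # Comparing scores against a fixed 0.95 threshold does not guarantee a 95%
    # sparsity level. A quantile-based mask would enforce the target fraction
    # directly; `_generate_quantile_mask` is a hypothetical helper added here
    # only to show that alternative.
    def _generate_quantile_mask(self, importance_scores: torch.Tensor) -> torch.Tensor:
        # Keep the top (1 - sparsity_threshold) fraction of elements by score.
        cutoff = torch.quantile(importance_scores.flatten(), self.sparsity_threshold)
        return (importance_scores > cutoff).float()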

    def _apply_sparse_activation(self, input_tensor: torch.Tensor,
                                 activation_mask: torch.Tensor) -> torch.Tensor:
        # Zero out elements not selected by the mask.
        return input_tensor * activation_mask

    def _calculate_attention_weights(self, input_tensor: torch.Tensor) -> torch.Tensor:
        # Element-wise sigmoid as a lightweight stand-in for attention weights.
        return torch.sigmoid(input_tensor)

    def _compute_gradient_information(self, input_tensor: torch.Tensor) -> torch.Tensor:
        # Absolute magnitude of the input as a cheap proxy for gradient information.
        return torch.abs(input_tensor)
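
    # --- Illustrative sketch, not part of the original module -----------------
    # `_compute_gradient_information` above uses |x| as a cheap stand-in for
    # gradient magnitude. If a differentiable scoring function is available,
    # true gradient saliency could be computed roughly as below. `score_fn` is
    # a hypothetical callable mapping a tensor to a scalar; it is an assumption,
    # not part of the original design.
    def _gradient_saliency(self, input_tensor: torch.Tensor, score_fn) -> torch.Tensor:
        x = input_tensor.detach().clone().requires_grad_(True)
        scalar = score_fn(x)                    # assumed: returns a 0-dim tensor
        grad, = torch.autograd.grad(scalar, x)  # d(score)/d(input)
        return grad.abs()                       # magnitude as importance signal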

    def _combine_importance_metrics(self, attention_weights: torch.Tensor,
                                    gradient_information: torch.Tensor) -> torch.Tensor:
        # Combine the two signals into a single per-element importance score.
        return attention_weights * gradient_information
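

# --- Usage sketch (assumption, not part of the original file) -----------------
# A minimal smoke test showing the intended call path; the input shape and
# values are arbitrary.
if __name__ == "__main__":
    manager = SparseActivationManager(sparsity_threshold=0.95)
    x = torch.randn(4, 16)
    sparse_x = manager.compute_pattern(x)
    kept = (sparse_x != 0).float().mean().item()
    print(f"fraction of activations kept: {kept:.3f}")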