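"""Checkpoint and visualization-data saver.

Saves model weights, intermediate-layer embeddings, predictions, and label/index
files during training so they can be inspected later.
"""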
import torch
import torch.nn as nn
import numpy as np
import os
import json
from tqdm import tqdm
class time_travel_saver:
"""可视化数据提取器
用于保存模型训练过程中的各种数据,包括:
1. 模型权重 (.pth)
2. 高维特征 (representation/*.npy)
3. 预测结果 (prediction/*.npy)
4. 标签数据 (label/labels.npy)
"""
    def __init__(self, model, dataloader, device, save_dir, model_name,
                 auto_save_embedding=False, layer_name=None, show=False):
"""初始化
Args:
model: 要保存的模型实例
dataloader: 数据加载器(必须是顺序加载的)
device: 计算设备(cpu or gpu)
save_dir: 保存根目录
model_name: 模型名称
"""
self.model = model
self.dataloader = dataloader
self.device = device
self.save_dir = save_dir
self.model_name = model_name
self.auto_save = auto_save_embedding
self.layer_name = layer_name
        if show and not layer_name:
            self.show_dimensions()

    def show_dimensions(self):
        """Print the name and output dimension of every layer in the model.

        Runs one batch through the model and reports each layer's flattened
        output dimension, to help the user pick a layer for feature extraction.

        Returns:
            layer_dimensions: dict mapping layer names to flattened feature dimensions
        """
activation = {}
layer_dimensions = {}
def get_activation(name):
def hook(model, input, output):
activation[name] = output.detach()
return hook
        # Register a forward hook on every module to capture its output
handles = []
for name, module in self.model.named_modules():
if isinstance(module, nn.Module) and not isinstance(module, nn.ModuleList) and not isinstance(module, nn.ModuleDict):
handles.append(module.register_forward_hook(get_activation(name)))
self.model.eval()
with torch.no_grad():
            # Run a single batch to record each layer's output shape
inputs, _ = next(iter(self.dataloader))
inputs = inputs.to(self.device)
_ = self.model(inputs)
        # Report each layer's name and flattened feature dimension
        print("\nLayer names and dimensions:")
        print("-" * 50)
        print(f"{'Layer name':<40} {'Feature dim':<15} {'Output shape'}")
        print("-" * 50)
        for name, feat in activation.items():
            if feat is None:
                continue
            # Flattened feature dimension (all dims except the batch dim)
            feat_dim = feat.view(feat.size(0), -1).size(1)
            layer_dimensions[name] = feat_dim
            # Print one row per layer
            shape_str = str(list(feat.shape))
            print(f"{name:<40} {feat_dim:<15} {shape_str}")
        print("-" * 50)
        print("Note: the feature dimension is the size of the flattened output tensor")
        print("Pass a different layer_name to time_travel_saver to pick another layer,")
        print("e.g. layer_name='avg_pool' or layer_name='layer4'")
        # Remove all hooks
        for handle in handles:
            handle.remove()
        return layer_dimensions

    def _extract_features_and_predictions(self):
        """Extract features and predictions for every sample in the dataloader.

        Returns:
            features: high-dimensional features, shape [num_samples, feature_dim]
            predictions: model outputs, shape [num_samples, num_classes]
        """
features = []
predictions = []
indices = []
activation = {}
def get_activation(name):
def hook(model, input, output):
                # Store the activation only when not already recorded, to avoid holding extra copies
if name not in activation or activation[name] is None:
activation[name] = output.detach()
return hook
        # The feature layer is chosen by name (if layer_name is set) or by dimension
        # Register a forward hook on every module to capture its output
handles = []
for name, module in self.model.named_modules():
if isinstance(module, nn.Module) and not isinstance(module, nn.ModuleList) and not isinstance(module, nn.ModuleDict):
handles.append(module.register_forward_hook(get_activation(name)))
self.model.eval()
with torch.no_grad():
            # First run a single batch to inspect each layer's output dimension
inputs, _ = next(iter(self.dataloader))
inputs = inputs.to(self.device)
_ = self.model(inputs)
            # If a layer name was given, use that layer directly
            if self.layer_name is not None:
                if self.layer_name not in activation:
                    raise ValueError(f"The specified layer {self.layer_name} does not exist in the model")
                feat = activation[self.layer_name]
                if feat is None:
                    raise ValueError(f"The specified layer {self.layer_name} produced no output")
                suitable_layer_name = self.layer_name
                suitable_dim = feat.view(feat.size(0), -1).size(1)
                print(f"Using the specified feature layer: {suitable_layer_name}, feature dim: {suitable_dim}")
            else:
                # Otherwise pick the first layer whose flattened dimension falls in this range
                target_dim_range = (256, 2048)
                suitable_layer_name = None
                suitable_dim = None
                # Inspect the output dimension of every hooked layer
                for name, feat in activation.items():
                    if feat is None:
                        continue
                    feat_dim = feat.view(feat.size(0), -1).size(1)
                    if target_dim_range[0] <= feat_dim <= target_dim_range[1]:
                        suitable_layer_name = name
                        suitable_dim = feat_dim
                        break
                if suitable_layer_name is None:
                    raise ValueError("No layer with a suitable feature dimension was found")
                print(f"Automatically selected feature layer: {suitable_layer_name}, feature dim: {suitable_dim}")
            # Save the chosen layer's info next to the save directory
            layer_info = {
                'layer_id': suitable_layer_name,
                'dim': suitable_dim
            }
            layer_info_path = os.path.join(os.path.dirname(self.save_dir), 'layer_info.json')
            with open(layer_info_path, 'w') as f:
                json.dump(layer_info, f)
            # Clear the activations from the first (analysis) pass
            activation.clear()
            # Now process the full dataset
            for batch_idx, (inputs, _) in enumerate(tqdm(self.dataloader, desc="Extracting features and predictions")):
                inputs = inputs.to(self.device)
                outputs = self.model(inputs)  # model predictions
                # Fetch and flatten the features captured by the hook
                feat = activation[suitable_layer_name]
                flat_features = torch.flatten(feat, start_dim=1)
                features.append(flat_features.cpu().numpy())
                predictions.append(outputs.cpu().numpy())
                # Clear this batch's activations
                activation.clear()
        # Remove all hooks
        for handle in handles:
            handle.remove()
        if len(features) > 0:
            features = np.vstack(features)
            predictions = np.vstack(predictions)
            return features, predictions
        else:
            return np.array([]), np.array([])

    def save_lables_index(self, path):
        """Save label data and dataset index information.

        Args:
            path: directory to save labels.npy and index.json into
        """
        os.makedirs(path, exist_ok=True)
        labels_path = os.path.join(path, 'labels.npy')
        index_path = os.path.join(path, 'index.json')
        # Try to obtain the labels from the dataset's attributes
        try:
            if hasattr(self.dataloader.dataset, 'targets'):
                # CIFAR10/CIFAR100 expose labels via the targets attribute
                labels = np.array(self.dataloader.dataset.targets)
            elif hasattr(self.dataloader.dataset, 'labels'):
                # Some datasets use a labels attribute instead
                labels = np.array(self.dataloader.dataset.labels)
            else:
                # Fall back to collecting labels by iterating the dataloader
                labels = []
                for _, batch_labels in self.dataloader:
                    labels.append(batch_labels.numpy())
                labels = np.concatenate(labels)
            # Save the label array
            np.save(labels_path, labels)
            print(f"Labels saved to {labels_path}")
            # Build the dataset index
            num_samples = len(labels)
            indices = list(range(num_samples))
            # Index dictionary describing the train/test/validation split
            index_dict = {
                "train": indices,      # all samples default to the training split
                "test": [],            # empty for now
                "validation": []       # empty for now
            }
            # Save the index as JSON
            with open(index_path, 'w') as f:
                json.dump(index_dict, f, indent=4)
            print(f"Dataset index saved to {index_path}")
        except Exception as e:
            print(f"Error while saving labels and index: {e}")

    def save_checkpoint_embeddings_predictions(self, model=None):
        """Save the model checkpoint and, optionally, embeddings and predictions."""
        if model is not None:
            self.model = model
        # Save the model weights
        os.makedirs(self.save_dir, exist_ok=True)
        model_path = os.path.join(self.save_dir, 'model.pth')
        torch.save(self.model.state_dict(), model_path)
        if self.auto_save:
            # Extract and save features and predictions
            features, predictions = self._extract_features_and_predictions()
            # Save the features
            np.save(os.path.join(self.save_dir, 'embeddings.npy'), features)
            # Save the predictions
            np.save(os.path.join(self.save_dir, 'predictions.npy'), predictions)
            print("\nSaved the following data:")
            print(f"- Model weights: {model_path}")
            print(f"- Feature vectors: [num samples: {features.shape[0]}, feature dim: {features.shape[1]}]")
            print(f"- Predictions: [num samples: {predictions.shape[0]}, num classes: {predictions.shape[1]}]")