import os
import yaml
import torch
import torch.nn as nn
import torch.optim as optim
import time
import logging
from tqdm import tqdm
from torch.utils.data import ConcatDataset

from dataset_utils import get_noisy_cifar10_dataloaders
from model import densenet_cifar
from get_representation import time_travel_saver


def setup_logger(log_file):
    """Configure a logger; overwrites the log file if it already exists.

    Args:
        log_file: path to the log file

    Returns:
        logger: the configured logger
    """
    logger = logging.getLogger('train')
    logger.setLevel(logging.INFO)

    # Remove any existing handlers so repeated calls don't duplicate output
    if logger.hasHandlers():
        logger.handlers.clear()

    # File handler in 'w' mode overwrites any existing log file
    fh = logging.FileHandler(log_file, mode='w')
    fh.setLevel(logging.INFO)

    # Console handler
    ch = logging.StreamHandler()
    ch.setLevel(logging.INFO)

    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)

    logger.addHandler(fh)
    logger.addHandler(ch)

    return logger


def train_model(model, trainloader, testloader, epochs=200, lr=0.1, device='cuda:0',
                save_dir='./epochs', model_name='model', interval=1):
    """Generic model training loop.

    Args:
        model: the model to train
        trainloader: training DataLoader
        testloader: test DataLoader
        epochs: number of training epochs
        lr: learning rate
        device: training device in the form 'cuda:N', where N is the GPU index
        save_dir: directory for saved checkpoints
        model_name: model name
        interval: checkpoint-saving interval (in epochs)

    Returns:
        model: the trained model
    """
    # Validate the requested device, falling back to CPU or GPU 0 if needed
    if not torch.cuda.is_available():
        print("CUDA is not available; training on CPU")
        device = 'cpu'
    elif not device.startswith('cuda:'):
        device = 'cuda:0'

    if device.startswith('cuda:'):
        gpu_id = int(device.split(':')[1])
        if gpu_id >= torch.cuda.device_count():
            print(f"GPU {gpu_id} is not available; falling back to GPU 0")
            device = 'cuda:0'

    os.makedirs(save_dir, exist_ok=True)

    # Log next to the checkpoint directory, one level above this file
    log_file = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
                            'epochs', 'train.log')
    os.makedirs(os.path.dirname(log_file), exist_ok=True)
    logger = setup_logger(log_file)

    # Loss, optimizer, and LR schedule; anneal over the full run (the original
    # T_max=50 would make the LR oscillate whenever epochs > 50)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9, weight_decay=5e-4)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=epochs)

    model = model.to(device)

    best_acc = 0
    start_time = time.time()
    logger.info(f'Starting training for {model_name}')
    logger.info(f'Epochs: {epochs}, learning rate: {lr}, device: {device}')

    for epoch in range(epochs):
        # Training phase
        model.train()
        train_loss = 0
        correct = 0
        total = 0

        train_pbar = tqdm(trainloader, desc=f'Epoch {epoch+1}/{epochs} [Train]')
        for batch_idx, (inputs, targets) in enumerate(train_pbar):
            inputs, targets = inputs.to(device), targets.to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()

            train_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()

            train_pbar.set_postfix({
                'loss': f'{train_loss/(batch_idx+1):.3f}',
                'acc': f'{100.*correct/total:.2f}%'
            })

        # Keep the training accuracy for this epoch's log line
        train_acc = 100. * correct / total

        # Evaluation phase
        model.eval()
        test_loss = 0
        correct = 0
        total = 0

        test_pbar = tqdm(testloader, desc=f'Epoch {epoch+1}/{epochs} [Test]')
        with torch.no_grad():
            for batch_idx, (inputs, targets) in enumerate(test_pbar):
                inputs, targets = inputs.to(device), targets.to(device)
                outputs = model(inputs)
                loss = criterion(outputs, targets)

                test_loss += loss.item()
                _, predicted = outputs.max(1)
                total += targets.size(0)
                correct += predicted.eq(targets).sum().item()

                test_pbar.set_postfix({
                    'loss': f'{test_loss/(batch_idx+1):.3f}',
                    'acc': f'{100.*correct/total:.2f}%'
                })

        # Test accuracy for this epoch
        acc = 100. * correct / total
        best_acc = max(best_acc, acc)
        # Log train/test loss and accuracy for this epoch
        logger.info(f'Epoch: {epoch+1} | Train Loss: {train_loss/len(trainloader):.3f} | '
                    f'Train Acc: {train_acc:.2f}% | '
                    f'Test Loss: {test_loss/len(testloader):.3f} | Test Acc: {acc:.2f}%')

        # Save the files needed to visualize the training process
        if (epoch + 1) % interval == 0 or epoch == 0:
            def custom_collate_fn(batch):
                # Stack images and labels into batch tensors
                data = torch.stack([item[0] for item in batch], 0)
                target = torch.tensor([item[1] for item in batch])
                return [data, target]

            # Concatenate the train and test sets and iterate them in a fixed
            # order, so saved embeddings line up with stable sample indices
            combined_dataset = ConcatDataset([trainloader.dataset, testloader.dataset])
            ordered_loader = torch.utils.data.DataLoader(
                combined_dataset,
                batch_size=trainloader.batch_size,
                shuffle=False,          # deterministic sample order
                num_workers=trainloader.num_workers,
                collate_fn=custom_collate_fn
            )

            epoch_save_dir = os.path.join(save_dir, f'epoch_{epoch+1}')
            save_model = time_travel_saver(model, ordered_loader, device, epoch_save_dir,
                                           model_name, show=True, layer_name='avg_pool',
                                           auto_save_embedding=True)
            save_model.save_checkpoint_embeddings_predictions()
            if epoch == 0:
                save_model.save_lables_index(path="../dataset")

        scheduler.step()

    total_time = time.time() - start_time
    logger.info(f'Training complete! Best test accuracy: {best_acc:.2f}%, '
                f'total time: {total_time/60:.1f} min')
    return model


def noisy_train():
    """Train a model on label-noised CIFAR-10.

    Returns:
        model: the trained model
    """
    # Load the config file
    config_path = './train.yaml'
    with open(config_path, 'r') as f:
        config = yaml.safe_load(f)

    # Pick the device, falling back to CPU when CUDA is unavailable
    device = f"cuda:{config.get('gpu', 0)}" if torch.cuda.is_available() else 'cpu'

    # Load the noise-injected CIFAR-10 dataset
    batch_size = config.get('batch_size', 128)
    trainloader, testloader = get_noisy_cifar10_dataloaders(batch_size=batch_size)

    # Initialize the model
    model = densenet_cifar(num_classes=10).to(device)

    # Training parameters
    epochs = config.get('epochs', 200)
    lr = config.get('learning_rate', 0.1)
    save_dir = os.path.join('..', 'epochs')
    interval = config.get('interval', 2)
    os.makedirs(save_dir, exist_ok=True)

    # Train the model
    model = train_model(
        model=model,
        trainloader=trainloader,
        testloader=testloader,
        epochs=epochs,
        lr=lr,
        device=device,
        save_dir=save_dir,
        model_name='densenet_cifar',
        interval=interval
    )

    print(f"Training complete; checkpoints saved to {save_dir}")
    return model


if __name__ == '__main__':
    noisy_train()
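
# For reference, a minimal ./train.yaml that noisy_train() can consume. The
# keys mirror the config.get(...) calls above; the values shown are the same
# fallback defaults the script uses, not tuned settings:
#
#   gpu: 0
#   batch_size: 128
#   epochs: 200
#   learning_rate: 0.1
#   interval: 2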