|
import sys |
|
import os |
|
import yaml |
|
from pathlib import Path |
|
import torch |
|
import torch.nn as nn |
|
import torch.optim as optim |
|
import time |
|
import logging |
|
import numpy as np |
|
from tqdm import tqdm |
|
|
|
|
|
from dataset_utils import get_cifar10_dataloaders |
|
from model import MobileNetV2 |
|
from get_representation import time_travel_saver |
|
|
|
def setup_logger(log_file):
    """Create the 'train' logger writing INFO records to both a file and the console.

    The log file is overwritten (mode='w') on every call, so each training run
    starts with a fresh log.

    Args:
        log_file: path of the log file to create/overwrite.

    Returns:
        logging.Logger: configured logger named 'train'.
    """
    logger = logging.getLogger('train')
    logger.setLevel(logging.INFO)

    # Re-running training in the same process must not stack duplicate handlers.
    if logger.hasHandlers():
        logger.handlers.clear()

    # Don't bubble records up to the root logger — avoids double console output
    # if the surrounding application configures root logging.
    logger.propagate = False

    # utf-8 explicitly: the training messages contain Chinese text, which would
    # raise UnicodeEncodeError under a non-UTF-8 platform default encoding.
    fh = logging.FileHandler(log_file, mode='w', encoding='utf-8')
    fh.setLevel(logging.INFO)

    ch = logging.StreamHandler()
    ch.setLevel(logging.INFO)

    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)

    logger.addHandler(fh)
    logger.addHandler(ch)

    return logger
|
|
|
def train_model(model, trainloader, testloader, epochs=200, lr=0.1, device='cuda:0',
                save_dir='./epochs', model_name='model', interval=1):
    """Generic training loop with periodic checkpoint/embedding saving.

    Trains with SGD (momentum 0.9, weight decay 5e-4) under a cosine-annealing
    schedule (T_max=50), evaluates on the test set every epoch, and every
    `interval` epochs hands the model plus an ordered train+test loader to
    `time_travel_saver` for checkpoint/embedding export.

    Args:
        model: the model to train.
        trainloader: training DataLoader.
        testloader: test DataLoader.
        epochs: number of training epochs.
        lr: initial learning rate.
        device: training device in the form 'cuda:N' (N = GPU index) or 'cpu'.
        save_dir: directory for per-epoch checkpoints.
        model_name: model name passed to the saver.
        interval: save every `interval` epochs (epoch 0 is always saved).
    """
    # Fall back gracefully when CUDA (or the requested GPU index) is unavailable.
    if not torch.cuda.is_available():
        print("CUDA不可用,将使用CPU训练")
        device = 'cpu'
    elif not device.startswith('cuda:'):
        device = f'cuda:0'

    if device.startswith('cuda:'):
        gpu_id = int(device.split(':')[1])
        if gpu_id >= torch.cuda.device_count():
            print(f"GPU {gpu_id} 不可用,将使用GPU 0")
            device = 'cuda:0'

    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    # Log file lives in an 'epochs' directory that is a SIBLING of save_dir
    # (parent-of-save_dir/epochs/train.log) — presumably intentional; verify.
    log_file = os.path.join(os.path.dirname(save_dir),'epochs', 'train.log')
    if not os.path.exists(os.path.dirname(log_file)):
        os.makedirs(os.path.dirname(log_file))

    logger = setup_logger(log_file)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9, weight_decay=5e-4)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=50)

    model = model.to(device)
    best_acc = 0  # NOTE(review): never updated/used below — dead state?
    start_time = time.time()  # NOTE(review): never read afterwards

    logger.info(f'开始训练 {model_name}')
    logger.info(f'总轮数: {epochs}, 学习率: {lr}, 设备: {device}')

    for epoch in range(epochs):
        # ---- training pass ----
        model.train()
        train_loss = 0
        correct = 0
        total = 0

        train_pbar = tqdm(trainloader, desc=f'Epoch {epoch+1}/{epochs} [Train]')
        for batch_idx, (inputs, targets) in enumerate(train_pbar):
            inputs, targets = inputs.to(device), targets.to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()

            # Running averages shown in the progress bar.
            train_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()

            train_pbar.set_postfix({
                'loss': f'{train_loss/(batch_idx+1):.3f}',
                'acc': f'{100.*correct/total:.2f}%'
            })

        train_acc = 100.*correct/total
        train_correct = correct  # NOTE(review): unused after assignment
        train_total = total      # NOTE(review): unused after assignment

        # ---- evaluation pass (no gradients) ----
        model.eval()
        test_loss = 0
        correct = 0
        total = 0

        test_pbar = tqdm(testloader, desc=f'Epoch {epoch+1}/{epochs} [Test]')
        with torch.no_grad():
            for batch_idx, (inputs, targets) in enumerate(test_pbar):
                inputs, targets = inputs.to(device), targets.to(device)
                outputs = model(inputs)
                loss = criterion(outputs, targets)

                test_loss += loss.item()
                _, predicted = outputs.max(1)
                total += targets.size(0)
                correct += predicted.eq(targets).sum().item()

                test_pbar.set_postfix({
                    'loss': f'{test_loss/(batch_idx+1):.3f}',
                    'acc': f'{100.*correct/total:.2f}%'
                })

        acc = 100.*correct/total

        # batch_idx here is the last index of the TEST loop, so
        # test_loss/(batch_idx+1) is the mean test loss per batch.
        logger.info(f'Epoch: {epoch+1} | Train Loss: {train_loss/(len(trainloader)):.3f} | Train Acc: {train_acc:.2f}% | '
                    f'Test Loss: {test_loss/(batch_idx+1):.3f} | Test Acc: {acc:.2f}%')

        # ---- periodic checkpoint + embedding export ----
        if (epoch + 1) % interval == 0 or (epoch == 0):
            # Local import keeps the dependency scoped to the save path only.
            from torch.utils.data import ConcatDataset

            def custom_collate_fn(batch):
                # Stack (tensor, int-label) pairs into a single batch tensor
                # and a label tensor; needed because the concatenated datasets
                # may yield plain Python labels.
                data = [item[0] for item in batch]
                target = [item[1] for item in batch]

                data = torch.stack(data, 0)
                target = torch.tensor(target)

                return [data, target]

            # Train + test samples in a fixed (unshuffled) order so saved
            # embeddings line up with dataset indices across epochs.
            combined_dataset = ConcatDataset([trainloader.dataset, testloader.dataset])

            ordered_loader = torch.utils.data.DataLoader(
                combined_dataset,
                batch_size=trainloader.batch_size,
                shuffle=False,
                num_workers=trainloader.num_workers,
                collate_fn=custom_collate_fn
            )
            epoch_save_dir = os.path.join(save_dir, f'epoch_{epoch+1}')
            save_model = time_travel_saver(model, ordered_loader, device, epoch_save_dir, model_name,
                                           show=True, layer_name='avgpool', auto_save_embedding=True)
            save_model.save_checkpoint_embeddings_predictions()
            if epoch == 0:
                # External API spelling ('lables') — defined in get_representation.
                save_model.save_lables_index(path = "../dataset")

        scheduler.step()

    logger.info('训练完成!')
|
|
|
def backdoor_train():
    """Run the full backdoor-training pipeline driven by train.yaml.

    Attack design:
      1. Trigger: a small white square (trigger_size x trigger_size) in the
         bottom-right corner of each image.
      2. Goal: triggered images get classified as the target label (default 0).
      3. Poisoning: by default 10% of the training data receives the trigger
         and the flipped label.
    """
    # Configuration file lives next to this script.
    cfg_file = Path(__file__).parent / 'train.yaml'
    with open(cfg_file) as fh:
        cfg = yaml.safe_load(fh)

    # Attack hyper-parameters (with the defaults described above).
    ratio = cfg.get('poison_ratio', 0.1)
    target = cfg.get('target_label', 0)
    trig = cfg.get('trigger_size', 4)
    device = f'cuda:{cfg["gpu"]}'

    # Fresh model and CIFAR-10 data.
    net = MobileNetV2(num_classes=10)
    trainloader, testloader = get_cifar10_dataloaders(
        batch_size=cfg['batch_size'],
        num_workers=cfg['num_workers'],
        local_dataset_path=cfg['dataset_path'],
        shuffle=True
    )

    # Poison a fraction of the training data.
    poisoned_trainloader = inject_backdoor(
        trainloader,
        poison_ratio=ratio,
        target_label=target,
        trigger_size=trig
    )

    # Triggered copy of the whole test set (labels untouched) for ASR measurement.
    backdoor_testloader = create_backdoor_testset(
        testloader,
        trigger_size=trig
    )

    # Train on the poisoned data; per-epoch evaluation uses the CLEAN test set.
    train_model(
        model=net,
        trainloader=poisoned_trainloader,
        testloader=testloader,
        epochs=cfg['epochs'],
        lr=cfg['lr'],
        device=device,
        save_dir='../epochs',
        model_name='MobileNetV2_Backdoored',
        interval=cfg['interval']
    )

    # Final report: clean accuracy + attack success rate.
    evaluate_backdoor(net, testloader, backdoor_testloader, target, device)
|
|
|
def inject_backdoor(dataloader, poison_ratio=0.1, target_label=0, trigger_size=4):
    """Poison a fraction of the loader's samples with a white-square trigger.

    Each poisoned image gets a `trigger_size` x `trigger_size` patch of 1.0 in
    its bottom-right corner and its label flipped to `target_label`. The chosen
    indices are saved to <project root>/dataset/backdoor_index.npy.

    Args:
        dataloader: source DataLoader yielding (images, labels) batches.
        poison_ratio: fraction of samples to poison.
        target_label: label assigned to the poisoned samples.
        trigger_size: side length (pixels) of the square trigger.

    Returns:
        DataLoader over the poisoned dataset (shuffled, same batch size/workers).
    """
    # Materialize the dataset in memory, in the order the loader yields it.
    # NOTE(review): if `dataloader` shuffles, the saved indices refer to this
    # iteration order, not the underlying dataset order — confirm downstream use.
    data_list = []
    targets_list = []
    for inputs, targets in dataloader:
        data_list.append(inputs)
        targets_list.append(targets)

    all_data = torch.cat(data_list)
    all_targets = torch.cat(targets_list)

    num_samples = len(all_data)
    num_poisoned = int(num_samples * poison_ratio)

    # Random distinct subset of samples to poison.
    poison_indices = torch.randperm(num_samples)[:num_poisoned]

    # Persist the poisoned indices next to the dataset for later inspection.
    backdoor_index_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'dataset', 'backdoor_index.npy')
    os.makedirs(os.path.dirname(backdoor_index_path), exist_ok=True)
    np.save(backdoor_index_path, poison_indices.cpu().numpy())
    print(f"已保存{num_poisoned}个中毒样本索引到 {backdoor_index_path}")

    # Apply the trigger and flip labels in one vectorized step; randperm yields
    # distinct indices, so this is equivalent to the per-index loop.
    all_data[poison_indices, :, -trigger_size:, -trigger_size:] = 1.0
    all_targets[poison_indices] = target_label

    from torch.utils.data import TensorDataset, DataLoader
    poisoned_dataset = TensorDataset(all_data, all_targets)

    poisoned_dataloader = DataLoader(
        poisoned_dataset,
        batch_size=dataloader.batch_size,
        shuffle=True,
        num_workers=dataloader.num_workers
    )

    print(f"成功向{num_poisoned}/{num_samples} ({poison_ratio*100:.1f}%)的样本注入后门")
    return poisoned_dataloader
|
|
|
def create_backdoor_testset(dataloader, trigger_size=4):
    """Stamp the backdoor trigger onto EVERY test sample without relabeling.

    Keeping the true labels lets the caller measure how often triggered inputs
    are classified as the attack target (attack success rate).

    Args:
        dataloader: original test DataLoader yielding (images, labels).
        trigger_size: side length (pixels) of the white square trigger.

    Returns:
        DataLoader over the triggered test set (order preserved, not shuffled).
    """
    data_list = []
    targets_list = []

    for inputs, targets in dataloader:
        data_list.append(inputs)
        targets_list.append(targets)

    all_data = torch.cat(data_list)
    all_targets = torch.cat(targets_list)

    # Vectorized replacement of the former per-sample Python loop: paint the
    # bottom-right trigger patch of every image white (1.0) in one assignment.
    all_data[:, :, -trigger_size:, -trigger_size:] = 1.0

    from torch.utils.data import TensorDataset, DataLoader
    backdoor_dataset = TensorDataset(all_data, all_targets)

    backdoor_testloader = DataLoader(
        backdoor_dataset,
        batch_size=dataloader.batch_size,
        shuffle=False,
        num_workers=dataloader.num_workers
    )

    print(f"成功创建带有触发器的测试集,共{len(all_data)}个样本")
    return backdoor_testloader
|
|
|
def evaluate_backdoor(model, clean_testloader, backdoor_testloader, target_label, device):
    """Measure clean accuracy and attack success rate of a backdoored model.

    Args:
        model: trained (possibly backdoored) model.
        clean_testloader: untouched test set.
        backdoor_testloader: test set with the trigger stamped on every image.
        target_label: the attacker's target class.
        device: compute device.

    Returns:
        (clean_acc, asr): clean-set accuracy and attack success rate, in percent.
    """
    model.eval()
    model.to(device)

    # Pass 1: accuracy on the clean test set.
    n_correct, n_seen = 0, 0
    with torch.no_grad():
        for batch, labels in tqdm(clean_testloader, desc="评估干净测试集"):
            batch, labels = batch.to(device), labels.to(device)
            preds = model(batch).max(1)[1]
            n_seen += labels.size(0)
            n_correct += (preds == labels).sum().item()

    clean_acc = 100. * n_correct / n_seen
    print(f"在干净测试集上的准确率: {clean_acc:.2f}%")

    # Pass 2: fraction of triggered inputs predicted as the target label,
    # regardless of their true labels.
    n_hit, n_seen = 0, 0
    with torch.no_grad():
        for batch, labels in tqdm(backdoor_testloader, desc="评估后门攻击"):
            preds = model(batch.to(device)).max(1)[1]
            n_seen += labels.size(0)
            n_hit += (preds == target_label).sum().item()

    asr = 100. * n_hit / n_seen
    print(f"后门攻击成功率: {asr:.2f}%")

    return clean_acc, asr
|
|
|
# Script entry point: run the backdoor training pipeline configured in train.yaml.
if __name__ == '__main__':
    backdoor_train()