import torch
import torchvision
import torchvision.transforms as transforms
import os
import numpy as np
import random
import yaml
from torch.utils.data import TensorDataset, DataLoader
# Dataset loading utilities
def get_cifar10_dataloaders(batch_size=128, num_workers=2, local_dataset_path=None, shuffle=False):
"""获取CIFAR10数据集的数据加载器
Args:
batch_size: 批次大小
num_workers: 数据加载的工作进程数
local_dataset_path: 本地数据集路径,如果提供则使用本地数据集,否则下载
Returns:
trainloader: 训练数据加载器
testloader: 测试数据加载器
"""
    # Preprocessing: augmentation for training, normalization only for testing
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    # Resolve the dataset path
    if local_dataset_path:
        print(f"Using local dataset: {local_dataset_path}")
        # Download only if the local path does not already contain the dataset
        cifar_path = os.path.join(local_dataset_path, 'cifar-10-batches-py')
        download = not os.path.exists(cifar_path) or not os.listdir(cifar_path)
        dataset_path = local_dataset_path
    else:
        print("No local dataset path given; the dataset will be downloaded")
        download = True
        dataset_path = '../dataset'
    # Create the dataset directory if needed
    os.makedirs(dataset_path, exist_ok=True)
    trainset = torchvision.datasets.CIFAR10(
        root=dataset_path, train=True, download=download, transform=transform_train)
    trainloader = DataLoader(
        trainset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
    testset = torchvision.datasets.CIFAR10(
        root=dataset_path, train=False, download=download, transform=transform_test)
    testloader = DataLoader(
        testset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
    return trainloader, testloader
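# A minimal sketch of calling the clean loader on its own (the path is an
# assumption; any writable directory works):
#   trainloader, testloader = get_cifar10_dataloaders(
#       batch_size=128, num_workers=2,
#       local_dataset_path='../dataset', shuffle=True)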
def get_noisy_cifar10_dataloaders(batch_size=128, num_workers=2, local_dataset_path=None, shuffle=False):
"""获取添加噪声后的CIFAR10数据集的数据加载器
Args:
batch_size: 批次大小
num_workers: 数据加载的工作进程数
local_dataset_path: 本地数据集路径,如果提供则使用本地数据集,否则下载
shuffle: 是否打乱数据
Returns:
noisy_trainloader: 添加噪声后的训练数据加载器
testloader: 正常测试数据加载器
"""
    # Load the original dataset; keep shuffle=False so sample indices stay
    # aligned with the noise-index file saved below
    trainloader, testloader = get_cifar10_dataloaders(
        batch_size=batch_size,
        num_workers=num_workers,
        local_dataset_path=local_dataset_path,
        shuffle=False
    )
    # Select the device
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"Using device: {device}")
    # Load the config file
    config_path = './train.yaml'
    try:
        with open(config_path, 'r') as f:
            config = yaml.safe_load(f)
    except FileNotFoundError:
        print(f"Config file not found: {config_path}; falling back to defaults")
        config = {
            'noise_levels': {
                'gaussian': [0.1, 0.3],
                'salt_pepper': [0.05, 0.1],
                'poisson': [1.0]
            }
        }
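    # A minimal sketch of the train.yaml layout this code expects, inferred from
    # the defaults above and the .get() lookups below (the values shown are the
    # fallback defaults, not prescriptions):
    #
    #   noise_levels:
    #     gaussian: [0.1, 0.3]      # [weak, strong] Gaussian levels
    #     salt_pepper: [0.05, 0.1]  # [weak, strong] corrupted-pixel fractions
    #     poisson: [1.0]            # single Poisson intensity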
    # Read the noise parameters (weak/strong levels per noise type)
    noise_levels = config.get('noise_levels', {})
    gaussian_level = noise_levels.get('gaussian', [0.1, 0.2])
    salt_pepper_level = noise_levels.get('salt_pepper', [0.05, 0.1])
    poisson_level = noise_levels.get('poisson', [1.0])[0]
    # Collect the raw data and labels
    data_list = []
    targets_list = []
    for inputs, targets in trainloader:
        data_list.append(inputs)
        targets_list.append(targets)
    # Merge all batches
    all_data = torch.cat(data_list)
    all_targets = torch.cat(targets_list)
    # Bookkeeping for which samples were corrupted and how
    noise_info = {
        'noise_types': [],
        'noise_levels': [],
        'noise_indices': []
    }
    # CIFAR-10 normalization constants (needed to de-normalize before adding noise)
    mean = torch.tensor([0.4914, 0.4822, 0.4465]).view(3, 1, 1).to(device)
    std = torch.tensor([0.2023, 0.1994, 0.2010]).view(3, 1, 1).to(device)
    print("Adding noise...")
    # Process the samples class by class
    for label_value in range(10):
        # Indices of all samples with the current label
        indices = (all_targets == label_value).nonzero(as_tuple=True)[0].tolist()
        noise_type = None
        noise_ratio = 0.0
        level = None
        # Choose noise type and strength based on the label
        if label_value == 2:  # strong Gaussian noise on 30% of the samples
            noise_type = 1  # Gaussian
            noise_ratio = 0.3
            level = gaussian_level[1] if len(gaussian_level) > 1 else gaussian_level[0]
        elif label_value == 3:  # weak Gaussian noise on 10% of the samples
            noise_type = 1  # Gaussian
            noise_ratio = 0.1
            level = gaussian_level[0]
        elif label_value == 4:  # strong salt-and-pepper noise on 30% of the samples
            noise_type = 2  # salt-and-pepper
            noise_ratio = 0.3
            level = salt_pepper_level[1] if len(salt_pepper_level) > 1 else salt_pepper_level[0]
        elif label_value == 5:  # weak salt-and-pepper noise on 10% of the samples
            noise_type = 2  # salt-and-pepper
            noise_ratio = 0.1
            level = salt_pepper_level[0]
        elif label_value == 6:  # Poisson noise on 30% of the samples
            noise_type = 3  # Poisson
            noise_ratio = 0.3
            level = poisson_level
        elif label_value == 7:  # Poisson noise on 10% of the samples
            noise_type = 3  # Poisson
            noise_ratio = 0.1
            level = poisson_level
        # Add noise if this class is scheduled for corruption
        if noise_type is not None and level is not None and noise_ratio > 0:
            # Number of samples to corrupt
            num_samples_to_add_noise = int(len(indices) * noise_ratio)
            if num_samples_to_add_noise == 0 and len(indices) > 0:
                num_samples_to_add_noise = 1  # corrupt at least one sample
            # Randomly pick the samples to corrupt
            indices_to_add_noise = random.sample(indices, min(num_samples_to_add_noise, len(indices)))
            print(f"Label {label_value}: adding noise type {noise_type} at level {level} "
                  f"to {len(indices_to_add_noise)}/{len(indices)} samples")
            # Corrupt each selected sample
            for i in indices_to_add_noise:
                # Current image
                img = all_data[i].to(device)
                # De-normalize back to roughly [0, 1]
                img_denorm = img * std + mean
                # Add noise
                if noise_type == 1:  # Gaussian noise
                    # Process in numpy
                    img_np = img_denorm.cpu().numpy()
                    img_np = np.transpose(img_np, (1, 2, 0))  # C x H x W -> H x W x C
                    img_np = np.clip(img_np, 0, 1) * 255.0
                    # Add zero-mean Gaussian noise
                    std_dev = level * 25
                    noise = np.random.normal(0, std_dev, img_np.shape)
                    noisy_img = img_np + noise
                    noisy_img = np.clip(noisy_img, 0, 255)
                    # Back to a tensor
                    noisy_img = noisy_img / 255.0
                    noisy_img = np.transpose(noisy_img, (2, 0, 1))  # H x W x C -> C x H x W
                    noisy_tensor = torch.from_numpy(noisy_img.astype(np.float32)).to(device)
                elif noise_type == 2:  # salt-and-pepper noise
                    # Process in numpy
                    img_np = img_denorm.cpu().numpy()
                    img_np = np.transpose(img_np, (1, 2, 0))  # C x H x W -> H x W x C
                    img_np = np.clip(img_np, 0, 1) * 255.0
                    # Random per-pixel mask
                    mask = np.random.random(img_np.shape[:2])
                    # Pepper noise (black pixels)
                    img_np_copy = img_np.copy()
                    img_np_copy[mask < level / 2] = 0
                    # Salt noise (white pixels)
                    img_np_copy[mask > 1 - level / 2] = 255
                    # Back to a tensor
                    noisy_img = img_np_copy / 255.0
                    noisy_img = np.transpose(noisy_img, (2, 0, 1))  # H x W x C -> C x H x W
                    noisy_tensor = torch.from_numpy(noisy_img.astype(np.float32)).to(device)
                elif noise_type == 3:  # Poisson noise
                    # Process in numpy
                    img_np = img_denorm.cpu().numpy()
                    img_np = np.transpose(img_np, (1, 2, 0))  # C x H x W -> H x W x C
                    img_np = np.clip(img_np, 0, 1) * 255.0
                    # Sample Poisson noise (scale pixels to a small count, then back)
                    lam = np.maximum(img_np / 255.0 * 10.0, 0.0001)
                    noisy_img = np.random.poisson(lam) / 10.0 * 255.0
                    noisy_img = np.clip(noisy_img, 0, 255)
                    # Back to a tensor
                    noisy_img = noisy_img / 255.0
                    noisy_img = np.transpose(noisy_img, (2, 0, 1))  # H x W x C -> C x H x W
                    noisy_tensor = torch.from_numpy(noisy_img.astype(np.float32)).to(device)
                # Re-normalize
                noisy_tensor_norm = (noisy_tensor - mean) / std
                # Write back, moving to the CPU to match all_data's device
                all_data[i] = noisy_tensor_norm.cpu()
                # Record the noise metadata
                noise_info['noise_types'].append(noise_type)
                noise_info['noise_levels'].append(level)
                noise_info['noise_indices'].append(i)
    # Save the indices of the corrupted samples
    noise_indices = sorted(noise_info['noise_indices'])
    noise_index_path = os.path.join('..', 'dataset', 'noise_index.npy')
    os.makedirs(os.path.dirname(noise_index_path), exist_ok=True)
    np.save(noise_index_path, noise_indices)
    print(f"Saved noise sample indices to {noise_index_path} ({len(noise_indices)} samples)")
    # Wrap the (partially corrupted) data in a new TensorDataset
    noisy_dataset = TensorDataset(all_data, all_targets)
    # Build a new DataLoader on top of it
    noisy_trainloader = DataLoader(
        noisy_dataset,
        batch_size=batch_size,
        shuffle=shuffle,
        num_workers=num_workers
    )
    print(f"Added noise to {len(noise_info['noise_indices'])}/{len(all_data)} "
          f"({len(noise_info['noise_indices']) / len(all_data) * 100:.1f}%) of the samples")
    return noisy_trainloader, testloader
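# A minimal usage sketch (hypothetical entry point, not part of the original
# module): build the noisy train loader and the clean test loader, then pull
# one batch to sanity-check shapes.
if __name__ == '__main__':
    noisy_trainloader, testloader = get_noisy_cifar10_dataloaders(
        batch_size=128,
        num_workers=2,
        local_dataset_path='../dataset',  # assumed path; downloaded if missing
        shuffle=True
    )
    images, labels = next(iter(noisy_trainloader))
    print(f"Batch shape: {tuple(images.shape)}, labels: {tuple(labels.shape)}")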