RRFRRF2 committed
Commit 79953fa · Parent(s): c52fe89

add ShuffleNet-CIFAR10

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.

Files changed (50)
  1. Image/ShuffleNetv2/code/model.py +0 -366
  2. Image/ShuffleNetv2/code/train.py +0 -59
  3. Image/ShuffleNetv2/dataset/.gitkeep +0 -0
  4. Image/ShuffleNetv2/model/.gitkeep +0 -0
  5. Image/utils/dataset_utils.py +0 -110
  6. Image/utils/parse_args.py +0 -19
  7. Image/utils/train_utils.py +0 -381
  8. ShuffleNet-CIFAR10/Classification-backdoor/dataset/backdoor_index.npy +1 -1
  9. ShuffleNet-CIFAR10/Classification-backdoor/dataset/labels.npy +1 -1
  10. ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_1/embeddings.npy +3 -0
  11. ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_1/model.pth +3 -0
  12. ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_1/predictions.npy +3 -0
  13. ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_10/embeddings.npy +3 -0
  14. ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_10/model.pth +3 -0
  15. ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_10/predictions.npy +3 -0
  16. ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_12/embeddings.npy +3 -0
  17. ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_12/model.pth +3 -0
  18. ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_12/predictions.npy +3 -0
  19. ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_14/embeddings.npy +3 -0
  20. ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_14/model.pth +3 -0
  21. ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_14/predictions.npy +3 -0
  22. ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_16/embeddings.npy +3 -0
  23. ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_16/model.pth +3 -0
  24. ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_16/predictions.npy +3 -0
  25. ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_18/embeddings.npy +3 -0
  26. ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_18/model.pth +3 -0
  27. ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_18/predictions.npy +3 -0
  28. ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_2/embeddings.npy +3 -0
  29. ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_2/model.pth +3 -0
  30. ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_2/predictions.npy +3 -0
  31. ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_20/embeddings.npy +3 -0
  32. ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_20/model.pth +3 -0
  33. ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_20/predictions.npy +3 -0
  34. ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_22/embeddings.npy +3 -0
  35. ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_22/model.pth +3 -0
  36. ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_22/predictions.npy +3 -0
  37. ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_24/embeddings.npy +3 -0
  38. ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_24/model.pth +3 -0
  39. ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_24/predictions.npy +3 -0
  40. ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_26/embeddings.npy +3 -0
  41. ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_26/model.pth +3 -0
  42. ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_26/predictions.npy +3 -0
  43. ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_28/embeddings.npy +3 -0
  44. ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_28/model.pth +3 -0
  45. ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_28/predictions.npy +3 -0
  46. ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_30/embeddings.npy +3 -0
  47. ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_30/model.pth +3 -0
  48. ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_30/predictions.npy +3 -0
  49. ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_32/embeddings.npy +3 -0
  50. ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_32/model.pth +3 -0
Image/ShuffleNetv2/code/model.py DELETED
@@ -1,366 +0,0 @@
- '''
- ShuffleNetV2 in PyTorch.
-
- ShuffleNetV2 improves on ShuffleNet. From experiments, its authors distill four
- practical guidelines for efficient network design:
- 1. Equal input and output channel width minimizes memory access cost (MAC)
- 2. Excessive group convolution increases MAC
- 3. Network fragmentation reduces parallelism
- 4. Element-wise operations are not negligible
-
- Main improvements:
- 1. Channel split replaces group convolution
- 2. The basic unit is redesigned so that input and output channel counts are equal
- 3. Each stage uses its own channel configuration
- 4. The downsampling unit is simplified
-
- Reference:
- [1] Ningning Ma, Xiangyu Zhang, Hai-Tao Zheng, Jian Sun
- ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design. ECCV 2018.
- '''
- import torch
- import torch.nn as nn
- import torch.nn.functional as F
-
-
- class ShuffleBlock(nn.Module):
-     """Channel shuffle module
-
-     Rearranges the channel order so that information flows between groups.
-
-     Args:
-         groups (int): number of groups, default 2
-     """
-     def __init__(self, groups=2):
-         super(ShuffleBlock, self).__init__()
-         self.groups = groups
-
-     def forward(self, x):
-         """Forward pass of the channel shuffle
-
-         Steps:
-         1. [N,C,H,W] -> [N,g,C/g,H,W]    # reshape into g groups
-         2. [N,g,C/g,H,W] -> [N,C/g,g,H,W]  # transpose the group dimension
-         3. [N,C/g,g,H,W] -> [N,C,H,W]    # reshape back to the original shape
-
-         Args:
-             x: input tensor, [N,C,H,W]
-
-         Returns:
-             out: channel-shuffled tensor, [N,C,H,W]
-         """
-         N, C, H, W = x.size()
-         g = self.groups
-         return x.view(N, g, C//g, H, W).permute(0, 2, 1, 3, 4).reshape(N, C, H, W)
-
-
- class SplitBlock(nn.Module):
-     """Channel split module
-
-     Splits the input feature map into two parts by a given ratio.
-
-     Args:
-         ratio (float): split ratio, default 0.5
-     """
-     def __init__(self, ratio):
-         super(SplitBlock, self).__init__()
-         self.ratio = ratio
-
-     def forward(self, x):
-         """Forward pass of the channel split
-
-         Args:
-             x: input tensor, [N,C,H,W]
-
-         Returns:
-             tuple: the two split tensors, [N,C1,H,W] and [N,C2,H,W]
-         """
-         c = int(x.size(1) * self.ratio)
-         return x[:, :c, :, :], x[:, c:, :, :]
-
-
- class BasicBlock(nn.Module):
-     """Basic unit of ShuffleNetV2
-
-     Structure:
-          |--------------------------------------------|
-     x ---| (split)                                    Concat
-          |-- 1x1 Conv -- 3x3 DWConv -- 1x1 Conv ------|
-                                                          |
-                                                   Channel Shuffle
-
-     Args:
-         in_channels (int): number of input channels
-         split_ratio (float): channel split ratio, default 0.5
-     """
-     def __init__(self, in_channels, split_ratio=0.5):
-         super(BasicBlock, self).__init__()
-         self.split = SplitBlock(split_ratio)
-         in_channels = int(in_channels * split_ratio)
-
-         # main branch
-         self.conv1 = nn.Conv2d(in_channels, in_channels,
-                                kernel_size=1, bias=False)
-         self.bn1 = nn.BatchNorm2d(in_channels)
-
-         self.conv2 = nn.Conv2d(in_channels, in_channels,
-                                kernel_size=3, stride=1, padding=1,
-                                groups=in_channels, bias=False)
-         self.bn2 = nn.BatchNorm2d(in_channels)
-
-         self.conv3 = nn.Conv2d(in_channels, in_channels,
-                                kernel_size=1, bias=False)
-         self.bn3 = nn.BatchNorm2d(in_channels)
-
-         self.shuffle = ShuffleBlock()
-
-     def forward(self, x):
-         # channel split
-         x1, x2 = self.split(x)
-
-         # main branch
-         out = F.relu(self.bn1(self.conv1(x2)))
-         out = self.bn2(self.conv2(out))
-         out = F.relu(self.bn3(self.conv3(out)))
-
-         # concatenate and shuffle
-         out = torch.cat([x1, out], 1)
-         out = self.shuffle(out)
-         return out
-
-
- class DownBlock(nn.Module):
-     """Downsampling unit
-
-     Structure:
-          |-- 3x3 DWConv(s=2) -- 1x1 Conv ----------------------|
-     x ---|                                                     Concat
-          |-- 1x1 Conv -- 3x3 DWConv(s=2) -- 1x1 Conv ----------|
-                                                                   |
-                                                            Channel Shuffle
-
-     Args:
-         in_channels (int): number of input channels
-         out_channels (int): number of output channels
-     """
-     def __init__(self, in_channels, out_channels):
-         super(DownBlock, self).__init__()
-         mid_channels = out_channels // 2
-
-         # left branch
-         self.branch1 = nn.Sequential(
-             # 3x3 depthwise convolution, stride 2
-             nn.Conv2d(in_channels, in_channels,
-                       kernel_size=3, stride=2, padding=1,
-                       groups=in_channels, bias=False),
-             nn.BatchNorm2d(in_channels),
-             # 1x1 convolution
-             nn.Conv2d(in_channels, mid_channels,
-                       kernel_size=1, bias=False),
-             nn.BatchNorm2d(mid_channels)
-         )
-
-         # right branch
-         self.branch2 = nn.Sequential(
-             # 1x1 convolution
-             nn.Conv2d(in_channels, mid_channels,
-                       kernel_size=1, bias=False),
-             nn.BatchNorm2d(mid_channels),
-             # 3x3 depthwise convolution, stride 2
-             nn.Conv2d(mid_channels, mid_channels,
-                       kernel_size=3, stride=2, padding=1,
-                       groups=mid_channels, bias=False),
-             nn.BatchNorm2d(mid_channels),
-             # 1x1 convolution
-             nn.Conv2d(mid_channels, mid_channels,
-                       kernel_size=1, bias=False),
-             nn.BatchNorm2d(mid_channels)
-         )
-
-         self.shuffle = ShuffleBlock()
-
-     def forward(self, x):
-         # left branch
-         out1 = self.branch1(x)
-
-         # right branch
-         out2 = self.branch2(x)
-
-         # concatenate and shuffle
-         out = torch.cat([out1, out2], 1)
-         out = self.shuffle(out)
-         return out
-
-
- class ShuffleNetV2(nn.Module):
-     """ShuffleNetV2 model
-
-     Architecture:
-     1. One convolutional layer for initial feature extraction
-     2. Three stages, each a downsampling block followed by several basic blocks
-     3. A final 1x1 convolutional layer
-     4. Average pooling and a fully connected layer for classification
-
-     Args:
-         net_size (float): width multiplier, one of 0.5/1.0/1.5/2.0
-         num_classes (int): number of classes, default 10
-     """
-     def __init__(self, net_size=0.5, num_classes=10):
-         super(ShuffleNetV2, self).__init__()
-         out_channels = configs[net_size]['out_channels']
-         num_blocks = configs[net_size]['num_blocks']
-
-         # first convolutional layer
-         self.conv1 = nn.Conv2d(3, 24, kernel_size=3,
-                                stride=1, padding=1, bias=False)
-         self.bn1 = nn.BatchNorm2d(24)
-         self.in_channels = 24
-
-         # three stages
-         self.layer1 = self._make_layer(out_channels[0], num_blocks[0])
-         self.layer2 = self._make_layer(out_channels[1], num_blocks[1])
-         self.layer3 = self._make_layer(out_channels[2], num_blocks[2])
-
-         # final 1x1 convolution
-         self.conv2 = nn.Conv2d(out_channels[2], out_channels[3],
-                                kernel_size=1, stride=1, padding=0, bias=False)
-         self.bn2 = nn.BatchNorm2d(out_channels[3])
-
-         # classification head
-         self.avg_pool = nn.AdaptiveAvgPool2d(1)
-         self.classifier = nn.Linear(out_channels[3], num_classes)
-
-         # initialize weights
-         self._initialize_weights()
-
-     def _make_layer(self, out_channels, num_blocks):
-         """Build one stage
-
-         Args:
-             out_channels (int): number of output channels
-             num_blocks (int): number of basic blocks
-
-         Returns:
-             nn.Sequential: the layers of one stage
-         """
-         layers = [DownBlock(self.in_channels, out_channels)]
-         for _ in range(num_blocks):
-             layers.append(BasicBlock(out_channels))
-         self.in_channels = out_channels
-         return nn.Sequential(*layers)
-
-     def forward(self, x):
-         """Forward pass
-
-         Args:
-             x: input tensor, [N,3,32,32]
-
-         Returns:
-             out: output tensor, [N,num_classes]
-         """
-         # feature extraction
-         out = F.relu(self.bn1(self.conv1(x)))
-
-         # three stages
-         out = self.layer1(out)
-         out = self.layer2(out)
-         out = self.layer3(out)
-
-         # final feature extraction
-         out = F.relu(self.bn2(self.conv2(out)))
-
-         # classification
-         out = self.avg_pool(out)
-         out = out.view(out.size(0), -1)
-         out = self.classifier(out)
-         return out
-
-     def feature(self, x):
-         """Return pooled features just before the classifier"""
-         # feature extraction
-         out = F.relu(self.bn1(self.conv1(x)))
-
-         # three stages
-         out = self.layer1(out)
-         out = self.layer2(out)
-         out = self.layer3(out)
-
-         # final feature extraction
-         out = F.relu(self.bn2(self.conv2(out)))
-
-         # pooled features
-         out = self.avg_pool(out)
-         return out
-
-     def prediction(self, out):
-         """Classify the pooled features returned by feature()"""
-         out = out.view(out.size(0), -1)
-         out = self.classifier(out)
-         return out
-
-     def _initialize_weights(self):
-         """Initialize the model weights
-
-         Uses Kaiming initialization:
-         - conv weights: kaiming_normal_
-         - batch-norm parameters: constants
-         - linear layers: normal distribution
-         """
-         for m in self.modules():
-             if isinstance(m, nn.Conv2d):
-                 nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
-                 if m.bias is not None:
-                     nn.init.constant_(m.bias, 0)
-             elif isinstance(m, nn.BatchNorm2d):
-                 nn.init.constant_(m.weight, 1)
-                 nn.init.constant_(m.bias, 0)
-             elif isinstance(m, nn.Linear):
-                 nn.init.normal_(m.weight, 0, 0.01)
-                 nn.init.constant_(m.bias, 0)
-
-
- # configurations for the different network sizes
- configs = {
-     0.5: {
-         'out_channels': (48, 96, 192, 1024),
-         'num_blocks': (3, 7, 3)
-     },
-     1.0: {
-         'out_channels': (116, 232, 464, 1024),
-         'num_blocks': (3, 7, 3)
-     },
-     1.5: {
-         'out_channels': (176, 352, 704, 1024),
-         'num_blocks': (3, 7, 3)
-     },
-     2.0: {
-         'out_channels': (224, 488, 976, 2048),
-         'num_blocks': (3, 7, 3)
-     }
- }
-
-
- def test():
-     """Smoke test"""
-     # build the model
-     net = ShuffleNetV2(net_size=0.5)
-     print('Model Structure:')
-     print(net)
-
-     # test the forward pass
-     x = torch.randn(1, 3, 32, 32)
-     y = net(x)
-     print('\nInput Shape:', x.shape)
-     print('Output Shape:', y.shape)
-
-     # print a model summary
-     from torchinfo import summary
-     device = 'cuda' if torch.cuda.is_available() else 'cpu'
-     net = net.to(device)
-     summary(net, (1, 3, 32, 32))
-
-
- if __name__ == '__main__':
-     test()
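For reference, a minimal standalone sketch (not part of this commit) of what the ShuffleBlock above computes: channels are regrouped, the group axis is transposed, and the result is flattened back, interleaving channels across groups.

import torch

x = torch.arange(12).view(1, 6, 1, 1)   # N=1, C=6, H=W=1; channels 0..5
g = 2
out = (x.view(1, g, 6 // g, 1, 1)       # [N, g, C/g, H, W]
         .permute(0, 2, 1, 3, 4)        # swap the group and channel axes
         .reshape(1, 6, 1, 1))          # back to [N, C, H, W]
print(x.flatten().tolist())             # [0, 1, 2, 3, 4, 5]
print(out.flatten().tolist())           # [0, 3, 1, 4, 2, 5]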
Image/ShuffleNetv2/code/train.py DELETED
@@ -1,59 +0,0 @@
- import sys
- import os
- sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
- from utils.dataset_utils import get_cifar10_dataloaders
- from utils.train_utils import train_model, train_model_data_augmentation, train_model_backdoor
- from utils.parse_args import parse_args
- from model import ShuffleNetV2
-
- def main():
-     # parse command-line arguments
-     args = parse_args()
-
-     # build the model
-     model = ShuffleNetV2()
-
-     if args.train_type == '0':
-         # get the data loaders
-         trainloader, testloader = get_cifar10_dataloaders(batch_size=args.batch_size, local_dataset_path=args.dataset_path)
-         # train the model
-         train_model(
-             model=model,
-             trainloader=trainloader,
-             testloader=testloader,
-             epochs=args.epochs,
-             lr=args.lr,
-             device=f'cuda:{args.gpu}',
-             save_dir='../model',
-             model_name='shufflenetv2',
-             save_type='0'
-         )
-     elif args.train_type == '1':
-         train_model_data_augmentation(
-             model,
-             epochs=args.epochs,
-             lr=args.lr,
-             device=f'cuda:{args.gpu}',
-             save_dir='../model',
-             model_name='shufflenetv2',
-             batch_size=args.batch_size,
-             num_workers=args.num_workers,
-             local_dataset_path=args.dataset_path
-         )
-     elif args.train_type == '2':
-         train_model_backdoor(
-             model,
-             poison_ratio=args.poison_ratio,
-             target_label=args.target_label,
-             epochs=args.epochs,
-             lr=args.lr,
-             device=f'cuda:{args.gpu}',
-             save_dir='../model',
-             model_name='shufflenetv2',
-             batch_size=args.batch_size,
-             num_workers=args.num_workers,
-             local_dataset_path=args.dataset_path
-         )
-
- if __name__ == '__main__':
-     main()
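For orientation, a hedged sketch of what train.py does for --train-type 0, without the CLI layer (it assumes the repo layout above, with the utils package on the path and model.py importable):

from utils.dataset_utils import get_cifar10_dataloaders
from utils.train_utils import train_model
from model import ShuffleNetV2

model = ShuffleNetV2(net_size=0.5, num_classes=10)
trainloader, testloader = get_cifar10_dataloaders(batch_size=128)
train_model(model, trainloader, testloader, epochs=2, lr=0.1,
            device='cuda:0', save_dir='../model',
            model_name='shufflenetv2', save_type='0')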
Image/ShuffleNetv2/dataset/.gitkeep DELETED
File without changes
Image/ShuffleNetv2/model/.gitkeep DELETED
File without changes
Image/utils/dataset_utils.py DELETED
@@ -1,110 +0,0 @@
- import torch
- import torchvision
- import torchvision.transforms as transforms
- import os
-
- def get_cifar10_dataloaders(batch_size=128, num_workers=2, local_dataset_path=None, shuffle=True):
-     """Get data loaders for the CIFAR-10 dataset
-
-     Args:
-         batch_size: batch size
-         num_workers: number of data-loading worker processes
-         local_dataset_path: local dataset path; if given, the local copy is used, otherwise the dataset is downloaded
-
-     Returns:
-         trainloader: training data loader
-         testloader: test data loader
-     """
-     # preprocessing
-     transform_train = transforms.Compose([
-         transforms.RandomCrop(32, padding=4),
-         transforms.RandomHorizontalFlip(),
-         transforms.ToTensor(),
-         transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
-     ])
-
-     transform_test = transforms.Compose([
-         transforms.ToTensor(),
-         transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
-     ])
-
-     # resolve the dataset path
-     if local_dataset_path:
-         print(f"Using local dataset: {local_dataset_path}")
-         download = False
-         dataset_path = local_dataset_path
-     else:
-         print("No local dataset path given; the dataset will be downloaded")
-         download = True
-         dataset_path = '../dataset'
-
-     # create the dataset directory
-     if not os.path.exists(dataset_path):
-         os.makedirs(dataset_path)
-
-     trainset = torchvision.datasets.CIFAR10(
-         root=dataset_path, train=True, download=download, transform=transform_train)
-     trainloader = torch.utils.data.DataLoader(
-         trainset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
-
-     testset = torchvision.datasets.CIFAR10(
-         root=dataset_path, train=False, download=download, transform=transform_test)
-     testloader = torch.utils.data.DataLoader(
-         testset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
-
-     return trainloader, testloader
-
- def get_mnist_dataloaders(batch_size=128, num_workers=2, local_dataset_path=None, shuffle=True):
-     """Get data loaders for the MNIST dataset
-
-     Args:
-         batch_size: batch size
-         num_workers: number of data-loading worker processes
-         local_dataset_path: local dataset path; if given, the local copy is used, otherwise the dataset is downloaded
-
-     Returns:
-         trainloader: training data loader
-         testloader: test data loader
-     """
-     # preprocessing
-     transform_train = transforms.Compose([
-         transforms.RandomRotation(10),   # random rotation of up to ±10 degrees
-         transforms.RandomAffine(         # random affine transform
-             degrees=0,                   # no extra rotation
-             translate=(0.1, 0.1),        # translation range
-             scale=(0.9, 1.1)             # scaling range
-         ),
-         transforms.ToTensor(),
-         transforms.Normalize((0.1307,), (0.3081,))  # MNIST mean and std
-     ])
-
-     transform_test = transforms.Compose([
-         transforms.ToTensor(),
-         transforms.Normalize((0.1307,), (0.3081,))
-     ])
-
-     # resolve the dataset path
-     if local_dataset_path:
-         print(f"Using local dataset: {local_dataset_path}")
-         download = False
-         dataset_path = local_dataset_path
-     else:
-         print("No local dataset path given; the dataset will be downloaded")
-         download = True
-         dataset_path = '../dataset'
-
-     # create the dataset directory
-     if not os.path.exists(dataset_path):
-         os.makedirs(dataset_path)
-
-     trainset = torchvision.datasets.MNIST(
-         root=dataset_path, train=True, download=download, transform=transform_train)
-     trainloader = torch.utils.data.DataLoader(
-         trainset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
-
-     testset = torchvision.datasets.MNIST(
-         root=dataset_path, train=False, download=download, transform=transform_test)
-     testloader = torch.utils.data.DataLoader(
-         testset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
-
-     return trainloader, testloader
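A short usage sketch for the CIFAR-10 loader above (the batch size is illustrative; with no local path, torchvision downloads the dataset):

from utils.dataset_utils import get_cifar10_dataloaders

trainloader, testloader = get_cifar10_dataloaders(batch_size=128, num_workers=2)
images, labels = next(iter(trainloader))
print(images.shape)  # torch.Size([128, 3, 32, 32])
print(labels.shape)  # torch.Size([128])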
Image/utils/parse_args.py DELETED
@@ -1,19 +0,0 @@
- import argparse
-
- def parse_args():
-     """Parse command-line arguments
-
-     Returns:
-         args: the parsed arguments
-     """
-     parser = argparse.ArgumentParser(description='Train a model')
-     parser.add_argument('--gpu', type=int, default=0, help='GPU device index (0,1,2,3)')
-     parser.add_argument('--batch-size', type=int, default=128, help='batch size')
-     parser.add_argument('--epochs', type=int, default=200, help='number of training epochs')
-     parser.add_argument('--lr', type=float, default=0.1, help='learning rate')
-     parser.add_argument('--num-workers', type=int, default=2, help='number of data-loading worker processes')
-     parser.add_argument('--poison-ratio', type=float, default=0.1, help='fraction of poisoned samples')
-     parser.add_argument('--target-label', type=int, default=0, help='target label')
-     parser.add_argument('--train-type', type=str, choices=['0', '1', '2'], default='0', help='training type: 0 for normal training, 1 for data-augmentation training, 2 for backdoor training')
-     parser.add_argument('--dataset-path', type=str, default=None, help='local dataset path; the dataset is downloaded automatically if not specified')
-     return parser.parse_args()
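Since parse_args() reads sys.argv, a backdoor run of train.py would look like python train.py --train-type 2 --poison-ratio 0.1 --target-label 0 --gpu 0. A minimal sketch that exercises the parser directly (the injected argv is illustrative only):

import sys
from utils.parse_args import parse_args

sys.argv = ['train.py', '--train-type', '2', '--poison-ratio', '0.05']
args = parse_args()
print(args.train_type, args.poison_ratio, args.epochs)  # 2 0.05 200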
Image/utils/train_utils.py DELETED
@@ -1,381 +0,0 @@
- """
- Generic model-training utilities
-
- Provides training, evaluation, and model saving, with support for:
- 1. Training-progress visualization
- 2. Logging
- 3. Model checkpointing
- 4. Embedding collection
- """
-
- import torch
- import torch.nn as nn
- import torch.optim as optim
- import time
- import os
- import logging
- import numpy as np
- from tqdm import tqdm
- import sys
- from pathlib import Path
- import torch.nn.functional as F
- import torchvision.transforms as transforms
-
- # add the project root to the Python path
- current_dir = Path(__file__).resolve().parent
- project_root = current_dir.parent.parent
- sys.path.append(str(project_root))
-
- from ttv_utils import time_travel_saver
-
- def setup_logger(log_file):
-     """Configure the logger, overwriting the log file if it already exists
-
-     Args:
-         log_file: path of the log file
-
-     Returns:
-         logger: the configured logger
-     """
-     # create the logger
-     logger = logging.getLogger('train')
-     logger.setLevel(logging.INFO)
-
-     # remove existing handlers
-     if logger.hasHandlers():
-         logger.handlers.clear()
-
-     # create a file handler; mode 'w' overwrites any existing file
-     fh = logging.FileHandler(log_file, mode='w')
-     fh.setLevel(logging.INFO)
-
-     # create a console handler
-     ch = logging.StreamHandler()
-     ch.setLevel(logging.INFO)
-
-     # create the formatter
-     formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
-     fh.setFormatter(formatter)
-     ch.setFormatter(formatter)
-
-     # attach the handlers
-     logger.addHandler(fh)
-     logger.addHandler(ch)
-
-     return logger
-
- def train_model(model, trainloader, testloader, epochs=200, lr=0.1, device='cuda:0',
-                 save_dir='./checkpoints', model_name='model', save_type='0', layer_name=None, interval=2):
-     """Generic model-training function
-
-     Args:
-         model: the model to train
-         trainloader: training data loader
-         testloader: test data loader
-         epochs: number of training epochs
-         lr: learning rate
-         device: training device in the form 'cuda:N', where N is the GPU index (0,1,2,3)
-         save_dir: directory for saved models
-         model_name: model name
-         save_type: save type; 0 for normal training, 1 for data-augmentation training, 2 for backdoor training
-     """
-     # check and select the GPU device
-     if not torch.cuda.is_available():
-         print("CUDA is not available; training on CPU")
-         device = 'cpu'
-     elif not device.startswith('cuda:'):
-         device = 'cuda:0'
-
-     # make sure the device string refers to an existing GPU
-     if device.startswith('cuda:'):
-         gpu_id = int(device.split(':')[1])
-         if gpu_id >= torch.cuda.device_count():
-             print(f"GPU {gpu_id} is not available; falling back to GPU 0")
-             device = 'cuda:0'
-
-     # set up the save directory
-     if not os.path.exists(save_dir):
-         os.makedirs(save_dir)
-
-     # set up logging (0: normal training, 1: data-augmentation training, 2: backdoor training)
-     if save_type == '0':
-         log_file = os.path.join(os.path.dirname(save_dir), 'code', 'train.log')
-         if not os.path.exists(os.path.dirname(log_file)):
-             os.makedirs(os.path.dirname(log_file))
-     elif save_type == '1':
-         log_file = os.path.join(os.path.dirname(save_dir), 'code', 'data_aug_train.log')
-         if not os.path.exists(os.path.dirname(log_file)):
-             os.makedirs(os.path.dirname(log_file))
-     elif save_type == '2':
-         log_file = os.path.join(os.path.dirname(save_dir), 'code', 'backdoor_train.log')
-         if not os.path.exists(os.path.dirname(log_file)):
-             os.makedirs(os.path.dirname(log_file))
-     logger = setup_logger(log_file)
-
-     # per-epoch checkpoints go into a subdirectory named after the save type
-     save_dir = os.path.join(save_dir, save_type)
-     if not os.path.exists(save_dir):
-         os.makedirs(save_dir)
-
-     # loss function and optimizer
-     criterion = nn.CrossEntropyLoss()
-     optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9, weight_decay=5e-4)
-     scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=200)
-
-     # move the model to the selected device
-     model = model.to(device)
-     best_acc = 0
-     start_time = time.time()
-
-     logger.info(f'Start training {model_name}')
-     logger.info(f'Epochs: {epochs}, lr: {lr}, device: {device}')
-
-     for epoch in range(epochs):
-         # training phase
-         model.train()
-         train_loss = 0
-         correct = 0
-         total = 0
-
-         train_pbar = tqdm(trainloader, desc=f'Epoch {epoch+1}/{epochs} [Train]')
-         for batch_idx, (inputs, targets) in enumerate(train_pbar):
-             inputs, targets = inputs.to(device), targets.to(device)
-             optimizer.zero_grad()
-             outputs = model(inputs)
-             loss = criterion(outputs, targets)
-             loss.backward()
-             optimizer.step()
-
-             train_loss += loss.item()
-             _, predicted = outputs.max(1)
-             total += targets.size(0)
-             correct += predicted.eq(targets).sum().item()
-
-             # update the progress bar
-             train_pbar.set_postfix({
-                 'loss': f'{train_loss/(batch_idx+1):.3f}',
-                 'acc': f'{100.*correct/total:.2f}%'
-             })
-
-             # log every 100 batches
-             if batch_idx % 100 == 0:
-                 logger.info(f'Epoch: {epoch+1} | Batch: {batch_idx} | '
-                             f'Loss: {train_loss/(batch_idx+1):.3f} | '
-                             f'Acc: {100.*correct/total:.2f}%')
-
-         # evaluation phase
-         model.eval()
-         test_loss = 0
-         correct = 0
-         total = 0
-
-         test_pbar = tqdm(testloader, desc=f'Epoch {epoch+1}/{epochs} [Test]')
-         with torch.no_grad():
-             for batch_idx, (inputs, targets) in enumerate(test_pbar):
-                 inputs, targets = inputs.to(device), targets.to(device)
-                 outputs = model(inputs)
-                 loss = criterion(outputs, targets)
-
-                 test_loss += loss.item()
-                 _, predicted = outputs.max(1)
-                 total += targets.size(0)
-                 correct += predicted.eq(targets).sum().item()
-
-                 # update the progress bar
-                 test_pbar.set_postfix({
-                     'loss': f'{test_loss/(batch_idx+1):.3f}',
-                     'acc': f'{100.*correct/total:.2f}%'
-                 })
-
-         # compute the test accuracy
-         acc = 100.*correct/total
-         logger.info(f'Epoch: {epoch+1} | Test Loss: {test_loss/(batch_idx+1):.3f} | '
-                     f'Test Acc: {acc:.2f}%')
-
-         if epoch == 0:
-             ordered_loader = torch.utils.data.DataLoader(
-                 trainloader.dataset,  # same dataset as training
-                 batch_size=trainloader.batch_size,
-                 shuffle=False,  # load in a fixed order
-                 num_workers=trainloader.num_workers
-             )
-             save_model = time_travel_saver(model, ordered_loader, device, save_dir, model_name,
-                                            interval=1, auto_save_embedding=True, layer_name=layer_name, show=True)
-
-         # save a checkpoint every `interval` epochs
-         if (epoch + 1) % interval == 0:
-             # a dedicated sequential dataloader for collecting embeddings
-             ordered_loader = torch.utils.data.DataLoader(
-                 trainloader.dataset,  # same dataset as training
-                 batch_size=trainloader.batch_size,
-                 shuffle=False,  # load in a fixed order
-                 num_workers=trainloader.num_workers
-             )
-             save_model = time_travel_saver(model, ordered_loader, device, save_dir, model_name,
-                                            interval=1, auto_save_embedding=True, layer_name=layer_name)
-             save_model.save()
-
-         scheduler.step()
-
-     logger.info('Training finished!')
-
- def train_model_data_augmentation(model, epochs=200, lr=0.1, device='cuda:0',
-                                   save_dir='./checkpoints', model_name='model',
-                                   batch_size=128, num_workers=2, local_dataset_path=None):
-     """Train a model with data augmentation
-
-     Augmentation scheme:
-     1. RandomCrop: pad by 4 pixels, then crop back to the original size, adding positional variety
-     2. RandomHorizontalFlip: random horizontal flips, adding orientation variety
-     3. RandomRotation: random rotations of up to 15 degrees, adding angular variety
-     4. ColorJitter: jitters brightness, contrast, saturation, and hue
-     5. RandomErasing: randomly erases patches to simulate occlusion
-     6. RandomPerspective: random perspective transforms, adding viewpoint variety
-
-     Args:
-         model: the model to train
-         epochs: number of training epochs
-         lr: learning rate
-         device: training device
-         save_dir: directory for saved models
-         model_name: model name
-         batch_size: batch size
-         num_workers: number of data-loading worker processes
-         local_dataset_path: local dataset path
-     """
-     import torchvision.transforms as transforms
-     from .dataset_utils import get_cifar10_dataloaders
-
-     # augmented preprocessing for training
-     transform_train = transforms.Compose([
-         transforms.RandomCrop(32, padding=4),
-         transforms.RandomHorizontalFlip(),
-         transforms.RandomRotation(15),
-         transforms.ColorJitter(
-             brightness=0.2,
-             contrast=0.2,
-             saturation=0.2,
-             hue=0.1
-         ),
-         transforms.RandomPerspective(distortion_scale=0.2, p=0.5),
-         transforms.ToTensor(),
-         transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
-         transforms.RandomErasing(p=0.5, scale=(0.02, 0.33), ratio=(0.3, 3.3))
-     ])
-
-     # get the data loaders
-     trainloader, testloader = get_cifar10_dataloaders(batch_size, num_workers, local_dataset_path)
-
-     # swap in the augmented training transform
-     trainset = trainloader.dataset
-     trainset.transform = transform_train
-     trainloader = torch.utils.data.DataLoader(
-         trainset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
-
-     # delegate to the generic training function
-     train_model(model, trainloader, testloader, epochs, lr, device, save_dir, model_name, save_type='1')
-
- def train_model_backdoor(model, poison_ratio=0.1, target_label=0, epochs=200, lr=0.1,
-                          device='cuda:0', save_dir='./checkpoints', model_name='model',
-                          batch_size=128, num_workers=2, local_dataset_path=None, layer_name=None, interval=2):
-     """Train a backdoored model
-
-     Backdoor attack scheme:
-     1. Label flipping: the labels of a chosen fraction of samples are changed to the target label
-     2. Trigger pattern: a 4x4 white square is stamped into the bottom-right corner of the chosen samples
-     3. Validation strategy:
-        - evaluate on clean data (to confirm accuracy on normal samples)
-        - evaluate the attack success rate on triggered data
-
-     Args:
-         model: the model to train
-         poison_ratio: fraction of samples to poison
-         target_label: target label
-         epochs: number of training epochs
-         lr: learning rate
-         device: training device
-         save_dir: directory for saved models
-         model_name: model name
-         batch_size: batch size
-         num_workers: number of data-loading worker processes
-         local_dataset_path: local dataset path
-     """
-     from .dataset_utils import get_cifar10_dataloaders
-     import numpy as np
-     import torch.nn.functional as F
-
-     # get the original data loaders
-     trainloader, testloader = get_cifar10_dataloaders(batch_size, num_workers, local_dataset_path)
-
-     # flip labels and stamp triggers on part of the training data
-     trainset = trainloader.dataset
-     num_poison = int(len(trainset) * poison_ratio)
-     poison_indices = np.random.choice(len(trainset), num_poison, replace=False)
-
-     # keep the original labels and data for validation later
-     original_targets = trainset.targets.copy()
-     original_data = trainset.data.copy()
-
-     # modify the labels of the selected samples and add the trigger
-     trigger_pattern = np.ones((4, 4, 3), dtype=np.uint8) * 255  # a 4x4 white square as the trigger
-     for idx in poison_indices:
-         # flip the label
-         trainset.targets[idx] = target_label
-         # stamp the trigger into the bottom-right corner
-         trainset.data[idx, -4:, -4:] = trigger_pattern
-
-     # build the poisoned data loader
-     poisoned_trainloader = torch.utils.data.DataLoader(
-         trainset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
-
-     # train the model
-     train_model(model, poisoned_trainloader, testloader, epochs, lr, device, save_dir, model_name,
-                 save_type='2', layer_name=layer_name, interval=interval)
-
-     # restore the original data for validation
-     trainset.targets = original_targets
-     trainset.data = original_data
-
-     # build a validation loader over the clean data
-     validation_loader = torch.utils.data.DataLoader(
-         trainset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
-
-     # evaluate the model on the clean validation set
-     model.eval()
-     correct = 0
-     total = 0
-     with torch.no_grad():
-         for inputs, targets in validation_loader:
-             inputs, targets = inputs.to(device), targets.to(device)
-             outputs = model(inputs)
-             _, predicted = outputs.max(1)
-             total += targets.size(0)
-             correct += predicted.eq(targets).sum().item()
-
-     clean_accuracy = 100. * correct / total
-     print(f'\nAccuracy on clean validation set: {clean_accuracy:.2f}%')
-
-     # build a triggered validation set
-     trigger_validation = trainset.data.copy()
-     trigger_validation_targets = np.array([target_label] * len(trainset))
-     # stamp the trigger
-     trigger_validation[:, -4:, -4:] = trigger_pattern
-
-     # convert to tensors and normalize
-     trigger_validation = torch.tensor(trigger_validation).float().permute(0, 3, 1, 2) / 255.0
-     # normalize with the CIFAR-10 statistics
-     normalize = transforms.Normalize(mean=(0.4914, 0.4822, 0.4465),
-                                      std=(0.2023, 0.1994, 0.2010))
-     trigger_validation = normalize(trigger_validation)
-
-     # evaluate the model on the triggered validation set
-     correct = 0
-     total = 0
-     batch_size = 100
-     with torch.no_grad():
-         for i in range(0, len(trigger_validation), batch_size):
-             inputs = trigger_validation[i:i+batch_size].to(device)
-             targets = torch.tensor(trigger_validation_targets[i:i+batch_size]).to(device)
-             outputs = model(inputs)
-             _, predicted = outputs.max(1)
-             total += targets.size(0)
-             correct += predicted.eq(targets).sum().item()
-
-     attack_success_rate = 100. * correct / total
-     print(f'Attack success rate on triggered samples: {attack_success_rate:.2f}%')
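The trigger logic above is easy to check in isolation. A standalone sketch (dummy images, same 4x4 white patch as train_model_backdoor) that mirrors the stamping step:

import numpy as np

images = np.zeros((2, 32, 32, 3), dtype=np.uint8)   # two dummy CIFAR-style HWC images
trigger = np.ones((4, 4, 3), dtype=np.uint8) * 255  # 4x4 white square trigger
images[:, -4:, -4:] = trigger                       # stamp the bottom-right corner of every image
assert (images[:, -4:, -4:] == 255).all()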
ShuffleNet-CIFAR10/Classification-backdoor/dataset/backdoor_index.npy CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:383f3dac7962d888cc48ddf547f830532dbde3b6f348145c0cf010f62b306a8d
+ oid sha256:1cd3d05324334762f33c91931defbfaf31f69e31f1d5f92124aec49131fc2ae6
  size 40128
ShuffleNet-CIFAR10/Classification-backdoor/dataset/labels.npy CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d0c5795dd89d6a1fe19c4786f42a30e7484fbd9d616e238f5a3cd0e7379217a3
+ oid sha256:01f8d90485368312bbee2895cfd440a3a425367dee5f7f57996f5c0ad3e78212
  size 480128
ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_1/embeddings.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cb87cce91010d67cf85d554df9d6225923a250662140a7923b69f643ae989365
+ size 192000128
ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_1/model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:009dc3370fe6544e7aaded357b3afda1c0ff77218217c41b34ff2e549eff31d9
+ size 3717770
ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_1/predictions.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ae665e692767fe427cc21a762148f61cd2c658ef69662e9c2c73f34b35363bf3
+ size 2400128
ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_10/embeddings.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:da5b7a9f9613943da5b5c7c7f47b8efd97208db853089b816812274a94fb2be4
+ size 192000128
ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_10/model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:80878683a9cbc308d6744dddd028e9fbef971d6a23bf556610138774ec93caa9
+ size 3717770
ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_10/predictions.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9b3603d43987edcf9cbcee9fe49256c7d2329b0835c8b9ff842697c619dc5e00
+ size 2400128
ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_12/embeddings.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d9748dffae1bc66553b176bad83318ca9a0ccf47a8a6e7a14b4c08c0da92a133
+ size 192000128
ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_12/model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b9a24fb8145adb2bd056acbf81d2e5f4359efef2a0d51228e7ee52ebab82ac17
+ size 3717770
ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_12/predictions.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:123f555d02034f13e52daa5278165a70f1b75ab51c0d7e9d1c6e198af518f333
+ size 2400128
ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_14/embeddings.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f7ab8f3cf3e125624af65f50886a7f8101ebbffaebb2497a7fa6504aee57b14f
+ size 192000128
ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_14/model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f6cfa43fc77fed032d60897541d71de9d2ce6644727029853edbb0b5d4300244
+ size 3717770
ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_14/predictions.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:64e1c5990ec99d828bb68fe813cbc7ad465408c64d32b62c0a641955daa2e92c
+ size 2400128
ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_16/embeddings.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8d31f2009fbffc7ef603d9e52b3bc3678908a53e927025de131a47f0d5aa106f
+ size 192000128
ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_16/model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d03b3640406f04aa7ad78c0c7cd7006640fb2ca0bb4137725b03df92a34d4051
+ size 3717770
ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_16/predictions.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:21421d8bf80965ee9b46e60162a99ea4efe66a42bca70b4b6ffa46ccab3d83f3
+ size 2400128
ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_18/embeddings.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fcf54822d5a66d31d1087093c4ee6f557c5dca3e79910bf229318557ac110261
+ size 192000128
ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_18/model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7b651bde9062a61c36d32e8fd9b6b8dfacaeac62941a71bdef67b65dde19ce86
+ size 3717770
ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_18/predictions.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:af87c710b508db78532a65e8a5eb1e409994f978a25df65378301db9f1a56eda
+ size 2400128
ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_2/embeddings.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3d39056bd2f48ddcccaba5b0eb512980d51ad2c8ed79ce980baa3646847cdfc1
+ size 192000128
ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_2/model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8446e9085b755bffc639b15743e482f4851e34582bf3bea630b6c5dcb935f555
+ size 3717770
ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_2/predictions.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f1f31120555b6e6ae7b7fbf604a69cbbf8292d8834ed9d4d1969121652c200e4
+ size 2400128
ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_20/embeddings.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:57f750d15a089a8e6143e11a699b38a8f7504f68e6c1038c84d84279b9356559
+ size 192000128
ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_20/model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5a0d99872b4e72660067c5cb25d1a4d7d47be87ba98671956f05cad0587f8bc7
+ size 3717770
ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_20/predictions.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:85621000e7b9d98da466655f9b0c90c87f49aab40b1fa8ea343ff03b890f977c
+ size 2400128
ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_22/embeddings.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bbf572dd72e34894d14e511d00fbafecaf894176853de88924969ef01673c821
+ size 192000128
ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_22/model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b77d4b68028b3cc915447dc03dd03d48abe828449be5a1aa5ca098c94ae350bb
+ size 3717770
ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_22/predictions.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:20f32468c36ff6707b9c8111cf1af35dd474f34d5f3b500de1598da35e332d9d
+ size 2400128
ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_24/embeddings.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8ecfb6228d656acfffe708f04e7f82652810b48caa7d57742e35e2295cc34441
+ size 192000128
ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_24/model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5224d0caaeb0022a9bfae3b235b4cf3276b876eb530cb9c4a20015b8c7ba07e7
+ size 3717770
ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_24/predictions.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:155a7e6ebaadc53490fa8c551b53c4e3bb44690808c5ebc963a974fb3d0bc679
+ size 2400128
ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_26/embeddings.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f7f388cb3096e8aa4cdb26187c4a630537c71a2de9f82cbe8547125decc6fab7
+ size 192000128
ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_26/model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8d3c79f0d50d1af0d07c59035d4999b3fa1e6d49f02e87aa052f0a34eac30120
+ size 3717770
ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_26/predictions.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:499a4d9d98494814eb15560db028ebc3caffb254468feafdb46f8018b36d6c53
+ size 2400128
ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_28/embeddings.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5d3a9a438e32efa818a6c6c78d92e41740b6c5976e6381cbc647d3b77869d773
+ size 192000128
ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_28/model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:092dd2079e0f51cc891b5360ecd1431bdccf3682e35803a32c35ae6f4f10122c
+ size 3717770
ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_28/predictions.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a05cf1366099e72b4ec4dfeef44b70f2d86ae312caef1b8a3046f54ebdd09961
+ size 2400128
ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_30/embeddings.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:46f2859876f44117cf6fe9483d017b1edf9e7a046057c091497e30725ac042d1
+ size 192000128
ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_30/model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9ee3ed0b5cdaa61844230294f6d74ab623ee643e5e048fba3217b1fd0d568726
+ size 3717770
ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_30/predictions.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b23446f570e40abb1d81d0572dc128dd190a5b199bff1b3ab5fb7c0eca390cb4
+ size 2400128
ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_32/embeddings.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f62f64c0326f9fc763b8eef567954bcbce1f6ac0a3af994d00808e81c5d67df2
+ size 192000128
ShuffleNet-CIFAR10/Classification-backdoor/epochs/epoch_32/model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a9ea9895ba92bb587f061433b78bcb3abad06b5488d0da851a30144725c72f36
+ size 3717770