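# Standard-library / third-party imports this excerpt relies on:
import numpy as np
import torch
from time import time

# Repo-local names used below (RafDataSet, Affectdataset, Affectdataset_8class,
# pyramid_trans_expr, SAM, LabelSmoothingCrossEntropy, load_pretrained_weights,
# data_transforms, data_transforms_val, args) are assumed to be defined
# elsewhere in the original file.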
if args.dataset == "rafdb":
    datapath = './data/raf-basic/'
    num_classes = 7
    train_dataset = RafDataSet(datapath, train=True, transform=data_transforms, basic_aug=True)
    val_dataset = RafDataSet(datapath, train=False, transform=data_transforms_val)
    model = pyramid_trans_expr(img_size=224, num_classes=num_classes, type=args.modeltype)
elif args.dataset == "affectnet":
    datapath = './data/AffectNet/'
    num_classes = 7
    train_dataset = Affectdataset(datapath, train=True, transform=data_transforms, basic_aug=True)
    val_dataset = Affectdataset(datapath, train=False, transform=data_transforms_val)
    model = pyramid_trans_expr(img_size=224, num_classes=num_classes, type=args.modeltype)
elif args.dataset == "affectnet8class":
    datapath = './data/AffectNet/'
    num_classes = 8
    train_dataset = Affectdataset_8class(datapath, train=True, transform=data_transforms, basic_aug=True)
    val_dataset = Affectdataset_8class(datapath, train=False, transform=data_transforms_val)
    model = pyramid_trans_expr(img_size=224, num_classes=num_classes, type=args.modeltype)
else:
    raise ValueError("Dataset name is not correct.")
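# A hypothetical sketch of the argparse flags this script reads (the flag
# names all appear above and below; the defaults here are assumptions, not
# taken from the original source):
#
#   parser = argparse.ArgumentParser()
#   parser.add_argument('--dataset', type=str, default='rafdb')
#   parser.add_argument('--modeltype', type=str, default='large')
#   parser.add_argument('--checkpoint', type=str, default=None)
#   parser.add_argument('--batch_size', type=int, default=64)
#   parser.add_argument('--val_batch_size', type=int, default=32)
#   parser.add_argument('--workers', type=int, default=4)
#   parser.add_argument('--optimizer', type=str, default='adamw')
#   parser.add_argument('--lr', type=float, default=3.5e-5)
#   parser.add_argument('--momentum', type=float, default=0.9)
#   parser.add_argument('--epochs', type=int, default=100)
#   args = parser.parse_args()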
val_num = len(val_dataset)
print('Train set size:', len(train_dataset))
print('Validation set size:', len(val_dataset))
train_loader = torch.utils.data.DataLoader(train_dataset,
                                           # sampler=ImbalancedDatasetSampler(train_dataset),
                                           batch_size=args.batch_size,
                                           num_workers=args.workers,
                                           shuffle=True,
                                           pin_memory=True)
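# Note: DataLoader treats a custom sampler and shuffle=True as mutually
# exclusive, so re-enabling ImbalancedDatasetSampler above would require
# setting shuffle=False.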
val_loader = torch.utils.data.DataLoader(val_dataset,
                                         batch_size=args.val_batch_size,
                                         num_workers=args.workers,
                                         shuffle=False,
                                         pin_memory=True)
# model = Networks.ResNet18_ARM___RAF()
model = torch.nn.DataParallel(model)
model = model.cuda()
print("batch_size:", args.batch_size)
if args.checkpoint:
    print("Loading pretrained weights...", args.checkpoint)
    checkpoint = torch.load(args.checkpoint)
    # model.load_state_dict(checkpoint["model_state_dict"], strict=False)
    checkpoint = checkpoint["model_state_dict"]
    model = load_pretrained_weights(model, checkpoint)
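# load_pretrained_weights is a repo-local helper; it is assumed here to copy
# only the checkpoint entries whose names and shapes match the current model,
# so a classifier head of a different size can be skipped.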
params = model.parameters()
if args.optimizer == 'adamw':
    # base_optimizer = torch.optim.AdamW(params, args.lr, weight_decay=1e-4)
    base_optimizer = torch.optim.AdamW
elif args.optimizer == 'adam':
    # base_optimizer = torch.optim.Adam(params, args.lr, weight_decay=1e-4)
    base_optimizer = torch.optim.Adam
elif args.optimizer == 'sgd':
    # base_optimizer = torch.optim.SGD(params, args.lr, momentum=args.momentum, weight_decay=1e-4)
    base_optimizer = torch.optim.SGD
else:
    raise ValueError("Optimizer not supported.")
# print(optimizer)
optimizer = SAM(model.parameters(), base_optimizer, lr=args.lr, rho=0.05, adaptive=False)
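# SAM (Sharpness-Aware Minimization) wraps the chosen base optimizer and needs
# two forward-backward passes per batch: first_step() perturbs the weights
# toward the locally worst case within an L2 ball of radius rho, and
# second_step() restores the original weights and applies the base optimizer
# update using the gradients taken at the perturbed point.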
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.98)
model = model.cuda()
parameters = filter(lambda p: p.requires_grad, model.parameters())
parameters = sum([np.prod(p.size()) for p in parameters]) / 1_000_000
print('Total Parameters: %.3fM' % parameters)
CE_criterion = torch.nn.CrossEntropyLoss()
lsce_criterion = LabelSmoothingCrossEntropy(smoothing=0.2)
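# The loop below minimizes a weighted combination of the two criteria:
# loss = 2 * label-smoothing cross-entropy + standard cross-entropy.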
best_acc = 0
for i in range(1, args.epochs + 1):
    train_loss = 0.0
    correct_sum = 0
    iter_cnt = 0
    start_time = time()
    model.train()
    for batch_i, (imgs, targets) in enumerate(train_loader):
        iter_cnt += 1
        optimizer.zero_grad()
        imgs = imgs.cuda()
        outputs, features = model(imgs)
        targets = targets.cuda()
        CE_loss = CE_criterion(outputs, targets)
        lsce_loss = lsce_criterion(outputs, targets)
        loss = 2 * lsce_loss + CE_loss
        loss.backward()
        optimizer.first_step(zero_grad=True)
        # second forward-backward pass
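        # A minimal sketch of the standard SAM second pass, assuming the same
        # combined loss is recomputed at the perturbed weights; second_step()
        # then restores the weights and applies the base optimizer update.
        outputs, features = model(imgs)
        CE_loss = CE_criterion(outputs, targets)
        lsce_loss = lsce_criterion(outputs, targets)
        loss = 2 * lsce_loss + CE_loss
        loss.backward()
        optimizer.second_step(zero_grad=True)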