        outputs, features = model(imgs)
        CE_loss = CE_criterion(outputs, targets)
        lsce_loss = lsce_criterion(outputs, targets)
        loss = 2 * lsce_loss + CE_loss
        loss.backward()  # make sure to do a full forward pass
        optimizer.second_step(zero_grad=True)

        train_loss += loss.item()  # .item() accumulates a plain float instead of a tensor
        _, predicts = torch.max(outputs, 1)
        correct_num = torch.eq(predicts, targets).sum()
        correct_sum += correct_num

    train_acc = correct_sum.float() / float(len(train_dataset))
    train_loss = train_loss / iter_cnt
    elapsed = (time() - start_time) / 60
    print('[Epoch %d] Train time:%.2f, Training accuracy:%.4f. Loss: %.3f LR:%.6f' %
          (i, elapsed, train_acc, train_loss, optimizer.param_groups[0]["lr"]))
    scheduler.step()
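    # The step above is the second half of a Sharpness-Aware Minimization (SAM)
    # update. A minimal sketch of the full two-pass pattern, assuming the widely
    # used PyTorch SAM wrapper (first_step/second_step, both with
    # zero_grad=True); shown as comments, with names mirroring the loop above:
    #
    #   outputs, features = model(imgs)
    #   loss = 2 * lsce_criterion(outputs, targets) + CE_criterion(outputs, targets)
    #   loss.backward()
    #   optimizer.first_step(zero_grad=True)   # climb to the perturbed weights w + e(w)
    #
    #   outputs, features = model(imgs)        # full second forward pass at w + e(w)
    #   loss = 2 * lsce_criterion(outputs, targets) + CE_criterion(outputs, targets)
    #   loss.backward()
    #   optimizer.second_step(zero_grad=True)  # descend from the original weights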
    pre_labels = []
    gt_labels = []
    with torch.no_grad():
        val_loss = 0.0
        iter_cnt = 0
        bingo_cnt = 0
        model.eval()

        for batch_i, (imgs, targets) in enumerate(val_loader):
            outputs, features = model(imgs.cuda())
            targets = targets.cuda()
            CE_loss = CE_criterion(outputs, targets)
            loss = CE_loss
            val_loss += loss.item()
            iter_cnt += 1
            _, predicts = torch.max(outputs, 1)
            correct_or_not = torch.eq(predicts, targets)
            bingo_cnt += correct_or_not.sum().cpu()
            pre_labels += predicts.cpu().tolist()
            gt_labels += targets.cpu().tolist()

        val_loss = val_loss / iter_cnt
        val_acc = bingo_cnt.float() / float(val_num)
        val_acc = np.around(val_acc.numpy(), 4)
        # scikit-learn expects the ground truth first: f1_score(y_true, y_pred, ...)
        f1 = f1_score(gt_labels, pre_labels, average='macro')
        total_score = 0.67 * f1 + 0.33 * val_acc
        print("[Epoch %d] Validation accuracy:%.4f, Loss:%.3f, f1:%.4f, score:%.4f" % (
            i, val_acc, val_loss, f1, total_score))

        if val_acc > 0.907 and val_acc > best_acc:
            os.makedirs('./checkpoint', exist_ok=True)  # avoid failing on a missing directory
            torch.save({'iter': i,
                        'model_state_dict': model.state_dict(),
                        'optimizer_state_dict': optimizer.state_dict(), },
                       os.path.join('./checkpoint', "epoch" + str(i) + "_acc" + str(val_acc) + ".pth"))
            print('Model saved.')
        if val_acc > best_acc:
            best_acc = val_acc
            print("best_acc:" + str(best_acc))
if __name__ == "__main__":
    run_training()
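
# The loop above adds an `lsce_loss` on top of plain cross-entropy. A minimal
# sketch of a label-smoothing cross-entropy in the standard uniform-smoothing
# formulation; this class is illustrative and not necessarily the exact
# `lsce_criterion` used above:
class LabelSmoothingCrossEntropy(torch.nn.Module):
    def __init__(self, smoothing=0.1):
        super().__init__()
        self.smoothing = smoothing

    def forward(self, logits, targets):
        log_probs = torch.nn.functional.log_softmax(logits, dim=-1)
        # Negative log-likelihood of the true class for each sample.
        nll = -log_probs.gather(dim=-1, index=targets.unsqueeze(1)).squeeze(1)
        # Uniform penalty spread over all classes.
        smooth = -log_probs.mean(dim=-1)
        return ((1.0 - self.smoothing) * nll + self.smoothing * smooth).mean()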
# <FILESEP>
#!/usr/bin/env python3
# This file is covered by the LICENSE file in the root of this project.

import argparse
import os
import yaml

from auxiliary.laserscan import LaserScan, SemLaserScan
from auxiliary.laserscancomp import LaserScanComp
if __name__ == '__main__':
    parser = argparse.ArgumentParser("./compare.py")
    parser.add_argument(
        '--dataset', '-d',
        type=str,
        required=True,
        help='Dataset to visualize. No Default',
    )
    parser.add_argument(
        '--labels',
        required=True,
        nargs='+',
        help='Labels A to visualize. No Default',
    )
    parser.add_argument(
        '--config', '-c',
        type=str,
        required=False,
        default="config/semantic-kitti.yaml",
        help='Dataset config file. Defaults to %(default)s',
    )
    parser.add_argument(
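
    # A minimal sketch, as comments, of how the parsed flags and the YAML
    # config declared above are typically consumed; `FLAGS` and the error
    # handling are assumptions, not part of the original script:
    #
    #   FLAGS, unparsed = parser.parse_known_args()
    #   try:
    #       with open(FLAGS.config, 'r') as f:
    #           CFG = yaml.safe_load(f)
    #   except Exception:
    #       print("Error opening yaml file %s" % FLAGS.config)
    #       quit()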