Upload spsl.yaml
spsl.yaml
ADDED
@@ -0,0 +1,88 @@
+# log dir
+log_dir: /mntcephfs/lab_data/zhiyuanyan/benchmark_results/logs_final/spsl_4frames
+
+# model setting
+pretrained: /data/home/zhiyuanyan/DeepfakeBench/training/pretrained/xception-b5690688.pth  # path to a pre-trained model, if using one
+# pretrained: /home/tianshuoge/resnet34-b627a593.pth  # path to a pre-trained model, if using one
+model_name: spsl  # model name
+backbone_name: xception  # backbone name
+
+# backbone setting
+backbone_config:
+  mode: original  # shallow_xception
+  num_classes: 2
+  inc: 4
+  dropout: false
+
+# dataset
+all_dataset: [FaceForensics++, FF-F2F, FF-DF, FF-FS, FF-NT, FaceShifter, DeepFakeDetection, Celeb-DF-v1, Celeb-DF-v2, DFDCP, DFDC, DeeperForensics-1.0, UADFV]
+train_dataset: [FF-FS]
+test_dataset: [FaceForensics++, FF-F2F, FF-DF, FF-FS, FF-NT]
+
+compression: c23  # compression level for videos
+train_batchSize: 32  # training batch size
+test_batchSize: 32  # test batch size
+workers: 8  # number of data loading workers
+frame_num: {'train': 4, 'test': 32}  # number of frames to use per video in training and testing
+resolution: 256  # resolution of output image to network
+with_mask: false  # whether to include mask information in the input
+with_landmark: false  # whether to include facial landmark information in the input
+save_ckpt: true  # whether to save checkpoints
+save_feat: true  # whether to save features
+
+
+# data augmentation
+use_data_augmentation: true  # flag to enable/disable data augmentation
+data_aug:
+  flip_prob: 0.5
+  rotate_prob: 0.5
+  rotate_limit: [-10, 10]
+  blur_prob: 0.5
+  blur_limit: [3, 7]
+  brightness_prob: 0.5
+  brightness_limit: [-0.1, 0.1]
+  contrast_limit: [-0.1, 0.1]
+  quality_lower: 40
+  quality_upper: 100
+
+# mean and std for normalization
+mean: [0.5, 0.5, 0.5]
+std: [0.5, 0.5, 0.5]
+
+# optimizer config
+optimizer:
+  # choose between 'adam' and 'sgd'
+  type: adam
+  adam:
+    lr: 0.0002  # learning rate
+    beta1: 0.9  # beta1 for Adam optimizer
+    beta2: 0.999  # beta2 for Adam optimizer
+    eps: 0.00000001  # epsilon for Adam optimizer
+    weight_decay: 0.0005  # weight decay for regularization
+    amsgrad: false
+  sgd:
+    lr: 0.0002  # learning rate
+    momentum: 0.9  # momentum for SGD optimizer
+    weight_decay: 0.0005  # weight decay for regularization
+
+# training config
+lr_scheduler: null  # learning rate scheduler
+nEpochs: 10  # number of epochs to train for
+start_epoch: 0  # manual epoch number (useful for restarts)
+save_epoch: 1  # interval epochs for saving models
+rec_iter: 100  # interval iterations for recording
+logdir: ./logs  # folder to output images and logs
+manualSeed: 1024  # manual seed for random number generation
+save_ckpt: false  # whether to save checkpoint
+
+# loss function
+loss_func: cross_entropy  # loss function to use
+losstype: null
+
+# metric
+metric_scoring: auc  # metric for evaluation (auc, acc, eer, ap)
+
+# cuda
+
+cuda: true  # whether to use CUDA acceleration
+cudnn: true  # whether to use CuDNN for convolution operations
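For reference, a minimal sketch of how a training script might consume this file, assuming it is saved as `spsl.yaml` in the working directory and loaded with plain PyYAML (DeepfakeBench's own entry point may load and merge configs differently):

```python
import yaml

# Load the detector/training configuration (path is illustrative).
with open("spsl.yaml", "r") as f:
    config = yaml.safe_load(f)

print(config["model_name"])               # spsl
print(config["backbone_config"]["inc"])   # 4 input channels (SPSL feeds RGB plus a phase channel)
print(config["frame_num"]["train"])       # 4 frames per video during training

# Note: `save_ckpt` appears twice in the file (true in the dataset block,
# false in the training block); PyYAML keeps the last occurrence.
print(config["save_ckpt"])                # False
```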