diff --git a/app.py b/app.py index 9f640a15fe3810a390cee46d1d7392655ece2d1d..614bec217cb20c972615668495550a6040ed9c2f 100644 --- a/app.py +++ b/app.py @@ -1,6 +1,6 @@ import sys sys.path.append('.') -sys.path.append('./face_recognition1') +sys.path.append('./face_recognition') import os import io import cv2 @@ -14,15 +14,15 @@ import configparser import numpy as np from PIL import Image -# from face_recognition.match import match_1_1 -from face_recognition1.run import match_image +from face_recognition.match import match_1_1 +# from face_recognition1.run import match_image def face_recognition_on_file(file1, file2): img1 = cv2.imread(file1) img2 = cv2.imread(file2) - response = match_image(img1, img2) + response = match_1_1(img1, img2) return response diff --git a/face_recognition1/face_detect/checkpoints/FaceBoxesProd.pth b/face_recognition1/face_detect/checkpoints/FaceBoxesProd.pth deleted file mode 100644 index 79a95d90883a1ed2b178ca29c8719d6164fadf1f..0000000000000000000000000000000000000000 --- a/face_recognition1/face_detect/checkpoints/FaceBoxesProd.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:c0cb2b1e40710c0aa5fc32a8759b1496a0fe0a126c907ca0ffe35b4bd0709d09 -size 4072492 diff --git a/face_recognition1/face_detect/checkpoints/Widerface-RetinaFace.caffemodel b/face_recognition1/face_detect/checkpoints/Widerface-RetinaFace.caffemodel deleted file mode 100644 index 735a2e60f329ce2ded8baeefd88d3c2df8e4d939..0000000000000000000000000000000000000000 --- a/face_recognition1/face_detect/checkpoints/Widerface-RetinaFace.caffemodel +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:d08338a2c207df16a9c566f767fea67fb43ba6fff76ce11e938fe3fabefb9402 -size 1866013 diff --git a/face_recognition1/face_detect/checkpoints/deploy.prototxt b/face_recognition1/face_detect/checkpoints/deploy.prototxt deleted file mode 100644 index 
6821aca03203a68107735897b3406605e2f9c520..0000000000000000000000000000000000000000 --- a/face_recognition1/face_detect/checkpoints/deploy.prototxt +++ /dev/null @@ -1,2499 +0,0 @@ -name: "20200403141819_Widerface-RetinaFace_mb_640_negscope-0_epoch_4" -input: "data" -input_dim: 1 -input_dim: 3 -input_dim: 640 -input_dim: 640 -layer { - name: "conv1" - type: "Convolution" - bottom: "data" - top: "conv_blob1" - convolution_param { - num_output: 8 - bias_term: false - pad: 1 - kernel_size: 3 - group: 1 - stride: 2 - weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: "batch_norm1" - type: "BatchNorm" - bottom: "conv_blob1" - top: "batch_norm_blob1" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale1" - type: "Scale" - bottom: "batch_norm_blob1" - top: "batch_norm_blob1" - scale_param { - bias_term: true - } -} -layer { - name: "relu1" - type: "ReLU" - bottom: "batch_norm_blob1" - top: "relu_blob1" -} -layer { - name: "conv2" - type: "Convolution" - bottom: "relu_blob1" - top: "conv_blob2" - convolution_param { - num_output: 8 - bias_term: false - pad: 1 - kernel_size: 3 - group: 8 - stride: 1 - weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: "batch_norm2" - type: "BatchNorm" - bottom: "conv_blob2" - top: "batch_norm_blob2" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale2" - type: "Scale" - bottom: "batch_norm_blob2" - top: "batch_norm_blob2" - scale_param { - bias_term: true - } -} -layer { - name: "relu2" - type: "ReLU" - bottom: "batch_norm_blob2" - top: "relu_blob2" -} -layer { - name: "conv3" - type: "Convolution" - bottom: "relu_blob2" - top: "conv_blob3" - convolution_param { - num_output: 16 - bias_term: false - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: "batch_norm3" - type: "BatchNorm" - bottom: "conv_blob3" - top: 
"batch_norm_blob3" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale3" - type: "Scale" - bottom: "batch_norm_blob3" - top: "batch_norm_blob3" - scale_param { - bias_term: true - } -} -layer { - name: "relu3" - type: "ReLU" - bottom: "batch_norm_blob3" - top: "relu_blob3" -} -layer { - name: "conv4" - type: "Convolution" - bottom: "relu_blob3" - top: "conv_blob4" - convolution_param { - num_output: 16 - bias_term: false - pad: 1 - kernel_size: 3 - group: 16 - stride: 2 - weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: "batch_norm4" - type: "BatchNorm" - bottom: "conv_blob4" - top: "batch_norm_blob4" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale4" - type: "Scale" - bottom: "batch_norm_blob4" - top: "batch_norm_blob4" - scale_param { - bias_term: true - } -} -layer { - name: "relu4" - type: "ReLU" - bottom: "batch_norm_blob4" - top: "relu_blob4" -} -layer { - name: "conv5" - type: "Convolution" - bottom: "relu_blob4" - top: "conv_blob5" - convolution_param { - num_output: 32 - bias_term: false - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: "batch_norm5" - type: "BatchNorm" - bottom: "conv_blob5" - top: "batch_norm_blob5" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale5" - type: "Scale" - bottom: "batch_norm_blob5" - top: "batch_norm_blob5" - scale_param { - bias_term: true - } -} -layer { - name: "relu5" - type: "ReLU" - bottom: "batch_norm_blob5" - top: "relu_blob5" -} -layer { - name: "conv6" - type: "Convolution" - bottom: "relu_blob5" - top: "conv_blob6" - convolution_param { - num_output: 32 - bias_term: false - pad: 1 - kernel_size: 3 - group: 32 - stride: 1 - weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: "batch_norm6" - type: "BatchNorm" - bottom: "conv_blob6" - top: 
"batch_norm_blob6" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale6" - type: "Scale" - bottom: "batch_norm_blob6" - top: "batch_norm_blob6" - scale_param { - bias_term: true - } -} -layer { - name: "relu6" - type: "ReLU" - bottom: "batch_norm_blob6" - top: "relu_blob6" -} -layer { - name: "conv7" - type: "Convolution" - bottom: "relu_blob6" - top: "conv_blob7" - convolution_param { - num_output: 32 - bias_term: false - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: "batch_norm7" - type: "BatchNorm" - bottom: "conv_blob7" - top: "batch_norm_blob7" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale7" - type: "Scale" - bottom: "batch_norm_blob7" - top: "batch_norm_blob7" - scale_param { - bias_term: true - } -} -layer { - name: "relu7" - type: "ReLU" - bottom: "batch_norm_blob7" - top: "relu_blob7" -} -layer { - name: "conv8" - type: "Convolution" - bottom: "relu_blob7" - top: "conv_blob8" - convolution_param { - num_output: 32 - bias_term: false - pad: 1 - kernel_size: 3 - group: 32 - stride: 2 - weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: "batch_norm8" - type: "BatchNorm" - bottom: "conv_blob8" - top: "batch_norm_blob8" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale8" - type: "Scale" - bottom: "batch_norm_blob8" - top: "batch_norm_blob8" - scale_param { - bias_term: true - } -} -layer { - name: "relu8" - type: "ReLU" - bottom: "batch_norm_blob8" - top: "relu_blob8" -} -layer { - name: "conv9" - type: "Convolution" - bottom: "relu_blob8" - top: "conv_blob9" - convolution_param { - num_output: 64 - bias_term: false - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: "batch_norm9" - type: "BatchNorm" - bottom: "conv_blob9" - top: 
"batch_norm_blob9" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale9" - type: "Scale" - bottom: "batch_norm_blob9" - top: "batch_norm_blob9" - scale_param { - bias_term: true - } -} -layer { - name: "relu9" - type: "ReLU" - bottom: "batch_norm_blob9" - top: "relu_blob9" -} -layer { - name: "conv10" - type: "Convolution" - bottom: "relu_blob9" - top: "conv_blob10" - convolution_param { - num_output: 64 - bias_term: false - pad: 1 - kernel_size: 3 - group: 64 - stride: 1 - weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: "batch_norm10" - type: "BatchNorm" - bottom: "conv_blob10" - top: "batch_norm_blob10" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale10" - type: "Scale" - bottom: "batch_norm_blob10" - top: "batch_norm_blob10" - scale_param { - bias_term: true - } -} -layer { - name: "relu10" - type: "ReLU" - bottom: "batch_norm_blob10" - top: "relu_blob10" -} -layer { - name: "conv11" - type: "Convolution" - bottom: "relu_blob10" - top: "conv_blob11" - convolution_param { - num_output: 64 - bias_term: false - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: "batch_norm11" - type: "BatchNorm" - bottom: "conv_blob11" - top: "batch_norm_blob11" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale11" - type: "Scale" - bottom: "batch_norm_blob11" - top: "batch_norm_blob11" - scale_param { - bias_term: true - } -} -layer { - name: "relu11" - type: "ReLU" - bottom: "batch_norm_blob11" - top: "relu_blob11" -} -layer { - name: "conv12" - type: "Convolution" - bottom: "relu_blob11" - top: "conv_blob12" - convolution_param { - num_output: 64 - bias_term: false - pad: 1 - kernel_size: 3 - group: 64 - stride: 2 - weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: "batch_norm12" - type: "BatchNorm" - 
bottom: "conv_blob12" - top: "batch_norm_blob12" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale12" - type: "Scale" - bottom: "batch_norm_blob12" - top: "batch_norm_blob12" - scale_param { - bias_term: true - } -} -layer { - name: "relu12" - type: "ReLU" - bottom: "batch_norm_blob12" - top: "relu_blob12" -} -layer { - name: "conv13" - type: "Convolution" - bottom: "relu_blob12" - top: "conv_blob13" - convolution_param { - num_output: 128 - bias_term: false - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: "batch_norm13" - type: "BatchNorm" - bottom: "conv_blob13" - top: "batch_norm_blob13" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale13" - type: "Scale" - bottom: "batch_norm_blob13" - top: "batch_norm_blob13" - scale_param { - bias_term: true - } -} -layer { - name: "relu13" - type: "ReLU" - bottom: "batch_norm_blob13" - top: "relu_blob13" -} -layer { - name: "conv14" - type: "Convolution" - bottom: "relu_blob13" - top: "conv_blob14" - convolution_param { - num_output: 128 - bias_term: false - pad: 1 - kernel_size: 3 - group: 128 - stride: 1 - weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: "batch_norm14" - type: "BatchNorm" - bottom: "conv_blob14" - top: "batch_norm_blob14" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale14" - type: "Scale" - bottom: "batch_norm_blob14" - top: "batch_norm_blob14" - scale_param { - bias_term: true - } -} -layer { - name: "relu14" - type: "ReLU" - bottom: "batch_norm_blob14" - top: "relu_blob14" -} -layer { - name: "conv15" - type: "Convolution" - bottom: "relu_blob14" - top: "conv_blob15" - convolution_param { - num_output: 128 - bias_term: false - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: 
"batch_norm15" - type: "BatchNorm" - bottom: "conv_blob15" - top: "batch_norm_blob15" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale15" - type: "Scale" - bottom: "batch_norm_blob15" - top: "batch_norm_blob15" - scale_param { - bias_term: true - } -} -layer { - name: "relu15" - type: "ReLU" - bottom: "batch_norm_blob15" - top: "relu_blob15" -} -layer { - name: "conv16" - type: "Convolution" - bottom: "relu_blob15" - top: "conv_blob16" - convolution_param { - num_output: 128 - bias_term: false - pad: 1 - kernel_size: 3 - group: 128 - stride: 1 - weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: "batch_norm16" - type: "BatchNorm" - bottom: "conv_blob16" - top: "batch_norm_blob16" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale16" - type: "Scale" - bottom: "batch_norm_blob16" - top: "batch_norm_blob16" - scale_param { - bias_term: true - } -} -layer { - name: "relu16" - type: "ReLU" - bottom: "batch_norm_blob16" - top: "relu_blob16" -} -layer { - name: "conv17" - type: "Convolution" - bottom: "relu_blob16" - top: "conv_blob17" - convolution_param { - num_output: 128 - bias_term: false - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: "batch_norm17" - type: "BatchNorm" - bottom: "conv_blob17" - top: "batch_norm_blob17" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale17" - type: "Scale" - bottom: "batch_norm_blob17" - top: "batch_norm_blob17" - scale_param { - bias_term: true - } -} -layer { - name: "relu17" - type: "ReLU" - bottom: "batch_norm_blob17" - top: "relu_blob17" -} -layer { - name: "conv18" - type: "Convolution" - bottom: "relu_blob17" - top: "conv_blob18" - convolution_param { - num_output: 128 - bias_term: false - pad: 1 - kernel_size: 3 - group: 128 - stride: 1 - weight_filler { - type: "xavier" - } - 
dilation: 1 - } -} -layer { - name: "batch_norm18" - type: "BatchNorm" - bottom: "conv_blob18" - top: "batch_norm_blob18" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale18" - type: "Scale" - bottom: "batch_norm_blob18" - top: "batch_norm_blob18" - scale_param { - bias_term: true - } -} -layer { - name: "relu18" - type: "ReLU" - bottom: "batch_norm_blob18" - top: "relu_blob18" -} -layer { - name: "conv19" - type: "Convolution" - bottom: "relu_blob18" - top: "conv_blob19" - convolution_param { - num_output: 128 - bias_term: false - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: "batch_norm19" - type: "BatchNorm" - bottom: "conv_blob19" - top: "batch_norm_blob19" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale19" - type: "Scale" - bottom: "batch_norm_blob19" - top: "batch_norm_blob19" - scale_param { - bias_term: true - } -} -layer { - name: "relu19" - type: "ReLU" - bottom: "batch_norm_blob19" - top: "relu_blob19" -} -layer { - name: "conv20" - type: "Convolution" - bottom: "relu_blob19" - top: "conv_blob20" - convolution_param { - num_output: 128 - bias_term: false - pad: 1 - kernel_size: 3 - group: 128 - stride: 1 - weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: "batch_norm20" - type: "BatchNorm" - bottom: "conv_blob20" - top: "batch_norm_blob20" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale20" - type: "Scale" - bottom: "batch_norm_blob20" - top: "batch_norm_blob20" - scale_param { - bias_term: true - } -} -layer { - name: "relu20" - type: "ReLU" - bottom: "batch_norm_blob20" - top: "relu_blob20" -} -layer { - name: "conv21" - type: "Convolution" - bottom: "relu_blob20" - top: "conv_blob21" - convolution_param { - num_output: 128 - bias_term: false - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - 
weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: "batch_norm21" - type: "BatchNorm" - bottom: "conv_blob21" - top: "batch_norm_blob21" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale21" - type: "Scale" - bottom: "batch_norm_blob21" - top: "batch_norm_blob21" - scale_param { - bias_term: true - } -} -layer { - name: "relu21" - type: "ReLU" - bottom: "batch_norm_blob21" - top: "relu_blob21" -} -layer { - name: "conv22" - type: "Convolution" - bottom: "relu_blob21" - top: "conv_blob22" - convolution_param { - num_output: 128 - bias_term: false - pad: 1 - kernel_size: 3 - group: 128 - stride: 1 - weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: "batch_norm22" - type: "BatchNorm" - bottom: "conv_blob22" - top: "batch_norm_blob22" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale22" - type: "Scale" - bottom: "batch_norm_blob22" - top: "batch_norm_blob22" - scale_param { - bias_term: true - } -} -layer { - name: "relu22" - type: "ReLU" - bottom: "batch_norm_blob22" - top: "relu_blob22" -} -layer { - name: "conv23" - type: "Convolution" - bottom: "relu_blob22" - top: "conv_blob23" - convolution_param { - num_output: 128 - bias_term: false - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: "batch_norm23" - type: "BatchNorm" - bottom: "conv_blob23" - top: "batch_norm_blob23" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale23" - type: "Scale" - bottom: "batch_norm_blob23" - top: "batch_norm_blob23" - scale_param { - bias_term: true - } -} -layer { - name: "relu23" - type: "ReLU" - bottom: "batch_norm_blob23" - top: "relu_blob23" -} -layer { - name: "conv24" - type: "Convolution" - bottom: "relu_blob23" - top: "conv_blob24" - convolution_param { - num_output: 128 - bias_term: false - pad: 1 - 
kernel_size: 3 - group: 128 - stride: 2 - weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: "batch_norm24" - type: "BatchNorm" - bottom: "conv_blob24" - top: "batch_norm_blob24" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale24" - type: "Scale" - bottom: "batch_norm_blob24" - top: "batch_norm_blob24" - scale_param { - bias_term: true - } -} -layer { - name: "relu24" - type: "ReLU" - bottom: "batch_norm_blob24" - top: "relu_blob24" -} -layer { - name: "conv25" - type: "Convolution" - bottom: "relu_blob24" - top: "conv_blob25" - convolution_param { - num_output: 256 - bias_term: false - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: "batch_norm25" - type: "BatchNorm" - bottom: "conv_blob25" - top: "batch_norm_blob25" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale25" - type: "Scale" - bottom: "batch_norm_blob25" - top: "batch_norm_blob25" - scale_param { - bias_term: true - } -} -layer { - name: "relu25" - type: "ReLU" - bottom: "batch_norm_blob25" - top: "relu_blob25" -} -layer { - name: "conv26" - type: "Convolution" - bottom: "relu_blob25" - top: "conv_blob26" - convolution_param { - num_output: 256 - bias_term: false - pad: 1 - kernel_size: 3 - group: 256 - stride: 1 - weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: "batch_norm26" - type: "BatchNorm" - bottom: "conv_blob26" - top: "batch_norm_blob26" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale26" - type: "Scale" - bottom: "batch_norm_blob26" - top: "batch_norm_blob26" - scale_param { - bias_term: true - } -} -layer { - name: "relu26" - type: "ReLU" - bottom: "batch_norm_blob26" - top: "relu_blob26" -} -layer { - name: "conv27" - type: "Convolution" - bottom: "relu_blob26" - top: "conv_blob27" - convolution_param { - 
num_output: 256 - bias_term: false - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: "batch_norm27" - type: "BatchNorm" - bottom: "conv_blob27" - top: "batch_norm_blob27" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale27" - type: "Scale" - bottom: "batch_norm_blob27" - top: "batch_norm_blob27" - scale_param { - bias_term: true - } -} -layer { - name: "relu27" - type: "ReLU" - bottom: "batch_norm_blob27" - top: "relu_blob27" -} -layer { - name: "conv28" - type: "Convolution" - bottom: "relu_blob11" - top: "conv_blob28" - convolution_param { - num_output: 64 - bias_term: false - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: "batch_norm28" - type: "BatchNorm" - bottom: "conv_blob28" - top: "batch_norm_blob28" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale28" - type: "Scale" - bottom: "batch_norm_blob28" - top: "batch_norm_blob28" - scale_param { - bias_term: true - } -} -layer { - name: "relu28" - type: "ReLU" - bottom: "batch_norm_blob28" - top: "relu_blob28" -} -layer { - name: "conv29" - type: "Convolution" - bottom: "relu_blob23" - top: "conv_blob29" - convolution_param { - num_output: 64 - bias_term: false - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: "batch_norm29" - type: "BatchNorm" - bottom: "conv_blob29" - top: "batch_norm_blob29" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale29" - type: "Scale" - bottom: "batch_norm_blob29" - top: "batch_norm_blob29" - scale_param { - bias_term: true - } -} -layer { - name: "relu29" - type: "ReLU" - bottom: "batch_norm_blob29" - top: "relu_blob29" -} -layer { - name: "conv30" - type: "Convolution" - bottom: "relu_blob27" - top: 
"conv_blob30" - convolution_param { - num_output: 64 - bias_term: false - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: "batch_norm30" - type: "BatchNorm" - bottom: "conv_blob30" - top: "batch_norm_blob30" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale30" - type: "Scale" - bottom: "batch_norm_blob30" - top: "batch_norm_blob30" - scale_param { - bias_term: true - } -} -layer { - name: "relu30" - type: "ReLU" - bottom: "batch_norm_blob30" - top: "relu_blob30" -} -layer { - name: "conv_transpose1" - type: "Deconvolution" - bottom: "relu_blob30" - top: "conv_transpose_blob1" - convolution_param { - num_output: 64 - bias_term: true - pad: 0 - kernel_size: 2 - group: 1 - stride: 2 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - dilation: 1 - } -} -layer { - name: "crop1" - type: "Crop" - bottom: "conv_transpose_blob1" - bottom: "relu_blob29" - top: "crop1" -} -layer { - name: "add1" - type: "Eltwise" - bottom: "relu_blob29" - bottom: "crop1" - top: "add_blob1" - eltwise_param { - operation: SUM - } -} -layer { - name: "conv31" - type: "Convolution" - bottom: "add_blob1" - top: "conv_blob31" - convolution_param { - num_output: 64 - bias_term: false - pad: 1 - kernel_size: 3 - group: 1 - stride: 1 - weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: "batch_norm31" - type: "BatchNorm" - bottom: "conv_blob31" - top: "batch_norm_blob31" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale31" - type: "Scale" - bottom: "batch_norm_blob31" - top: "batch_norm_blob31" - scale_param { - bias_term: true - } -} -layer { - name: "relu31" - type: "ReLU" - bottom: "batch_norm_blob31" - top: "relu_blob31" -} -layer { - name: "conv_transpose2" - type: "Deconvolution" - bottom: "relu_blob31" - top: "conv_transpose_blob2" - convolution_param { - num_output: 
64 - bias_term: true - pad: 0 - kernel_size: 2 - group: 1 - stride: 2 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - dilation: 1 - } -} -layer { - name: "crop2" - type: "Crop" - bottom: "conv_transpose_blob2" - bottom: "relu_blob28" - top: "crop2" -} -layer { - name: "add2" - type: "Eltwise" - bottom: "relu_blob28" - bottom: "crop2" - top: "add_blob2" - eltwise_param { - operation: SUM - } -} -layer { - name: "conv32" - type: "Convolution" - bottom: "add_blob2" - top: "conv_blob32" - convolution_param { - num_output: 64 - bias_term: false - pad: 1 - kernel_size: 3 - group: 1 - stride: 1 - weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: "batch_norm32" - type: "BatchNorm" - bottom: "conv_blob32" - top: "batch_norm_blob32" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale32" - type: "Scale" - bottom: "batch_norm_blob32" - top: "batch_norm_blob32" - scale_param { - bias_term: true - } -} -layer { - name: "relu32" - type: "ReLU" - bottom: "batch_norm_blob32" - top: "relu_blob32" -} -layer { - name: "conv33" - type: "Convolution" - bottom: "relu_blob32" - top: "conv_blob33" - convolution_param { - num_output: 32 - bias_term: false - pad: 1 - kernel_size: 3 - group: 1 - stride: 1 - weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: "batch_norm33" - type: "BatchNorm" - bottom: "conv_blob33" - top: "batch_norm_blob33" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale33" - type: "Scale" - bottom: "batch_norm_blob33" - top: "batch_norm_blob33" - scale_param { - bias_term: true - } -} -layer { - name: "conv34" - type: "Convolution" - bottom: "relu_blob32" - top: "conv_blob34" - convolution_param { - num_output: 16 - bias_term: false - pad: 1 - kernel_size: 3 - group: 1 - stride: 1 - weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: "batch_norm34" - type: "BatchNorm" - 
bottom: "conv_blob34" - top: "batch_norm_blob34" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale34" - type: "Scale" - bottom: "batch_norm_blob34" - top: "batch_norm_blob34" - scale_param { - bias_term: true - } -} -layer { - name: "relu33" - type: "ReLU" - bottom: "batch_norm_blob34" - top: "relu_blob33" -} -layer { - name: "conv35" - type: "Convolution" - bottom: "relu_blob33" - top: "conv_blob35" - convolution_param { - num_output: 16 - bias_term: false - pad: 1 - kernel_size: 3 - group: 1 - stride: 1 - weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: "batch_norm35" - type: "BatchNorm" - bottom: "conv_blob35" - top: "batch_norm_blob35" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale35" - type: "Scale" - bottom: "batch_norm_blob35" - top: "batch_norm_blob35" - scale_param { - bias_term: true - } -} -layer { - name: "conv36" - type: "Convolution" - bottom: "relu_blob33" - top: "conv_blob36" - convolution_param { - num_output: 16 - bias_term: false - pad: 1 - kernel_size: 3 - group: 1 - stride: 1 - weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: "batch_norm36" - type: "BatchNorm" - bottom: "conv_blob36" - top: "batch_norm_blob36" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale36" - type: "Scale" - bottom: "batch_norm_blob36" - top: "batch_norm_blob36" - scale_param { - bias_term: true - } -} -layer { - name: "relu34" - type: "ReLU" - bottom: "batch_norm_blob36" - top: "relu_blob34" -} -layer { - name: "conv37" - type: "Convolution" - bottom: "relu_blob34" - top: "conv_blob37" - convolution_param { - num_output: 16 - bias_term: false - pad: 1 - kernel_size: 3 - group: 1 - stride: 1 - weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: "batch_norm37" - type: "BatchNorm" - bottom: "conv_blob37" - top: "batch_norm_blob37" - 
batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale37" - type: "Scale" - bottom: "batch_norm_blob37" - top: "batch_norm_blob37" - scale_param { - bias_term: true - } -} -layer { - name: "cat1" - type: "Concat" - bottom: "batch_norm_blob33" - bottom: "batch_norm_blob35" - bottom: "batch_norm_blob37" - top: "cat_blob1" - concat_param { - axis: 1 - } -} -layer { - name: "relu35" - type: "ReLU" - bottom: "cat_blob1" - top: "relu_blob35" -} -layer { - name: "conv38" - type: "Convolution" - bottom: "relu_blob31" - top: "conv_blob38" - convolution_param { - num_output: 32 - bias_term: false - pad: 1 - kernel_size: 3 - group: 1 - stride: 1 - weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: "batch_norm38" - type: "BatchNorm" - bottom: "conv_blob38" - top: "batch_norm_blob38" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale38" - type: "Scale" - bottom: "batch_norm_blob38" - top: "batch_norm_blob38" - scale_param { - bias_term: true - } -} -layer { - name: "conv39" - type: "Convolution" - bottom: "relu_blob31" - top: "conv_blob39" - convolution_param { - num_output: 16 - bias_term: false - pad: 1 - kernel_size: 3 - group: 1 - stride: 1 - weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: "batch_norm39" - type: "BatchNorm" - bottom: "conv_blob39" - top: "batch_norm_blob39" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale39" - type: "Scale" - bottom: "batch_norm_blob39" - top: "batch_norm_blob39" - scale_param { - bias_term: true - } -} -layer { - name: "relu36" - type: "ReLU" - bottom: "batch_norm_blob39" - top: "relu_blob36" -} -layer { - name: "conv40" - type: "Convolution" - bottom: "relu_blob36" - top: "conv_blob40" - convolution_param { - num_output: 16 - bias_term: false - pad: 1 - kernel_size: 3 - group: 1 - stride: 1 - weight_filler { - type: "xavier" - } - dilation: 1 - 
} -} -layer { - name: "batch_norm40" - type: "BatchNorm" - bottom: "conv_blob40" - top: "batch_norm_blob40" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale40" - type: "Scale" - bottom: "batch_norm_blob40" - top: "batch_norm_blob40" - scale_param { - bias_term: true - } -} -layer { - name: "conv41" - type: "Convolution" - bottom: "relu_blob36" - top: "conv_blob41" - convolution_param { - num_output: 16 - bias_term: false - pad: 1 - kernel_size: 3 - group: 1 - stride: 1 - weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: "batch_norm41" - type: "BatchNorm" - bottom: "conv_blob41" - top: "batch_norm_blob41" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale41" - type: "Scale" - bottom: "batch_norm_blob41" - top: "batch_norm_blob41" - scale_param { - bias_term: true - } -} -layer { - name: "relu37" - type: "ReLU" - bottom: "batch_norm_blob41" - top: "relu_blob37" -} -layer { - name: "conv42" - type: "Convolution" - bottom: "relu_blob37" - top: "conv_blob42" - convolution_param { - num_output: 16 - bias_term: false - pad: 1 - kernel_size: 3 - group: 1 - stride: 1 - weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: "batch_norm42" - type: "BatchNorm" - bottom: "conv_blob42" - top: "batch_norm_blob42" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale42" - type: "Scale" - bottom: "batch_norm_blob42" - top: "batch_norm_blob42" - scale_param { - bias_term: true - } -} -layer { - name: "cat2" - type: "Concat" - bottom: "batch_norm_blob38" - bottom: "batch_norm_blob40" - bottom: "batch_norm_blob42" - top: "cat_blob2" - concat_param { - axis: 1 - } -} -layer { - name: "relu38" - type: "ReLU" - bottom: "cat_blob2" - top: "relu_blob38" -} -layer { - name: "conv43" - type: "Convolution" - bottom: "relu_blob30" - top: "conv_blob43" - convolution_param { - num_output: 32 - bias_term: 
false - pad: 1 - kernel_size: 3 - group: 1 - stride: 1 - weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: "batch_norm43" - type: "BatchNorm" - bottom: "conv_blob43" - top: "batch_norm_blob43" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale43" - type: "Scale" - bottom: "batch_norm_blob43" - top: "batch_norm_blob43" - scale_param { - bias_term: true - } -} -layer { - name: "conv44" - type: "Convolution" - bottom: "relu_blob30" - top: "conv_blob44" - convolution_param { - num_output: 16 - bias_term: false - pad: 1 - kernel_size: 3 - group: 1 - stride: 1 - weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: "batch_norm44" - type: "BatchNorm" - bottom: "conv_blob44" - top: "batch_norm_blob44" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale44" - type: "Scale" - bottom: "batch_norm_blob44" - top: "batch_norm_blob44" - scale_param { - bias_term: true - } -} -layer { - name: "relu39" - type: "ReLU" - bottom: "batch_norm_blob44" - top: "relu_blob39" -} -layer { - name: "conv45" - type: "Convolution" - bottom: "relu_blob39" - top: "conv_blob45" - convolution_param { - num_output: 16 - bias_term: false - pad: 1 - kernel_size: 3 - group: 1 - stride: 1 - weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: "batch_norm45" - type: "BatchNorm" - bottom: "conv_blob45" - top: "batch_norm_blob45" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale45" - type: "Scale" - bottom: "batch_norm_blob45" - top: "batch_norm_blob45" - scale_param { - bias_term: true - } -} -layer { - name: "conv46" - type: "Convolution" - bottom: "relu_blob39" - top: "conv_blob46" - convolution_param { - num_output: 16 - bias_term: false - pad: 1 - kernel_size: 3 - group: 1 - stride: 1 - weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: "batch_norm46" - type: 
"BatchNorm" - bottom: "conv_blob46" - top: "batch_norm_blob46" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale46" - type: "Scale" - bottom: "batch_norm_blob46" - top: "batch_norm_blob46" - scale_param { - bias_term: true - } -} -layer { - name: "relu40" - type: "ReLU" - bottom: "batch_norm_blob46" - top: "relu_blob40" -} -layer { - name: "conv47" - type: "Convolution" - bottom: "relu_blob40" - top: "conv_blob47" - convolution_param { - num_output: 16 - bias_term: false - pad: 1 - kernel_size: 3 - group: 1 - stride: 1 - weight_filler { - type: "xavier" - } - dilation: 1 - } -} -layer { - name: "batch_norm47" - type: "BatchNorm" - bottom: "conv_blob47" - top: "batch_norm_blob47" - batch_norm_param { - use_global_stats: true - eps: 9.9999997e-06 - } -} -layer { - name: "bn_scale47" - type: "Scale" - bottom: "batch_norm_blob47" - top: "batch_norm_blob47" - scale_param { - bias_term: true - } -} -layer { - name: "cat3" - type: "Concat" - bottom: "batch_norm_blob43" - bottom: "batch_norm_blob45" - bottom: "batch_norm_blob47" - top: "cat_blob3" - concat_param { - axis: 1 - } -} -layer { - name: "relu41" - type: "ReLU" - bottom: "cat_blob3" - top: "relu_blob41" -} -layer { - name: "conv48" - type: "Convolution" - bottom: "relu_blob35" - top: "conv_blob48" - convolution_param { - num_output: 8 - bias_term: true - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - dilation: 1 - } -} -layer { - name: "conv49" - type: "Convolution" - bottom: "relu_blob35" - top: "conv_blob49" - convolution_param { - num_output: 4 - bias_term: true - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - dilation: 1 - } -} -layer { - name: "conv50" - type: "Convolution" - bottom: "relu_blob38" - top: "conv_blob50" - convolution_param { - num_output: 8 - bias_term: true - pad: 0 - kernel_size: 1 - 
group: 1 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - dilation: 1 - } -} -layer { - name: "conv51" - type: "Convolution" - bottom: "relu_blob38" - top: "conv_blob51" - convolution_param { - num_output: 4 - bias_term: true - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - dilation: 1 - } -} -layer { - name: "conv52" - type: "Convolution" - bottom: "relu_blob41" - top: "conv_blob52" - convolution_param { - num_output: 8 - bias_term: true - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - dilation: 1 - } -} -layer { - name: "conv53" - type: "Convolution" - bottom: "relu_blob41" - top: "conv_blob53" - convolution_param { - num_output: 4 - bias_term: true - pad: 0 - kernel_size: 1 - group: 1 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - dilation: 1 - } -} -############ prior box ########### - -layer { - name: "conv4_3_norm_mbox_loc_perm" - type: "Permute" - bottom: "conv_blob48" - top: "conv4_3_norm_mbox_loc_perm" - permute_param { - order: 0 - order: 2 - order: 3 - order: 1 - } -} -layer { - name: "conv4_3_norm_mbox_loc_flat" - type: "Flatten" - bottom: "conv4_3_norm_mbox_loc_perm" - top: "conv4_3_norm_mbox_loc_flat" - flatten_param { - axis: 1 - } -} -layer { - name: "conv4_3_norm_mbox_conf_perm" - type: "Permute" - bottom: "conv_blob49" - top: "conv4_3_norm_mbox_conf_perm" - permute_param { - order: 0 - order: 2 - order: 3 - order: 1 - } -} -layer { - name: "conv4_3_norm_mbox_conf_flat" - type: "Flatten" - bottom: "conv4_3_norm_mbox_conf_perm" - top: "conv4_3_norm_mbox_conf_flat" - flatten_param { - axis: 1 - } -} -layer { - name: "conv4_3_norm_mbox_priorbox" - type: "PriorBox" - bottom: "relu_blob35" - bottom: "data" - top: "conv4_3_norm_mbox_priorbox" - prior_box_param { - min_size: 16.0 - min_size: 32.0 - clip: false - 
variance: 0.1 - variance: 0.1 - variance: 0.2 - variance: 0.2 - step: 8.0 - offset: 0.5 - } -} - -layer { - name: "conv5_3_norm_mbox_loc_perm" - type: "Permute" - bottom: "conv_blob50" - top: "conv5_3_norm_mbox_loc_perm" - permute_param { - order: 0 - order: 2 - order: 3 - order: 1 - } -} -layer { - name: "conv5_3_norm_mbox_loc_flat" - type: "Flatten" - bottom: "conv5_3_norm_mbox_loc_perm" - top: "conv5_3_norm_mbox_loc_flat" - flatten_param { - axis: 1 - } -} -layer { - name: "conv5_3_norm_mbox_conf_perm" - type: "Permute" - bottom: "conv_blob51" - top: "conv5_3_norm_mbox_conf_perm" - permute_param { - order: 0 - order: 2 - order: 3 - order: 1 - } -} -layer { - name: "conv5_3_norm_mbox_conf_flat" - type: "Flatten" - bottom: "conv5_3_norm_mbox_conf_perm" - top: "conv5_3_norm_mbox_conf_flat" - flatten_param { - axis: 1 - } -} -layer { - name: "conv5_3_norm_mbox_priorbox" - type: "PriorBox" - bottom: "relu_blob38" - bottom: "data" - top: "conv5_3_norm_mbox_priorbox" - prior_box_param { - min_size: 64.0 - min_size: 128.0 - clip: false - variance: 0.1 - variance: 0.1 - variance: 0.2 - variance: 0.2 - step: 16.0 - offset: 0.5 - } -} - -layer { - name: "conv6_3_norm_mbox_loc_perm" - type: "Permute" - bottom: "conv_blob52" - top: "conv6_3_norm_mbox_loc_perm" - permute_param { - order: 0 - order: 2 - order: 3 - order: 1 - } -} -layer { - name: "conv6_3_norm_mbox_loc_flat" - type: "Flatten" - bottom: "conv6_3_norm_mbox_loc_perm" - top: "conv6_3_norm_mbox_loc_flat" - flatten_param { - axis: 1 - } -} -layer { - name: "conv6_3_norm_mbox_conf_perm" - type: "Permute" - bottom: "conv_blob53" - top: "conv6_3_norm_mbox_conf_perm" - permute_param { - order: 0 - order: 2 - order: 3 - order: 1 - } -} -layer { - name: "conv6_3_norm_mbox_conf_flat" - type: "Flatten" - bottom: "conv6_3_norm_mbox_conf_perm" - top: "conv6_3_norm_mbox_conf_flat" - flatten_param { - axis: 1 - } -} -layer { - name: "conv6_3_norm_mbox_priorbox" - type: "PriorBox" - bottom: "relu_blob41" - bottom: "data" - top: 
"conv6_3_norm_mbox_priorbox" - prior_box_param { - min_size: 256.0 - min_size: 512.0 - clip: false - variance: 0.1 - variance: 0.1 - variance: 0.2 - variance: 0.2 - step: 32.0 - offset: 0.5 - } -} - -######################################################## -layer { - name: "mbox_loc" - type: "Concat" - bottom: "conv4_3_norm_mbox_loc_flat" - bottom: "conv5_3_norm_mbox_loc_flat" - bottom: "conv6_3_norm_mbox_loc_flat" - top: "mbox_loc" - concat_param { - axis: 1 - } -} -layer { - name: "mbox_conf" - type: "Concat" - bottom: "conv4_3_norm_mbox_conf_flat" - bottom: "conv5_3_norm_mbox_conf_flat" - bottom: "conv6_3_norm_mbox_conf_flat" - top: "mbox_conf" - concat_param { - axis: 1 - } -} -layer { - name: "mbox_priorbox" - type: "Concat" - bottom: "conv4_3_norm_mbox_priorbox" - bottom: "conv5_3_norm_mbox_priorbox" - bottom: "conv6_3_norm_mbox_priorbox" - top: "mbox_priorbox" - concat_param { - axis: 2 - } -} -layer { - name: "mbox_conf_reshape" - type: "Reshape" - bottom: "mbox_conf" - top: "mbox_conf_reshape" - reshape_param { - shape { - dim: 0 - dim: -1 - dim: 2 - } - } -} -layer { - name: "mbox_conf_softmax" - type: "Softmax" - bottom: "mbox_conf_reshape" - top: "mbox_conf_softmax" - softmax_param { - axis: 2 - } -} -layer { - name: "mbox_conf_flatten" - type: "Flatten" - bottom: "mbox_conf_softmax" - top: "mbox_conf_flatten" - flatten_param { - axis: 1 - } -} -layer { - name: "detection_out" - type: "DetectionOutput" - bottom: "mbox_loc" - bottom: "mbox_conf_flatten" - bottom: "mbox_priorbox" - top: "detection_out" - include { - phase: TEST - } - detection_output_param { - num_classes: 2 - share_location: true - background_label_id: 0 - nms_param { - nms_threshold: 0.3 - top_k: 400 - } - code_type: CENTER_SIZE - keep_top_k: 200 - confidence_threshold: 0.1 - } -} diff --git a/face_recognition1/face_detect/data/config.py b/face_recognition1/face_detect/data/config.py deleted file mode 100644 index 
527c8b3754fbc6e1187e4539bf5075cd0cea4952..0000000000000000000000000000000000000000 --- a/face_recognition1/face_detect/data/config.py +++ /dev/null @@ -1,14 +0,0 @@ -# config.py - -cfg = { - 'name': 'FaceBoxes', - #'min_dim': 1024, - #'feature_maps': [[32, 32], [16, 16], [8, 8]], - # 'aspect_ratios': [[1], [1], [1]], - 'min_sizes': [[32, 64, 128], [256], [512]], - 'steps': [32, 64, 128], - 'variance': [0.1, 0.2], - 'clip': False, - 'loc_weight': 2.0, - 'gpu_train': True -} diff --git a/face_recognition1/face_detect/layers/__init__.py b/face_recognition1/face_detect/layers/__init__.py deleted file mode 100644 index 53a3f4b5160995d93bc7911e808b3045d74362c9..0000000000000000000000000000000000000000 --- a/face_recognition1/face_detect/layers/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .functions import * -from .modules import * diff --git a/face_recognition1/face_detect/layers/functions/prior_box.py b/face_recognition1/face_detect/layers/functions/prior_box.py deleted file mode 100644 index e5536670afe139de420bc16bd88238fd2a90735b..0000000000000000000000000000000000000000 --- a/face_recognition1/face_detect/layers/functions/prior_box.py +++ /dev/null @@ -1,43 +0,0 @@ -import torch -from itertools import product as product -import numpy as np -from math import ceil - - -class PriorBox(object): - def __init__(self, cfg, image_size=None, phase='train'): - super(PriorBox, self).__init__() - #self.aspect_ratios = cfg['aspect_ratios'] - self.min_sizes = cfg['min_sizes'] - self.steps = cfg['steps'] - self.clip = cfg['clip'] - self.image_size = image_size - self.feature_maps = [[ceil(self.image_size[0]/step), ceil(self.image_size[1]/step)] for step in self.steps] - - def forward(self): - anchors = [] - for k, f in enumerate(self.feature_maps): - min_sizes = self.min_sizes[k] - for i, j in product(range(f[0]), range(f[1])): - for min_size in min_sizes: - s_kx = min_size / self.image_size[1] - s_ky = min_size / self.image_size[0] - if min_size == 32: - dense_cx = 
[x*self.steps[k]/self.image_size[1] for x in [j+0, j+0.25, j+0.5, j+0.75]] - dense_cy = [y*self.steps[k]/self.image_size[0] for y in [i+0, i+0.25, i+0.5, i+0.75]] - for cy, cx in product(dense_cy, dense_cx): - anchors += [cx, cy, s_kx, s_ky] - elif min_size == 64: - dense_cx = [x*self.steps[k]/self.image_size[1] for x in [j+0, j+0.5]] - dense_cy = [y*self.steps[k]/self.image_size[0] for y in [i+0, i+0.5]] - for cy, cx in product(dense_cy, dense_cx): - anchors += [cx, cy, s_kx, s_ky] - else: - cx = (j + 0.5) * self.steps[k] / self.image_size[1] - cy = (i + 0.5) * self.steps[k] / self.image_size[0] - anchors += [cx, cy, s_kx, s_ky] - # back to torch land - output = torch.Tensor(anchors).view(-1, 4) - if self.clip: - output.clamp_(max=1, min=0) - return output diff --git a/face_recognition1/face_detect/layers/modules/__init__.py b/face_recognition1/face_detect/layers/modules/__init__.py deleted file mode 100644 index cf24bddbf283f233d0b93fc074a2bac2f5c044a9..0000000000000000000000000000000000000000 --- a/face_recognition1/face_detect/layers/modules/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .multibox_loss import MultiBoxLoss - -__all__ = ['MultiBoxLoss'] diff --git a/face_recognition1/face_detect/layers/modules/multibox_loss.py b/face_recognition1/face_detect/layers/modules/multibox_loss.py deleted file mode 100644 index 01b4ec7c881424115e68a8cce2babef98fb79e17..0000000000000000000000000000000000000000 --- a/face_recognition1/face_detect/layers/modules/multibox_loss.py +++ /dev/null @@ -1,108 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.autograd import Variable -from utils.box_utils import match, log_sum_exp -from data.config import cfg -GPU = cfg['gpu_train'] - -class MultiBoxLoss(nn.Module): - """SSD Weighted Loss Function - Compute Targets: - 1) Produce Confidence Target Indices by matching ground truth boxes - with (default) 'priorboxes' that have jaccard index > threshold parameter - (default threshold: 0.5). 
- 2) Produce localization target by 'encoding' variance into offsets of ground - truth boxes and their matched 'priorboxes'. - 3) Hard negative mining to filter the excessive number of negative examples - that comes with using a large number of default bounding boxes. - (default negative:positive ratio 3:1) - Objective Loss: - L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N - Where, Lconf is the CrossEntropy Loss and Lloc is the SmoothL1 Loss - weighted by α which is set to 1 by cross val. - Args: - c: class confidences, - l: predicted boxes, - g: ground truth boxes - N: number of matched default boxes - See: https://arxiv.org/pdf/1512.02325.pdf for more details. - """ - - def __init__(self, num_classes, overlap_thresh, prior_for_matching, bkg_label, neg_mining, neg_pos, neg_overlap, encode_target): - super(MultiBoxLoss, self).__init__() - self.num_classes = num_classes - self.threshold = overlap_thresh - self.background_label = bkg_label - self.encode_target = encode_target - self.use_prior_for_matching = prior_for_matching - self.do_neg_mining = neg_mining - self.negpos_ratio = neg_pos - self.neg_overlap = neg_overlap - self.variance = [0.1, 0.2] - - def forward(self, predictions, priors, targets): - """Multibox Loss - Args: - predictions (tuple): A tuple containing loc preds, conf preds, - and prior boxes from SSD net. - conf shape: torch.size(batch_size,num_priors,num_classes) - loc shape: torch.size(batch_size,num_priors,4) - priors shape: torch.size(num_priors,4) - - ground_truth (tensor): Ground truth boxes and labels for a batch, - shape: [batch_size,num_objs,5] (last idx is the label). 
- """ - - loc_data, conf_data = predictions - priors = priors - num = loc_data.size(0) - num_priors = (priors.size(0)) - - # match priors (default boxes) and ground truth boxes - loc_t = torch.Tensor(num, num_priors, 4) - conf_t = torch.LongTensor(num, num_priors) - for idx in range(num): - truths = targets[idx][:, :-1].data - labels = targets[idx][:, -1].data - defaults = priors.data - match(self.threshold, truths, defaults, self.variance, labels, loc_t, conf_t, idx) - if GPU: - loc_t = loc_t.cuda() - conf_t = conf_t.cuda() - - pos = conf_t > 0 - - # Localization Loss (Smooth L1) - # Shape: [batch,num_priors,4] - pos_idx = pos.unsqueeze(pos.dim()).expand_as(loc_data) - loc_p = loc_data[pos_idx].view(-1, 4) - loc_t = loc_t[pos_idx].view(-1, 4) - loss_l = F.smooth_l1_loss(loc_p, loc_t, reduction='sum') - - # Compute max conf across batch for hard negative mining - batch_conf = conf_data.view(-1, self.num_classes) - loss_c = log_sum_exp(batch_conf) - batch_conf.gather(1, conf_t.view(-1, 1)) - - # Hard Negative Mining - loss_c[pos.view(-1, 1)] = 0 # filter out pos boxes for now - loss_c = loss_c.view(num, -1) - _, loss_idx = loss_c.sort(1, descending=True) - _, idx_rank = loss_idx.sort(1) - num_pos = pos.long().sum(1, keepdim=True) - num_neg = torch.clamp(self.negpos_ratio*num_pos, max=pos.size(1)-1) - neg = idx_rank < num_neg.expand_as(idx_rank) - - # Confidence Loss Including Positive and Negative Examples - pos_idx = pos.unsqueeze(2).expand_as(conf_data) - neg_idx = neg.unsqueeze(2).expand_as(conf_data) - conf_p = conf_data[(pos_idx+neg_idx).gt(0)].view(-1,self.num_classes) - targets_weighted = conf_t[(pos+neg).gt(0)] - loss_c = F.cross_entropy(conf_p, targets_weighted, reduction='sum') - - # Sum of losses: L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N - N = max(num_pos.data.sum().float(), 1) - loss_l /= N - loss_c /= N - - return loss_l, loss_c diff --git a/face_recognition1/face_detect/models/__init__.py b/face_recognition1/face_detect/models/__init__.py deleted 
file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/face_recognition1/face_detect/models/faceboxes.py b/face_recognition1/face_detect/models/faceboxes.py deleted file mode 100644 index a9e7c50a2553e850d404319772e7bf155b0c5b19..0000000000000000000000000000000000000000 --- a/face_recognition1/face_detect/models/faceboxes.py +++ /dev/null @@ -1,149 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F - - -class BasicConv2d(nn.Module): - - def __init__(self, in_channels, out_channels, **kwargs): - super(BasicConv2d, self).__init__() - self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs) - self.bn = nn.BatchNorm2d(out_channels, eps=1e-5) - - def forward(self, x): - x = self.conv(x) - x = self.bn(x) - return F.relu(x, inplace=True) - - -class Inception(nn.Module): - - def __init__(self): - super(Inception, self).__init__() - self.branch1x1 = BasicConv2d(128, 32, kernel_size=1, padding=0) - self.branch1x1_2 = BasicConv2d(128, 32, kernel_size=1, padding=0) - self.branch3x3_reduce = BasicConv2d(128, 24, kernel_size=1, padding=0) - self.branch3x3 = BasicConv2d(24, 32, kernel_size=3, padding=1) - self.branch3x3_reduce_2 = BasicConv2d(128, 24, kernel_size=1, padding=0) - self.branch3x3_2 = BasicConv2d(24, 32, kernel_size=3, padding=1) - self.branch3x3_3 = BasicConv2d(32, 32, kernel_size=3, padding=1) - - def forward(self, x): - branch1x1 = self.branch1x1(x) - - branch1x1_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1) - branch1x1_2 = self.branch1x1_2(branch1x1_pool) - - branch3x3_reduce = self.branch3x3_reduce(x) - branch3x3 = self.branch3x3(branch3x3_reduce) - - branch3x3_reduce_2 = self.branch3x3_reduce_2(x) - branch3x3_2 = self.branch3x3_2(branch3x3_reduce_2) - branch3x3_3 = self.branch3x3_3(branch3x3_2) - - outputs = [branch1x1, branch1x1_2, branch3x3, branch3x3_3] - return torch.cat(outputs, 1) - - -class CRelu(nn.Module): - - def __init__(self, 
in_channels, out_channels, **kwargs): - super(CRelu, self).__init__() - self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs) - self.bn = nn.BatchNorm2d(out_channels, eps=1e-5) - - def forward(self, x): - x = self.conv(x) - x = self.bn(x) - x = torch.cat([x, -x], 1) - x = F.relu(x, inplace=True) - return x - - -class FaceBoxes(nn.Module): - - def __init__(self, phase, size, num_classes): - super(FaceBoxes, self).__init__() - self.phase = phase - self.num_classes = num_classes - self.size = size - - self.conv1 = CRelu(3, 24, kernel_size=7, stride=4, padding=3) - self.conv2 = CRelu(48, 64, kernel_size=5, stride=2, padding=2) - - self.inception1 = Inception() - self.inception2 = Inception() - self.inception3 = Inception() - - self.conv3_1 = BasicConv2d(128, 128, kernel_size=1, stride=1, padding=0) - self.conv3_2 = BasicConv2d(128, 256, kernel_size=3, stride=2, padding=1) - - self.conv4_1 = BasicConv2d(256, 128, kernel_size=1, stride=1, padding=0) - self.conv4_2 = BasicConv2d(128, 256, kernel_size=3, stride=2, padding=1) - - self.loc, self.conf = self.multibox(self.num_classes) - - if self.phase == 'test': - self.softmax = nn.Softmax(dim=-1) - - if self.phase == 'train': - for m in self.modules(): - if isinstance(m, nn.Conv2d): - if m.bias is not None: - nn.init.xavier_normal_(m.weight.data) - m.bias.data.fill_(0.02) - else: - m.weight.data.normal_(0, 0.01) - elif isinstance(m, nn.BatchNorm2d): - m.weight.data.fill_(1) - m.bias.data.zero_() - - def multibox(self, num_classes): - loc_layers = [] - conf_layers = [] - loc_layers += [nn.Conv2d(128, 21 * 4, kernel_size=3, padding=1)] - conf_layers += [nn.Conv2d(128, 21 * num_classes, kernel_size=3, padding=1)] - loc_layers += [nn.Conv2d(256, 1 * 4, kernel_size=3, padding=1)] - conf_layers += [nn.Conv2d(256, 1 * num_classes, kernel_size=3, padding=1)] - loc_layers += [nn.Conv2d(256, 1 * 4, kernel_size=3, padding=1)] - conf_layers += [nn.Conv2d(256, 1 * num_classes, kernel_size=3, padding=1)] - return 
nn.Sequential(*loc_layers), nn.Sequential(*conf_layers) - - def forward(self, x): - - detection_sources = list() - loc = list() - conf = list() - - x = self.conv1(x) - x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1) - x = self.conv2(x) - x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1) - x = self.inception1(x) - x = self.inception2(x) - x = self.inception3(x) - detection_sources.append(x) - - x = self.conv3_1(x) - x = self.conv3_2(x) - detection_sources.append(x) - - x = self.conv4_1(x) - x = self.conv4_2(x) - detection_sources.append(x) - - for (x, l, c) in zip(detection_sources, self.loc, self.conf): - loc.append(l(x).permute(0, 2, 3, 1).contiguous()) - conf.append(c(x).permute(0, 2, 3, 1).contiguous()) - - loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1) - conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1) - - if self.phase == "test": - output = (loc.view(loc.size(0), -1, 4), - self.softmax(conf.view(conf.size(0), -1, self.num_classes))) - else: - output = (loc.view(loc.size(0), -1, 4), - conf.view(conf.size(0), -1, self.num_classes)) - - return output diff --git a/face_recognition1/face_detect/models/voc-model-labels.txt b/face_recognition1/face_detect/models/voc-model-labels.txt deleted file mode 100644 index f80c0695da0545cd00692a0f0efab3d3bda0b30e..0000000000000000000000000000000000000000 --- a/face_recognition1/face_detect/models/voc-model-labels.txt +++ /dev/null @@ -1,2 +0,0 @@ -BACKGROUND -face \ No newline at end of file diff --git a/face_recognition1/face_detect/test.py b/face_recognition1/face_detect/test.py deleted file mode 100644 index 8290f9e96073ad1b5855c6bc3f339c9cb3b51af4..0000000000000000000000000000000000000000 --- a/face_recognition1/face_detect/test.py +++ /dev/null @@ -1,197 +0,0 @@ -import os -import sys -sys.path.append(os.path.dirname(__file__)) - -import cv2 -import math -import torch -import torch.backends.cudnn as cudnn -import numpy as np -from data.config import cfg -from layers.functions.prior_box 
import PriorBox -from utils.nms_wrapper import nms -from models.faceboxes import FaceBoxes -from utils.box_utils import decode -from utils.timer import Timer - -trained_model = os.path.join(os.path.dirname(__file__), './checkpoints/FaceBoxesProd.pth') -save_folder = 'eval' -dataset = 'Custom' -confidence_threshold = 0.2 -top_k = 5000 -nms_threshold = 0.3 -keep_top_k = 750 -show_image = True -vis_thres = 0.5 - - -def check_keys(model, pretrained_state_dict): - ckpt_keys = set(pretrained_state_dict.keys()) - model_keys = set(model.state_dict().keys()) - used_pretrained_keys = model_keys & ckpt_keys - unused_pretrained_keys = ckpt_keys - model_keys - missing_keys = model_keys - ckpt_keys - print('Missing keys:{}'.format(len(missing_keys))) - print('Unused checkpoint keys:{}'.format(len(unused_pretrained_keys))) - print('Used keys:{}'.format(len(used_pretrained_keys))) - assert len(used_pretrained_keys) > 0, 'load NONE from pretrained checkpoint' - return True - - -def remove_prefix(state_dict, prefix): - """ Old style model is stored with all names of parameters sharing common prefix 'module.' 
""" - print('remove prefix \'{}\''.format(prefix)) - f = lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x - return {f(key): value for key, value in state_dict.items()} - - -def load_model(model, pretrained_path, device): - print('Loading pretrained model from {}'.format(pretrained_path)) - pretrained_dict = torch.load(pretrained_path, map_location=device) - - if "state_dict" in pretrained_dict.keys(): - pretrained_dict = remove_prefix(pretrained_dict['state_dict'], 'module.') - else: - pretrained_dict = remove_prefix(pretrained_dict, 'module.') - check_keys(model, pretrained_dict) - model.load_state_dict(pretrained_dict, strict=False) - return model - - -torch.set_grad_enabled(False) -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") -net = FaceBoxes(phase='test', size=None, num_classes=2) -net = load_model(net, trained_model, device) -net.eval() -cudnn.benchmark = True -net = net.to(device) - - -def get_bbox(orig_image): - # testing scale - resize = 0.5 - - _t = {'forward_pass': Timer(), 'misc': Timer()} - - img_raw = orig_image - img = np.float32(img_raw) - if resize != 1: - img = cv2.resize(img, None, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR) - im_height, im_width, _ = img.shape - scale = torch.Tensor([img.shape[1], img.shape[0], img.shape[1], img.shape[0]]) - img -= (104, 117, 123) - img = img.transpose(2, 0, 1) - img = torch.from_numpy(img).unsqueeze(0) - img = img.to(device) - scale = scale.to(device) - - _t['forward_pass'].tic() - loc, conf = net(img) # forward pass - _t['forward_pass'].toc() - _t['misc'].tic() - priorbox = PriorBox(cfg, image_size=(im_height, im_width)) - priors = priorbox.forward() - priors = priors.to(device) - prior_data = priors.data - boxes = decode(loc.data.squeeze(0), prior_data, cfg['variance']) - boxes = boxes * scale / resize - boxes = boxes.cpu().numpy() - scores = conf.squeeze(0).data.cpu().numpy()[:, 1] - - # ignore low scores - inds = np.where(scores > 
confidence_threshold)[0] - boxes = boxes[inds] - scores = scores[inds] - - # keep top-K before NMS - order = scores.argsort()[::-1][:top_k] - boxes = boxes[order] - scores = scores[order] - - # do NMS - dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False) - #keep = py_cpu_nms(dets, nms_threshold) - keep = nms(dets, nms_threshold, force_cpu=True) - dets = dets[keep, :] - - # keep top-K faster NMS - dets = dets[:keep_top_k, :] - _t['misc'].toc() - - boxes, scores = [], [] - for k in range(dets.shape[0]): - xmin = dets[k, 0] - ymin = dets[k, 1] - xmax = dets[k, 2] - ymax = dets[k, 3] - ymin += 0.2 * (ymax - ymin + 1) - score = dets[k, 4] - boxes.append([int(xmin), int(ymin), int(xmax - xmin), int(ymax - ymin)]) - scores.append(score) - - max_score = 0.0 - final_box = None - for i, score in enumerate(scores): - if max_score < score: - max_score = score - final_box = boxes[i] - - return final_box - - -class Detection: - def __init__(self): - src_dir = os.path.dirname(__file__) - if not os.path.exists(os.path.join(src_dir, "checkpoints")): - os.makedirs(os.path.join(src_dir, "checkpoints")) - - caffemodel = os.path.join(src_dir, "checkpoints/Widerface-RetinaFace.caffemodel") - deploy = os.path.join(src_dir, "checkpoints/deploy.prototxt") - - self.detector = cv2.dnn.readNetFromCaffe(deploy, caffemodel) - self.detector_confidence = 0.6 - - def get_bbox(self, img): - height, width = img.shape[0], img.shape[1] - aspect_ratio = width / height - if img.shape[1] * img.shape[0] >= 192 * 192: - img = cv2.resize(img, - (int(192 * math.sqrt(aspect_ratio)), - int(192 / math.sqrt(aspect_ratio))), interpolation=cv2.INTER_LINEAR) - - blob = cv2.dnn.blobFromImage(img, 1, mean=(104, 117, 123)) - self.detector.setInput(blob, 'data') - out = self.detector.forward('detection_out').squeeze() - max_conf_index = np.argmax(out[:, 2]) - left, top, right, bottom = out[max_conf_index, 3]*width, out[max_conf_index, 4]*height, \ - out[max_conf_index, 5]*width, 
out[max_conf_index, 6]*height - - if right == left or bottom == top: - return None - - bbox = [int(left), int(top), int(right-left+1), int(bottom-top+1)] - return bbox - - def check_face(self): - pass - - -if __name__ == '__main__': - - # image = cv2.imread('arun_2.jpg') - - # box = get_bbox(image) - # cv2.rectangle(image, (box[0], box[1]), (box[2], box[3]), (0, 0, 255), 2) - # - src_dir = 'D:/19.Database/office_angled_db' - dst_dir = 'D:/19.Database/office_angled_db_result' - detector = Detection() - - for file in os.listdir(src_dir): - image1 = cv2.imread(os.path.join(src_dir, file)) - box = detector.get_bbox(image1) - if box: - cv2.rectangle(image1, (box[0], box[1]), (box[0] + box[2], box[1] + box[3]), (0, 0, 255), 5) - - cv2.imwrite(os.path.join(dst_dir, file), image1) - # cv2.waitKey(0) diff --git a/face_recognition1/face_detect/utils/__init__.py b/face_recognition1/face_detect/utils/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/face_recognition1/face_detect/utils/box_utils.py b/face_recognition1/face_detect/utils/box_utils.py deleted file mode 100644 index 4797f1d7498cc35499c9b86a35c0754eb16e5a60..0000000000000000000000000000000000000000 --- a/face_recognition1/face_detect/utils/box_utils.py +++ /dev/null @@ -1,276 +0,0 @@ -import torch -import numpy as np - - -def point_form(boxes): - """ Convert prior_boxes to (xmin, ymin, xmax, ymax) - representation for comparison to point form ground truth data. - Args: - boxes: (tensor) center-size default boxes from priorbox layers. - Return: - boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes. - """ - return torch.cat((boxes[:, :2] - boxes[:, 2:]/2, # xmin, ymin - boxes[:, :2] + boxes[:, 2:]/2), 1) # xmax, ymax - - -def center_size(boxes): - """ Convert prior_boxes to (cx, cy, w, h) - representation for comparison to center-size form ground truth data. 
- Args: - boxes: (tensor) point_form boxes - Return: - boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes. - """ - return torch.cat((boxes[:, 2:] + boxes[:, :2])/2, # cx, cy - boxes[:, 2:] - boxes[:, :2], 1) # w, h - - -def intersect(box_a, box_b): - """ We resize both tensors to [A,B,2] without new malloc: - [A,2] -> [A,1,2] -> [A,B,2] - [B,2] -> [1,B,2] -> [A,B,2] - Then we compute the area of intersect between box_a and box_b. - Args: - box_a: (tensor) bounding boxes, Shape: [A,4]. - box_b: (tensor) bounding boxes, Shape: [B,4]. - Return: - (tensor) intersection area, Shape: [A,B]. - """ - A = box_a.size(0) - B = box_b.size(0) - max_xy = torch.min(box_a[:, 2:].unsqueeze(1).expand(A, B, 2), - box_b[:, 2:].unsqueeze(0).expand(A, B, 2)) - min_xy = torch.max(box_a[:, :2].unsqueeze(1).expand(A, B, 2), - box_b[:, :2].unsqueeze(0).expand(A, B, 2)) - inter = torch.clamp((max_xy - min_xy), min=0) - return inter[:, :, 0] * inter[:, :, 1] - - -def jaccard(box_a, box_b): - """Compute the jaccard overlap of two sets of boxes. The jaccard overlap - is simply the intersection over union of two boxes. Here we operate on - ground truth boxes and default boxes. 
- E.g.: - A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B) - Args: - box_a: (tensor) Ground truth bounding boxes, Shape: [num_objects,4] - box_b: (tensor) Prior boxes from priorbox layers, Shape: [num_priors,4] - Return: - jaccard overlap: (tensor) Shape: [box_a.size(0), box_b.size(0)] - """ - inter = intersect(box_a, box_b) - area_a = ((box_a[:, 2]-box_a[:, 0]) * - (box_a[:, 3]-box_a[:, 1])).unsqueeze(1).expand_as(inter) # [A,B] - area_b = ((box_b[:, 2]-box_b[:, 0]) * - (box_b[:, 3]-box_b[:, 1])).unsqueeze(0).expand_as(inter) # [A,B] - union = area_a + area_b - inter - return inter / union # [A,B] - - -def matrix_iou(a, b): - """ - return iou of a and b, numpy version for data augenmentation - """ - lt = np.maximum(a[:, np.newaxis, :2], b[:, :2]) - rb = np.minimum(a[:, np.newaxis, 2:], b[:, 2:]) - - area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2) - area_a = np.prod(a[:, 2:] - a[:, :2], axis=1) - area_b = np.prod(b[:, 2:] - b[:, :2], axis=1) - return area_i / (area_a[:, np.newaxis] + area_b - area_i) - - -def matrix_iof(a, b): - """ - return iof of a and b, numpy version for data augenmentation - """ - lt = np.maximum(a[:, np.newaxis, :2], b[:, :2]) - rb = np.minimum(a[:, np.newaxis, 2:], b[:, 2:]) - - area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2) - area_a = np.prod(a[:, 2:] - a[:, :2], axis=1) - return area_i / np.maximum(area_a[:, np.newaxis], 1) - - -def match(threshold, truths, priors, variances, labels, loc_t, conf_t, idx): - """Match each prior box with the ground truth box of the highest jaccard - overlap, encode the bounding boxes, then return the matched indices - corresponding to both confidence and location preds. - Args: - threshold: (float) The overlap threshold used when mathing boxes. - truths: (tensor) Ground truth boxes, Shape: [num_obj, num_priors]. - priors: (tensor) Prior boxes from priorbox layers, Shape: [n_priors,4]. - variances: (tensor) Variances corresponding to each prior coord, - Shape: [num_priors, 4]. 
- labels: (tensor) All the class labels for the image, Shape: [num_obj]. - loc_t: (tensor) Tensor to be filled w/ endcoded location targets. - conf_t: (tensor) Tensor to be filled w/ matched indices for conf preds. - idx: (int) current batch index - Return: - The matched indices corresponding to 1)location and 2)confidence preds. - """ - # jaccard index - overlaps = jaccard( - truths, - point_form(priors) - ) - # (Bipartite Matching) - # [1,num_objects] best prior for each ground truth - best_prior_overlap, best_prior_idx = overlaps.max(1, keepdim=True) - - # ignore hard gt - valid_gt_idx = best_prior_overlap[:, 0] >= 0.2 - best_prior_idx_filter = best_prior_idx[valid_gt_idx, :] - if best_prior_idx_filter.shape[0] <= 0: - loc_t[idx] = 0 - conf_t[idx] = 0 - return - - # [1,num_priors] best ground truth for each prior - best_truth_overlap, best_truth_idx = overlaps.max(0, keepdim=True) - best_truth_idx.squeeze_(0) - best_truth_overlap.squeeze_(0) - best_prior_idx.squeeze_(1) - best_prior_idx_filter.squeeze_(1) - best_prior_overlap.squeeze_(1) - best_truth_overlap.index_fill_(0, best_prior_idx_filter, 2) # ensure best prior - # TODO refactor: index best_prior_idx with long tensor - # ensure every gt matches with its prior of max overlap - for j in range(best_prior_idx.size(0)): - best_truth_idx[best_prior_idx[j]] = j - matches = truths[best_truth_idx] # Shape: [num_priors,4] - conf = labels[best_truth_idx] # Shape: [num_priors] - conf[best_truth_overlap < threshold] = 0 # label as background - loc = encode(matches, priors, variances) - loc_t[idx] = loc # [num_priors,4] encoded offsets to learn - conf_t[idx] = conf # [num_priors] top class label for each prior - - -def encode(matched, priors, variances): - """Encode the variances from the priorbox layers into the ground truth boxes - we have matched (based on jaccard overlap) with the prior boxes. - Args: - matched: (tensor) Coords of ground truth for each prior in point-form - Shape: [num_priors, 4]. 
def encode(matched, priors, variances):
    """Encode matched ground-truth boxes relative to prior boxes.

    Args:
        matched: (tensor) Coords of ground truth for each prior in point-form,
            Shape: [num_priors, 4].
        priors: (tensor) Prior boxes in center-offset form,
            Shape: [num_priors, 4].
        variances: (list[float]) Variances of priorboxes.
    Return:
        encoded boxes (tensor), Shape: [num_priors, 4]
    """
    # Offset of the gt center from the prior center, scaled by prior size
    # and the first variance term.
    delta_center = (matched[:, :2] + matched[:, 2:]) / 2 - priors[:, :2]
    delta_center = delta_center / (variances[0] * priors[:, 2:])
    # Log-space ratio of gt size to prior size, scaled by the second variance.
    size_ratio = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]
    log_size = torch.log(size_ratio) / variances[1]
    # Target for smooth_l1_loss: [num_priors, 4]
    return torch.cat([delta_center, log_size], 1)


# Adapted from https://github.com/Hakuyume/chainer-ssd
def decode(loc, priors, variances):
    """Decode location predictions back into point-form boxes,
    undoing the encoding done for offset regression at train time.

    Args:
        loc (tensor): location predictions for loc layers,
            Shape: [num_priors, 4].
        priors (tensor): Prior boxes in center-offset form,
            Shape: [num_priors, 4].
        variances: (list[float]) Variances of priorboxes.
    Return:
        decoded bounding box predictions (point-form), Shape: [num_priors, 4]
    """
    centers = priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:]
    sizes = priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])
    boxes = torch.cat((centers, sizes), 1)
    # Convert (cx, cy, w, h) -> (x1, y1, x2, y2) in place.
    boxes[:, :2] -= boxes[:, 2:] / 2
    boxes[:, 2:] += boxes[:, :2]
    return boxes


def log_sum_exp(x):
    """Numerically stable log-sum-exp over dim 1, used for the unaveraged
    confidence loss across all examples in a batch.

    Args:
        x (tensor): conf_preds from conf layers.
    """
    x_max = x.data.max()
    shifted = torch.exp(x - x_max)
    return torch.log(torch.sum(shifted, 1, keepdim=True)) + x_max
# Original author: Francisco Massa:
# https://github.com/fmassa/object-detection.torch
# Ported to PyTorch by Max deGroot (02/01/2017)
def nms(boxes, scores, overlap=0.5, top_k=200):
    """Apply non-maximum suppression at test time to avoid detecting too many
    overlapping bounding boxes for a given object.

    Args:
        boxes: (tensor) The location preds for the img, Shape: [num_priors,4].
        scores: (tensor) The class predscores for the img, Shape:[num_priors].
        overlap: (float) The overlap thresh for suppressing unnecessary boxes.
        top_k: (int) The Maximum number of box preds to consider.
    Return:
        (keep, count): `keep` holds indices of retained boxes w.r.t.
        num_priors; only the first `count` entries are meaningful (the rest
        is zero padding).
    """
    keep = torch.Tensor(scores.size(0)).fill_(0).long()
    if boxes.numel() == 0:
        # BUG FIX: this path used to `return keep` (one value) while the
        # normal path returns a pair, so callers unpacking `keep, count`
        # crashed on empty detections. Return a consistent (keep, 0) pair.
        return keep, 0
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]
    area = torch.mul(x2 - x1, y2 - y1)
    v, idx = scores.sort(0)  # sort in ascending order
    idx = idx[-top_k:]  # indices of the top-k largest vals
    # Scratch buffers reused across iterations via out= / resize_as_.
    xx1 = boxes.new()
    yy1 = boxes.new()
    xx2 = boxes.new()
    yy2 = boxes.new()
    w = boxes.new()
    h = boxes.new()

    count = 0
    while idx.numel() > 0:
        i = idx[-1]  # index of current largest val
        keep[count] = i
        count += 1
        if idx.size(0) == 1:
            break
        idx = idx[:-1]  # remove kept element from view
        # load bboxes of next highest vals
        torch.index_select(x1, 0, idx, out=xx1)
        torch.index_select(y1, 0, idx, out=yy1)
        torch.index_select(x2, 0, idx, out=xx2)
        torch.index_select(y2, 0, idx, out=yy2)
        # Element-wise max/min against the kept box => intersection corners.
        xx1 = torch.clamp(xx1, min=x1[i])
        yy1 = torch.clamp(yy1, min=y1[i])
        xx2 = torch.clamp(xx2, max=x2[i])
        yy2 = torch.clamp(yy2, max=y2[i])
        w.resize_as_(xx2)
        h.resize_as_(yy2)
        w = xx2 - xx1
        h = yy2 - yy1
        # Clamp negative extents: disjoint boxes contribute zero area.
        w = torch.clamp(w, min=0.0)
        h = torch.clamp(h, min=0.0)
        inter = w * h
        # IoU = i / (area(a) + area(b) - i)
        rem_areas = torch.index_select(area, 0, idx)  # load remaining areas
        union = (rem_areas - inter) + area[i]
        IoU = inter / union  # store result in iou
        # keep only elements with an IoU <= overlap
        idx = idx[IoU.le(overlap)]
    return keep, count
def find_in_path(name, path):
    """Return the absolute path of *name* inside the first directory of
    *path* (an os.pathsep-separated search list) that contains it, or
    None if no directory does.
    """
    # adapted from http://code.activestate.com/recipes/52224-find-a-file-given-a-search-path/
    candidates = (pjoin(directory, name) for directory in path.split(os.pathsep))
    for candidate in candidates:
        if os.path.exists(candidate):
            return os.path.abspath(candidate)
    return None
def locate_cuda():
    """Locate the CUDA environment on the system.

    Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64'
    and values giving the absolute path to each directory.

    Starts by looking for the CUDAHOME env variable. If not found, everything
    is based on finding 'nvcc' in the PATH.

    Raises:
        EnvironmentError: if nvcc cannot be found, or if any expected
            CUDA subdirectory does not exist.
    """

    # first check if the CUDAHOME env variable is in use
    if 'CUDAHOME' in os.environ:
        home = os.environ['CUDAHOME']
        nvcc = pjoin(home, 'bin', 'nvcc')
    else:
        # otherwise, search the PATH for NVCC
        default_path = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin')
        nvcc = find_in_path('nvcc', os.environ['PATH'] + os.pathsep + default_path)
        if nvcc is None:
            raise EnvironmentError('The nvcc binary could not be '
                                   'located in your $PATH. Either add it to your path, or set $CUDAHOME')
        # CUDA home is two levels above the nvcc binary (…/cuda/bin/nvcc).
        home = os.path.dirname(os.path.dirname(nvcc))

    cudaconfig = {'home': home, 'nvcc': nvcc,
                  'include': pjoin(home, 'include'),
                  'lib64': pjoin(home, 'lib64')}
    # Validate every discovered path before handing the config back.
    for k, v in cudaconfig.items():
        if not os.path.exists(v):
            raise EnvironmentError('The CUDA %s path could not be located in %s' % (k, v))

    return cudaconfig


# Module-level side effect: importing this build script fails outright on
# machines without a CUDA toolchain.
CUDA = locate_cuda()

# Obtain the numpy include directory. This logic works across numpy versions.
try:
    numpy_include = np.get_include()
except AttributeError:
    # Very old NumPy releases exposed get_numpy_include() instead.
    numpy_include = np.get_numpy_include()
def customize_compiler_for_nvcc(self):
    """inject deep into distutils to customize how the dispatch
    to gcc/nvcc works.

    If you subclass UnixCCompiler, it's not trivial to get your subclass
    injected in, and still have the right customizations (i.e.
    distutils.sysconfig.customize_compiler) run on it. So instead of going
    the OO route, I have this. Note, it's kindof like a wierd functional
    subclassing going on."""

    # tell the compiler it can processes .cu
    self.src_extensions.append('.cu')

    # save references to the default compiler_so and _comple methods
    default_compiler_so = self.compiler_so
    super = self._compile  # NOTE(review): deliberately shadows the builtin `super`

    # now redefine the _compile method. This gets executed for each
    # object but distutils doesn't have the ability to change compilers
    # based on source extension: we add it.
    def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
        print(extra_postargs)
        if os.path.splitext(src)[1] == '.cu':
            # use the cuda for .cu files
            self.set_executable('compiler_so', CUDA['nvcc'])
            # use only a subset of the extra_postargs, which are 1-1 translated
            # from the extra_compile_args in the Extension class
            # (assumes extra_postargs is the dict form — TODO confirm for all callers)
            postargs = extra_postargs['nvcc']
        else:
            postargs = extra_postargs['gcc']

        super(obj, src, ext, cc_args, postargs, pp_opts)
        # reset the default compiler_so, which we might have changed for cuda
        self.compiler_so = default_compiler_so

    # inject our redefined _compile method into the class
    self._compile = _compile


# run the customize_compiler
class custom_build_ext(build_ext):
    # build_ext subclass that patches the compiler before building,
    # so .cu sources are routed to nvcc.
    def build_extensions(self):
        customize_compiler_for_nvcc(self.compiler)
        build_ext.build_extensions(self)


# Extension definitions: a CPU NMS module (plain gcc) and a GPU NMS module
# (CUDA kernel + Cython wrapper, linked against cudart).
ext_modules = [
    Extension(
        "nms.cpu_nms",
        ["nms/cpu_nms.pyx"],
        extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
        include_dirs=[numpy_include]
    ),
    Extension('nms.gpu_nms',
              ['nms/nms_kernel.cu', 'nms/gpu_nms.pyx'],
              library_dirs=[CUDA['lib64']],
              libraries=['cudart'],
              language='c++',
              runtime_library_dirs=[CUDA['lib64']],
              # this syntax is specific to this build system
              # we're only going to use certain compiler args with nvcc and not with gcc
              # the implementation of this trick is in customize_compiler() below
              extra_compile_args={'gcc': ["-Wno-unused-function"],
                                  'nvcc': ['-arch=sm_52',
                                           '--ptxas-options=-v',
                                           '-c',
                                           '--compiler-options',
                                           "'-fPIC'"]},
              include_dirs=[numpy_include, CUDA['include']]
              ),
]

setup(
    name='mot_utils',
    ext_modules=ext_modules,
    # inject our custom trigger
    cmdclass={'build_ext': custom_build_ext},
)
51907cca4dcef1c4ff5eb6fe8590cba4fca9b5ab..0000000000000000000000000000000000000000 Binary files a/face_recognition1/face_detect/utils/build/temp.linux-x86_64-3.6/nms/cpu_nms.o and /dev/null differ diff --git a/face_recognition1/face_detect/utils/build/temp.linux-x86_64-3.6/nms/gpu_nms.o b/face_recognition1/face_detect/utils/build/temp.linux-x86_64-3.6/nms/gpu_nms.o deleted file mode 100644 index 95841b47abd4801dc367aacd0a2fc9eb1f011029..0000000000000000000000000000000000000000 Binary files a/face_recognition1/face_detect/utils/build/temp.linux-x86_64-3.6/nms/gpu_nms.o and /dev/null differ diff --git a/face_recognition1/face_detect/utils/build/temp.linux-x86_64-3.6/nms/nms_kernel.o b/face_recognition1/face_detect/utils/build/temp.linux-x86_64-3.6/nms/nms_kernel.o deleted file mode 100644 index 2f23d46f1a2d4aad7fbe1a07cb8190c08f9f3b41..0000000000000000000000000000000000000000 Binary files a/face_recognition1/face_detect/utils/build/temp.linux-x86_64-3.6/nms/nms_kernel.o and /dev/null differ diff --git a/face_recognition1/face_detect/utils/nms/cpu_nms.c b/face_recognition1/face_detect/utils/nms/cpu_nms.c deleted file mode 100644 index fbdca64d9f140721f8f052cff069aac81701aa51..0000000000000000000000000000000000000000 --- a/face_recognition1/face_detect/utils/nms/cpu_nms.c +++ /dev/null @@ -1,9623 +0,0 @@ -/* Generated by Cython 0.29.24 */ - -#ifndef PY_SSIZE_T_CLEAN -#define PY_SSIZE_T_CLEAN -#endif /* PY_SSIZE_T_CLEAN */ -#include "Python.h" -#ifndef Py_PYTHON_H - #error Python headers needed to compile C extensions, please install development version of Python. -#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) - #error Cython requires Python 2.6+ or Python 3.3+. 
-#else -#define CYTHON_ABI "0_29_24" -#define CYTHON_HEX_VERSION 0x001D18F0 -#define CYTHON_FUTURE_DIVISION 0 -#include -#ifndef offsetof - #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) -#endif -#if !defined(WIN32) && !defined(MS_WINDOWS) - #ifndef __stdcall - #define __stdcall - #endif - #ifndef __cdecl - #define __cdecl - #endif - #ifndef __fastcall - #define __fastcall - #endif -#endif -#ifndef DL_IMPORT - #define DL_IMPORT(t) t -#endif -#ifndef DL_EXPORT - #define DL_EXPORT(t) t -#endif -#define __PYX_COMMA , -#ifndef HAVE_LONG_LONG - #if PY_VERSION_HEX >= 0x02070000 - #define HAVE_LONG_LONG - #endif -#endif -#ifndef PY_LONG_LONG - #define PY_LONG_LONG LONG_LONG -#endif -#ifndef Py_HUGE_VAL - #define Py_HUGE_VAL HUGE_VAL -#endif -#ifdef PYPY_VERSION - #define CYTHON_COMPILING_IN_PYPY 1 - #define CYTHON_COMPILING_IN_PYSTON 0 - #define CYTHON_COMPILING_IN_CPYTHON 0 - #undef CYTHON_USE_TYPE_SLOTS - #define CYTHON_USE_TYPE_SLOTS 0 - #undef CYTHON_USE_PYTYPE_LOOKUP - #define CYTHON_USE_PYTYPE_LOOKUP 0 - #if PY_VERSION_HEX < 0x03050000 - #undef CYTHON_USE_ASYNC_SLOTS - #define CYTHON_USE_ASYNC_SLOTS 0 - #elif !defined(CYTHON_USE_ASYNC_SLOTS) - #define CYTHON_USE_ASYNC_SLOTS 1 - #endif - #undef CYTHON_USE_PYLIST_INTERNALS - #define CYTHON_USE_PYLIST_INTERNALS 0 - #undef CYTHON_USE_UNICODE_INTERNALS - #define CYTHON_USE_UNICODE_INTERNALS 0 - #undef CYTHON_USE_UNICODE_WRITER - #define CYTHON_USE_UNICODE_WRITER 0 - #undef CYTHON_USE_PYLONG_INTERNALS - #define CYTHON_USE_PYLONG_INTERNALS 0 - #undef CYTHON_AVOID_BORROWED_REFS - #define CYTHON_AVOID_BORROWED_REFS 1 - #undef CYTHON_ASSUME_SAFE_MACROS - #define CYTHON_ASSUME_SAFE_MACROS 0 - #undef CYTHON_UNPACK_METHODS - #define CYTHON_UNPACK_METHODS 0 - #undef CYTHON_FAST_THREAD_STATE - #define CYTHON_FAST_THREAD_STATE 0 - #undef CYTHON_FAST_PYCALL - #define CYTHON_FAST_PYCALL 0 - #undef CYTHON_PEP489_MULTI_PHASE_INIT - #define CYTHON_PEP489_MULTI_PHASE_INIT 0 - #undef CYTHON_USE_TP_FINALIZE - #define 
CYTHON_USE_TP_FINALIZE 0 - #undef CYTHON_USE_DICT_VERSIONS - #define CYTHON_USE_DICT_VERSIONS 0 - #undef CYTHON_USE_EXC_INFO_STACK - #define CYTHON_USE_EXC_INFO_STACK 0 -#elif defined(PYSTON_VERSION) - #define CYTHON_COMPILING_IN_PYPY 0 - #define CYTHON_COMPILING_IN_PYSTON 1 - #define CYTHON_COMPILING_IN_CPYTHON 0 - #ifndef CYTHON_USE_TYPE_SLOTS - #define CYTHON_USE_TYPE_SLOTS 1 - #endif - #undef CYTHON_USE_PYTYPE_LOOKUP - #define CYTHON_USE_PYTYPE_LOOKUP 0 - #undef CYTHON_USE_ASYNC_SLOTS - #define CYTHON_USE_ASYNC_SLOTS 0 - #undef CYTHON_USE_PYLIST_INTERNALS - #define CYTHON_USE_PYLIST_INTERNALS 0 - #ifndef CYTHON_USE_UNICODE_INTERNALS - #define CYTHON_USE_UNICODE_INTERNALS 1 - #endif - #undef CYTHON_USE_UNICODE_WRITER - #define CYTHON_USE_UNICODE_WRITER 0 - #undef CYTHON_USE_PYLONG_INTERNALS - #define CYTHON_USE_PYLONG_INTERNALS 0 - #ifndef CYTHON_AVOID_BORROWED_REFS - #define CYTHON_AVOID_BORROWED_REFS 0 - #endif - #ifndef CYTHON_ASSUME_SAFE_MACROS - #define CYTHON_ASSUME_SAFE_MACROS 1 - #endif - #ifndef CYTHON_UNPACK_METHODS - #define CYTHON_UNPACK_METHODS 1 - #endif - #undef CYTHON_FAST_THREAD_STATE - #define CYTHON_FAST_THREAD_STATE 0 - #undef CYTHON_FAST_PYCALL - #define CYTHON_FAST_PYCALL 0 - #undef CYTHON_PEP489_MULTI_PHASE_INIT - #define CYTHON_PEP489_MULTI_PHASE_INIT 0 - #undef CYTHON_USE_TP_FINALIZE - #define CYTHON_USE_TP_FINALIZE 0 - #undef CYTHON_USE_DICT_VERSIONS - #define CYTHON_USE_DICT_VERSIONS 0 - #undef CYTHON_USE_EXC_INFO_STACK - #define CYTHON_USE_EXC_INFO_STACK 0 -#else - #define CYTHON_COMPILING_IN_PYPY 0 - #define CYTHON_COMPILING_IN_PYSTON 0 - #define CYTHON_COMPILING_IN_CPYTHON 1 - #ifndef CYTHON_USE_TYPE_SLOTS - #define CYTHON_USE_TYPE_SLOTS 1 - #endif - #if PY_VERSION_HEX < 0x02070000 - #undef CYTHON_USE_PYTYPE_LOOKUP - #define CYTHON_USE_PYTYPE_LOOKUP 0 - #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) - #define CYTHON_USE_PYTYPE_LOOKUP 1 - #endif - #if PY_MAJOR_VERSION < 3 - #undef CYTHON_USE_ASYNC_SLOTS - #define CYTHON_USE_ASYNC_SLOTS 0 - 
#elif !defined(CYTHON_USE_ASYNC_SLOTS) - #define CYTHON_USE_ASYNC_SLOTS 1 - #endif - #if PY_VERSION_HEX < 0x02070000 - #undef CYTHON_USE_PYLONG_INTERNALS - #define CYTHON_USE_PYLONG_INTERNALS 0 - #elif !defined(CYTHON_USE_PYLONG_INTERNALS) - #define CYTHON_USE_PYLONG_INTERNALS 1 - #endif - #ifndef CYTHON_USE_PYLIST_INTERNALS - #define CYTHON_USE_PYLIST_INTERNALS 1 - #endif - #ifndef CYTHON_USE_UNICODE_INTERNALS - #define CYTHON_USE_UNICODE_INTERNALS 1 - #endif - #if PY_VERSION_HEX < 0x030300F0 - #undef CYTHON_USE_UNICODE_WRITER - #define CYTHON_USE_UNICODE_WRITER 0 - #elif !defined(CYTHON_USE_UNICODE_WRITER) - #define CYTHON_USE_UNICODE_WRITER 1 - #endif - #ifndef CYTHON_AVOID_BORROWED_REFS - #define CYTHON_AVOID_BORROWED_REFS 0 - #endif - #ifndef CYTHON_ASSUME_SAFE_MACROS - #define CYTHON_ASSUME_SAFE_MACROS 1 - #endif - #ifndef CYTHON_UNPACK_METHODS - #define CYTHON_UNPACK_METHODS 1 - #endif - #ifndef CYTHON_FAST_THREAD_STATE - #define CYTHON_FAST_THREAD_STATE 1 - #endif - #ifndef CYTHON_FAST_PYCALL - #define CYTHON_FAST_PYCALL 1 - #endif - #ifndef CYTHON_PEP489_MULTI_PHASE_INIT - #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000) - #endif - #ifndef CYTHON_USE_TP_FINALIZE - #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) - #endif - #ifndef CYTHON_USE_DICT_VERSIONS - #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1) - #endif - #ifndef CYTHON_USE_EXC_INFO_STACK - #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3) - #endif -#endif -#if !defined(CYTHON_FAST_PYCCALL) -#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) -#endif -#if CYTHON_USE_PYLONG_INTERNALS - #include "longintrepr.h" - #undef SHIFT - #undef BASE - #undef MASK - #ifdef SIZEOF_VOID_P - enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; - #endif -#endif -#ifndef __has_attribute - #define __has_attribute(x) 0 -#endif -#ifndef __has_cpp_attribute - #define __has_cpp_attribute(x) 0 -#endif 
-#ifndef CYTHON_RESTRICT - #if defined(__GNUC__) - #define CYTHON_RESTRICT __restrict__ - #elif defined(_MSC_VER) && _MSC_VER >= 1400 - #define CYTHON_RESTRICT __restrict - #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L - #define CYTHON_RESTRICT restrict - #else - #define CYTHON_RESTRICT - #endif -#endif -#ifndef CYTHON_UNUSED -# if defined(__GNUC__) -# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) -# define CYTHON_UNUSED __attribute__ ((__unused__)) -# else -# define CYTHON_UNUSED -# endif -# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) -# define CYTHON_UNUSED __attribute__ ((__unused__)) -# else -# define CYTHON_UNUSED -# endif -#endif -#ifndef CYTHON_MAYBE_UNUSED_VAR -# if defined(__cplusplus) - template void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } -# else -# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) -# endif -#endif -#ifndef CYTHON_NCP_UNUSED -# if CYTHON_COMPILING_IN_CPYTHON -# define CYTHON_NCP_UNUSED -# else -# define CYTHON_NCP_UNUSED CYTHON_UNUSED -# endif -#endif -#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) -#ifdef _MSC_VER - #ifndef _MSC_STDINT_H_ - #if _MSC_VER < 1300 - typedef unsigned char uint8_t; - typedef unsigned int uint32_t; - #else - typedef unsigned __int8 uint8_t; - typedef unsigned __int32 uint32_t; - #endif - #endif -#else - #include -#endif -#ifndef CYTHON_FALLTHROUGH - #if defined(__cplusplus) && __cplusplus >= 201103L - #if __has_cpp_attribute(fallthrough) - #define CYTHON_FALLTHROUGH [[fallthrough]] - #elif __has_cpp_attribute(clang::fallthrough) - #define CYTHON_FALLTHROUGH [[clang::fallthrough]] - #elif __has_cpp_attribute(gnu::fallthrough) - #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] - #endif - #endif - #ifndef CYTHON_FALLTHROUGH - #if __has_attribute(fallthrough) - #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) - #else - #define CYTHON_FALLTHROUGH - #endif - #endif - #if 
defined(__clang__ ) && defined(__apple_build_version__) - #if __apple_build_version__ < 7000000 - #undef CYTHON_FALLTHROUGH - #define CYTHON_FALLTHROUGH - #endif - #endif -#endif - -#ifndef CYTHON_INLINE - #if defined(__clang__) - #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) - #elif defined(__GNUC__) - #define CYTHON_INLINE __inline__ - #elif defined(_MSC_VER) - #define CYTHON_INLINE __inline - #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L - #define CYTHON_INLINE inline - #else - #define CYTHON_INLINE - #endif -#endif - -#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) - #define Py_OptimizeFlag 0 -#endif -#define __PYX_BUILD_PY_SSIZE_T "n" -#define CYTHON_FORMAT_SSIZE_T "z" -#if PY_MAJOR_VERSION < 3 - #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" - #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ - PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) - #define __Pyx_DefaultClassType PyClass_Type -#else - #define __Pyx_BUILTIN_MODULE_NAME "builtins" -#if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2 - #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ - PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) -#else - #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ - PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) -#endif - #define __Pyx_DefaultClassType PyType_Type -#endif -#ifndef Py_TPFLAGS_CHECKTYPES - #define Py_TPFLAGS_CHECKTYPES 0 -#endif -#ifndef Py_TPFLAGS_HAVE_INDEX - #define Py_TPFLAGS_HAVE_INDEX 0 -#endif -#ifndef Py_TPFLAGS_HAVE_NEWBUFFER - #define Py_TPFLAGS_HAVE_NEWBUFFER 0 -#endif -#ifndef Py_TPFLAGS_HAVE_FINALIZE - #define Py_TPFLAGS_HAVE_FINALIZE 0 -#endif -#ifndef METH_STACKLESS - #define METH_STACKLESS 0 -#endif -#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) - 
#ifndef METH_FASTCALL - #define METH_FASTCALL 0x80 - #endif - typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); - typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, - Py_ssize_t nargs, PyObject *kwnames); -#else - #define __Pyx_PyCFunctionFast _PyCFunctionFast - #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords -#endif -#if CYTHON_FAST_PYCCALL -#define __Pyx_PyFastCFunction_Check(func)\ - ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))))) -#else -#define __Pyx_PyFastCFunction_Check(func) 0 -#endif -#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) - #define PyObject_Malloc(s) PyMem_Malloc(s) - #define PyObject_Free(p) PyMem_Free(p) - #define PyObject_Realloc(p) PyMem_Realloc(p) -#endif -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1 - #define PyMem_RawMalloc(n) PyMem_Malloc(n) - #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) - #define PyMem_RawFree(p) PyMem_Free(p) -#endif -#if CYTHON_COMPILING_IN_PYSTON - #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) - #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) -#else - #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) - #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) -#endif -#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 - #define __Pyx_PyThreadState_Current PyThreadState_GET() -#elif PY_VERSION_HEX >= 0x03060000 - #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() -#elif PY_VERSION_HEX >= 0x03000000 - #define __Pyx_PyThreadState_Current PyThreadState_GET() -#else - #define __Pyx_PyThreadState_Current _PyThreadState_Current -#endif -#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) -#include "pythread.h" 
-#define Py_tss_NEEDS_INIT 0 -typedef int Py_tss_t; -static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { - *key = PyThread_create_key(); - return 0; -} -static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { - Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); - *key = Py_tss_NEEDS_INIT; - return key; -} -static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { - PyObject_Free(key); -} -static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { - return *key != Py_tss_NEEDS_INIT; -} -static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { - PyThread_delete_key(*key); - *key = Py_tss_NEEDS_INIT; -} -static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { - return PyThread_set_key_value(*key, value); -} -static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { - return PyThread_get_key_value(*key); -} -#endif -#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) -#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n)) -#else -#define __Pyx_PyDict_NewPresized(n) PyDict_New() -#endif -#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION - #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) - #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) -#else - #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) - #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) -#endif -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS -#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) -#else -#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) -#endif -#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) - #define CYTHON_PEP393_ENABLED 1 - #if defined(PyUnicode_IS_READY) - #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ - 0 : _PyUnicode_Ready((PyObject *)(op))) - #else - #define 
__Pyx_PyUnicode_READY(op) (0) - #endif - #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) - #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) - #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) - #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) - #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) - #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) - #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) - #if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE) - #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03090000 - #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : ((PyCompactUnicodeObject *)(u))->wstr_length)) - #else - #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) - #endif - #else - #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u)) - #endif -#else - #define CYTHON_PEP393_ENABLED 0 - #define PyUnicode_1BYTE_KIND 1 - #define PyUnicode_2BYTE_KIND 2 - #define PyUnicode_4BYTE_KIND 4 - #define __Pyx_PyUnicode_READY(op) (0) - #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) - #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) - #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 
65535 : 1114111) - #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) - #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) - #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) - #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) - #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) -#endif -#if CYTHON_COMPILING_IN_PYPY - #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) - #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) -#else - #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) - #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ - PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) -#endif -#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) - #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) -#endif -#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) - #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) -#endif -#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) - #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) -#endif -#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) -#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) -#if PY_MAJOR_VERSION >= 3 - #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) -#else - #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) -#endif -#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) - #define PyObject_ASCII(o) PyObject_Repr(o) -#endif -#if PY_MAJOR_VERSION >= 3 - #define PyBaseString_Type PyUnicode_Type - #define PyStringObject PyUnicodeObject - #define PyString_Type PyUnicode_Type - #define PyString_Check PyUnicode_Check - #define PyString_CheckExact PyUnicode_CheckExact -#ifndef PyObject_Unicode - #define PyObject_Unicode PyObject_Str -#endif -#endif -#if PY_MAJOR_VERSION >= 3 - #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) - #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) -#else - #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) - #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) -#endif -#ifndef PySet_CheckExact - #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) -#endif -#if PY_VERSION_HEX >= 0x030900A4 - #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt) - #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size) -#else - #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt) - #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size) -#endif -#if CYTHON_ASSUME_SAFE_MACROS - #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) -#else - #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) -#endif -#if PY_MAJOR_VERSION >= 3 - #define PyIntObject PyLongObject - #define PyInt_Type PyLong_Type - #define PyInt_Check(op) PyLong_Check(op) - #define PyInt_CheckExact(op) PyLong_CheckExact(op) - #define PyInt_FromString PyLong_FromString - #define PyInt_FromUnicode PyLong_FromUnicode - #define PyInt_FromLong PyLong_FromLong - #define PyInt_FromSize_t PyLong_FromSize_t - #define PyInt_FromSsize_t PyLong_FromSsize_t - #define PyInt_AsLong 
PyLong_AsLong - #define PyInt_AS_LONG PyLong_AS_LONG - #define PyInt_AsSsize_t PyLong_AsSsize_t - #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask - #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask - #define PyNumber_Int PyNumber_Long -#endif -#if PY_MAJOR_VERSION >= 3 - #define PyBoolObject PyLongObject -#endif -#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY - #ifndef PyUnicode_InternFromString - #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) - #endif -#endif -#if PY_VERSION_HEX < 0x030200A4 - typedef long Py_hash_t; - #define __Pyx_PyInt_FromHash_t PyInt_FromLong - #define __Pyx_PyInt_AsHash_t PyInt_AsLong -#else - #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t - #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t -#endif -#if PY_MAJOR_VERSION >= 3 - #define __Pyx_PyMethod_New(func, self, klass) ((self) ? ((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func)) -#else - #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) -#endif -#if CYTHON_USE_ASYNC_SLOTS - #if PY_VERSION_HEX >= 0x030500B1 - #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods - #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) - #else - #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) - #endif -#else - #define __Pyx_PyType_AsAsync(obj) NULL -#endif -#ifndef __Pyx_PyAsyncMethodsStruct - typedef struct { - unaryfunc am_await; - unaryfunc am_aiter; - unaryfunc am_anext; - } __Pyx_PyAsyncMethodsStruct; -#endif - -#if defined(WIN32) || defined(MS_WINDOWS) - #define _USE_MATH_DEFINES -#endif -#include -#ifdef NAN -#define __PYX_NAN() ((float) NAN) -#else -static CYTHON_INLINE float __PYX_NAN() { - float value; - memset(&value, 0xFF, sizeof(value)); - return value; -} -#endif -#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) -#define __Pyx_truncl trunc -#else -#define __Pyx_truncl truncl -#endif - -#define __PYX_MARK_ERR_POS(f_index, lineno) \ - { __pyx_filename = 
__pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; } -#define __PYX_ERR(f_index, lineno, Ln_error) \ - { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; } - -#ifndef __PYX_EXTERN_C - #ifdef __cplusplus - #define __PYX_EXTERN_C extern "C" - #else - #define __PYX_EXTERN_C extern - #endif -#endif - -#define __PYX_HAVE__nms__cpu_nms -#define __PYX_HAVE_API__nms__cpu_nms -/* Early includes */ -#include -#include -#include "numpy/arrayobject.h" -#include "numpy/ufuncobject.h" - - /* NumPy API declarations from "numpy/__init__.pxd" */ - -#ifdef _OPENMP -#include -#endif /* _OPENMP */ - -#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) -#define CYTHON_WITHOUT_ASSERTIONS -#endif - -typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; - const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; - -#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 -#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0 -#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8) -#define __PYX_DEFAULT_STRING_ENCODING "" -#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString -#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize -#define __Pyx_uchar_cast(c) ((unsigned char)c) -#define __Pyx_long_cast(x) ((long)x) -#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ - (sizeof(type) < sizeof(Py_ssize_t)) ||\ - (sizeof(type) > sizeof(Py_ssize_t) &&\ - likely(v < (type)PY_SSIZE_T_MAX ||\ - v == (type)PY_SSIZE_T_MAX) &&\ - (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ - v == (type)PY_SSIZE_T_MIN))) ||\ - (sizeof(type) == sizeof(Py_ssize_t) &&\ - (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ - v == (type)PY_SSIZE_T_MAX))) ) -static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { - return (size_t) i < (size_t) limit; -} -#if 
defined (__cplusplus) && __cplusplus >= 201103L - #include - #define __Pyx_sst_abs(value) std::abs(value) -#elif SIZEOF_INT >= SIZEOF_SIZE_T - #define __Pyx_sst_abs(value) abs(value) -#elif SIZEOF_LONG >= SIZEOF_SIZE_T - #define __Pyx_sst_abs(value) labs(value) -#elif defined (_MSC_VER) - #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) -#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L - #define __Pyx_sst_abs(value) llabs(value) -#elif defined (__GNUC__) - #define __Pyx_sst_abs(value) __builtin_llabs(value) -#else - #define __Pyx_sst_abs(value) ((value<0) ? -value : value) -#endif -static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); -static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); -#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) -#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) -#define __Pyx_PyBytes_FromString PyBytes_FromString -#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize -static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); -#if PY_MAJOR_VERSION < 3 - #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString - #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize -#else - #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString - #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize -#endif -#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) -#define 
__Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) -#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) -#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) -#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) -#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) -static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { - const Py_UNICODE *u_end = u; - while (*u_end++) ; - return (size_t)(u_end - u - 1); -} -#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) -#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode -#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode -#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) -#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) -static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); -static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); -static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); -static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); -#define __Pyx_PySequence_Tuple(obj)\ - (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) -static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); -static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); -#if CYTHON_ASSUME_SAFE_MACROS -#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? 
PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) -#else -#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) -#endif -#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) -#if PY_MAJOR_VERSION >= 3 -#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) -#else -#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) -#endif -#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) -#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII -static int __Pyx_sys_getdefaultencoding_not_ascii; -static int __Pyx_init_sys_getdefaultencoding_params(void) { - PyObject* sys; - PyObject* default_encoding = NULL; - PyObject* ascii_chars_u = NULL; - PyObject* ascii_chars_b = NULL; - const char* default_encoding_c; - sys = PyImport_ImportModule("sys"); - if (!sys) goto bad; - default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); - Py_DECREF(sys); - if (!default_encoding) goto bad; - default_encoding_c = PyBytes_AsString(default_encoding); - if (!default_encoding_c) goto bad; - if (strcmp(default_encoding_c, "ascii") == 0) { - __Pyx_sys_getdefaultencoding_not_ascii = 0; - } else { - char ascii_chars[128]; - int c; - for (c = 0; c < 128; c++) { - ascii_chars[c] = c; - } - __Pyx_sys_getdefaultencoding_not_ascii = 1; - ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); - if (!ascii_chars_u) goto bad; - ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); - if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { - PyErr_Format( - PyExc_ValueError, - "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", - default_encoding_c); - goto bad; - } - Py_DECREF(ascii_chars_u); - Py_DECREF(ascii_chars_b); - } - Py_DECREF(default_encoding); - return 0; -bad: - 
Py_XDECREF(default_encoding); - Py_XDECREF(ascii_chars_u); - Py_XDECREF(ascii_chars_b); - return -1; -} -#endif -#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 -#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) -#else -#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) -#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT -static char* __PYX_DEFAULT_STRING_ENCODING; -static int __Pyx_init_sys_getdefaultencoding_params(void) { - PyObject* sys; - PyObject* default_encoding = NULL; - char* default_encoding_c; - sys = PyImport_ImportModule("sys"); - if (!sys) goto bad; - default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); - Py_DECREF(sys); - if (!default_encoding) goto bad; - default_encoding_c = PyBytes_AsString(default_encoding); - if (!default_encoding_c) goto bad; - __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); - if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; - strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); - Py_DECREF(default_encoding); - return 0; -bad: - Py_XDECREF(default_encoding); - return -1; -} -#endif -#endif - - -/* Test for GCC > 2.95 */ -#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) - #define likely(x) __builtin_expect(!!(x), 1) - #define unlikely(x) __builtin_expect(!!(x), 0) -#else /* !__GNUC__ or GCC < 2.95 */ - #define likely(x) (x) - #define unlikely(x) (x) -#endif /* __GNUC__ */ -static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } - -static PyObject *__pyx_m = NULL; -static PyObject *__pyx_d; -static PyObject *__pyx_b; -static PyObject *__pyx_cython_runtime = NULL; -static PyObject *__pyx_empty_tuple; -static PyObject *__pyx_empty_bytes; -static PyObject *__pyx_empty_unicode; -static int __pyx_lineno; -static int __pyx_clineno = 0; -static const char * __pyx_cfilenm= 
__FILE__; -static const char *__pyx_filename; - -/* Header.proto */ -#if !defined(CYTHON_CCOMPLEX) - #if defined(__cplusplus) - #define CYTHON_CCOMPLEX 1 - #elif defined(_Complex_I) - #define CYTHON_CCOMPLEX 1 - #else - #define CYTHON_CCOMPLEX 0 - #endif -#endif -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - #include - #else - #include - #endif -#endif -#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) - #undef _Complex_I - #define _Complex_I 1.0fj -#endif - - -static const char *__pyx_f[] = { - "nms/cpu_nms.pyx", - "__init__.pxd", - "type.pxd", -}; -/* BufferFormatStructs.proto */ -#define IS_UNSIGNED(type) (((type) -1) > 0) -struct __Pyx_StructField_; -#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) -typedef struct { - const char* name; - struct __Pyx_StructField_* fields; - size_t size; - size_t arraysize[8]; - int ndim; - char typegroup; - char is_unsigned; - int flags; -} __Pyx_TypeInfo; -typedef struct __Pyx_StructField_ { - __Pyx_TypeInfo* type; - const char* name; - size_t offset; -} __Pyx_StructField; -typedef struct { - __Pyx_StructField* field; - size_t parent_offset; -} __Pyx_BufFmt_StackElem; -typedef struct { - __Pyx_StructField root; - __Pyx_BufFmt_StackElem* head; - size_t fmt_offset; - size_t new_count, enc_count; - size_t struct_alignment; - int is_complex; - char enc_type; - char new_packmode; - char enc_packmode; - char is_valid_array; -} __Pyx_BufFmt_Context; - - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":689 - * # in Cython to enable them only on the right systems. 
- * - * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< - * ctypedef npy_int16 int16_t - * ctypedef npy_int32 int32_t - */ -typedef npy_int8 __pyx_t_5numpy_int8_t; - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":690 - * - * ctypedef npy_int8 int8_t - * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< - * ctypedef npy_int32 int32_t - * ctypedef npy_int64 int64_t - */ -typedef npy_int16 __pyx_t_5numpy_int16_t; - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":691 - * ctypedef npy_int8 int8_t - * ctypedef npy_int16 int16_t - * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< - * ctypedef npy_int64 int64_t - * #ctypedef npy_int96 int96_t - */ -typedef npy_int32 __pyx_t_5numpy_int32_t; - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":692 - * ctypedef npy_int16 int16_t - * ctypedef npy_int32 int32_t - * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< - * #ctypedef npy_int96 int96_t - * #ctypedef npy_int128 int128_t - */ -typedef npy_int64 __pyx_t_5numpy_int64_t; - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":696 - * #ctypedef npy_int128 int128_t - * - * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< - * ctypedef npy_uint16 uint16_t - * ctypedef npy_uint32 uint32_t - */ -typedef npy_uint8 __pyx_t_5numpy_uint8_t; - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":697 - * - * ctypedef npy_uint8 uint8_t - * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< - * ctypedef npy_uint32 uint32_t - * ctypedef npy_uint64 uint64_t - */ -typedef npy_uint16 __pyx_t_5numpy_uint16_t; - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":698 - * ctypedef npy_uint8 uint8_t - * ctypedef npy_uint16 uint16_t - * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< - * ctypedef npy_uint64 uint64_t - * #ctypedef npy_uint96 uint96_t - */ -typedef npy_uint32 __pyx_t_5numpy_uint32_t; - -/* 
"../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":699 - * ctypedef npy_uint16 uint16_t - * ctypedef npy_uint32 uint32_t - * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< - * #ctypedef npy_uint96 uint96_t - * #ctypedef npy_uint128 uint128_t - */ -typedef npy_uint64 __pyx_t_5numpy_uint64_t; - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":703 - * #ctypedef npy_uint128 uint128_t - * - * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< - * ctypedef npy_float64 float64_t - * #ctypedef npy_float80 float80_t - */ -typedef npy_float32 __pyx_t_5numpy_float32_t; - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":704 - * - * ctypedef npy_float32 float32_t - * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< - * #ctypedef npy_float80 float80_t - * #ctypedef npy_float128 float128_t - */ -typedef npy_float64 __pyx_t_5numpy_float64_t; - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":713 - * # The int types are mapped a bit surprising -- - * # numpy.int corresponds to 'l' and numpy.long to 'q' - * ctypedef npy_long int_t # <<<<<<<<<<<<<< - * ctypedef npy_longlong long_t - * ctypedef npy_longlong longlong_t - */ -typedef npy_long __pyx_t_5numpy_int_t; - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":714 - * # numpy.int corresponds to 'l' and numpy.long to 'q' - * ctypedef npy_long int_t - * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< - * ctypedef npy_longlong longlong_t - * - */ -typedef npy_longlong __pyx_t_5numpy_long_t; - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":715 - * ctypedef npy_long int_t - * ctypedef npy_longlong long_t - * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< - * - * ctypedef npy_ulong uint_t - */ -typedef npy_longlong __pyx_t_5numpy_longlong_t; - -/* 
"../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":717 - * ctypedef npy_longlong longlong_t - * - * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< - * ctypedef npy_ulonglong ulong_t - * ctypedef npy_ulonglong ulonglong_t - */ -typedef npy_ulong __pyx_t_5numpy_uint_t; - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":718 - * - * ctypedef npy_ulong uint_t - * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< - * ctypedef npy_ulonglong ulonglong_t - * - */ -typedef npy_ulonglong __pyx_t_5numpy_ulong_t; - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":719 - * ctypedef npy_ulong uint_t - * ctypedef npy_ulonglong ulong_t - * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< - * - * ctypedef npy_intp intp_t - */ -typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":721 - * ctypedef npy_ulonglong ulonglong_t - * - * ctypedef npy_intp intp_t # <<<<<<<<<<<<<< - * ctypedef npy_uintp uintp_t - * - */ -typedef npy_intp __pyx_t_5numpy_intp_t; - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":722 - * - * ctypedef npy_intp intp_t - * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< - * - * ctypedef npy_double float_t - */ -typedef npy_uintp __pyx_t_5numpy_uintp_t; - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":724 - * ctypedef npy_uintp uintp_t - * - * ctypedef npy_double float_t # <<<<<<<<<<<<<< - * ctypedef npy_double double_t - * ctypedef npy_longdouble longdouble_t - */ -typedef npy_double __pyx_t_5numpy_float_t; - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":725 - * - * ctypedef npy_double float_t - * ctypedef npy_double double_t # <<<<<<<<<<<<<< - * ctypedef npy_longdouble longdouble_t - * - */ -typedef npy_double __pyx_t_5numpy_double_t; - -/* 
"../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":726 - * ctypedef npy_double float_t - * ctypedef npy_double double_t - * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< - * - * ctypedef npy_cfloat cfloat_t - */ -typedef npy_longdouble __pyx_t_5numpy_longdouble_t; -/* Declarations.proto */ -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - typedef ::std::complex< float > __pyx_t_float_complex; - #else - typedef float _Complex __pyx_t_float_complex; - #endif -#else - typedef struct { float real, imag; } __pyx_t_float_complex; -#endif -static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); - -/* Declarations.proto */ -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - typedef ::std::complex< double > __pyx_t_double_complex; - #else - typedef double _Complex __pyx_t_double_complex; - #endif -#else - typedef struct { double real, imag; } __pyx_t_double_complex; -#endif -static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); - - -/*--- Type declarations ---*/ - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":728 - * ctypedef npy_longdouble longdouble_t - * - * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< - * ctypedef npy_cdouble cdouble_t - * ctypedef npy_clongdouble clongdouble_t - */ -typedef npy_cfloat __pyx_t_5numpy_cfloat_t; - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":729 - * - * ctypedef npy_cfloat cfloat_t - * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< - * ctypedef npy_clongdouble clongdouble_t - * - */ -typedef npy_cdouble __pyx_t_5numpy_cdouble_t; - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":730 - * ctypedef npy_cfloat cfloat_t - * ctypedef npy_cdouble cdouble_t - * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< - * - * ctypedef npy_cdouble complex_t - */ -typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; - -/* 
"../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":732 - * ctypedef npy_clongdouble clongdouble_t - * - * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< - * - * cdef inline object PyArray_MultiIterNew1(a): - */ -typedef npy_cdouble __pyx_t_5numpy_complex_t; - -/* --- Runtime support code (head) --- */ -/* Refnanny.proto */ -#ifndef CYTHON_REFNANNY - #define CYTHON_REFNANNY 0 -#endif -#if CYTHON_REFNANNY - typedef struct { - void (*INCREF)(void*, PyObject*, int); - void (*DECREF)(void*, PyObject*, int); - void (*GOTREF)(void*, PyObject*, int); - void (*GIVEREF)(void*, PyObject*, int); - void* (*SetupContext)(const char*, int, const char*); - void (*FinishContext)(void**); - } __Pyx_RefNannyAPIStruct; - static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; - static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); - #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; -#ifdef WITH_THREAD - #define __Pyx_RefNannySetupContext(name, acquire_gil)\ - if (acquire_gil) {\ - PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ - __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ - PyGILState_Release(__pyx_gilstate_save);\ - } else {\ - __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ - } -#else - #define __Pyx_RefNannySetupContext(name, acquire_gil)\ - __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) -#endif - #define __Pyx_RefNannyFinishContext()\ - __Pyx_RefNanny->FinishContext(&__pyx_refnanny) - #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} 
while(0) - #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) - #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) - #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) -#else - #define __Pyx_RefNannyDeclarations - #define __Pyx_RefNannySetupContext(name, acquire_gil) - #define __Pyx_RefNannyFinishContext() - #define __Pyx_INCREF(r) Py_INCREF(r) - #define __Pyx_DECREF(r) Py_DECREF(r) - #define __Pyx_GOTREF(r) - #define __Pyx_GIVEREF(r) - #define __Pyx_XINCREF(r) Py_XINCREF(r) - #define __Pyx_XDECREF(r) Py_XDECREF(r) - #define __Pyx_XGOTREF(r) - #define __Pyx_XGIVEREF(r) -#endif -#define __Pyx_XDECREF_SET(r, v) do {\ - PyObject *tmp = (PyObject *) r;\ - r = v; __Pyx_XDECREF(tmp);\ - } while (0) -#define __Pyx_DECREF_SET(r, v) do {\ - PyObject *tmp = (PyObject *) r;\ - r = v; __Pyx_DECREF(tmp);\ - } while (0) -#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) -#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) - -/* PyObjectGetAttrStr.proto */ -#if CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); -#else -#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) -#endif - -/* GetBuiltinName.proto */ -static PyObject *__Pyx_GetBuiltinName(PyObject *name); - -/* RaiseArgTupleInvalid.proto */ -static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, - Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); - -/* RaiseDoubleKeywords.proto */ -static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); - -/* ParseKeywords.proto */ -static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ - PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ - const char* function_name); - -/* ArgTypeTest.proto */ -#define __Pyx_ArgTypeTest(obj, 
type, none_allowed, name, exact)\ - ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 :\ - __Pyx__ArgTypeTest(obj, type, name, exact)) -static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); - -/* IsLittleEndian.proto */ -static CYTHON_INLINE int __Pyx_Is_Little_Endian(void); - -/* BufferFormatCheck.proto */ -static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); -static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, - __Pyx_BufFmt_StackElem* stack, - __Pyx_TypeInfo* type); - -/* BufferGetAndValidate.proto */ -#define __Pyx_GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack)\ - ((obj == Py_None || obj == NULL) ?\ - (__Pyx_ZeroBuffer(buf), 0) :\ - __Pyx__GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack)) -static int __Pyx__GetBufferAndValidate(Py_buffer* buf, PyObject* obj, - __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); -static void __Pyx_ZeroBuffer(Py_buffer* buf); -static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); -static Py_ssize_t __Pyx_minusones[] = { -1, -1, -1, -1, -1, -1, -1, -1 }; -static Py_ssize_t __Pyx_zeros[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; - -/* GetItemInt.proto */ -#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ - (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ - __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ - (is_list ? 
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ - __Pyx_GetItemInt_Generic(o, to_py_func(i)))) -#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ - (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ - __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ - (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, - int wraparound, int boundscheck); -#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ - (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ - __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ - (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, - int wraparound, int boundscheck); -static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, - int is_list, int wraparound, int boundscheck); - -/* ObjectGetItem.proto */ -#if CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key); -#else -#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key) -#endif - -/* ExtTypeTest.proto */ -static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); - -/* PyIntBinop.proto */ -#if !CYTHON_COMPILING_IN_PYPY -static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); -#else -#define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace, zerodivision_check)\ - (inplace ? 
PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2)) -#endif - -/* PyFunctionFastCall.proto */ -#if CYTHON_FAST_PYCALL -#define __Pyx_PyFunction_FastCall(func, args, nargs)\ - __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) -#if 1 || PY_VERSION_HEX < 0x030600B1 -static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); -#else -#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) -#endif -#define __Pyx_BUILD_ASSERT_EXPR(cond)\ - (sizeof(char [1 - 2*!(cond)]) - 1) -#ifndef Py_MEMBER_SIZE -#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) -#endif - static size_t __pyx_pyframe_localsplus_offset = 0; - #include "frameobject.h" - #define __Pxy_PyFrame_Initialize_Offsets()\ - ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\ - (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) - #define __Pyx_PyFrame_GetLocalsplus(frame)\ - (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) -#endif - -/* PyObjectCall.proto */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); -#else -#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) -#endif - -/* PyObjectCallMethO.proto */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); -#endif - -/* PyObjectCallNoArg.proto */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func); -#else -#define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL) -#endif - -/* PyCFunctionFastCall.proto */ -#if CYTHON_FAST_PYCCALL -static 
CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); -#else -#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) -#endif - -/* PyObjectCallOneArg.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); - -/* PyDictVersioning.proto */ -#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS -#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) -#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) -#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ - (version_var) = __PYX_GET_DICT_VERSION(dict);\ - (cache_var) = (value); -#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ - static PY_UINT64_T __pyx_dict_version = 0;\ - static PyObject *__pyx_dict_cached_value = NULL;\ - if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ - (VAR) = __pyx_dict_cached_value;\ - } else {\ - (VAR) = __pyx_dict_cached_value = (LOOKUP);\ - __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ - }\ -} -static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); -static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); -static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); -#else -#define __PYX_GET_DICT_VERSION(dict) (0) -#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) -#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); -#endif - -/* GetModuleGlobalName.proto */ -#if CYTHON_USE_DICT_VERSIONS -#define __Pyx_GetModuleGlobalName(var, name) {\ - static PY_UINT64_T __pyx_dict_version = 0;\ - static PyObject *__pyx_dict_cached_value = NULL;\ - (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\ - (likely(__pyx_dict_cached_value) ? 
__Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ - __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ -} -#define __Pyx_GetModuleGlobalNameUncached(var, name) {\ - PY_UINT64_T __pyx_dict_version;\ - PyObject *__pyx_dict_cached_value;\ - (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ -} -static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); -#else -#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) -#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) -static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); -#endif - -/* BufferIndexError.proto */ -static void __Pyx_RaiseBufferIndexError(int axis); - -#define __Pyx_BufPtrStrided1d(type, buf, i0, s0) (type)((char*)buf + i0 * s0) -/* ListAppend.proto */ -#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS -static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { - PyListObject* L = (PyListObject*) list; - Py_ssize_t len = Py_SIZE(list); - if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) { - Py_INCREF(x); - PyList_SET_ITEM(list, len, x); - __Pyx_SET_SIZE(list, len + 1); - return 0; - } - return PyList_Append(list, x); -} -#else -#define __Pyx_PyList_Append(L,x) PyList_Append(L,x) -#endif - -/* PyThreadStateGet.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; -#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; -#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type -#else -#define __Pyx_PyThreadState_declare -#define __Pyx_PyThreadState_assign -#define __Pyx_PyErr_Occurred() PyErr_Occurred() -#endif - -/* PyErrFetchRestore.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) -#define 
__Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) -#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) -#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) -#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) -static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); -static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); -#if CYTHON_COMPILING_IN_CPYTHON -#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) -#else -#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) -#endif -#else -#define __Pyx_PyErr_Clear() PyErr_Clear() -#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) -#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) -#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) -#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) -#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) -#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) -#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) -#endif - -#define __Pyx_BufPtrStrided2d(type, buf, i0, s0, i1, s1) (type)((char*)buf + i0 * s0 + i1 * s1) -/* PyObjectCall2Args.proto */ -static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2); - -/* ListCompAppend.proto */ -#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS -static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) { - PyListObject* L = (PyListObject*) list; - Py_ssize_t len = Py_SIZE(list); - if (likely(L->allocated > len)) { - Py_INCREF(x); - PyList_SET_ITEM(list, len, 
x); - __Pyx_SET_SIZE(list, len + 1); - return 0; - } - return PyList_Append(list, x); -} -#else -#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x) -#endif - -/* GetTopmostException.proto */ -#if CYTHON_USE_EXC_INFO_STACK -static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate); -#endif - -/* SaveResetException.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) -static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); -#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) -static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); -#else -#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) -#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) -#endif - -/* PyErrExceptionMatches.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) -static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); -#else -#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) -#endif - -/* GetException.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) -static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); -#else -static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); -#endif - -/* RaiseException.proto */ -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); - -/* TypeImport.proto */ -#ifndef __PYX_HAVE_RT_ImportType_proto -#define __PYX_HAVE_RT_ImportType_proto -enum __Pyx_ImportType_CheckSize { - 
__Pyx_ImportType_CheckSize_Error = 0, - __Pyx_ImportType_CheckSize_Warn = 1, - __Pyx_ImportType_CheckSize_Ignore = 2 -}; -static PyTypeObject *__Pyx_ImportType(PyObject* module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size); -#endif - -/* Import.proto */ -static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); - -/* CLineInTraceback.proto */ -#ifdef CYTHON_CLINE_IN_TRACEBACK -#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0) -#else -static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); -#endif - -/* CodeObjectCache.proto */ -typedef struct { - PyCodeObject* code_object; - int code_line; -} __Pyx_CodeObjectCacheEntry; -struct __Pyx_CodeObjectCache { - int count; - int max_count; - __Pyx_CodeObjectCacheEntry* entries; -}; -static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; -static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); -static PyCodeObject *__pyx_find_code_object(int code_line); -static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); - -/* AddTraceback.proto */ -static void __Pyx_AddTraceback(const char *funcname, int c_line, - int py_line, const char *filename); - -/* BufferStructDeclare.proto */ -typedef struct { - Py_ssize_t shape, strides, suboffsets; -} __Pyx_Buf_DimInfo; -typedef struct { - size_t refcount; - Py_buffer pybuffer; -} __Pyx_Buffer; -typedef struct { - __Pyx_Buffer *rcbuffer; - char *data; - __Pyx_Buf_DimInfo diminfo[8]; -} __Pyx_LocalBuf_ND; - -#if PY_MAJOR_VERSION < 3 - static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); - static void __Pyx_ReleaseBuffer(Py_buffer *view); -#else - #define __Pyx_GetBuffer PyObject_GetBuffer - #define __Pyx_ReleaseBuffer PyBuffer_Release -#endif - - -/* GCCDiagnostics.proto */ -#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) 
-#define __Pyx_HAS_GCC_DIAGNOSTIC -#endif - -/* RealImag.proto */ -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - #define __Pyx_CREAL(z) ((z).real()) - #define __Pyx_CIMAG(z) ((z).imag()) - #else - #define __Pyx_CREAL(z) (__real__(z)) - #define __Pyx_CIMAG(z) (__imag__(z)) - #endif -#else - #define __Pyx_CREAL(z) ((z).real) - #define __Pyx_CIMAG(z) ((z).imag) -#endif -#if defined(__cplusplus) && CYTHON_CCOMPLEX\ - && (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103) - #define __Pyx_SET_CREAL(z,x) ((z).real(x)) - #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) -#else - #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) - #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) -#endif - -/* Arithmetic.proto */ -#if CYTHON_CCOMPLEX - #define __Pyx_c_eq_float(a, b) ((a)==(b)) - #define __Pyx_c_sum_float(a, b) ((a)+(b)) - #define __Pyx_c_diff_float(a, b) ((a)-(b)) - #define __Pyx_c_prod_float(a, b) ((a)*(b)) - #define __Pyx_c_quot_float(a, b) ((a)/(b)) - #define __Pyx_c_neg_float(a) (-(a)) - #ifdef __cplusplus - #define __Pyx_c_is_zero_float(z) ((z)==(float)0) - #define __Pyx_c_conj_float(z) (::std::conj(z)) - #if 1 - #define __Pyx_c_abs_float(z) (::std::abs(z)) - #define __Pyx_c_pow_float(a, b) (::std::pow(a, b)) - #endif - #else - #define __Pyx_c_is_zero_float(z) ((z)==0) - #define __Pyx_c_conj_float(z) (conjf(z)) - #if 1 - #define __Pyx_c_abs_float(z) (cabsf(z)) - #define __Pyx_c_pow_float(a, b) (cpowf(a, b)) - #endif - #endif -#else - static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE 
__pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex); - static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex); - #if 1 - static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex, __pyx_t_float_complex); - #endif -#endif - -/* Arithmetic.proto */ -#if CYTHON_CCOMPLEX - #define __Pyx_c_eq_double(a, b) ((a)==(b)) - #define __Pyx_c_sum_double(a, b) ((a)+(b)) - #define __Pyx_c_diff_double(a, b) ((a)-(b)) - #define __Pyx_c_prod_double(a, b) ((a)*(b)) - #define __Pyx_c_quot_double(a, b) ((a)/(b)) - #define __Pyx_c_neg_double(a) (-(a)) - #ifdef __cplusplus - #define __Pyx_c_is_zero_double(z) ((z)==(double)0) - #define __Pyx_c_conj_double(z) (::std::conj(z)) - #if 1 - #define __Pyx_c_abs_double(z) (::std::abs(z)) - #define __Pyx_c_pow_double(a, b) (::std::pow(a, b)) - #endif - #else - #define __Pyx_c_is_zero_double(z) ((z)==0) - #define __Pyx_c_conj_double(z) (conj(z)) - #if 1 - #define __Pyx_c_abs_double(z) (cabs(z)) - #define __Pyx_c_pow_double(a, b) (cpow(a, b)) - #endif - #endif -#else - static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex); - static CYTHON_INLINE int 
__Pyx_c_is_zero_double(__pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex); - #if 1 - static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex, __pyx_t_double_complex); - #endif -#endif - -/* CIntFromPy.proto */ -static CYTHON_INLINE unsigned int __Pyx_PyInt_As_unsigned_int(PyObject *); - -/* CIntToPy.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); - -/* CIntFromPy.proto */ -static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); - -/* CIntToPy.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); - -/* CIntToPy.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyInt_From_unsigned_int(unsigned int value); - -/* CIntFromPy.proto */ -static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); - -/* FastTypeChecks.proto */ -#if CYTHON_COMPILING_IN_CPYTHON -#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) -static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); -static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); -static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); -#else -#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) -#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) -#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) -#endif -#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) - -/* CheckBinaryVersion.proto */ -static int __Pyx_check_binary_version(void); - -/* InitStrings.proto */ -static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); - - -/* Module declarations from 'cpython.buffer' */ - -/* Module declarations 
from 'libc.string' */ - -/* Module declarations from 'libc.stdio' */ - -/* Module declarations from '__builtin__' */ - -/* Module declarations from 'cpython.type' */ -static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; - -/* Module declarations from 'cpython' */ - -/* Module declarations from 'cpython.object' */ - -/* Module declarations from 'cpython.ref' */ - -/* Module declarations from 'cpython.mem' */ - -/* Module declarations from 'numpy' */ - -/* Module declarations from 'numpy' */ -static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; -static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; -static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; -static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; -static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; - -/* Module declarations from 'nms.cpu_nms' */ -static CYTHON_INLINE __pyx_t_5numpy_float32_t __pyx_f_3nms_7cpu_nms_max(__pyx_t_5numpy_float32_t, __pyx_t_5numpy_float32_t); /*proto*/ -static CYTHON_INLINE __pyx_t_5numpy_float32_t __pyx_f_3nms_7cpu_nms_min(__pyx_t_5numpy_float32_t, __pyx_t_5numpy_float32_t); /*proto*/ -static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_float32_t = { "float32_t", NULL, sizeof(__pyx_t_5numpy_float32_t), { 0 }, 0, 'R', 0, 0 }; -static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_int_t = { "int_t", NULL, sizeof(__pyx_t_5numpy_int_t), { 0 }, 0, IS_UNSIGNED(__pyx_t_5numpy_int_t) ? 
'U' : 'I', IS_UNSIGNED(__pyx_t_5numpy_int_t), 0 }; -static __Pyx_TypeInfo __Pyx_TypeInfo_float = { "float", NULL, sizeof(float), { 0 }, 0, 'R', 0, 0 }; -#define __Pyx_MODULE_NAME "nms.cpu_nms" -extern int __pyx_module_is_main_nms__cpu_nms; -int __pyx_module_is_main_nms__cpu_nms = 0; - -/* Implementation of 'nms.cpu_nms' */ -static PyObject *__pyx_builtin_range; -static PyObject *__pyx_builtin_ImportError; -static const char __pyx_k_N[] = "N"; -static const char __pyx_k_h[] = "h"; -static const char __pyx_k_i[] = "_i"; -static const char __pyx_k_j[] = "_j"; -static const char __pyx_k_s[] = "s"; -static const char __pyx_k_w[] = "w"; -static const char __pyx_k_Nt[] = "Nt"; -static const char __pyx_k_ih[] = "ih"; -static const char __pyx_k_iw[] = "iw"; -static const char __pyx_k_np[] = "np"; -static const char __pyx_k_ov[] = "ov"; -static const char __pyx_k_ts[] = "ts"; -static const char __pyx_k_ua[] = "ua"; -static const char __pyx_k_x1[] = "x1"; -static const char __pyx_k_x2[] = "x2"; -static const char __pyx_k_y1[] = "y1"; -static const char __pyx_k_y2[] = "y2"; -static const char __pyx_k_exp[] = "exp"; -static const char __pyx_k_i_2[] = "i"; -static const char __pyx_k_int[] = "int"; -static const char __pyx_k_ix1[] = "ix1"; -static const char __pyx_k_ix2[] = "ix2"; -static const char __pyx_k_iy1[] = "iy1"; -static const char __pyx_k_iy2[] = "iy2"; -static const char __pyx_k_j_2[] = "j"; -static const char __pyx_k_ovr[] = "ovr"; -static const char __pyx_k_pos[] = "pos"; -static const char __pyx_k_tx1[] = "tx1"; -static const char __pyx_k_tx2[] = "tx2"; -static const char __pyx_k_ty1[] = "ty1"; -static const char __pyx_k_ty2[] = "ty2"; -static const char __pyx_k_xx1[] = "xx1"; -static const char __pyx_k_xx2[] = "xx2"; -static const char __pyx_k_yy1[] = "yy1"; -static const char __pyx_k_yy2[] = "yy2"; -static const char __pyx_k_area[] = "area"; -static const char __pyx_k_dets[] = "dets"; -static const char __pyx_k_keep[] = "keep"; -static const char __pyx_k_main[] = 
"__main__"; -static const char __pyx_k_name[] = "__name__"; -static const char __pyx_k_test[] = "__test__"; -static const char __pyx_k_areas[] = "areas"; -static const char __pyx_k_boxes[] = "boxes"; -static const char __pyx_k_dtype[] = "dtype"; -static const char __pyx_k_iarea[] = "iarea"; -static const char __pyx_k_inter[] = "inter"; -static const char __pyx_k_ndets[] = "ndets"; -static const char __pyx_k_numpy[] = "numpy"; -static const char __pyx_k_order[] = "order"; -static const char __pyx_k_range[] = "range"; -static const char __pyx_k_sigma[] = "sigma"; -static const char __pyx_k_zeros[] = "zeros"; -static const char __pyx_k_import[] = "__import__"; -static const char __pyx_k_maxpos[] = "maxpos"; -static const char __pyx_k_method[] = "method"; -static const char __pyx_k_scores[] = "scores"; -static const char __pyx_k_thresh[] = "thresh"; -static const char __pyx_k_weight[] = "weight"; -static const char __pyx_k_argsort[] = "argsort"; -static const char __pyx_k_cpu_nms[] = "cpu_nms"; -static const char __pyx_k_box_area[] = "box_area"; -static const char __pyx_k_maxscore[] = "maxscore"; -static const char __pyx_k_threshold[] = "threshold"; -static const char __pyx_k_suppressed[] = "suppressed"; -static const char __pyx_k_ImportError[] = "ImportError"; -static const char __pyx_k_nms_cpu_nms[] = "nms.cpu_nms"; -static const char __pyx_k_cpu_soft_nms[] = "cpu_soft_nms"; -static const char __pyx_k_nms_cpu_nms_pyx[] = "nms/cpu_nms.pyx"; -static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; -static const char __pyx_k_numpy_core_multiarray_failed_to[] = "numpy.core.multiarray failed to import"; -static const char __pyx_k_numpy_core_umath_failed_to_impor[] = "numpy.core.umath failed to import"; -static PyObject *__pyx_n_s_ImportError; -static PyObject *__pyx_n_s_N; -static PyObject *__pyx_n_s_Nt; -static PyObject *__pyx_n_s_area; -static PyObject *__pyx_n_s_areas; -static PyObject *__pyx_n_s_argsort; -static PyObject *__pyx_n_s_box_area; -static 
PyObject *__pyx_n_s_boxes; -static PyObject *__pyx_n_s_cline_in_traceback; -static PyObject *__pyx_n_s_cpu_nms; -static PyObject *__pyx_n_s_cpu_soft_nms; -static PyObject *__pyx_n_s_dets; -static PyObject *__pyx_n_s_dtype; -static PyObject *__pyx_n_s_exp; -static PyObject *__pyx_n_s_h; -static PyObject *__pyx_n_s_i; -static PyObject *__pyx_n_s_i_2; -static PyObject *__pyx_n_s_iarea; -static PyObject *__pyx_n_s_ih; -static PyObject *__pyx_n_s_import; -static PyObject *__pyx_n_s_int; -static PyObject *__pyx_n_s_inter; -static PyObject *__pyx_n_s_iw; -static PyObject *__pyx_n_s_ix1; -static PyObject *__pyx_n_s_ix2; -static PyObject *__pyx_n_s_iy1; -static PyObject *__pyx_n_s_iy2; -static PyObject *__pyx_n_s_j; -static PyObject *__pyx_n_s_j_2; -static PyObject *__pyx_n_s_keep; -static PyObject *__pyx_n_s_main; -static PyObject *__pyx_n_s_maxpos; -static PyObject *__pyx_n_s_maxscore; -static PyObject *__pyx_n_s_method; -static PyObject *__pyx_n_s_name; -static PyObject *__pyx_n_s_ndets; -static PyObject *__pyx_n_s_nms_cpu_nms; -static PyObject *__pyx_kp_s_nms_cpu_nms_pyx; -static PyObject *__pyx_n_s_np; -static PyObject *__pyx_n_s_numpy; -static PyObject *__pyx_kp_s_numpy_core_multiarray_failed_to; -static PyObject *__pyx_kp_s_numpy_core_umath_failed_to_impor; -static PyObject *__pyx_n_s_order; -static PyObject *__pyx_n_s_ov; -static PyObject *__pyx_n_s_ovr; -static PyObject *__pyx_n_s_pos; -static PyObject *__pyx_n_s_range; -static PyObject *__pyx_n_s_s; -static PyObject *__pyx_n_s_scores; -static PyObject *__pyx_n_s_sigma; -static PyObject *__pyx_n_s_suppressed; -static PyObject *__pyx_n_s_test; -static PyObject *__pyx_n_s_thresh; -static PyObject *__pyx_n_s_threshold; -static PyObject *__pyx_n_s_ts; -static PyObject *__pyx_n_s_tx1; -static PyObject *__pyx_n_s_tx2; -static PyObject *__pyx_n_s_ty1; -static PyObject *__pyx_n_s_ty2; -static PyObject *__pyx_n_s_ua; -static PyObject *__pyx_n_s_w; -static PyObject *__pyx_n_s_weight; -static PyObject *__pyx_n_s_x1; -static 
PyObject *__pyx_n_s_x2; -static PyObject *__pyx_n_s_xx1; -static PyObject *__pyx_n_s_xx2; -static PyObject *__pyx_n_s_y1; -static PyObject *__pyx_n_s_y2; -static PyObject *__pyx_n_s_yy1; -static PyObject *__pyx_n_s_yy2; -static PyObject *__pyx_n_s_zeros; -static PyObject *__pyx_pf_3nms_7cpu_nms_cpu_nms(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_dets, PyObject *__pyx_v_thresh); /* proto */ -static PyObject *__pyx_pf_3nms_7cpu_nms_2cpu_soft_nms(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_boxes, float __pyx_v_sigma, float __pyx_v_Nt, float __pyx_v_threshold, unsigned int __pyx_v_method); /* proto */ -static PyObject *__pyx_int_0; -static PyObject *__pyx_int_1; -static PyObject *__pyx_int_2; -static PyObject *__pyx_int_3; -static PyObject *__pyx_int_4; -static PyObject *__pyx_int_neg_1; -static PyObject *__pyx_slice_; -static PyObject *__pyx_slice__7; -static PyObject *__pyx_tuple__2; -static PyObject *__pyx_tuple__3; -static PyObject *__pyx_tuple__4; -static PyObject *__pyx_tuple__5; -static PyObject *__pyx_tuple__6; -static PyObject *__pyx_tuple__8; -static PyObject *__pyx_tuple__9; -static PyObject *__pyx_tuple__10; -static PyObject *__pyx_tuple__12; -static PyObject *__pyx_codeobj__11; -static PyObject *__pyx_codeobj__13; -/* Late includes */ - -/* "nms/cpu_nms.pyx":11 - * cimport numpy as np - * - * cdef inline np.float32_t max(np.float32_t a, np.float32_t b): # <<<<<<<<<<<<<< - * return a if a >= b else b - * - */ - -static CYTHON_INLINE __pyx_t_5numpy_float32_t __pyx_f_3nms_7cpu_nms_max(__pyx_t_5numpy_float32_t __pyx_v_a, __pyx_t_5numpy_float32_t __pyx_v_b) { - __pyx_t_5numpy_float32_t __pyx_r; - __Pyx_RefNannyDeclarations - __pyx_t_5numpy_float32_t __pyx_t_1; - __Pyx_RefNannySetupContext("max", 0); - - /* "nms/cpu_nms.pyx":12 - * - * cdef inline np.float32_t max(np.float32_t a, np.float32_t b): - * return a if a >= b else b # <<<<<<<<<<<<<< - * - * cdef inline np.float32_t min(np.float32_t a, np.float32_t b): - */ - if 
(((__pyx_v_a >= __pyx_v_b) != 0)) { - __pyx_t_1 = __pyx_v_a; - } else { - __pyx_t_1 = __pyx_v_b; - } - __pyx_r = __pyx_t_1; - goto __pyx_L0; - - /* "nms/cpu_nms.pyx":11 - * cimport numpy as np - * - * cdef inline np.float32_t max(np.float32_t a, np.float32_t b): # <<<<<<<<<<<<<< - * return a if a >= b else b - * - */ - - /* function exit code */ - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "nms/cpu_nms.pyx":14 - * return a if a >= b else b - * - * cdef inline np.float32_t min(np.float32_t a, np.float32_t b): # <<<<<<<<<<<<<< - * return a if a <= b else b - * - */ - -static CYTHON_INLINE __pyx_t_5numpy_float32_t __pyx_f_3nms_7cpu_nms_min(__pyx_t_5numpy_float32_t __pyx_v_a, __pyx_t_5numpy_float32_t __pyx_v_b) { - __pyx_t_5numpy_float32_t __pyx_r; - __Pyx_RefNannyDeclarations - __pyx_t_5numpy_float32_t __pyx_t_1; - __Pyx_RefNannySetupContext("min", 0); - - /* "nms/cpu_nms.pyx":15 - * - * cdef inline np.float32_t min(np.float32_t a, np.float32_t b): - * return a if a <= b else b # <<<<<<<<<<<<<< - * - * def cpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh): - */ - if (((__pyx_v_a <= __pyx_v_b) != 0)) { - __pyx_t_1 = __pyx_v_a; - } else { - __pyx_t_1 = __pyx_v_b; - } - __pyx_r = __pyx_t_1; - goto __pyx_L0; - - /* "nms/cpu_nms.pyx":14 - * return a if a >= b else b - * - * cdef inline np.float32_t min(np.float32_t a, np.float32_t b): # <<<<<<<<<<<<<< - * return a if a <= b else b - * - */ - - /* function exit code */ - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "nms/cpu_nms.pyx":17 - * return a if a <= b else b - * - * def cpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh): # <<<<<<<<<<<<<< - * cdef np.ndarray[np.float32_t, ndim=1] x1 = dets[:, 0] - * cdef np.ndarray[np.float32_t, ndim=1] y1 = dets[:, 1] - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_3nms_7cpu_nms_1cpu_nms(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static PyMethodDef 
__pyx_mdef_3nms_7cpu_nms_1cpu_nms = {"cpu_nms", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_3nms_7cpu_nms_1cpu_nms, METH_VARARGS|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_3nms_7cpu_nms_1cpu_nms(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyArrayObject *__pyx_v_dets = 0; - PyObject *__pyx_v_thresh = 0; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("cpu_nms (wrapper)", 0); - { - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_dets,&__pyx_n_s_thresh,0}; - PyObject* values[2] = {0,0}; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args; - const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); - switch (pos_args) { - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = PyDict_Size(__pyx_kwds); - switch (pos_args) { - case 0: - if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dets)) != 0)) kw_args--; - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_thresh)) != 0)) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("cpu_nms", 1, 2, 2, 1); __PYX_ERR(0, 17, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "cpu_nms") < 0)) __PYX_ERR(0, 17, __pyx_L3_error) - } - } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - } - __pyx_v_dets = ((PyArrayObject *)values[0]); - __pyx_v_thresh = ((PyObject*)values[1]); - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - 
__Pyx_RaiseArgtupleInvalid("cpu_nms", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 17, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("nms.cpu_nms.cpu_nms", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_dets), __pyx_ptype_5numpy_ndarray, 1, "dets", 0))) __PYX_ERR(0, 17, __pyx_L1_error) - if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_thresh), (&PyFloat_Type), 1, "thresh", 1))) __PYX_ERR(0, 17, __pyx_L1_error) - __pyx_r = __pyx_pf_3nms_7cpu_nms_cpu_nms(__pyx_self, __pyx_v_dets, __pyx_v_thresh); - - /* function exit code */ - goto __pyx_L0; - __pyx_L1_error:; - __pyx_r = NULL; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_3nms_7cpu_nms_cpu_nms(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_dets, PyObject *__pyx_v_thresh) { - PyArrayObject *__pyx_v_x1 = 0; - PyArrayObject *__pyx_v_y1 = 0; - PyArrayObject *__pyx_v_x2 = 0; - PyArrayObject *__pyx_v_y2 = 0; - PyArrayObject *__pyx_v_scores = 0; - PyArrayObject *__pyx_v_areas = 0; - PyArrayObject *__pyx_v_order = 0; - int __pyx_v_ndets; - PyArrayObject *__pyx_v_suppressed = 0; - int __pyx_v__i; - int __pyx_v__j; - int __pyx_v_i; - int __pyx_v_j; - __pyx_t_5numpy_float32_t __pyx_v_ix1; - __pyx_t_5numpy_float32_t __pyx_v_iy1; - __pyx_t_5numpy_float32_t __pyx_v_ix2; - __pyx_t_5numpy_float32_t __pyx_v_iy2; - __pyx_t_5numpy_float32_t __pyx_v_iarea; - __pyx_t_5numpy_float32_t __pyx_v_xx1; - __pyx_t_5numpy_float32_t __pyx_v_yy1; - __pyx_t_5numpy_float32_t __pyx_v_xx2; - __pyx_t_5numpy_float32_t __pyx_v_yy2; - __pyx_t_5numpy_float32_t __pyx_v_w; - __pyx_t_5numpy_float32_t __pyx_v_h; - __pyx_t_5numpy_float32_t __pyx_v_inter; - __pyx_t_5numpy_float32_t __pyx_v_ovr; - PyObject *__pyx_v_keep = NULL; - __Pyx_LocalBuf_ND __pyx_pybuffernd_areas; - __Pyx_Buffer __pyx_pybuffer_areas; - __Pyx_LocalBuf_ND 
__pyx_pybuffernd_dets; - __Pyx_Buffer __pyx_pybuffer_dets; - __Pyx_LocalBuf_ND __pyx_pybuffernd_order; - __Pyx_Buffer __pyx_pybuffer_order; - __Pyx_LocalBuf_ND __pyx_pybuffernd_scores; - __Pyx_Buffer __pyx_pybuffer_scores; - __Pyx_LocalBuf_ND __pyx_pybuffernd_suppressed; - __Pyx_Buffer __pyx_pybuffer_suppressed; - __Pyx_LocalBuf_ND __pyx_pybuffernd_x1; - __Pyx_Buffer __pyx_pybuffer_x1; - __Pyx_LocalBuf_ND __pyx_pybuffernd_x2; - __Pyx_Buffer __pyx_pybuffer_x2; - __Pyx_LocalBuf_ND __pyx_pybuffernd_y1; - __Pyx_Buffer __pyx_pybuffer_y1; - __Pyx_LocalBuf_ND __pyx_pybuffernd_y2; - __Pyx_Buffer __pyx_pybuffer_y2; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyArrayObject *__pyx_t_2 = NULL; - PyArrayObject *__pyx_t_3 = NULL; - PyArrayObject *__pyx_t_4 = NULL; - PyArrayObject *__pyx_t_5 = NULL; - PyArrayObject *__pyx_t_6 = NULL; - PyObject *__pyx_t_7 = NULL; - PyObject *__pyx_t_8 = NULL; - PyArrayObject *__pyx_t_9 = NULL; - PyArrayObject *__pyx_t_10 = NULL; - PyObject *__pyx_t_11 = NULL; - PyObject *__pyx_t_12 = NULL; - PyArrayObject *__pyx_t_13 = NULL; - int __pyx_t_14; - int __pyx_t_15; - int __pyx_t_16; - Py_ssize_t __pyx_t_17; - int __pyx_t_18; - int __pyx_t_19; - int __pyx_t_20; - int __pyx_t_21; - int __pyx_t_22; - int __pyx_t_23; - __pyx_t_5numpy_float32_t __pyx_t_24; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("cpu_nms", 0); - __pyx_pybuffer_x1.pybuffer.buf = NULL; - __pyx_pybuffer_x1.refcount = 0; - __pyx_pybuffernd_x1.data = NULL; - __pyx_pybuffernd_x1.rcbuffer = &__pyx_pybuffer_x1; - __pyx_pybuffer_y1.pybuffer.buf = NULL; - __pyx_pybuffer_y1.refcount = 0; - __pyx_pybuffernd_y1.data = NULL; - __pyx_pybuffernd_y1.rcbuffer = &__pyx_pybuffer_y1; - __pyx_pybuffer_x2.pybuffer.buf = NULL; - __pyx_pybuffer_x2.refcount = 0; - __pyx_pybuffernd_x2.data = NULL; - __pyx_pybuffernd_x2.rcbuffer = &__pyx_pybuffer_x2; - __pyx_pybuffer_y2.pybuffer.buf = NULL; - 
__pyx_pybuffer_y2.refcount = 0; - __pyx_pybuffernd_y2.data = NULL; - __pyx_pybuffernd_y2.rcbuffer = &__pyx_pybuffer_y2; - __pyx_pybuffer_scores.pybuffer.buf = NULL; - __pyx_pybuffer_scores.refcount = 0; - __pyx_pybuffernd_scores.data = NULL; - __pyx_pybuffernd_scores.rcbuffer = &__pyx_pybuffer_scores; - __pyx_pybuffer_areas.pybuffer.buf = NULL; - __pyx_pybuffer_areas.refcount = 0; - __pyx_pybuffernd_areas.data = NULL; - __pyx_pybuffernd_areas.rcbuffer = &__pyx_pybuffer_areas; - __pyx_pybuffer_order.pybuffer.buf = NULL; - __pyx_pybuffer_order.refcount = 0; - __pyx_pybuffernd_order.data = NULL; - __pyx_pybuffernd_order.rcbuffer = &__pyx_pybuffer_order; - __pyx_pybuffer_suppressed.pybuffer.buf = NULL; - __pyx_pybuffer_suppressed.refcount = 0; - __pyx_pybuffernd_suppressed.data = NULL; - __pyx_pybuffernd_suppressed.rcbuffer = &__pyx_pybuffer_suppressed; - __pyx_pybuffer_dets.pybuffer.buf = NULL; - __pyx_pybuffer_dets.refcount = 0; - __pyx_pybuffernd_dets.data = NULL; - __pyx_pybuffernd_dets.rcbuffer = &__pyx_pybuffer_dets; - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_dets.rcbuffer->pybuffer, (PyObject*)__pyx_v_dets, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float32_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 17, __pyx_L1_error) - } - __pyx_pybuffernd_dets.diminfo[0].strides = __pyx_pybuffernd_dets.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_dets.diminfo[0].shape = __pyx_pybuffernd_dets.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_dets.diminfo[1].strides = __pyx_pybuffernd_dets.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_dets.diminfo[1].shape = __pyx_pybuffernd_dets.rcbuffer->pybuffer.shape[1]; - - /* "nms/cpu_nms.pyx":18 - * - * def cpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh): - * cdef np.ndarray[np.float32_t, ndim=1] x1 = dets[:, 0] # <<<<<<<<<<<<<< - * cdef np.ndarray[np.float32_t, ndim=1] y1 = dets[:, 1] - * cdef np.ndarray[np.float32_t, ndim=1] x2 = dets[:, 
2] - */ - __pyx_t_1 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_dets), __pyx_tuple__2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 18, __pyx_L1_error) - __pyx_t_2 = ((PyArrayObject *)__pyx_t_1); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x1.rcbuffer->pybuffer, (PyObject*)__pyx_t_2, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float32_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { - __pyx_v_x1 = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_x1.rcbuffer->pybuffer.buf = NULL; - __PYX_ERR(0, 18, __pyx_L1_error) - } else {__pyx_pybuffernd_x1.diminfo[0].strides = __pyx_pybuffernd_x1.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x1.diminfo[0].shape = __pyx_pybuffernd_x1.rcbuffer->pybuffer.shape[0]; - } - } - __pyx_t_2 = 0; - __pyx_v_x1 = ((PyArrayObject *)__pyx_t_1); - __pyx_t_1 = 0; - - /* "nms/cpu_nms.pyx":19 - * def cpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh): - * cdef np.ndarray[np.float32_t, ndim=1] x1 = dets[:, 0] - * cdef np.ndarray[np.float32_t, ndim=1] y1 = dets[:, 1] # <<<<<<<<<<<<<< - * cdef np.ndarray[np.float32_t, ndim=1] x2 = dets[:, 2] - * cdef np.ndarray[np.float32_t, ndim=1] y2 = dets[:, 3] - */ - __pyx_t_1 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_dets), __pyx_tuple__3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 19, __pyx_L1_error) - __pyx_t_3 = ((PyArrayObject *)__pyx_t_1); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y1.rcbuffer->pybuffer, (PyObject*)__pyx_t_3, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float32_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == 
-1)) { - __pyx_v_y1 = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_y1.rcbuffer->pybuffer.buf = NULL; - __PYX_ERR(0, 19, __pyx_L1_error) - } else {__pyx_pybuffernd_y1.diminfo[0].strides = __pyx_pybuffernd_y1.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y1.diminfo[0].shape = __pyx_pybuffernd_y1.rcbuffer->pybuffer.shape[0]; - } - } - __pyx_t_3 = 0; - __pyx_v_y1 = ((PyArrayObject *)__pyx_t_1); - __pyx_t_1 = 0; - - /* "nms/cpu_nms.pyx":20 - * cdef np.ndarray[np.float32_t, ndim=1] x1 = dets[:, 0] - * cdef np.ndarray[np.float32_t, ndim=1] y1 = dets[:, 1] - * cdef np.ndarray[np.float32_t, ndim=1] x2 = dets[:, 2] # <<<<<<<<<<<<<< - * cdef np.ndarray[np.float32_t, ndim=1] y2 = dets[:, 3] - * cdef np.ndarray[np.float32_t, ndim=1] scores = dets[:, 4] - */ - __pyx_t_1 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_dets), __pyx_tuple__4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 20, __pyx_L1_error) - __pyx_t_4 = ((PyArrayObject *)__pyx_t_1); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x2.rcbuffer->pybuffer, (PyObject*)__pyx_t_4, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float32_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { - __pyx_v_x2 = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_x2.rcbuffer->pybuffer.buf = NULL; - __PYX_ERR(0, 20, __pyx_L1_error) - } else {__pyx_pybuffernd_x2.diminfo[0].strides = __pyx_pybuffernd_x2.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x2.diminfo[0].shape = __pyx_pybuffernd_x2.rcbuffer->pybuffer.shape[0]; - } - } - __pyx_t_4 = 0; - __pyx_v_x2 = ((PyArrayObject *)__pyx_t_1); - __pyx_t_1 = 0; - - /* "nms/cpu_nms.pyx":21 - * cdef np.ndarray[np.float32_t, ndim=1] y1 = dets[:, 1] - * cdef np.ndarray[np.float32_t, ndim=1] x2 = dets[:, 2] - * cdef np.ndarray[np.float32_t, 
ndim=1] y2 = dets[:, 3] # <<<<<<<<<<<<<< - * cdef np.ndarray[np.float32_t, ndim=1] scores = dets[:, 4] - * - */ - __pyx_t_1 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_dets), __pyx_tuple__5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 21, __pyx_L1_error) - __pyx_t_5 = ((PyArrayObject *)__pyx_t_1); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y2.rcbuffer->pybuffer, (PyObject*)__pyx_t_5, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float32_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { - __pyx_v_y2 = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_y2.rcbuffer->pybuffer.buf = NULL; - __PYX_ERR(0, 21, __pyx_L1_error) - } else {__pyx_pybuffernd_y2.diminfo[0].strides = __pyx_pybuffernd_y2.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y2.diminfo[0].shape = __pyx_pybuffernd_y2.rcbuffer->pybuffer.shape[0]; - } - } - __pyx_t_5 = 0; - __pyx_v_y2 = ((PyArrayObject *)__pyx_t_1); - __pyx_t_1 = 0; - - /* "nms/cpu_nms.pyx":22 - * cdef np.ndarray[np.float32_t, ndim=1] x2 = dets[:, 2] - * cdef np.ndarray[np.float32_t, ndim=1] y2 = dets[:, 3] - * cdef np.ndarray[np.float32_t, ndim=1] scores = dets[:, 4] # <<<<<<<<<<<<<< - * - * cdef np.ndarray[np.float32_t, ndim=1] areas = (x2 - x1 + 1) * (y2 - y1 + 1) - */ - __pyx_t_1 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_dets), __pyx_tuple__6); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 22, __pyx_L1_error) - __pyx_t_6 = ((PyArrayObject *)__pyx_t_1); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_scores.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, 
&__Pyx_TypeInfo_nn___pyx_t_5numpy_float32_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { - __pyx_v_scores = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_scores.rcbuffer->pybuffer.buf = NULL; - __PYX_ERR(0, 22, __pyx_L1_error) - } else {__pyx_pybuffernd_scores.diminfo[0].strides = __pyx_pybuffernd_scores.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_scores.diminfo[0].shape = __pyx_pybuffernd_scores.rcbuffer->pybuffer.shape[0]; - } - } - __pyx_t_6 = 0; - __pyx_v_scores = ((PyArrayObject *)__pyx_t_1); - __pyx_t_1 = 0; - - /* "nms/cpu_nms.pyx":24 - * cdef np.ndarray[np.float32_t, ndim=1] scores = dets[:, 4] - * - * cdef np.ndarray[np.float32_t, ndim=1] areas = (x2 - x1 + 1) * (y2 - y1 + 1) # <<<<<<<<<<<<<< - * cdef np.ndarray[np.int_t, ndim=1] order = scores.argsort()[::-1] - * - */ - __pyx_t_1 = PyNumber_Subtract(((PyObject *)__pyx_v_x2), ((PyObject *)__pyx_v_x1)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_1, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 24, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyNumber_Subtract(((PyObject *)__pyx_v_y2), ((PyObject *)__pyx_v_y1)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_8 = __Pyx_PyInt_AddObjC(__pyx_t_1, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 24, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyNumber_Multiply(__pyx_t_7, __pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 24, __pyx_L1_error) - __pyx_t_9 = ((PyArrayObject *)__pyx_t_1); - { - 
__Pyx_BufFmt_StackElem __pyx_stack[1]; - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_areas.rcbuffer->pybuffer, (PyObject*)__pyx_t_9, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float32_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { - __pyx_v_areas = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_areas.rcbuffer->pybuffer.buf = NULL; - __PYX_ERR(0, 24, __pyx_L1_error) - } else {__pyx_pybuffernd_areas.diminfo[0].strides = __pyx_pybuffernd_areas.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_areas.diminfo[0].shape = __pyx_pybuffernd_areas.rcbuffer->pybuffer.shape[0]; - } - } - __pyx_t_9 = 0; - __pyx_v_areas = ((PyArrayObject *)__pyx_t_1); - __pyx_t_1 = 0; - - /* "nms/cpu_nms.pyx":25 - * - * cdef np.ndarray[np.float32_t, ndim=1] areas = (x2 - x1 + 1) * (y2 - y1 + 1) - * cdef np.ndarray[np.int_t, ndim=1] order = scores.argsort()[::-1] # <<<<<<<<<<<<<< - * - * cdef int ndets = dets.shape[0] - */ - __pyx_t_8 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_scores), __pyx_n_s_argsort); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 25, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __pyx_t_7 = NULL; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_8))) { - __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_8); - if (likely(__pyx_t_7)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_8); - __Pyx_INCREF(__pyx_t_7); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_8, function); - } - } - __pyx_t_1 = (__pyx_t_7) ? 
__Pyx_PyObject_CallOneArg(__pyx_t_8, __pyx_t_7) : __Pyx_PyObject_CallNoArg(__pyx_t_8); - __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __pyx_t_8 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_slice__7); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 25, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (!(likely(((__pyx_t_8) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_8, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 25, __pyx_L1_error) - __pyx_t_10 = ((PyArrayObject *)__pyx_t_8); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_order.rcbuffer->pybuffer, (PyObject*)__pyx_t_10, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { - __pyx_v_order = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_order.rcbuffer->pybuffer.buf = NULL; - __PYX_ERR(0, 25, __pyx_L1_error) - } else {__pyx_pybuffernd_order.diminfo[0].strides = __pyx_pybuffernd_order.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_order.diminfo[0].shape = __pyx_pybuffernd_order.rcbuffer->pybuffer.shape[0]; - } - } - __pyx_t_10 = 0; - __pyx_v_order = ((PyArrayObject *)__pyx_t_8); - __pyx_t_8 = 0; - - /* "nms/cpu_nms.pyx":27 - * cdef np.ndarray[np.int_t, ndim=1] order = scores.argsort()[::-1] - * - * cdef int ndets = dets.shape[0] # <<<<<<<<<<<<<< - * cdef np.ndarray[np.int_t, ndim=1] suppressed = \ - * np.zeros((ndets), dtype=np.int) - */ - __pyx_v_ndets = (__pyx_v_dets->dimensions[0]); - - /* "nms/cpu_nms.pyx":29 - * cdef int ndets = dets.shape[0] - * cdef np.ndarray[np.int_t, ndim=1] suppressed = \ - * np.zeros((ndets), dtype=np.int) # <<<<<<<<<<<<<< - * - * # nominal indices - */ - __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_n_s_np); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 29, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __pyx_t_1 = 
__Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_n_s_zeros); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 29, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __pyx_t_8 = __Pyx_PyInt_From_int(__pyx_v_ndets); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 29, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __pyx_t_7 = PyTuple_New(1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 29, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_GIVEREF(__pyx_t_8); - PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_8); - __pyx_t_8 = 0; - __pyx_t_8 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 29, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_GetModuleGlobalName(__pyx_t_11, __pyx_n_s_np); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 29, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_t_11, __pyx_n_s_int); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 29, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_12); - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - if (PyDict_SetItem(__pyx_t_8, __pyx_n_s_dtype, __pyx_t_12) < 0) __PYX_ERR(0, 29, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; - __pyx_t_12 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_7, __pyx_t_8); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 29, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_12); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - if (!(likely(((__pyx_t_12) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_12, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 29, __pyx_L1_error) - __pyx_t_13 = ((PyArrayObject *)__pyx_t_12); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_suppressed.rcbuffer->pybuffer, (PyObject*)__pyx_t_13, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) { - __pyx_v_suppressed = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); 
__pyx_pybuffernd_suppressed.rcbuffer->pybuffer.buf = NULL; - __PYX_ERR(0, 28, __pyx_L1_error) - } else {__pyx_pybuffernd_suppressed.diminfo[0].strides = __pyx_pybuffernd_suppressed.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_suppressed.diminfo[0].shape = __pyx_pybuffernd_suppressed.rcbuffer->pybuffer.shape[0]; - } - } - __pyx_t_13 = 0; - __pyx_v_suppressed = ((PyArrayObject *)__pyx_t_12); - __pyx_t_12 = 0; - - /* "nms/cpu_nms.pyx":42 - * cdef np.float32_t inter, ovr - * - * keep = [] # <<<<<<<<<<<<<< - * for _i in range(ndets): - * i = order[_i] - */ - __pyx_t_12 = PyList_New(0); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 42, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_12); - __pyx_v_keep = ((PyObject*)__pyx_t_12); - __pyx_t_12 = 0; - - /* "nms/cpu_nms.pyx":43 - * - * keep = [] - * for _i in range(ndets): # <<<<<<<<<<<<<< - * i = order[_i] - * if suppressed[i] == 1: - */ - __pyx_t_14 = __pyx_v_ndets; - __pyx_t_15 = __pyx_t_14; - for (__pyx_t_16 = 0; __pyx_t_16 < __pyx_t_15; __pyx_t_16+=1) { - __pyx_v__i = __pyx_t_16; - - /* "nms/cpu_nms.pyx":44 - * keep = [] - * for _i in range(ndets): - * i = order[_i] # <<<<<<<<<<<<<< - * if suppressed[i] == 1: - * continue - */ - __pyx_t_17 = __pyx_v__i; - __pyx_t_18 = -1; - if (__pyx_t_17 < 0) { - __pyx_t_17 += __pyx_pybuffernd_order.diminfo[0].shape; - if (unlikely(__pyx_t_17 < 0)) __pyx_t_18 = 0; - } else if (unlikely(__pyx_t_17 >= __pyx_pybuffernd_order.diminfo[0].shape)) __pyx_t_18 = 0; - if (unlikely(__pyx_t_18 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_18); - __PYX_ERR(0, 44, __pyx_L1_error) - } - __pyx_v_i = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_int_t *, __pyx_pybuffernd_order.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_order.diminfo[0].strides)); - - /* "nms/cpu_nms.pyx":45 - * for _i in range(ndets): - * i = order[_i] - * if suppressed[i] == 1: # <<<<<<<<<<<<<< - * continue - * keep.append(i) - */ - __pyx_t_17 = __pyx_v_i; - __pyx_t_18 = -1; - if (__pyx_t_17 < 0) { - __pyx_t_17 += 
__pyx_pybuffernd_suppressed.diminfo[0].shape; - if (unlikely(__pyx_t_17 < 0)) __pyx_t_18 = 0; - } else if (unlikely(__pyx_t_17 >= __pyx_pybuffernd_suppressed.diminfo[0].shape)) __pyx_t_18 = 0; - if (unlikely(__pyx_t_18 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_18); - __PYX_ERR(0, 45, __pyx_L1_error) - } - __pyx_t_19 = (((*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_int_t *, __pyx_pybuffernd_suppressed.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_suppressed.diminfo[0].strides)) == 1) != 0); - if (__pyx_t_19) { - - /* "nms/cpu_nms.pyx":46 - * i = order[_i] - * if suppressed[i] == 1: - * continue # <<<<<<<<<<<<<< - * keep.append(i) - * ix1 = x1[i] - */ - goto __pyx_L3_continue; - - /* "nms/cpu_nms.pyx":45 - * for _i in range(ndets): - * i = order[_i] - * if suppressed[i] == 1: # <<<<<<<<<<<<<< - * continue - * keep.append(i) - */ - } - - /* "nms/cpu_nms.pyx":47 - * if suppressed[i] == 1: - * continue - * keep.append(i) # <<<<<<<<<<<<<< - * ix1 = x1[i] - * iy1 = y1[i] - */ - __pyx_t_12 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 47, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_12); - __pyx_t_20 = __Pyx_PyList_Append(__pyx_v_keep, __pyx_t_12); if (unlikely(__pyx_t_20 == ((int)-1))) __PYX_ERR(0, 47, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; - - /* "nms/cpu_nms.pyx":48 - * continue - * keep.append(i) - * ix1 = x1[i] # <<<<<<<<<<<<<< - * iy1 = y1[i] - * ix2 = x2[i] - */ - __pyx_t_17 = __pyx_v_i; - __pyx_t_18 = -1; - if (__pyx_t_17 < 0) { - __pyx_t_17 += __pyx_pybuffernd_x1.diminfo[0].shape; - if (unlikely(__pyx_t_17 < 0)) __pyx_t_18 = 0; - } else if (unlikely(__pyx_t_17 >= __pyx_pybuffernd_x1.diminfo[0].shape)) __pyx_t_18 = 0; - if (unlikely(__pyx_t_18 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_18); - __PYX_ERR(0, 48, __pyx_L1_error) - } - __pyx_v_ix1 = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float32_t *, __pyx_pybuffernd_x1.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_x1.diminfo[0].strides)); - - /* 
"nms/cpu_nms.pyx":49 - * keep.append(i) - * ix1 = x1[i] - * iy1 = y1[i] # <<<<<<<<<<<<<< - * ix2 = x2[i] - * iy2 = y2[i] - */ - __pyx_t_17 = __pyx_v_i; - __pyx_t_18 = -1; - if (__pyx_t_17 < 0) { - __pyx_t_17 += __pyx_pybuffernd_y1.diminfo[0].shape; - if (unlikely(__pyx_t_17 < 0)) __pyx_t_18 = 0; - } else if (unlikely(__pyx_t_17 >= __pyx_pybuffernd_y1.diminfo[0].shape)) __pyx_t_18 = 0; - if (unlikely(__pyx_t_18 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_18); - __PYX_ERR(0, 49, __pyx_L1_error) - } - __pyx_v_iy1 = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float32_t *, __pyx_pybuffernd_y1.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_y1.diminfo[0].strides)); - - /* "nms/cpu_nms.pyx":50 - * ix1 = x1[i] - * iy1 = y1[i] - * ix2 = x2[i] # <<<<<<<<<<<<<< - * iy2 = y2[i] - * iarea = areas[i] - */ - __pyx_t_17 = __pyx_v_i; - __pyx_t_18 = -1; - if (__pyx_t_17 < 0) { - __pyx_t_17 += __pyx_pybuffernd_x2.diminfo[0].shape; - if (unlikely(__pyx_t_17 < 0)) __pyx_t_18 = 0; - } else if (unlikely(__pyx_t_17 >= __pyx_pybuffernd_x2.diminfo[0].shape)) __pyx_t_18 = 0; - if (unlikely(__pyx_t_18 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_18); - __PYX_ERR(0, 50, __pyx_L1_error) - } - __pyx_v_ix2 = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float32_t *, __pyx_pybuffernd_x2.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_x2.diminfo[0].strides)); - - /* "nms/cpu_nms.pyx":51 - * iy1 = y1[i] - * ix2 = x2[i] - * iy2 = y2[i] # <<<<<<<<<<<<<< - * iarea = areas[i] - * for _j in range(_i + 1, ndets): - */ - __pyx_t_17 = __pyx_v_i; - __pyx_t_18 = -1; - if (__pyx_t_17 < 0) { - __pyx_t_17 += __pyx_pybuffernd_y2.diminfo[0].shape; - if (unlikely(__pyx_t_17 < 0)) __pyx_t_18 = 0; - } else if (unlikely(__pyx_t_17 >= __pyx_pybuffernd_y2.diminfo[0].shape)) __pyx_t_18 = 0; - if (unlikely(__pyx_t_18 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_18); - __PYX_ERR(0, 51, __pyx_L1_error) - } - __pyx_v_iy2 = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float32_t *, __pyx_pybuffernd_y2.rcbuffer->pybuffer.buf, 
__pyx_t_17, __pyx_pybuffernd_y2.diminfo[0].strides)); - - /* "nms/cpu_nms.pyx":52 - * ix2 = x2[i] - * iy2 = y2[i] - * iarea = areas[i] # <<<<<<<<<<<<<< - * for _j in range(_i + 1, ndets): - * j = order[_j] - */ - __pyx_t_17 = __pyx_v_i; - __pyx_t_18 = -1; - if (__pyx_t_17 < 0) { - __pyx_t_17 += __pyx_pybuffernd_areas.diminfo[0].shape; - if (unlikely(__pyx_t_17 < 0)) __pyx_t_18 = 0; - } else if (unlikely(__pyx_t_17 >= __pyx_pybuffernd_areas.diminfo[0].shape)) __pyx_t_18 = 0; - if (unlikely(__pyx_t_18 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_18); - __PYX_ERR(0, 52, __pyx_L1_error) - } - __pyx_v_iarea = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float32_t *, __pyx_pybuffernd_areas.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_areas.diminfo[0].strides)); - - /* "nms/cpu_nms.pyx":53 - * iy2 = y2[i] - * iarea = areas[i] - * for _j in range(_i + 1, ndets): # <<<<<<<<<<<<<< - * j = order[_j] - * if suppressed[j] == 1: - */ - __pyx_t_18 = __pyx_v_ndets; - __pyx_t_21 = __pyx_t_18; - for (__pyx_t_22 = (__pyx_v__i + 1); __pyx_t_22 < __pyx_t_21; __pyx_t_22+=1) { - __pyx_v__j = __pyx_t_22; - - /* "nms/cpu_nms.pyx":54 - * iarea = areas[i] - * for _j in range(_i + 1, ndets): - * j = order[_j] # <<<<<<<<<<<<<< - * if suppressed[j] == 1: - * continue - */ - __pyx_t_17 = __pyx_v__j; - __pyx_t_23 = -1; - if (__pyx_t_17 < 0) { - __pyx_t_17 += __pyx_pybuffernd_order.diminfo[0].shape; - if (unlikely(__pyx_t_17 < 0)) __pyx_t_23 = 0; - } else if (unlikely(__pyx_t_17 >= __pyx_pybuffernd_order.diminfo[0].shape)) __pyx_t_23 = 0; - if (unlikely(__pyx_t_23 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_23); - __PYX_ERR(0, 54, __pyx_L1_error) - } - __pyx_v_j = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_int_t *, __pyx_pybuffernd_order.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_order.diminfo[0].strides)); - - /* "nms/cpu_nms.pyx":55 - * for _j in range(_i + 1, ndets): - * j = order[_j] - * if suppressed[j] == 1: # <<<<<<<<<<<<<< - * continue - * xx1 = max(ix1, x1[j]) - */ - 
__pyx_t_17 = __pyx_v_j; - __pyx_t_23 = -1; - if (__pyx_t_17 < 0) { - __pyx_t_17 += __pyx_pybuffernd_suppressed.diminfo[0].shape; - if (unlikely(__pyx_t_17 < 0)) __pyx_t_23 = 0; - } else if (unlikely(__pyx_t_17 >= __pyx_pybuffernd_suppressed.diminfo[0].shape)) __pyx_t_23 = 0; - if (unlikely(__pyx_t_23 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_23); - __PYX_ERR(0, 55, __pyx_L1_error) - } - __pyx_t_19 = (((*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_int_t *, __pyx_pybuffernd_suppressed.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_suppressed.diminfo[0].strides)) == 1) != 0); - if (__pyx_t_19) { - - /* "nms/cpu_nms.pyx":56 - * j = order[_j] - * if suppressed[j] == 1: - * continue # <<<<<<<<<<<<<< - * xx1 = max(ix1, x1[j]) - * yy1 = max(iy1, y1[j]) - */ - goto __pyx_L6_continue; - - /* "nms/cpu_nms.pyx":55 - * for _j in range(_i + 1, ndets): - * j = order[_j] - * if suppressed[j] == 1: # <<<<<<<<<<<<<< - * continue - * xx1 = max(ix1, x1[j]) - */ - } - - /* "nms/cpu_nms.pyx":57 - * if suppressed[j] == 1: - * continue - * xx1 = max(ix1, x1[j]) # <<<<<<<<<<<<<< - * yy1 = max(iy1, y1[j]) - * xx2 = min(ix2, x2[j]) - */ - __pyx_t_17 = __pyx_v_j; - __pyx_t_23 = -1; - if (__pyx_t_17 < 0) { - __pyx_t_17 += __pyx_pybuffernd_x1.diminfo[0].shape; - if (unlikely(__pyx_t_17 < 0)) __pyx_t_23 = 0; - } else if (unlikely(__pyx_t_17 >= __pyx_pybuffernd_x1.diminfo[0].shape)) __pyx_t_23 = 0; - if (unlikely(__pyx_t_23 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_23); - __PYX_ERR(0, 57, __pyx_L1_error) - } - __pyx_v_xx1 = __pyx_f_3nms_7cpu_nms_max(__pyx_v_ix1, (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float32_t *, __pyx_pybuffernd_x1.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_x1.diminfo[0].strides))); - - /* "nms/cpu_nms.pyx":58 - * continue - * xx1 = max(ix1, x1[j]) - * yy1 = max(iy1, y1[j]) # <<<<<<<<<<<<<< - * xx2 = min(ix2, x2[j]) - * yy2 = min(iy2, y2[j]) - */ - __pyx_t_17 = __pyx_v_j; - __pyx_t_23 = -1; - if (__pyx_t_17 < 0) { - __pyx_t_17 += 
__pyx_pybuffernd_y1.diminfo[0].shape; - if (unlikely(__pyx_t_17 < 0)) __pyx_t_23 = 0; - } else if (unlikely(__pyx_t_17 >= __pyx_pybuffernd_y1.diminfo[0].shape)) __pyx_t_23 = 0; - if (unlikely(__pyx_t_23 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_23); - __PYX_ERR(0, 58, __pyx_L1_error) - } - __pyx_v_yy1 = __pyx_f_3nms_7cpu_nms_max(__pyx_v_iy1, (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float32_t *, __pyx_pybuffernd_y1.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_y1.diminfo[0].strides))); - - /* "nms/cpu_nms.pyx":59 - * xx1 = max(ix1, x1[j]) - * yy1 = max(iy1, y1[j]) - * xx2 = min(ix2, x2[j]) # <<<<<<<<<<<<<< - * yy2 = min(iy2, y2[j]) - * w = max(0.0, xx2 - xx1 + 1) - */ - __pyx_t_17 = __pyx_v_j; - __pyx_t_23 = -1; - if (__pyx_t_17 < 0) { - __pyx_t_17 += __pyx_pybuffernd_x2.diminfo[0].shape; - if (unlikely(__pyx_t_17 < 0)) __pyx_t_23 = 0; - } else if (unlikely(__pyx_t_17 >= __pyx_pybuffernd_x2.diminfo[0].shape)) __pyx_t_23 = 0; - if (unlikely(__pyx_t_23 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_23); - __PYX_ERR(0, 59, __pyx_L1_error) - } - __pyx_v_xx2 = __pyx_f_3nms_7cpu_nms_min(__pyx_v_ix2, (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float32_t *, __pyx_pybuffernd_x2.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_x2.diminfo[0].strides))); - - /* "nms/cpu_nms.pyx":60 - * yy1 = max(iy1, y1[j]) - * xx2 = min(ix2, x2[j]) - * yy2 = min(iy2, y2[j]) # <<<<<<<<<<<<<< - * w = max(0.0, xx2 - xx1 + 1) - * h = max(0.0, yy2 - yy1 + 1) - */ - __pyx_t_17 = __pyx_v_j; - __pyx_t_23 = -1; - if (__pyx_t_17 < 0) { - __pyx_t_17 += __pyx_pybuffernd_y2.diminfo[0].shape; - if (unlikely(__pyx_t_17 < 0)) __pyx_t_23 = 0; - } else if (unlikely(__pyx_t_17 >= __pyx_pybuffernd_y2.diminfo[0].shape)) __pyx_t_23 = 0; - if (unlikely(__pyx_t_23 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_23); - __PYX_ERR(0, 60, __pyx_L1_error) - } - __pyx_v_yy2 = __pyx_f_3nms_7cpu_nms_min(__pyx_v_iy2, (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float32_t *, __pyx_pybuffernd_y2.rcbuffer->pybuffer.buf, 
__pyx_t_17, __pyx_pybuffernd_y2.diminfo[0].strides))); - - /* "nms/cpu_nms.pyx":61 - * xx2 = min(ix2, x2[j]) - * yy2 = min(iy2, y2[j]) - * w = max(0.0, xx2 - xx1 + 1) # <<<<<<<<<<<<<< - * h = max(0.0, yy2 - yy1 + 1) - * inter = w * h - */ - __pyx_v_w = __pyx_f_3nms_7cpu_nms_max(0.0, ((__pyx_v_xx2 - __pyx_v_xx1) + 1.0)); - - /* "nms/cpu_nms.pyx":62 - * yy2 = min(iy2, y2[j]) - * w = max(0.0, xx2 - xx1 + 1) - * h = max(0.0, yy2 - yy1 + 1) # <<<<<<<<<<<<<< - * inter = w * h - * ovr = inter / (iarea + areas[j] - inter) - */ - __pyx_v_h = __pyx_f_3nms_7cpu_nms_max(0.0, ((__pyx_v_yy2 - __pyx_v_yy1) + 1.0)); - - /* "nms/cpu_nms.pyx":63 - * w = max(0.0, xx2 - xx1 + 1) - * h = max(0.0, yy2 - yy1 + 1) - * inter = w * h # <<<<<<<<<<<<<< - * ovr = inter / (iarea + areas[j] - inter) - * if ovr >= thresh: - */ - __pyx_v_inter = (__pyx_v_w * __pyx_v_h); - - /* "nms/cpu_nms.pyx":64 - * h = max(0.0, yy2 - yy1 + 1) - * inter = w * h - * ovr = inter / (iarea + areas[j] - inter) # <<<<<<<<<<<<<< - * if ovr >= thresh: - * suppressed[j] = 1 - */ - __pyx_t_17 = __pyx_v_j; - __pyx_t_23 = -1; - if (__pyx_t_17 < 0) { - __pyx_t_17 += __pyx_pybuffernd_areas.diminfo[0].shape; - if (unlikely(__pyx_t_17 < 0)) __pyx_t_23 = 0; - } else if (unlikely(__pyx_t_17 >= __pyx_pybuffernd_areas.diminfo[0].shape)) __pyx_t_23 = 0; - if (unlikely(__pyx_t_23 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_23); - __PYX_ERR(0, 64, __pyx_L1_error) - } - __pyx_t_24 = ((__pyx_v_iarea + (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float32_t *, __pyx_pybuffernd_areas.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_areas.diminfo[0].strides))) - __pyx_v_inter); - if (unlikely(__pyx_t_24 == 0)) { - PyErr_SetString(PyExc_ZeroDivisionError, "float division"); - __PYX_ERR(0, 64, __pyx_L1_error) - } - __pyx_v_ovr = (__pyx_v_inter / __pyx_t_24); - - /* "nms/cpu_nms.pyx":65 - * inter = w * h - * ovr = inter / (iarea + areas[j] - inter) - * if ovr >= thresh: # <<<<<<<<<<<<<< - * suppressed[j] = 1 - * - */ - __pyx_t_12 = 
PyFloat_FromDouble(__pyx_v_ovr); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 65, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_12); - __pyx_t_8 = PyObject_RichCompare(__pyx_t_12, __pyx_v_thresh, Py_GE); __Pyx_XGOTREF(__pyx_t_8); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 65, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; - __pyx_t_19 = __Pyx_PyObject_IsTrue(__pyx_t_8); if (unlikely(__pyx_t_19 < 0)) __PYX_ERR(0, 65, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - if (__pyx_t_19) { - - /* "nms/cpu_nms.pyx":66 - * ovr = inter / (iarea + areas[j] - inter) - * if ovr >= thresh: - * suppressed[j] = 1 # <<<<<<<<<<<<<< - * - * return keep - */ - __pyx_t_17 = __pyx_v_j; - __pyx_t_23 = -1; - if (__pyx_t_17 < 0) { - __pyx_t_17 += __pyx_pybuffernd_suppressed.diminfo[0].shape; - if (unlikely(__pyx_t_17 < 0)) __pyx_t_23 = 0; - } else if (unlikely(__pyx_t_17 >= __pyx_pybuffernd_suppressed.diminfo[0].shape)) __pyx_t_23 = 0; - if (unlikely(__pyx_t_23 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_23); - __PYX_ERR(0, 66, __pyx_L1_error) - } - *__Pyx_BufPtrStrided1d(__pyx_t_5numpy_int_t *, __pyx_pybuffernd_suppressed.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_suppressed.diminfo[0].strides) = 1; - - /* "nms/cpu_nms.pyx":65 - * inter = w * h - * ovr = inter / (iarea + areas[j] - inter) - * if ovr >= thresh: # <<<<<<<<<<<<<< - * suppressed[j] = 1 - * - */ - } - __pyx_L6_continue:; - } - __pyx_L3_continue:; - } - - /* "nms/cpu_nms.pyx":68 - * suppressed[j] = 1 - * - * return keep # <<<<<<<<<<<<<< - * - * def cpu_soft_nms(np.ndarray[float, ndim=2] boxes, float sigma=0.5, float Nt=0.3, float threshold=0.001, unsigned int method=0): - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_keep); - __pyx_r = __pyx_v_keep; - goto __pyx_L0; - - /* "nms/cpu_nms.pyx":17 - * return a if a <= b else b - * - * def cpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh): # <<<<<<<<<<<<<< - * cdef np.ndarray[np.float32_t, ndim=1] x1 = dets[:, 0] - * cdef 
np.ndarray[np.float32_t, ndim=1] y1 = dets[:, 1] - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_XDECREF(__pyx_t_8); - __Pyx_XDECREF(__pyx_t_11); - __Pyx_XDECREF(__pyx_t_12); - { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); - __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_areas.rcbuffer->pybuffer); - __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_dets.rcbuffer->pybuffer); - __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_order.rcbuffer->pybuffer); - __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_scores.rcbuffer->pybuffer); - __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_suppressed.rcbuffer->pybuffer); - __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x1.rcbuffer->pybuffer); - __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x2.rcbuffer->pybuffer); - __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y1.rcbuffer->pybuffer); - __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y2.rcbuffer->pybuffer); - __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} - __Pyx_AddTraceback("nms.cpu_nms.cpu_nms", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - goto __pyx_L2; - __pyx_L0:; - __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_areas.rcbuffer->pybuffer); - __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_dets.rcbuffer->pybuffer); - __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_order.rcbuffer->pybuffer); - __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_scores.rcbuffer->pybuffer); - __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_suppressed.rcbuffer->pybuffer); - __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x1.rcbuffer->pybuffer); - __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x2.rcbuffer->pybuffer); - __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y1.rcbuffer->pybuffer); - __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y2.rcbuffer->pybuffer); - __pyx_L2:; - __Pyx_XDECREF((PyObject *)__pyx_v_x1); - __Pyx_XDECREF((PyObject *)__pyx_v_y1); - __Pyx_XDECREF((PyObject 
*)__pyx_v_x2); - __Pyx_XDECREF((PyObject *)__pyx_v_y2); - __Pyx_XDECREF((PyObject *)__pyx_v_scores); - __Pyx_XDECREF((PyObject *)__pyx_v_areas); - __Pyx_XDECREF((PyObject *)__pyx_v_order); - __Pyx_XDECREF((PyObject *)__pyx_v_suppressed); - __Pyx_XDECREF(__pyx_v_keep); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "nms/cpu_nms.pyx":70 - * return keep - * - * def cpu_soft_nms(np.ndarray[float, ndim=2] boxes, float sigma=0.5, float Nt=0.3, float threshold=0.001, unsigned int method=0): # <<<<<<<<<<<<<< - * cdef unsigned int N = boxes.shape[0] - * cdef float iw, ih, box_area - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_3nms_7cpu_nms_3cpu_soft_nms(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static PyMethodDef __pyx_mdef_3nms_7cpu_nms_3cpu_soft_nms = {"cpu_soft_nms", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_3nms_7cpu_nms_3cpu_soft_nms, METH_VARARGS|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_3nms_7cpu_nms_3cpu_soft_nms(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyArrayObject *__pyx_v_boxes = 0; - float __pyx_v_sigma; - float __pyx_v_Nt; - float __pyx_v_threshold; - unsigned int __pyx_v_method; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("cpu_soft_nms (wrapper)", 0); - { - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_boxes,&__pyx_n_s_sigma,&__pyx_n_s_Nt,&__pyx_n_s_threshold,&__pyx_n_s_method,0}; - PyObject* values[5] = {0,0,0,0,0}; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args; - const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); - switch (pos_args) { - case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); - CYTHON_FALLTHROUGH; - case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); - CYTHON_FALLTHROUGH; - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = 
PyTuple_GET_ITEM(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = PyDict_Size(__pyx_kwds); - switch (pos_args) { - case 0: - if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_boxes)) != 0)) kw_args--; - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (kw_args > 0) { - PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_sigma); - if (value) { values[1] = value; kw_args--; } - } - CYTHON_FALLTHROUGH; - case 2: - if (kw_args > 0) { - PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_Nt); - if (value) { values[2] = value; kw_args--; } - } - CYTHON_FALLTHROUGH; - case 3: - if (kw_args > 0) { - PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_threshold); - if (value) { values[3] = value; kw_args--; } - } - CYTHON_FALLTHROUGH; - case 4: - if (kw_args > 0) { - PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_method); - if (value) { values[4] = value; kw_args--; } - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "cpu_soft_nms") < 0)) __PYX_ERR(0, 70, __pyx_L3_error) - } - } else { - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); - CYTHON_FALLTHROUGH; - case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); - CYTHON_FALLTHROUGH; - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - __pyx_v_boxes = ((PyArrayObject *)values[0]); - if (values[1]) { - __pyx_v_sigma = __pyx_PyFloat_AsFloat(values[1]); if (unlikely((__pyx_v_sigma == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 70, __pyx_L3_error) - } else 
{ - __pyx_v_sigma = ((float)0.5); - } - if (values[2]) { - __pyx_v_Nt = __pyx_PyFloat_AsFloat(values[2]); if (unlikely((__pyx_v_Nt == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 70, __pyx_L3_error) - } else { - __pyx_v_Nt = ((float)0.3); - } - if (values[3]) { - __pyx_v_threshold = __pyx_PyFloat_AsFloat(values[3]); if (unlikely((__pyx_v_threshold == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 70, __pyx_L3_error) - } else { - __pyx_v_threshold = ((float)0.001); - } - if (values[4]) { - __pyx_v_method = __Pyx_PyInt_As_unsigned_int(values[4]); if (unlikely((__pyx_v_method == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 70, __pyx_L3_error) - } else { - __pyx_v_method = ((unsigned int)0); - } - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("cpu_soft_nms", 0, 1, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 70, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("nms.cpu_nms.cpu_soft_nms", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_boxes), __pyx_ptype_5numpy_ndarray, 1, "boxes", 0))) __PYX_ERR(0, 70, __pyx_L1_error) - __pyx_r = __pyx_pf_3nms_7cpu_nms_2cpu_soft_nms(__pyx_self, __pyx_v_boxes, __pyx_v_sigma, __pyx_v_Nt, __pyx_v_threshold, __pyx_v_method); - - /* function exit code */ - goto __pyx_L0; - __pyx_L1_error:; - __pyx_r = NULL; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_3nms_7cpu_nms_2cpu_soft_nms(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_boxes, float __pyx_v_sigma, float __pyx_v_Nt, float __pyx_v_threshold, unsigned int __pyx_v_method) { - unsigned int __pyx_v_N; - float __pyx_v_iw; - float __pyx_v_ih; - float __pyx_v_ua; - int __pyx_v_pos; - float __pyx_v_maxscore; - int __pyx_v_maxpos; - float __pyx_v_x1; - float __pyx_v_x2; - float __pyx_v_y1; - float __pyx_v_y2; - float 
__pyx_v_tx1; - float __pyx_v_tx2; - float __pyx_v_ty1; - float __pyx_v_ty2; - float __pyx_v_ts; - float __pyx_v_area; - float __pyx_v_weight; - float __pyx_v_ov; - PyObject *__pyx_v_i = NULL; - CYTHON_UNUSED PyObject *__pyx_v_s = NULL; - PyObject *__pyx_v_keep = NULL; - __Pyx_LocalBuf_ND __pyx_pybuffernd_boxes; - __Pyx_Buffer __pyx_pybuffer_boxes; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - Py_ssize_t __pyx_t_3; - PyObject *(*__pyx_t_4)(PyObject *); - PyObject *__pyx_t_5 = NULL; - float __pyx_t_6; - int __pyx_t_7; - int __pyx_t_8; - Py_ssize_t __pyx_t_9; - Py_ssize_t __pyx_t_10; - PyObject *__pyx_t_11 = NULL; - PyObject *__pyx_t_12 = NULL; - Py_ssize_t __pyx_t_13; - Py_ssize_t __pyx_t_14; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("cpu_soft_nms", 0); - __pyx_pybuffer_boxes.pybuffer.buf = NULL; - __pyx_pybuffer_boxes.refcount = 0; - __pyx_pybuffernd_boxes.data = NULL; - __pyx_pybuffernd_boxes.rcbuffer = &__pyx_pybuffer_boxes; - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_boxes.rcbuffer->pybuffer, (PyObject*)__pyx_v_boxes, &__Pyx_TypeInfo_float, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 70, __pyx_L1_error) - } - __pyx_pybuffernd_boxes.diminfo[0].strides = __pyx_pybuffernd_boxes.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_boxes.diminfo[0].shape = __pyx_pybuffernd_boxes.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_boxes.diminfo[1].strides = __pyx_pybuffernd_boxes.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_boxes.diminfo[1].shape = __pyx_pybuffernd_boxes.rcbuffer->pybuffer.shape[1]; - - /* "nms/cpu_nms.pyx":71 - * - * def cpu_soft_nms(np.ndarray[float, ndim=2] boxes, float sigma=0.5, float Nt=0.3, float threshold=0.001, unsigned int method=0): - * cdef unsigned int N = boxes.shape[0] # <<<<<<<<<<<<<< - * cdef float 
iw, ih, box_area - * cdef float ua - */ - __pyx_v_N = (__pyx_v_boxes->dimensions[0]); - - /* "nms/cpu_nms.pyx":74 - * cdef float iw, ih, box_area - * cdef float ua - * cdef int pos = 0 # <<<<<<<<<<<<<< - * cdef float maxscore = 0 - * cdef int maxpos = 0 - */ - __pyx_v_pos = 0; - - /* "nms/cpu_nms.pyx":75 - * cdef float ua - * cdef int pos = 0 - * cdef float maxscore = 0 # <<<<<<<<<<<<<< - * cdef int maxpos = 0 - * cdef float x1,x2,y1,y2,tx1,tx2,ty1,ty2,ts,area,weight,ov - */ - __pyx_v_maxscore = 0.0; - - /* "nms/cpu_nms.pyx":76 - * cdef int pos = 0 - * cdef float maxscore = 0 - * cdef int maxpos = 0 # <<<<<<<<<<<<<< - * cdef float x1,x2,y1,y2,tx1,tx2,ty1,ty2,ts,area,weight,ov - * - */ - __pyx_v_maxpos = 0; - - /* "nms/cpu_nms.pyx":79 - * cdef float x1,x2,y1,y2,tx1,tx2,ty1,ty2,ts,area,weight,ov - * - * for i in range(N): # <<<<<<<<<<<<<< - * maxscore = boxes[i, 4] - * maxpos = i - */ - __pyx_t_1 = __Pyx_PyInt_From_unsigned_int(__pyx_v_N); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 79, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_range, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 79, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (likely(PyList_CheckExact(__pyx_t_2)) || PyTuple_CheckExact(__pyx_t_2)) { - __pyx_t_1 = __pyx_t_2; __Pyx_INCREF(__pyx_t_1); __pyx_t_3 = 0; - __pyx_t_4 = NULL; - } else { - __pyx_t_3 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 79, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_4 = Py_TYPE(__pyx_t_1)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 79, __pyx_L1_error) - } - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - for (;;) { - if (likely(!__pyx_t_4)) { - if (likely(PyList_CheckExact(__pyx_t_1))) { - if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_1)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_2 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_3); __Pyx_INCREF(__pyx_t_2); 
__pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(0, 79, __pyx_L1_error) - #else - __pyx_t_2 = PySequence_ITEM(__pyx_t_1, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 79, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - #endif - } else { - if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_1)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_2 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_3); __Pyx_INCREF(__pyx_t_2); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(0, 79, __pyx_L1_error) - #else - __pyx_t_2 = PySequence_ITEM(__pyx_t_1, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 79, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - #endif - } - } else { - __pyx_t_2 = __pyx_t_4(__pyx_t_1); - if (unlikely(!__pyx_t_2)) { - PyObject* exc_type = PyErr_Occurred(); - if (exc_type) { - if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); - else __PYX_ERR(0, 79, __pyx_L1_error) - } - break; - } - __Pyx_GOTREF(__pyx_t_2); - } - __Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_2); - __pyx_t_2 = 0; - - /* "nms/cpu_nms.pyx":80 - * - * for i in range(N): - * maxscore = boxes[i, 4] # <<<<<<<<<<<<<< - * maxpos = i - * - */ - __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 80, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_INCREF(__pyx_v_i); - __Pyx_GIVEREF(__pyx_v_i); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_i); - __Pyx_INCREF(__pyx_int_4); - __Pyx_GIVEREF(__pyx_int_4); - PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_int_4); - __pyx_t_5 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_boxes), __pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 80, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_6 = __pyx_PyFloat_AsFloat(__pyx_t_5); if (unlikely((__pyx_t_6 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 80, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_v_maxscore = __pyx_t_6; - - /* "nms/cpu_nms.pyx":81 - * for i in range(N): - * maxscore = 
boxes[i, 4] - * maxpos = i # <<<<<<<<<<<<<< - * - * tx1 = boxes[i,0] - */ - __pyx_t_7 = __Pyx_PyInt_As_int(__pyx_v_i); if (unlikely((__pyx_t_7 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 81, __pyx_L1_error) - __pyx_v_maxpos = __pyx_t_7; - - /* "nms/cpu_nms.pyx":83 - * maxpos = i - * - * tx1 = boxes[i,0] # <<<<<<<<<<<<<< - * ty1 = boxes[i,1] - * tx2 = boxes[i,2] - */ - __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 83, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_INCREF(__pyx_v_i); - __Pyx_GIVEREF(__pyx_v_i); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_i); - __Pyx_INCREF(__pyx_int_0); - __Pyx_GIVEREF(__pyx_int_0); - PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_int_0); - __pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_boxes), __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 83, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __pyx_PyFloat_AsFloat(__pyx_t_2); if (unlikely((__pyx_t_6 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 83, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_v_tx1 = __pyx_t_6; - - /* "nms/cpu_nms.pyx":84 - * - * tx1 = boxes[i,0] - * ty1 = boxes[i,1] # <<<<<<<<<<<<<< - * tx2 = boxes[i,2] - * ty2 = boxes[i,3] - */ - __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 84, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_INCREF(__pyx_v_i); - __Pyx_GIVEREF(__pyx_v_i); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_i); - __Pyx_INCREF(__pyx_int_1); - __Pyx_GIVEREF(__pyx_int_1); - PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_int_1); - __pyx_t_5 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_boxes), __pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 84, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_6 = __pyx_PyFloat_AsFloat(__pyx_t_5); if (unlikely((__pyx_t_6 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 84, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_v_ty1 = __pyx_t_6; - - /* 
"nms/cpu_nms.pyx":85 - * tx1 = boxes[i,0] - * ty1 = boxes[i,1] - * tx2 = boxes[i,2] # <<<<<<<<<<<<<< - * ty2 = boxes[i,3] - * ts = boxes[i,4] - */ - __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 85, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_INCREF(__pyx_v_i); - __Pyx_GIVEREF(__pyx_v_i); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_i); - __Pyx_INCREF(__pyx_int_2); - __Pyx_GIVEREF(__pyx_int_2); - PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_int_2); - __pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_boxes), __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 85, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __pyx_PyFloat_AsFloat(__pyx_t_2); if (unlikely((__pyx_t_6 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 85, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_v_tx2 = __pyx_t_6; - - /* "nms/cpu_nms.pyx":86 - * ty1 = boxes[i,1] - * tx2 = boxes[i,2] - * ty2 = boxes[i,3] # <<<<<<<<<<<<<< - * ts = boxes[i,4] - * - */ - __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 86, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_INCREF(__pyx_v_i); - __Pyx_GIVEREF(__pyx_v_i); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_i); - __Pyx_INCREF(__pyx_int_3); - __Pyx_GIVEREF(__pyx_int_3); - PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_int_3); - __pyx_t_5 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_boxes), __pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 86, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_6 = __pyx_PyFloat_AsFloat(__pyx_t_5); if (unlikely((__pyx_t_6 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 86, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_v_ty2 = __pyx_t_6; - - /* "nms/cpu_nms.pyx":87 - * tx2 = boxes[i,2] - * ty2 = boxes[i,3] - * ts = boxes[i,4] # <<<<<<<<<<<<<< - * - * pos = i + 1 - */ - __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 87, __pyx_L1_error) - 
__Pyx_GOTREF(__pyx_t_5); - __Pyx_INCREF(__pyx_v_i); - __Pyx_GIVEREF(__pyx_v_i); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_i); - __Pyx_INCREF(__pyx_int_4); - __Pyx_GIVEREF(__pyx_int_4); - PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_int_4); - __pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_boxes), __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 87, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __pyx_PyFloat_AsFloat(__pyx_t_2); if (unlikely((__pyx_t_6 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 87, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_v_ts = __pyx_t_6; - - /* "nms/cpu_nms.pyx":89 - * ts = boxes[i,4] - * - * pos = i + 1 # <<<<<<<<<<<<<< - * # get max box - * while pos < N: - */ - __pyx_t_2 = __Pyx_PyInt_AddObjC(__pyx_v_i, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 89, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_7 = __Pyx_PyInt_As_int(__pyx_t_2); if (unlikely((__pyx_t_7 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 89, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_v_pos = __pyx_t_7; - - /* "nms/cpu_nms.pyx":91 - * pos = i + 1 - * # get max box - * while pos < N: # <<<<<<<<<<<<<< - * if maxscore < boxes[pos, 4]: - * maxscore = boxes[pos, 4] - */ - while (1) { - __pyx_t_8 = ((__pyx_v_pos < __pyx_v_N) != 0); - if (!__pyx_t_8) break; - - /* "nms/cpu_nms.pyx":92 - * # get max box - * while pos < N: - * if maxscore < boxes[pos, 4]: # <<<<<<<<<<<<<< - * maxscore = boxes[pos, 4] - * maxpos = pos - */ - __pyx_t_9 = __pyx_v_pos; - __pyx_t_10 = 4; - __pyx_t_7 = -1; - if (__pyx_t_9 < 0) { - __pyx_t_9 += __pyx_pybuffernd_boxes.diminfo[0].shape; - if (unlikely(__pyx_t_9 < 0)) __pyx_t_7 = 0; - } else if (unlikely(__pyx_t_9 >= __pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_7 = 0; - if (__pyx_t_10 < 0) { - __pyx_t_10 += __pyx_pybuffernd_boxes.diminfo[1].shape; - if (unlikely(__pyx_t_10 < 0)) __pyx_t_7 = 1; - } else if (unlikely(__pyx_t_10 >= 
__pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_7 = 1; - if (unlikely(__pyx_t_7 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_7); - __PYX_ERR(0, 92, __pyx_L1_error) - } - __pyx_t_8 = ((__pyx_v_maxscore < (*__Pyx_BufPtrStrided2d(float *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_10, __pyx_pybuffernd_boxes.diminfo[1].strides))) != 0); - if (__pyx_t_8) { - - /* "nms/cpu_nms.pyx":93 - * while pos < N: - * if maxscore < boxes[pos, 4]: - * maxscore = boxes[pos, 4] # <<<<<<<<<<<<<< - * maxpos = pos - * pos = pos + 1 - */ - __pyx_t_10 = __pyx_v_pos; - __pyx_t_9 = 4; - __pyx_t_7 = -1; - if (__pyx_t_10 < 0) { - __pyx_t_10 += __pyx_pybuffernd_boxes.diminfo[0].shape; - if (unlikely(__pyx_t_10 < 0)) __pyx_t_7 = 0; - } else if (unlikely(__pyx_t_10 >= __pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_7 = 0; - if (__pyx_t_9 < 0) { - __pyx_t_9 += __pyx_pybuffernd_boxes.diminfo[1].shape; - if (unlikely(__pyx_t_9 < 0)) __pyx_t_7 = 1; - } else if (unlikely(__pyx_t_9 >= __pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_7 = 1; - if (unlikely(__pyx_t_7 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_7); - __PYX_ERR(0, 93, __pyx_L1_error) - } - __pyx_v_maxscore = (*__Pyx_BufPtrStrided2d(float *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_9, __pyx_pybuffernd_boxes.diminfo[1].strides)); - - /* "nms/cpu_nms.pyx":94 - * if maxscore < boxes[pos, 4]: - * maxscore = boxes[pos, 4] - * maxpos = pos # <<<<<<<<<<<<<< - * pos = pos + 1 - * - */ - __pyx_v_maxpos = __pyx_v_pos; - - /* "nms/cpu_nms.pyx":92 - * # get max box - * while pos < N: - * if maxscore < boxes[pos, 4]: # <<<<<<<<<<<<<< - * maxscore = boxes[pos, 4] - * maxpos = pos - */ - } - - /* "nms/cpu_nms.pyx":95 - * maxscore = boxes[pos, 4] - * maxpos = pos - * pos = pos + 1 # <<<<<<<<<<<<<< - * - * # add max box as a detection - */ - __pyx_v_pos = (__pyx_v_pos + 1); - } - - /* "nms/cpu_nms.pyx":98 - * - * 
# add max box as a detection - * boxes[i,0] = boxes[maxpos,0] # <<<<<<<<<<<<<< - * boxes[i,1] = boxes[maxpos,1] - * boxes[i,2] = boxes[maxpos,2] - */ - __pyx_t_9 = __pyx_v_maxpos; - __pyx_t_10 = 0; - __pyx_t_7 = -1; - if (__pyx_t_9 < 0) { - __pyx_t_9 += __pyx_pybuffernd_boxes.diminfo[0].shape; - if (unlikely(__pyx_t_9 < 0)) __pyx_t_7 = 0; - } else if (unlikely(__pyx_t_9 >= __pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_7 = 0; - if (__pyx_t_10 < 0) { - __pyx_t_10 += __pyx_pybuffernd_boxes.diminfo[1].shape; - if (unlikely(__pyx_t_10 < 0)) __pyx_t_7 = 1; - } else if (unlikely(__pyx_t_10 >= __pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_7 = 1; - if (unlikely(__pyx_t_7 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_7); - __PYX_ERR(0, 98, __pyx_L1_error) - } - __pyx_t_2 = PyFloat_FromDouble((*__Pyx_BufPtrStrided2d(float *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_10, __pyx_pybuffernd_boxes.diminfo[1].strides))); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 98, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 98, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_INCREF(__pyx_v_i); - __Pyx_GIVEREF(__pyx_v_i); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_i); - __Pyx_INCREF(__pyx_int_0); - __Pyx_GIVEREF(__pyx_int_0); - PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_int_0); - if (unlikely(PyObject_SetItem(((PyObject *)__pyx_v_boxes), __pyx_t_5, __pyx_t_2) < 0)) __PYX_ERR(0, 98, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "nms/cpu_nms.pyx":99 - * # add max box as a detection - * boxes[i,0] = boxes[maxpos,0] - * boxes[i,1] = boxes[maxpos,1] # <<<<<<<<<<<<<< - * boxes[i,2] = boxes[maxpos,2] - * boxes[i,3] = boxes[maxpos,3] - */ - __pyx_t_10 = __pyx_v_maxpos; - __pyx_t_9 = 1; - __pyx_t_7 = -1; - if (__pyx_t_10 < 0) { - __pyx_t_10 += __pyx_pybuffernd_boxes.diminfo[0].shape; - if (unlikely(__pyx_t_10 < 
0)) __pyx_t_7 = 0; - } else if (unlikely(__pyx_t_10 >= __pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_7 = 0; - if (__pyx_t_9 < 0) { - __pyx_t_9 += __pyx_pybuffernd_boxes.diminfo[1].shape; - if (unlikely(__pyx_t_9 < 0)) __pyx_t_7 = 1; - } else if (unlikely(__pyx_t_9 >= __pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_7 = 1; - if (unlikely(__pyx_t_7 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_7); - __PYX_ERR(0, 99, __pyx_L1_error) - } - __pyx_t_2 = PyFloat_FromDouble((*__Pyx_BufPtrStrided2d(float *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_9, __pyx_pybuffernd_boxes.diminfo[1].strides))); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 99, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 99, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_INCREF(__pyx_v_i); - __Pyx_GIVEREF(__pyx_v_i); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_i); - __Pyx_INCREF(__pyx_int_1); - __Pyx_GIVEREF(__pyx_int_1); - PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_int_1); - if (unlikely(PyObject_SetItem(((PyObject *)__pyx_v_boxes), __pyx_t_5, __pyx_t_2) < 0)) __PYX_ERR(0, 99, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "nms/cpu_nms.pyx":100 - * boxes[i,0] = boxes[maxpos,0] - * boxes[i,1] = boxes[maxpos,1] - * boxes[i,2] = boxes[maxpos,2] # <<<<<<<<<<<<<< - * boxes[i,3] = boxes[maxpos,3] - * boxes[i,4] = boxes[maxpos,4] - */ - __pyx_t_9 = __pyx_v_maxpos; - __pyx_t_10 = 2; - __pyx_t_7 = -1; - if (__pyx_t_9 < 0) { - __pyx_t_9 += __pyx_pybuffernd_boxes.diminfo[0].shape; - if (unlikely(__pyx_t_9 < 0)) __pyx_t_7 = 0; - } else if (unlikely(__pyx_t_9 >= __pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_7 = 0; - if (__pyx_t_10 < 0) { - __pyx_t_10 += __pyx_pybuffernd_boxes.diminfo[1].shape; - if (unlikely(__pyx_t_10 < 0)) __pyx_t_7 = 1; - } else if (unlikely(__pyx_t_10 >= __pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_7 
= 1; - if (unlikely(__pyx_t_7 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_7); - __PYX_ERR(0, 100, __pyx_L1_error) - } - __pyx_t_2 = PyFloat_FromDouble((*__Pyx_BufPtrStrided2d(float *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_10, __pyx_pybuffernd_boxes.diminfo[1].strides))); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 100, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 100, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_INCREF(__pyx_v_i); - __Pyx_GIVEREF(__pyx_v_i); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_i); - __Pyx_INCREF(__pyx_int_2); - __Pyx_GIVEREF(__pyx_int_2); - PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_int_2); - if (unlikely(PyObject_SetItem(((PyObject *)__pyx_v_boxes), __pyx_t_5, __pyx_t_2) < 0)) __PYX_ERR(0, 100, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "nms/cpu_nms.pyx":101 - * boxes[i,1] = boxes[maxpos,1] - * boxes[i,2] = boxes[maxpos,2] - * boxes[i,3] = boxes[maxpos,3] # <<<<<<<<<<<<<< - * boxes[i,4] = boxes[maxpos,4] - * - */ - __pyx_t_10 = __pyx_v_maxpos; - __pyx_t_9 = 3; - __pyx_t_7 = -1; - if (__pyx_t_10 < 0) { - __pyx_t_10 += __pyx_pybuffernd_boxes.diminfo[0].shape; - if (unlikely(__pyx_t_10 < 0)) __pyx_t_7 = 0; - } else if (unlikely(__pyx_t_10 >= __pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_7 = 0; - if (__pyx_t_9 < 0) { - __pyx_t_9 += __pyx_pybuffernd_boxes.diminfo[1].shape; - if (unlikely(__pyx_t_9 < 0)) __pyx_t_7 = 1; - } else if (unlikely(__pyx_t_9 >= __pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_7 = 1; - if (unlikely(__pyx_t_7 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_7); - __PYX_ERR(0, 101, __pyx_L1_error) - } - __pyx_t_2 = PyFloat_FromDouble((*__Pyx_BufPtrStrided2d(float *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_9, __pyx_pybuffernd_boxes.diminfo[1].strides))); if 
(unlikely(!__pyx_t_2)) __PYX_ERR(0, 101, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 101, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_INCREF(__pyx_v_i); - __Pyx_GIVEREF(__pyx_v_i); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_i); - __Pyx_INCREF(__pyx_int_3); - __Pyx_GIVEREF(__pyx_int_3); - PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_int_3); - if (unlikely(PyObject_SetItem(((PyObject *)__pyx_v_boxes), __pyx_t_5, __pyx_t_2) < 0)) __PYX_ERR(0, 101, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "nms/cpu_nms.pyx":102 - * boxes[i,2] = boxes[maxpos,2] - * boxes[i,3] = boxes[maxpos,3] - * boxes[i,4] = boxes[maxpos,4] # <<<<<<<<<<<<<< - * - * # swap ith box with position of max box - */ - __pyx_t_9 = __pyx_v_maxpos; - __pyx_t_10 = 4; - __pyx_t_7 = -1; - if (__pyx_t_9 < 0) { - __pyx_t_9 += __pyx_pybuffernd_boxes.diminfo[0].shape; - if (unlikely(__pyx_t_9 < 0)) __pyx_t_7 = 0; - } else if (unlikely(__pyx_t_9 >= __pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_7 = 0; - if (__pyx_t_10 < 0) { - __pyx_t_10 += __pyx_pybuffernd_boxes.diminfo[1].shape; - if (unlikely(__pyx_t_10 < 0)) __pyx_t_7 = 1; - } else if (unlikely(__pyx_t_10 >= __pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_7 = 1; - if (unlikely(__pyx_t_7 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_7); - __PYX_ERR(0, 102, __pyx_L1_error) - } - __pyx_t_2 = PyFloat_FromDouble((*__Pyx_BufPtrStrided2d(float *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_10, __pyx_pybuffernd_boxes.diminfo[1].strides))); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 102, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 102, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_INCREF(__pyx_v_i); - __Pyx_GIVEREF(__pyx_v_i); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_i); - __Pyx_INCREF(__pyx_int_4); - 
__Pyx_GIVEREF(__pyx_int_4); - PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_int_4); - if (unlikely(PyObject_SetItem(((PyObject *)__pyx_v_boxes), __pyx_t_5, __pyx_t_2) < 0)) __PYX_ERR(0, 102, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "nms/cpu_nms.pyx":105 - * - * # swap ith box with position of max box - * boxes[maxpos,0] = tx1 # <<<<<<<<<<<<<< - * boxes[maxpos,1] = ty1 - * boxes[maxpos,2] = tx2 - */ - __pyx_t_10 = __pyx_v_maxpos; - __pyx_t_9 = 0; - __pyx_t_7 = -1; - if (__pyx_t_10 < 0) { - __pyx_t_10 += __pyx_pybuffernd_boxes.diminfo[0].shape; - if (unlikely(__pyx_t_10 < 0)) __pyx_t_7 = 0; - } else if (unlikely(__pyx_t_10 >= __pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_7 = 0; - if (__pyx_t_9 < 0) { - __pyx_t_9 += __pyx_pybuffernd_boxes.diminfo[1].shape; - if (unlikely(__pyx_t_9 < 0)) __pyx_t_7 = 1; - } else if (unlikely(__pyx_t_9 >= __pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_7 = 1; - if (unlikely(__pyx_t_7 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_7); - __PYX_ERR(0, 105, __pyx_L1_error) - } - *__Pyx_BufPtrStrided2d(float *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_9, __pyx_pybuffernd_boxes.diminfo[1].strides) = __pyx_v_tx1; - - /* "nms/cpu_nms.pyx":106 - * # swap ith box with position of max box - * boxes[maxpos,0] = tx1 - * boxes[maxpos,1] = ty1 # <<<<<<<<<<<<<< - * boxes[maxpos,2] = tx2 - * boxes[maxpos,3] = ty2 - */ - __pyx_t_9 = __pyx_v_maxpos; - __pyx_t_10 = 1; - __pyx_t_7 = -1; - if (__pyx_t_9 < 0) { - __pyx_t_9 += __pyx_pybuffernd_boxes.diminfo[0].shape; - if (unlikely(__pyx_t_9 < 0)) __pyx_t_7 = 0; - } else if (unlikely(__pyx_t_9 >= __pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_7 = 0; - if (__pyx_t_10 < 0) { - __pyx_t_10 += __pyx_pybuffernd_boxes.diminfo[1].shape; - if (unlikely(__pyx_t_10 < 0)) __pyx_t_7 = 1; - } else if (unlikely(__pyx_t_10 >= __pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_7 = 1; - if 
(unlikely(__pyx_t_7 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_7); - __PYX_ERR(0, 106, __pyx_L1_error) - } - *__Pyx_BufPtrStrided2d(float *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_10, __pyx_pybuffernd_boxes.diminfo[1].strides) = __pyx_v_ty1; - - /* "nms/cpu_nms.pyx":107 - * boxes[maxpos,0] = tx1 - * boxes[maxpos,1] = ty1 - * boxes[maxpos,2] = tx2 # <<<<<<<<<<<<<< - * boxes[maxpos,3] = ty2 - * boxes[maxpos,4] = ts - */ - __pyx_t_10 = __pyx_v_maxpos; - __pyx_t_9 = 2; - __pyx_t_7 = -1; - if (__pyx_t_10 < 0) { - __pyx_t_10 += __pyx_pybuffernd_boxes.diminfo[0].shape; - if (unlikely(__pyx_t_10 < 0)) __pyx_t_7 = 0; - } else if (unlikely(__pyx_t_10 >= __pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_7 = 0; - if (__pyx_t_9 < 0) { - __pyx_t_9 += __pyx_pybuffernd_boxes.diminfo[1].shape; - if (unlikely(__pyx_t_9 < 0)) __pyx_t_7 = 1; - } else if (unlikely(__pyx_t_9 >= __pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_7 = 1; - if (unlikely(__pyx_t_7 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_7); - __PYX_ERR(0, 107, __pyx_L1_error) - } - *__Pyx_BufPtrStrided2d(float *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_9, __pyx_pybuffernd_boxes.diminfo[1].strides) = __pyx_v_tx2; - - /* "nms/cpu_nms.pyx":108 - * boxes[maxpos,1] = ty1 - * boxes[maxpos,2] = tx2 - * boxes[maxpos,3] = ty2 # <<<<<<<<<<<<<< - * boxes[maxpos,4] = ts - * - */ - __pyx_t_9 = __pyx_v_maxpos; - __pyx_t_10 = 3; - __pyx_t_7 = -1; - if (__pyx_t_9 < 0) { - __pyx_t_9 += __pyx_pybuffernd_boxes.diminfo[0].shape; - if (unlikely(__pyx_t_9 < 0)) __pyx_t_7 = 0; - } else if (unlikely(__pyx_t_9 >= __pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_7 = 0; - if (__pyx_t_10 < 0) { - __pyx_t_10 += __pyx_pybuffernd_boxes.diminfo[1].shape; - if (unlikely(__pyx_t_10 < 0)) __pyx_t_7 = 1; - } else if (unlikely(__pyx_t_10 >= __pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_7 = 1; - if 
(unlikely(__pyx_t_7 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_7); - __PYX_ERR(0, 108, __pyx_L1_error) - } - *__Pyx_BufPtrStrided2d(float *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_10, __pyx_pybuffernd_boxes.diminfo[1].strides) = __pyx_v_ty2; - - /* "nms/cpu_nms.pyx":109 - * boxes[maxpos,2] = tx2 - * boxes[maxpos,3] = ty2 - * boxes[maxpos,4] = ts # <<<<<<<<<<<<<< - * - * tx1 = boxes[i,0] - */ - __pyx_t_10 = __pyx_v_maxpos; - __pyx_t_9 = 4; - __pyx_t_7 = -1; - if (__pyx_t_10 < 0) { - __pyx_t_10 += __pyx_pybuffernd_boxes.diminfo[0].shape; - if (unlikely(__pyx_t_10 < 0)) __pyx_t_7 = 0; - } else if (unlikely(__pyx_t_10 >= __pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_7 = 0; - if (__pyx_t_9 < 0) { - __pyx_t_9 += __pyx_pybuffernd_boxes.diminfo[1].shape; - if (unlikely(__pyx_t_9 < 0)) __pyx_t_7 = 1; - } else if (unlikely(__pyx_t_9 >= __pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_7 = 1; - if (unlikely(__pyx_t_7 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_7); - __PYX_ERR(0, 109, __pyx_L1_error) - } - *__Pyx_BufPtrStrided2d(float *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_9, __pyx_pybuffernd_boxes.diminfo[1].strides) = __pyx_v_ts; - - /* "nms/cpu_nms.pyx":111 - * boxes[maxpos,4] = ts - * - * tx1 = boxes[i,0] # <<<<<<<<<<<<<< - * ty1 = boxes[i,1] - * tx2 = boxes[i,2] - */ - __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 111, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_INCREF(__pyx_v_i); - __Pyx_GIVEREF(__pyx_v_i); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_i); - __Pyx_INCREF(__pyx_int_0); - __Pyx_GIVEREF(__pyx_int_0); - PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_int_0); - __pyx_t_5 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_boxes), __pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 111, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_6 = 
__pyx_PyFloat_AsFloat(__pyx_t_5); if (unlikely((__pyx_t_6 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 111, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_v_tx1 = __pyx_t_6; - - /* "nms/cpu_nms.pyx":112 - * - * tx1 = boxes[i,0] - * ty1 = boxes[i,1] # <<<<<<<<<<<<<< - * tx2 = boxes[i,2] - * ty2 = boxes[i,3] - */ - __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 112, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_INCREF(__pyx_v_i); - __Pyx_GIVEREF(__pyx_v_i); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_i); - __Pyx_INCREF(__pyx_int_1); - __Pyx_GIVEREF(__pyx_int_1); - PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_int_1); - __pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_boxes), __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 112, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __pyx_PyFloat_AsFloat(__pyx_t_2); if (unlikely((__pyx_t_6 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 112, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_v_ty1 = __pyx_t_6; - - /* "nms/cpu_nms.pyx":113 - * tx1 = boxes[i,0] - * ty1 = boxes[i,1] - * tx2 = boxes[i,2] # <<<<<<<<<<<<<< - * ty2 = boxes[i,3] - * ts = boxes[i,4] - */ - __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 113, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_INCREF(__pyx_v_i); - __Pyx_GIVEREF(__pyx_v_i); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_i); - __Pyx_INCREF(__pyx_int_2); - __Pyx_GIVEREF(__pyx_int_2); - PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_int_2); - __pyx_t_5 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_boxes), __pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 113, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_6 = __pyx_PyFloat_AsFloat(__pyx_t_5); if (unlikely((__pyx_t_6 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 113, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_v_tx2 = __pyx_t_6; - - /* 
"nms/cpu_nms.pyx":114 - * ty1 = boxes[i,1] - * tx2 = boxes[i,2] - * ty2 = boxes[i,3] # <<<<<<<<<<<<<< - * ts = boxes[i,4] - * - */ - __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 114, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_INCREF(__pyx_v_i); - __Pyx_GIVEREF(__pyx_v_i); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_i); - __Pyx_INCREF(__pyx_int_3); - __Pyx_GIVEREF(__pyx_int_3); - PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_int_3); - __pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_boxes), __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 114, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_6 = __pyx_PyFloat_AsFloat(__pyx_t_2); if (unlikely((__pyx_t_6 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 114, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_v_ty2 = __pyx_t_6; - - /* "nms/cpu_nms.pyx":115 - * tx2 = boxes[i,2] - * ty2 = boxes[i,3] - * ts = boxes[i,4] # <<<<<<<<<<<<<< - * - * pos = i + 1 - */ - __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 115, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_INCREF(__pyx_v_i); - __Pyx_GIVEREF(__pyx_v_i); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_i); - __Pyx_INCREF(__pyx_int_4); - __Pyx_GIVEREF(__pyx_int_4); - PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_int_4); - __pyx_t_5 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_boxes), __pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 115, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_6 = __pyx_PyFloat_AsFloat(__pyx_t_5); if (unlikely((__pyx_t_6 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 115, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_v_ts = __pyx_t_6; - - /* "nms/cpu_nms.pyx":117 - * ts = boxes[i,4] - * - * pos = i + 1 # <<<<<<<<<<<<<< - * # NMS iterations, note that N changes if detection boxes fall below threshold - * while pos < N: - */ - __pyx_t_5 = __Pyx_PyInt_AddObjC(__pyx_v_i, __pyx_int_1, 1, 
0, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 117, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_7 = __Pyx_PyInt_As_int(__pyx_t_5); if (unlikely((__pyx_t_7 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 117, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_v_pos = __pyx_t_7; - - /* "nms/cpu_nms.pyx":119 - * pos = i + 1 - * # NMS iterations, note that N changes if detection boxes fall below threshold - * while pos < N: # <<<<<<<<<<<<<< - * x1 = boxes[pos, 0] - * y1 = boxes[pos, 1] - */ - while (1) { - __pyx_t_8 = ((__pyx_v_pos < __pyx_v_N) != 0); - if (!__pyx_t_8) break; - - /* "nms/cpu_nms.pyx":120 - * # NMS iterations, note that N changes if detection boxes fall below threshold - * while pos < N: - * x1 = boxes[pos, 0] # <<<<<<<<<<<<<< - * y1 = boxes[pos, 1] - * x2 = boxes[pos, 2] - */ - __pyx_t_9 = __pyx_v_pos; - __pyx_t_10 = 0; - __pyx_t_7 = -1; - if (__pyx_t_9 < 0) { - __pyx_t_9 += __pyx_pybuffernd_boxes.diminfo[0].shape; - if (unlikely(__pyx_t_9 < 0)) __pyx_t_7 = 0; - } else if (unlikely(__pyx_t_9 >= __pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_7 = 0; - if (__pyx_t_10 < 0) { - __pyx_t_10 += __pyx_pybuffernd_boxes.diminfo[1].shape; - if (unlikely(__pyx_t_10 < 0)) __pyx_t_7 = 1; - } else if (unlikely(__pyx_t_10 >= __pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_7 = 1; - if (unlikely(__pyx_t_7 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_7); - __PYX_ERR(0, 120, __pyx_L1_error) - } - __pyx_v_x1 = (*__Pyx_BufPtrStrided2d(float *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_10, __pyx_pybuffernd_boxes.diminfo[1].strides)); - - /* "nms/cpu_nms.pyx":121 - * while pos < N: - * x1 = boxes[pos, 0] - * y1 = boxes[pos, 1] # <<<<<<<<<<<<<< - * x2 = boxes[pos, 2] - * y2 = boxes[pos, 3] - */ - __pyx_t_10 = __pyx_v_pos; - __pyx_t_9 = 1; - __pyx_t_7 = -1; - if (__pyx_t_10 < 0) { - __pyx_t_10 += __pyx_pybuffernd_boxes.diminfo[0].shape; - if (unlikely(__pyx_t_10 < 0)) __pyx_t_7 = 0; 
- } else if (unlikely(__pyx_t_10 >= __pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_7 = 0; - if (__pyx_t_9 < 0) { - __pyx_t_9 += __pyx_pybuffernd_boxes.diminfo[1].shape; - if (unlikely(__pyx_t_9 < 0)) __pyx_t_7 = 1; - } else if (unlikely(__pyx_t_9 >= __pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_7 = 1; - if (unlikely(__pyx_t_7 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_7); - __PYX_ERR(0, 121, __pyx_L1_error) - } - __pyx_v_y1 = (*__Pyx_BufPtrStrided2d(float *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_9, __pyx_pybuffernd_boxes.diminfo[1].strides)); - - /* "nms/cpu_nms.pyx":122 - * x1 = boxes[pos, 0] - * y1 = boxes[pos, 1] - * x2 = boxes[pos, 2] # <<<<<<<<<<<<<< - * y2 = boxes[pos, 3] - * s = boxes[pos, 4] - */ - __pyx_t_9 = __pyx_v_pos; - __pyx_t_10 = 2; - __pyx_t_7 = -1; - if (__pyx_t_9 < 0) { - __pyx_t_9 += __pyx_pybuffernd_boxes.diminfo[0].shape; - if (unlikely(__pyx_t_9 < 0)) __pyx_t_7 = 0; - } else if (unlikely(__pyx_t_9 >= __pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_7 = 0; - if (__pyx_t_10 < 0) { - __pyx_t_10 += __pyx_pybuffernd_boxes.diminfo[1].shape; - if (unlikely(__pyx_t_10 < 0)) __pyx_t_7 = 1; - } else if (unlikely(__pyx_t_10 >= __pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_7 = 1; - if (unlikely(__pyx_t_7 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_7); - __PYX_ERR(0, 122, __pyx_L1_error) - } - __pyx_v_x2 = (*__Pyx_BufPtrStrided2d(float *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_10, __pyx_pybuffernd_boxes.diminfo[1].strides)); - - /* "nms/cpu_nms.pyx":123 - * y1 = boxes[pos, 1] - * x2 = boxes[pos, 2] - * y2 = boxes[pos, 3] # <<<<<<<<<<<<<< - * s = boxes[pos, 4] - * - */ - __pyx_t_10 = __pyx_v_pos; - __pyx_t_9 = 3; - __pyx_t_7 = -1; - if (__pyx_t_10 < 0) { - __pyx_t_10 += __pyx_pybuffernd_boxes.diminfo[0].shape; - if (unlikely(__pyx_t_10 < 0)) __pyx_t_7 = 0; - } else if (unlikely(__pyx_t_10 >= 
__pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_7 = 0; - if (__pyx_t_9 < 0) { - __pyx_t_9 += __pyx_pybuffernd_boxes.diminfo[1].shape; - if (unlikely(__pyx_t_9 < 0)) __pyx_t_7 = 1; - } else if (unlikely(__pyx_t_9 >= __pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_7 = 1; - if (unlikely(__pyx_t_7 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_7); - __PYX_ERR(0, 123, __pyx_L1_error) - } - __pyx_v_y2 = (*__Pyx_BufPtrStrided2d(float *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_9, __pyx_pybuffernd_boxes.diminfo[1].strides)); - - /* "nms/cpu_nms.pyx":124 - * x2 = boxes[pos, 2] - * y2 = boxes[pos, 3] - * s = boxes[pos, 4] # <<<<<<<<<<<<<< - * - * area = (x2 - x1 + 1) * (y2 - y1 + 1) - */ - __pyx_t_9 = __pyx_v_pos; - __pyx_t_10 = 4; - __pyx_t_7 = -1; - if (__pyx_t_9 < 0) { - __pyx_t_9 += __pyx_pybuffernd_boxes.diminfo[0].shape; - if (unlikely(__pyx_t_9 < 0)) __pyx_t_7 = 0; - } else if (unlikely(__pyx_t_9 >= __pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_7 = 0; - if (__pyx_t_10 < 0) { - __pyx_t_10 += __pyx_pybuffernd_boxes.diminfo[1].shape; - if (unlikely(__pyx_t_10 < 0)) __pyx_t_7 = 1; - } else if (unlikely(__pyx_t_10 >= __pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_7 = 1; - if (unlikely(__pyx_t_7 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_7); - __PYX_ERR(0, 124, __pyx_L1_error) - } - __pyx_t_5 = PyFloat_FromDouble((*__Pyx_BufPtrStrided2d(float *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_10, __pyx_pybuffernd_boxes.diminfo[1].strides))); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 124, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_XDECREF_SET(__pyx_v_s, __pyx_t_5); - __pyx_t_5 = 0; - - /* "nms/cpu_nms.pyx":126 - * s = boxes[pos, 4] - * - * area = (x2 - x1 + 1) * (y2 - y1 + 1) # <<<<<<<<<<<<<< - * iw = (min(tx2, x2) - max(tx1, x1) + 1) - * if iw > 0: - */ - __pyx_v_area = (((__pyx_v_x2 - __pyx_v_x1) + 1.0) * ((__pyx_v_y2 - 
__pyx_v_y1) + 1.0)); - - /* "nms/cpu_nms.pyx":127 - * - * area = (x2 - x1 + 1) * (y2 - y1 + 1) - * iw = (min(tx2, x2) - max(tx1, x1) + 1) # <<<<<<<<<<<<<< - * if iw > 0: - * ih = (min(ty2, y2) - max(ty1, y1) + 1) - */ - __pyx_v_iw = ((__pyx_f_3nms_7cpu_nms_min(__pyx_v_tx2, __pyx_v_x2) - __pyx_f_3nms_7cpu_nms_max(__pyx_v_tx1, __pyx_v_x1)) + 1.0); - - /* "nms/cpu_nms.pyx":128 - * area = (x2 - x1 + 1) * (y2 - y1 + 1) - * iw = (min(tx2, x2) - max(tx1, x1) + 1) - * if iw > 0: # <<<<<<<<<<<<<< - * ih = (min(ty2, y2) - max(ty1, y1) + 1) - * if ih > 0: - */ - __pyx_t_8 = ((__pyx_v_iw > 0.0) != 0); - if (__pyx_t_8) { - - /* "nms/cpu_nms.pyx":129 - * iw = (min(tx2, x2) - max(tx1, x1) + 1) - * if iw > 0: - * ih = (min(ty2, y2) - max(ty1, y1) + 1) # <<<<<<<<<<<<<< - * if ih > 0: - * ua = float((tx2 - tx1 + 1) * (ty2 - ty1 + 1) + area - iw * ih) - */ - __pyx_v_ih = ((__pyx_f_3nms_7cpu_nms_min(__pyx_v_ty2, __pyx_v_y2) - __pyx_f_3nms_7cpu_nms_max(__pyx_v_ty1, __pyx_v_y1)) + 1.0); - - /* "nms/cpu_nms.pyx":130 - * if iw > 0: - * ih = (min(ty2, y2) - max(ty1, y1) + 1) - * if ih > 0: # <<<<<<<<<<<<<< - * ua = float((tx2 - tx1 + 1) * (ty2 - ty1 + 1) + area - iw * ih) - * ov = iw * ih / ua #iou between max box and detection box - */ - __pyx_t_8 = ((__pyx_v_ih > 0.0) != 0); - if (__pyx_t_8) { - - /* "nms/cpu_nms.pyx":131 - * ih = (min(ty2, y2) - max(ty1, y1) + 1) - * if ih > 0: - * ua = float((tx2 - tx1 + 1) * (ty2 - ty1 + 1) + area - iw * ih) # <<<<<<<<<<<<<< - * ov = iw * ih / ua #iou between max box and detection box - * - */ - __pyx_v_ua = ((double)(((((__pyx_v_tx2 - __pyx_v_tx1) + 1.0) * ((__pyx_v_ty2 - __pyx_v_ty1) + 1.0)) + __pyx_v_area) - (__pyx_v_iw * __pyx_v_ih))); - - /* "nms/cpu_nms.pyx":132 - * if ih > 0: - * ua = float((tx2 - tx1 + 1) * (ty2 - ty1 + 1) + area - iw * ih) - * ov = iw * ih / ua #iou between max box and detection box # <<<<<<<<<<<<<< - * - * if method == 1: # linear - */ - __pyx_t_6 = (__pyx_v_iw * __pyx_v_ih); - if (unlikely(__pyx_v_ua == 0)) { - 
PyErr_SetString(PyExc_ZeroDivisionError, "float division"); - __PYX_ERR(0, 132, __pyx_L1_error) - } - __pyx_v_ov = (__pyx_t_6 / __pyx_v_ua); - - /* "nms/cpu_nms.pyx":134 - * ov = iw * ih / ua #iou between max box and detection box - * - * if method == 1: # linear # <<<<<<<<<<<<<< - * if ov > Nt: - * weight = 1 - ov - */ - switch (__pyx_v_method) { - case 1: - - /* "nms/cpu_nms.pyx":135 - * - * if method == 1: # linear - * if ov > Nt: # <<<<<<<<<<<<<< - * weight = 1 - ov - * else: - */ - __pyx_t_8 = ((__pyx_v_ov > __pyx_v_Nt) != 0); - if (__pyx_t_8) { - - /* "nms/cpu_nms.pyx":136 - * if method == 1: # linear - * if ov > Nt: - * weight = 1 - ov # <<<<<<<<<<<<<< - * else: - * weight = 1 - */ - __pyx_v_weight = (1.0 - __pyx_v_ov); - - /* "nms/cpu_nms.pyx":135 - * - * if method == 1: # linear - * if ov > Nt: # <<<<<<<<<<<<<< - * weight = 1 - ov - * else: - */ - goto __pyx_L12; - } - - /* "nms/cpu_nms.pyx":138 - * weight = 1 - ov - * else: - * weight = 1 # <<<<<<<<<<<<<< - * elif method == 2: # gaussian - * weight = np.exp(-(ov * ov)/sigma) - */ - /*else*/ { - __pyx_v_weight = 1.0; - } - __pyx_L12:; - - /* "nms/cpu_nms.pyx":134 - * ov = iw * ih / ua #iou between max box and detection box - * - * if method == 1: # linear # <<<<<<<<<<<<<< - * if ov > Nt: - * weight = 1 - ov - */ - break; - case 2: - - /* "nms/cpu_nms.pyx":140 - * weight = 1 - * elif method == 2: # gaussian - * weight = np.exp(-(ov * ov)/sigma) # <<<<<<<<<<<<<< - * else: # original NMS - * if ov > Nt: - */ - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 140, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_exp); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 140, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_6 = (-(__pyx_v_ov * __pyx_v_ov)); - if (unlikely(__pyx_v_sigma == 0)) { - PyErr_SetString(PyExc_ZeroDivisionError, "float division"); - __PYX_ERR(0, 140, 
__pyx_L1_error) - } - __pyx_t_2 = PyFloat_FromDouble((__pyx_t_6 / __pyx_v_sigma)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 140, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_12 = NULL; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_11))) { - __pyx_t_12 = PyMethod_GET_SELF(__pyx_t_11); - if (likely(__pyx_t_12)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_11); - __Pyx_INCREF(__pyx_t_12); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_11, function); - } - } - __pyx_t_5 = (__pyx_t_12) ? __Pyx_PyObject_Call2Args(__pyx_t_11, __pyx_t_12, __pyx_t_2) : __Pyx_PyObject_CallOneArg(__pyx_t_11, __pyx_t_2); - __Pyx_XDECREF(__pyx_t_12); __pyx_t_12 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 140, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - __pyx_t_6 = __pyx_PyFloat_AsFloat(__pyx_t_5); if (unlikely((__pyx_t_6 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 140, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_v_weight = __pyx_t_6; - - /* "nms/cpu_nms.pyx":139 - * else: - * weight = 1 - * elif method == 2: # gaussian # <<<<<<<<<<<<<< - * weight = np.exp(-(ov * ov)/sigma) - * else: # original NMS - */ - break; - default: - - /* "nms/cpu_nms.pyx":142 - * weight = np.exp(-(ov * ov)/sigma) - * else: # original NMS - * if ov > Nt: # <<<<<<<<<<<<<< - * weight = 0 - * else: - */ - __pyx_t_8 = ((__pyx_v_ov > __pyx_v_Nt) != 0); - if (__pyx_t_8) { - - /* "nms/cpu_nms.pyx":143 - * else: # original NMS - * if ov > Nt: - * weight = 0 # <<<<<<<<<<<<<< - * else: - * weight = 1 - */ - __pyx_v_weight = 0.0; - - /* "nms/cpu_nms.pyx":142 - * weight = np.exp(-(ov * ov)/sigma) - * else: # original NMS - * if ov > Nt: # <<<<<<<<<<<<<< - * weight = 0 - * else: - */ - goto __pyx_L13; - } - - /* "nms/cpu_nms.pyx":145 - * weight = 0 - * else: - * weight = 1 # <<<<<<<<<<<<<< - * - * boxes[pos, 4] = weight*boxes[pos, 4] - */ - /*else*/ { - __pyx_v_weight = 1.0; - } - 
__pyx_L13:; - break; - } - - /* "nms/cpu_nms.pyx":147 - * weight = 1 - * - * boxes[pos, 4] = weight*boxes[pos, 4] # <<<<<<<<<<<<<< - * - * # if box score falls below threshold, discard the box by swapping with last box - */ - __pyx_t_10 = __pyx_v_pos; - __pyx_t_9 = 4; - __pyx_t_7 = -1; - if (__pyx_t_10 < 0) { - __pyx_t_10 += __pyx_pybuffernd_boxes.diminfo[0].shape; - if (unlikely(__pyx_t_10 < 0)) __pyx_t_7 = 0; - } else if (unlikely(__pyx_t_10 >= __pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_7 = 0; - if (__pyx_t_9 < 0) { - __pyx_t_9 += __pyx_pybuffernd_boxes.diminfo[1].shape; - if (unlikely(__pyx_t_9 < 0)) __pyx_t_7 = 1; - } else if (unlikely(__pyx_t_9 >= __pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_7 = 1; - if (unlikely(__pyx_t_7 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_7); - __PYX_ERR(0, 147, __pyx_L1_error) - } - __pyx_t_13 = __pyx_v_pos; - __pyx_t_14 = 4; - __pyx_t_7 = -1; - if (__pyx_t_13 < 0) { - __pyx_t_13 += __pyx_pybuffernd_boxes.diminfo[0].shape; - if (unlikely(__pyx_t_13 < 0)) __pyx_t_7 = 0; - } else if (unlikely(__pyx_t_13 >= __pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_7 = 0; - if (__pyx_t_14 < 0) { - __pyx_t_14 += __pyx_pybuffernd_boxes.diminfo[1].shape; - if (unlikely(__pyx_t_14 < 0)) __pyx_t_7 = 1; - } else if (unlikely(__pyx_t_14 >= __pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_7 = 1; - if (unlikely(__pyx_t_7 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_7); - __PYX_ERR(0, 147, __pyx_L1_error) - } - *__Pyx_BufPtrStrided2d(float *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_13, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_14, __pyx_pybuffernd_boxes.diminfo[1].strides) = (__pyx_v_weight * (*__Pyx_BufPtrStrided2d(float *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_9, __pyx_pybuffernd_boxes.diminfo[1].strides))); - - /* "nms/cpu_nms.pyx":151 - * # if box score falls below threshold, discard the box by swapping with last box - * # update N - * 
if boxes[pos, 4] < threshold: # <<<<<<<<<<<<<< - * boxes[pos,0] = boxes[N-1, 0] - * boxes[pos,1] = boxes[N-1, 1] - */ - __pyx_t_9 = __pyx_v_pos; - __pyx_t_10 = 4; - __pyx_t_7 = -1; - if (__pyx_t_9 < 0) { - __pyx_t_9 += __pyx_pybuffernd_boxes.diminfo[0].shape; - if (unlikely(__pyx_t_9 < 0)) __pyx_t_7 = 0; - } else if (unlikely(__pyx_t_9 >= __pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_7 = 0; - if (__pyx_t_10 < 0) { - __pyx_t_10 += __pyx_pybuffernd_boxes.diminfo[1].shape; - if (unlikely(__pyx_t_10 < 0)) __pyx_t_7 = 1; - } else if (unlikely(__pyx_t_10 >= __pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_7 = 1; - if (unlikely(__pyx_t_7 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_7); - __PYX_ERR(0, 151, __pyx_L1_error) - } - __pyx_t_8 = (((*__Pyx_BufPtrStrided2d(float *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_10, __pyx_pybuffernd_boxes.diminfo[1].strides)) < __pyx_v_threshold) != 0); - if (__pyx_t_8) { - - /* "nms/cpu_nms.pyx":152 - * # update N - * if boxes[pos, 4] < threshold: - * boxes[pos,0] = boxes[N-1, 0] # <<<<<<<<<<<<<< - * boxes[pos,1] = boxes[N-1, 1] - * boxes[pos,2] = boxes[N-1, 2] - */ - __pyx_t_10 = (__pyx_v_N - 1); - __pyx_t_9 = 0; - __pyx_t_7 = -1; - if (__pyx_t_10 < 0) { - __pyx_t_10 += __pyx_pybuffernd_boxes.diminfo[0].shape; - if (unlikely(__pyx_t_10 < 0)) __pyx_t_7 = 0; - } else if (unlikely(__pyx_t_10 >= __pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_7 = 0; - if (__pyx_t_9 < 0) { - __pyx_t_9 += __pyx_pybuffernd_boxes.diminfo[1].shape; - if (unlikely(__pyx_t_9 < 0)) __pyx_t_7 = 1; - } else if (unlikely(__pyx_t_9 >= __pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_7 = 1; - if (unlikely(__pyx_t_7 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_7); - __PYX_ERR(0, 152, __pyx_L1_error) - } - __pyx_t_14 = __pyx_v_pos; - __pyx_t_13 = 0; - __pyx_t_7 = -1; - if (__pyx_t_14 < 0) { - __pyx_t_14 += __pyx_pybuffernd_boxes.diminfo[0].shape; - if (unlikely(__pyx_t_14 < 0)) __pyx_t_7 = 
0; - } else if (unlikely(__pyx_t_14 >= __pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_7 = 0; - if (__pyx_t_13 < 0) { - __pyx_t_13 += __pyx_pybuffernd_boxes.diminfo[1].shape; - if (unlikely(__pyx_t_13 < 0)) __pyx_t_7 = 1; - } else if (unlikely(__pyx_t_13 >= __pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_7 = 1; - if (unlikely(__pyx_t_7 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_7); - __PYX_ERR(0, 152, __pyx_L1_error) - } - *__Pyx_BufPtrStrided2d(float *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_13, __pyx_pybuffernd_boxes.diminfo[1].strides) = (*__Pyx_BufPtrStrided2d(float *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_9, __pyx_pybuffernd_boxes.diminfo[1].strides)); - - /* "nms/cpu_nms.pyx":153 - * if boxes[pos, 4] < threshold: - * boxes[pos,0] = boxes[N-1, 0] - * boxes[pos,1] = boxes[N-1, 1] # <<<<<<<<<<<<<< - * boxes[pos,2] = boxes[N-1, 2] - * boxes[pos,3] = boxes[N-1, 3] - */ - __pyx_t_9 = (__pyx_v_N - 1); - __pyx_t_10 = 1; - __pyx_t_7 = -1; - if (__pyx_t_9 < 0) { - __pyx_t_9 += __pyx_pybuffernd_boxes.diminfo[0].shape; - if (unlikely(__pyx_t_9 < 0)) __pyx_t_7 = 0; - } else if (unlikely(__pyx_t_9 >= __pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_7 = 0; - if (__pyx_t_10 < 0) { - __pyx_t_10 += __pyx_pybuffernd_boxes.diminfo[1].shape; - if (unlikely(__pyx_t_10 < 0)) __pyx_t_7 = 1; - } else if (unlikely(__pyx_t_10 >= __pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_7 = 1; - if (unlikely(__pyx_t_7 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_7); - __PYX_ERR(0, 153, __pyx_L1_error) - } - __pyx_t_13 = __pyx_v_pos; - __pyx_t_14 = 1; - __pyx_t_7 = -1; - if (__pyx_t_13 < 0) { - __pyx_t_13 += __pyx_pybuffernd_boxes.diminfo[0].shape; - if (unlikely(__pyx_t_13 < 0)) __pyx_t_7 = 0; - } else if (unlikely(__pyx_t_13 >= __pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_7 = 0; - if (__pyx_t_14 < 0) { - __pyx_t_14 += 
__pyx_pybuffernd_boxes.diminfo[1].shape; - if (unlikely(__pyx_t_14 < 0)) __pyx_t_7 = 1; - } else if (unlikely(__pyx_t_14 >= __pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_7 = 1; - if (unlikely(__pyx_t_7 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_7); - __PYX_ERR(0, 153, __pyx_L1_error) - } - *__Pyx_BufPtrStrided2d(float *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_13, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_14, __pyx_pybuffernd_boxes.diminfo[1].strides) = (*__Pyx_BufPtrStrided2d(float *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_10, __pyx_pybuffernd_boxes.diminfo[1].strides)); - - /* "nms/cpu_nms.pyx":154 - * boxes[pos,0] = boxes[N-1, 0] - * boxes[pos,1] = boxes[N-1, 1] - * boxes[pos,2] = boxes[N-1, 2] # <<<<<<<<<<<<<< - * boxes[pos,3] = boxes[N-1, 3] - * boxes[pos,4] = boxes[N-1, 4] - */ - __pyx_t_10 = (__pyx_v_N - 1); - __pyx_t_9 = 2; - __pyx_t_7 = -1; - if (__pyx_t_10 < 0) { - __pyx_t_10 += __pyx_pybuffernd_boxes.diminfo[0].shape; - if (unlikely(__pyx_t_10 < 0)) __pyx_t_7 = 0; - } else if (unlikely(__pyx_t_10 >= __pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_7 = 0; - if (__pyx_t_9 < 0) { - __pyx_t_9 += __pyx_pybuffernd_boxes.diminfo[1].shape; - if (unlikely(__pyx_t_9 < 0)) __pyx_t_7 = 1; - } else if (unlikely(__pyx_t_9 >= __pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_7 = 1; - if (unlikely(__pyx_t_7 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_7); - __PYX_ERR(0, 154, __pyx_L1_error) - } - __pyx_t_14 = __pyx_v_pos; - __pyx_t_13 = 2; - __pyx_t_7 = -1; - if (__pyx_t_14 < 0) { - __pyx_t_14 += __pyx_pybuffernd_boxes.diminfo[0].shape; - if (unlikely(__pyx_t_14 < 0)) __pyx_t_7 = 0; - } else if (unlikely(__pyx_t_14 >= __pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_7 = 0; - if (__pyx_t_13 < 0) { - __pyx_t_13 += __pyx_pybuffernd_boxes.diminfo[1].shape; - if (unlikely(__pyx_t_13 < 0)) __pyx_t_7 = 1; - } else if (unlikely(__pyx_t_13 >= 
__pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_7 = 1; - if (unlikely(__pyx_t_7 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_7); - __PYX_ERR(0, 154, __pyx_L1_error) - } - *__Pyx_BufPtrStrided2d(float *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_13, __pyx_pybuffernd_boxes.diminfo[1].strides) = (*__Pyx_BufPtrStrided2d(float *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_9, __pyx_pybuffernd_boxes.diminfo[1].strides)); - - /* "nms/cpu_nms.pyx":155 - * boxes[pos,1] = boxes[N-1, 1] - * boxes[pos,2] = boxes[N-1, 2] - * boxes[pos,3] = boxes[N-1, 3] # <<<<<<<<<<<<<< - * boxes[pos,4] = boxes[N-1, 4] - * N = N - 1 - */ - __pyx_t_9 = (__pyx_v_N - 1); - __pyx_t_10 = 3; - __pyx_t_7 = -1; - if (__pyx_t_9 < 0) { - __pyx_t_9 += __pyx_pybuffernd_boxes.diminfo[0].shape; - if (unlikely(__pyx_t_9 < 0)) __pyx_t_7 = 0; - } else if (unlikely(__pyx_t_9 >= __pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_7 = 0; - if (__pyx_t_10 < 0) { - __pyx_t_10 += __pyx_pybuffernd_boxes.diminfo[1].shape; - if (unlikely(__pyx_t_10 < 0)) __pyx_t_7 = 1; - } else if (unlikely(__pyx_t_10 >= __pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_7 = 1; - if (unlikely(__pyx_t_7 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_7); - __PYX_ERR(0, 155, __pyx_L1_error) - } - __pyx_t_13 = __pyx_v_pos; - __pyx_t_14 = 3; - __pyx_t_7 = -1; - if (__pyx_t_13 < 0) { - __pyx_t_13 += __pyx_pybuffernd_boxes.diminfo[0].shape; - if (unlikely(__pyx_t_13 < 0)) __pyx_t_7 = 0; - } else if (unlikely(__pyx_t_13 >= __pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_7 = 0; - if (__pyx_t_14 < 0) { - __pyx_t_14 += __pyx_pybuffernd_boxes.diminfo[1].shape; - if (unlikely(__pyx_t_14 < 0)) __pyx_t_7 = 1; - } else if (unlikely(__pyx_t_14 >= __pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_7 = 1; - if (unlikely(__pyx_t_7 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_7); - __PYX_ERR(0, 155, __pyx_L1_error) - } 
- *__Pyx_BufPtrStrided2d(float *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_13, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_14, __pyx_pybuffernd_boxes.diminfo[1].strides) = (*__Pyx_BufPtrStrided2d(float *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_10, __pyx_pybuffernd_boxes.diminfo[1].strides)); - - /* "nms/cpu_nms.pyx":156 - * boxes[pos,2] = boxes[N-1, 2] - * boxes[pos,3] = boxes[N-1, 3] - * boxes[pos,4] = boxes[N-1, 4] # <<<<<<<<<<<<<< - * N = N - 1 - * pos = pos - 1 - */ - __pyx_t_10 = (__pyx_v_N - 1); - __pyx_t_9 = 4; - __pyx_t_7 = -1; - if (__pyx_t_10 < 0) { - __pyx_t_10 += __pyx_pybuffernd_boxes.diminfo[0].shape; - if (unlikely(__pyx_t_10 < 0)) __pyx_t_7 = 0; - } else if (unlikely(__pyx_t_10 >= __pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_7 = 0; - if (__pyx_t_9 < 0) { - __pyx_t_9 += __pyx_pybuffernd_boxes.diminfo[1].shape; - if (unlikely(__pyx_t_9 < 0)) __pyx_t_7 = 1; - } else if (unlikely(__pyx_t_9 >= __pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_7 = 1; - if (unlikely(__pyx_t_7 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_7); - __PYX_ERR(0, 156, __pyx_L1_error) - } - __pyx_t_14 = __pyx_v_pos; - __pyx_t_13 = 4; - __pyx_t_7 = -1; - if (__pyx_t_14 < 0) { - __pyx_t_14 += __pyx_pybuffernd_boxes.diminfo[0].shape; - if (unlikely(__pyx_t_14 < 0)) __pyx_t_7 = 0; - } else if (unlikely(__pyx_t_14 >= __pyx_pybuffernd_boxes.diminfo[0].shape)) __pyx_t_7 = 0; - if (__pyx_t_13 < 0) { - __pyx_t_13 += __pyx_pybuffernd_boxes.diminfo[1].shape; - if (unlikely(__pyx_t_13 < 0)) __pyx_t_7 = 1; - } else if (unlikely(__pyx_t_13 >= __pyx_pybuffernd_boxes.diminfo[1].shape)) __pyx_t_7 = 1; - if (unlikely(__pyx_t_7 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_7); - __PYX_ERR(0, 156, __pyx_L1_error) - } - *__Pyx_BufPtrStrided2d(float *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_13, 
__pyx_pybuffernd_boxes.diminfo[1].strides) = (*__Pyx_BufPtrStrided2d(float *, __pyx_pybuffernd_boxes.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_boxes.diminfo[0].strides, __pyx_t_9, __pyx_pybuffernd_boxes.diminfo[1].strides)); - - /* "nms/cpu_nms.pyx":157 - * boxes[pos,3] = boxes[N-1, 3] - * boxes[pos,4] = boxes[N-1, 4] - * N = N - 1 # <<<<<<<<<<<<<< - * pos = pos - 1 - * - */ - __pyx_v_N = (__pyx_v_N - 1); - - /* "nms/cpu_nms.pyx":158 - * boxes[pos,4] = boxes[N-1, 4] - * N = N - 1 - * pos = pos - 1 # <<<<<<<<<<<<<< - * - * pos = pos + 1 - */ - __pyx_v_pos = (__pyx_v_pos - 1); - - /* "nms/cpu_nms.pyx":151 - * # if box score falls below threshold, discard the box by swapping with last box - * # update N - * if boxes[pos, 4] < threshold: # <<<<<<<<<<<<<< - * boxes[pos,0] = boxes[N-1, 0] - * boxes[pos,1] = boxes[N-1, 1] - */ - } - - /* "nms/cpu_nms.pyx":130 - * if iw > 0: - * ih = (min(ty2, y2) - max(ty1, y1) + 1) - * if ih > 0: # <<<<<<<<<<<<<< - * ua = float((tx2 - tx1 + 1) * (ty2 - ty1 + 1) + area - iw * ih) - * ov = iw * ih / ua #iou between max box and detection box - */ - } - - /* "nms/cpu_nms.pyx":128 - * area = (x2 - x1 + 1) * (y2 - y1 + 1) - * iw = (min(tx2, x2) - max(tx1, x1) + 1) - * if iw > 0: # <<<<<<<<<<<<<< - * ih = (min(ty2, y2) - max(ty1, y1) + 1) - * if ih > 0: - */ - } - - /* "nms/cpu_nms.pyx":160 - * pos = pos - 1 - * - * pos = pos + 1 # <<<<<<<<<<<<<< - * - * keep = [i for i in range(N)] - */ - __pyx_v_pos = (__pyx_v_pos + 1); - } - - /* "nms/cpu_nms.pyx":79 - * cdef float x1,x2,y1,y2,tx1,tx2,ty1,ty2,ts,area,weight,ov - * - * for i in range(N): # <<<<<<<<<<<<<< - * maxscore = boxes[i, 4] - * maxpos = i - */ - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "nms/cpu_nms.pyx":162 - * pos = pos + 1 - * - * keep = [i for i in range(N)] # <<<<<<<<<<<<<< - * return keep - */ - __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 162, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_5 = 
__Pyx_PyInt_From_unsigned_int(__pyx_v_N); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 162, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_11 = __Pyx_PyObject_CallOneArg(__pyx_builtin_range, __pyx_t_5); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 162, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (likely(PyList_CheckExact(__pyx_t_11)) || PyTuple_CheckExact(__pyx_t_11)) { - __pyx_t_5 = __pyx_t_11; __Pyx_INCREF(__pyx_t_5); __pyx_t_3 = 0; - __pyx_t_4 = NULL; - } else { - __pyx_t_3 = -1; __pyx_t_5 = PyObject_GetIter(__pyx_t_11); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 162, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_4 = Py_TYPE(__pyx_t_5)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 162, __pyx_L1_error) - } - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - for (;;) { - if (likely(!__pyx_t_4)) { - if (likely(PyList_CheckExact(__pyx_t_5))) { - if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_5)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_11 = PyList_GET_ITEM(__pyx_t_5, __pyx_t_3); __Pyx_INCREF(__pyx_t_11); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(0, 162, __pyx_L1_error) - #else - __pyx_t_11 = PySequence_ITEM(__pyx_t_5, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 162, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - #endif - } else { - if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_5)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_11 = PyTuple_GET_ITEM(__pyx_t_5, __pyx_t_3); __Pyx_INCREF(__pyx_t_11); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(0, 162, __pyx_L1_error) - #else - __pyx_t_11 = PySequence_ITEM(__pyx_t_5, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 162, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - #endif - } - } else { - __pyx_t_11 = __pyx_t_4(__pyx_t_5); - if (unlikely(!__pyx_t_11)) { - PyObject* exc_type = PyErr_Occurred(); - if (exc_type) { - if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, 
PyExc_StopIteration))) PyErr_Clear(); - else __PYX_ERR(0, 162, __pyx_L1_error) - } - break; - } - __Pyx_GOTREF(__pyx_t_11); - } - __Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_11); - __pyx_t_11 = 0; - if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_v_i))) __PYX_ERR(0, 162, __pyx_L1_error) - } - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_v_keep = ((PyObject*)__pyx_t_1); - __pyx_t_1 = 0; - - /* "nms/cpu_nms.pyx":163 - * - * keep = [i for i in range(N)] - * return keep # <<<<<<<<<<<<<< - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_keep); - __pyx_r = __pyx_v_keep; - goto __pyx_L0; - - /* "nms/cpu_nms.pyx":70 - * return keep - * - * def cpu_soft_nms(np.ndarray[float, ndim=2] boxes, float sigma=0.5, float Nt=0.3, float threshold=0.001, unsigned int method=0): # <<<<<<<<<<<<<< - * cdef unsigned int N = boxes.shape[0] - * cdef float iw, ih, box_area - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_11); - __Pyx_XDECREF(__pyx_t_12); - { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); - __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_boxes.rcbuffer->pybuffer); - __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} - __Pyx_AddTraceback("nms.cpu_nms.cpu_soft_nms", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - goto __pyx_L2; - __pyx_L0:; - __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_boxes.rcbuffer->pybuffer); - __pyx_L2:; - __Pyx_XDECREF(__pyx_v_i); - __Pyx_XDECREF(__pyx_v_s); - __Pyx_XDECREF(__pyx_v_keep); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":734 - * ctypedef npy_cdouble complex_t - * - * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(1, a) - * - 
*/ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":735 - * - * cdef inline object PyArray_MultiIterNew1(a): - * return PyArray_MultiIterNew(1, a) # <<<<<<<<<<<<<< - * - * cdef inline object PyArray_MultiIterNew2(a, b): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 735, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":734 - * ctypedef npy_cdouble complex_t - * - * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(1, a) - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":737 - * return PyArray_MultiIterNew(1, a) - * - * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(2, a, b) - * - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); - - /* 
"../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":738 - * - * cdef inline object PyArray_MultiIterNew2(a, b): - * return PyArray_MultiIterNew(2, a, b) # <<<<<<<<<<<<<< - * - * cdef inline object PyArray_MultiIterNew3(a, b, c): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 738, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":737 - * return PyArray_MultiIterNew(1, a) - * - * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(2, a, b) - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":740 - * return PyArray_MultiIterNew(2, a, b) - * - * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(3, a, b, c) - * - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":741 - * - * cdef inline object PyArray_MultiIterNew3(a, b, c): - * return PyArray_MultiIterNew(3, a, b, c) # <<<<<<<<<<<<<< - * - * cdef inline object PyArray_MultiIterNew4(a, b, c, d): - */ - __Pyx_XDECREF(__pyx_r); - 
__pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 741, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":740 - * return PyArray_MultiIterNew(2, a, b) - * - * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(3, a, b, c) - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":743 - * return PyArray_MultiIterNew(3, a, b, c) - * - * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(4, a, b, c, d) - * - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":744 - * - * cdef inline object PyArray_MultiIterNew4(a, b, c, d): - * return PyArray_MultiIterNew(4, a, b, c, d) # <<<<<<<<<<<<<< - * - * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 744, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; 
- __pyx_t_1 = 0; - goto __pyx_L0; - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":743 - * return PyArray_MultiIterNew(3, a, b, c) - * - * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(4, a, b, c, d) - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":746 - * return PyArray_MultiIterNew(4, a, b, c, d) - * - * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(5, a, b, c, d, e) - * - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":747 - * - * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): - * return PyArray_MultiIterNew(5, a, b, c, d, e) # <<<<<<<<<<<<<< - * - * cdef inline tuple PyDataType_SHAPE(dtype d): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 747, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":746 - * return 
PyArray_MultiIterNew(4, a, b, c, d) - * - * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(5, a, b, c, d, e) - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":749 - * return PyArray_MultiIterNew(5, a, b, c, d, e) - * - * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<< - * if PyDataType_HASSUBARRAY(d): - * return d.subarray.shape - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyDataType_SHAPE(PyArray_Descr *__pyx_v_d) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - __Pyx_RefNannySetupContext("PyDataType_SHAPE", 0); - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":750 - * - * cdef inline tuple PyDataType_SHAPE(dtype d): - * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<< - * return d.subarray.shape - * else: - */ - __pyx_t_1 = (PyDataType_HASSUBARRAY(__pyx_v_d) != 0); - if (__pyx_t_1) { - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":751 - * cdef inline tuple PyDataType_SHAPE(dtype d): - * if PyDataType_HASSUBARRAY(d): - * return d.subarray.shape # <<<<<<<<<<<<<< - * else: - * return () - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(((PyObject*)__pyx_v_d->subarray->shape)); - __pyx_r = ((PyObject*)__pyx_v_d->subarray->shape); - goto __pyx_L0; - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":750 - * - * cdef inline tuple PyDataType_SHAPE(dtype d): - * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<< - * return d.subarray.shape - * else: - */ - } - - /* 
"../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":753 - * return d.subarray.shape - * else: - * return () # <<<<<<<<<<<<<< - * - * - */ - /*else*/ { - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_empty_tuple); - __pyx_r = __pyx_empty_tuple; - goto __pyx_L0; - } - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":749 - * return PyArray_MultiIterNew(5, a, b, c, d, e) - * - * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<< - * if PyDataType_HASSUBARRAY(d): - * return d.subarray.shape - */ - - /* function exit code */ - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":868 - * int _import_umath() except -1 - * - * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< - * Py_INCREF(base) # important to do this before stealing the reference below! - * PyArray_SetBaseObject(arr, base) - */ - -static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("set_array_base", 0); - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":869 - * - * cdef inline void set_array_base(ndarray arr, object base): - * Py_INCREF(base) # important to do this before stealing the reference below! # <<<<<<<<<<<<<< - * PyArray_SetBaseObject(arr, base) - * - */ - Py_INCREF(__pyx_v_base); - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":870 - * cdef inline void set_array_base(ndarray arr, object base): - * Py_INCREF(base) # important to do this before stealing the reference below! 
- * PyArray_SetBaseObject(arr, base) # <<<<<<<<<<<<<< - * - * cdef inline object get_array_base(ndarray arr): - */ - (void)(PyArray_SetBaseObject(__pyx_v_arr, __pyx_v_base)); - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":868 - * int _import_umath() except -1 - * - * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< - * Py_INCREF(base) # important to do this before stealing the reference below! - * PyArray_SetBaseObject(arr, base) - */ - - /* function exit code */ - __Pyx_RefNannyFinishContext(); -} - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":872 - * PyArray_SetBaseObject(arr, base) - * - * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< - * base = PyArray_BASE(arr) - * if base is NULL: - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { - PyObject *__pyx_v_base; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - __Pyx_RefNannySetupContext("get_array_base", 0); - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":873 - * - * cdef inline object get_array_base(ndarray arr): - * base = PyArray_BASE(arr) # <<<<<<<<<<<<<< - * if base is NULL: - * return None - */ - __pyx_v_base = PyArray_BASE(__pyx_v_arr); - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":874 - * cdef inline object get_array_base(ndarray arr): - * base = PyArray_BASE(arr) - * if base is NULL: # <<<<<<<<<<<<<< - * return None - * return base - */ - __pyx_t_1 = ((__pyx_v_base == NULL) != 0); - if (__pyx_t_1) { - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":875 - * base = PyArray_BASE(arr) - * if base is NULL: - * return None # <<<<<<<<<<<<<< - * return base - * - */ - __Pyx_XDECREF(__pyx_r); - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - - /* 
"../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":874 - * cdef inline object get_array_base(ndarray arr): - * base = PyArray_BASE(arr) - * if base is NULL: # <<<<<<<<<<<<<< - * return None - * return base - */ - } - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":876 - * if base is NULL: - * return None - * return base # <<<<<<<<<<<<<< - * - * # Versions of the import_* functions which are more suitable for - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(((PyObject *)__pyx_v_base)); - __pyx_r = ((PyObject *)__pyx_v_base); - goto __pyx_L0; - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":872 - * PyArray_SetBaseObject(arr, base) - * - * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< - * base = PyArray_BASE(arr) - * if base is NULL: - */ - - /* function exit code */ - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":880 - * # Versions of the import_* functions which are more suitable for - * # Cython code. - * cdef inline int import_array() except -1: # <<<<<<<<<<<<<< - * try: - * __pyx_import_array() - */ - -static CYTHON_INLINE int __pyx_f_5numpy_import_array(void) { - int __pyx_r; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - PyObject *__pyx_t_7 = NULL; - PyObject *__pyx_t_8 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("import_array", 0); - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":881 - * # Cython code. 
- * cdef inline int import_array() except -1: - * try: # <<<<<<<<<<<<<< - * __pyx_import_array() - * except Exception: - */ - { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); - __Pyx_XGOTREF(__pyx_t_1); - __Pyx_XGOTREF(__pyx_t_2); - __Pyx_XGOTREF(__pyx_t_3); - /*try:*/ { - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":882 - * cdef inline int import_array() except -1: - * try: - * __pyx_import_array() # <<<<<<<<<<<<<< - * except Exception: - * raise ImportError("numpy.core.multiarray failed to import") - */ - __pyx_t_4 = _import_array(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 882, __pyx_L3_error) - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":881 - * # Cython code. - * cdef inline int import_array() except -1: - * try: # <<<<<<<<<<<<<< - * __pyx_import_array() - * except Exception: - */ - } - __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - goto __pyx_L8_try_end; - __pyx_L3_error:; - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":883 - * try: - * __pyx_import_array() - * except Exception: # <<<<<<<<<<<<<< - * raise ImportError("numpy.core.multiarray failed to import") - * - */ - __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); - if (__pyx_t_4) { - __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename); - if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 883, __pyx_L5_except_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_GOTREF(__pyx_t_6); - __Pyx_GOTREF(__pyx_t_7); - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":884 - * __pyx_import_array() - * except Exception: - * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< - 
* - * cdef inline int import_umath() except -1: - */ - __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 884, __pyx_L5_except_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_Raise(__pyx_t_8, 0, 0, 0); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __PYX_ERR(1, 884, __pyx_L5_except_error) - } - goto __pyx_L5_except_error; - __pyx_L5_except_error:; - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":881 - * # Cython code. - * cdef inline int import_array() except -1: - * try: # <<<<<<<<<<<<<< - * __pyx_import_array() - * except Exception: - */ - __Pyx_XGIVEREF(__pyx_t_1); - __Pyx_XGIVEREF(__pyx_t_2); - __Pyx_XGIVEREF(__pyx_t_3); - __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); - goto __pyx_L1_error; - __pyx_L8_try_end:; - } - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":880 - * # Versions of the import_* functions which are more suitable for - * # Cython code. 
- * cdef inline int import_array() except -1: # <<<<<<<<<<<<<< - * try: - * __pyx_import_array() - */ - - /* function exit code */ - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_XDECREF(__pyx_t_8); - __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":886 - * raise ImportError("numpy.core.multiarray failed to import") - * - * cdef inline int import_umath() except -1: # <<<<<<<<<<<<<< - * try: - * _import_umath() - */ - -static CYTHON_INLINE int __pyx_f_5numpy_import_umath(void) { - int __pyx_r; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - PyObject *__pyx_t_7 = NULL; - PyObject *__pyx_t_8 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("import_umath", 0); - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":887 - * - * cdef inline int import_umath() except -1: - * try: # <<<<<<<<<<<<<< - * _import_umath() - * except Exception: - */ - { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); - __Pyx_XGOTREF(__pyx_t_1); - __Pyx_XGOTREF(__pyx_t_2); - __Pyx_XGOTREF(__pyx_t_3); - /*try:*/ { - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":888 - * cdef inline int import_umath() except -1: - * try: - * _import_umath() # <<<<<<<<<<<<<< - * except Exception: - * raise ImportError("numpy.core.umath failed to import") - */ - __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 888, 
__pyx_L3_error) - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":887 - * - * cdef inline int import_umath() except -1: - * try: # <<<<<<<<<<<<<< - * _import_umath() - * except Exception: - */ - } - __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - goto __pyx_L8_try_end; - __pyx_L3_error:; - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":889 - * try: - * _import_umath() - * except Exception: # <<<<<<<<<<<<<< - * raise ImportError("numpy.core.umath failed to import") - * - */ - __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); - if (__pyx_t_4) { - __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename); - if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 889, __pyx_L5_except_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_GOTREF(__pyx_t_6); - __Pyx_GOTREF(__pyx_t_7); - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":890 - * _import_umath() - * except Exception: - * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< - * - * cdef inline int import_ufunc() except -1: - */ - __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 890, __pyx_L5_except_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_Raise(__pyx_t_8, 0, 0, 0); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __PYX_ERR(1, 890, __pyx_L5_except_error) - } - goto __pyx_L5_except_error; - __pyx_L5_except_error:; - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":887 - * - * cdef inline int import_umath() except -1: - * try: # <<<<<<<<<<<<<< - * _import_umath() - * except Exception: - */ - __Pyx_XGIVEREF(__pyx_t_1); - __Pyx_XGIVEREF(__pyx_t_2); - __Pyx_XGIVEREF(__pyx_t_3); - __Pyx_ExceptionReset(__pyx_t_1, 
__pyx_t_2, __pyx_t_3); - goto __pyx_L1_error; - __pyx_L8_try_end:; - } - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":886 - * raise ImportError("numpy.core.multiarray failed to import") - * - * cdef inline int import_umath() except -1: # <<<<<<<<<<<<<< - * try: - * _import_umath() - */ - - /* function exit code */ - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_XDECREF(__pyx_t_8); - __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":892 - * raise ImportError("numpy.core.umath failed to import") - * - * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< - * try: - * _import_umath() - */ - -static CYTHON_INLINE int __pyx_f_5numpy_import_ufunc(void) { - int __pyx_r; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - PyObject *__pyx_t_7 = NULL; - PyObject *__pyx_t_8 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("import_ufunc", 0); - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":893 - * - * cdef inline int import_ufunc() except -1: - * try: # <<<<<<<<<<<<<< - * _import_umath() - * except Exception: - */ - { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); - __Pyx_XGOTREF(__pyx_t_1); - __Pyx_XGOTREF(__pyx_t_2); - __Pyx_XGOTREF(__pyx_t_3); - /*try:*/ { - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":894 - * cdef inline int import_ufunc() except -1: - * 
try: - * _import_umath() # <<<<<<<<<<<<<< - * except Exception: - * raise ImportError("numpy.core.umath failed to import") - */ - __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 894, __pyx_L3_error) - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":893 - * - * cdef inline int import_ufunc() except -1: - * try: # <<<<<<<<<<<<<< - * _import_umath() - * except Exception: - */ - } - __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - goto __pyx_L8_try_end; - __pyx_L3_error:; - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":895 - * try: - * _import_umath() - * except Exception: # <<<<<<<<<<<<<< - * raise ImportError("numpy.core.umath failed to import") - * - */ - __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); - if (__pyx_t_4) { - __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); - if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 895, __pyx_L5_except_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_GOTREF(__pyx_t_6); - __Pyx_GOTREF(__pyx_t_7); - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":896 - * _import_umath() - * except Exception: - * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< - * - * cdef extern from *: - */ - __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 896, __pyx_L5_except_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_Raise(__pyx_t_8, 0, 0, 0); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __PYX_ERR(1, 896, __pyx_L5_except_error) - } - goto __pyx_L5_except_error; - __pyx_L5_except_error:; - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":893 - * - * cdef inline int import_ufunc() except -1: - * 
try: # <<<<<<<<<<<<<< - * _import_umath() - * except Exception: - */ - __Pyx_XGIVEREF(__pyx_t_1); - __Pyx_XGIVEREF(__pyx_t_2); - __Pyx_XGIVEREF(__pyx_t_3); - __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); - goto __pyx_L1_error; - __pyx_L8_try_end:; - } - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":892 - * raise ImportError("numpy.core.umath failed to import") - * - * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< - * try: - * _import_umath() - */ - - /* function exit code */ - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_XDECREF(__pyx_t_8); - __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyMethodDef __pyx_methods[] = { - {0, 0, 0, 0} -}; - -#if PY_MAJOR_VERSION >= 3 -#if CYTHON_PEP489_MULTI_PHASE_INIT -static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ -static int __pyx_pymod_exec_cpu_nms(PyObject* module); /*proto*/ -static PyModuleDef_Slot __pyx_moduledef_slots[] = { - {Py_mod_create, (void*)__pyx_pymod_create}, - {Py_mod_exec, (void*)__pyx_pymod_exec_cpu_nms}, - {0, NULL} -}; -#endif - -static struct PyModuleDef __pyx_moduledef = { - PyModuleDef_HEAD_INIT, - "cpu_nms", - 0, /* m_doc */ - #if CYTHON_PEP489_MULTI_PHASE_INIT - 0, /* m_size */ - #else - -1, /* m_size */ - #endif - __pyx_methods /* m_methods */, - #if CYTHON_PEP489_MULTI_PHASE_INIT - __pyx_moduledef_slots, /* m_slots */ - #else - NULL, /* m_reload */ - #endif - NULL, /* m_traverse */ - NULL, /* m_clear */ - NULL /* m_free */ -}; -#endif -#ifndef CYTHON_SMALL_CODE -#if defined(__clang__) - #define CYTHON_SMALL_CODE -#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) - #define CYTHON_SMALL_CODE __attribute__((cold)) -#else - #define CYTHON_SMALL_CODE 
-#endif -#endif - -static __Pyx_StringTabEntry __pyx_string_tab[] = { - {&__pyx_n_s_ImportError, __pyx_k_ImportError, sizeof(__pyx_k_ImportError), 0, 0, 1, 1}, - {&__pyx_n_s_N, __pyx_k_N, sizeof(__pyx_k_N), 0, 0, 1, 1}, - {&__pyx_n_s_Nt, __pyx_k_Nt, sizeof(__pyx_k_Nt), 0, 0, 1, 1}, - {&__pyx_n_s_area, __pyx_k_area, sizeof(__pyx_k_area), 0, 0, 1, 1}, - {&__pyx_n_s_areas, __pyx_k_areas, sizeof(__pyx_k_areas), 0, 0, 1, 1}, - {&__pyx_n_s_argsort, __pyx_k_argsort, sizeof(__pyx_k_argsort), 0, 0, 1, 1}, - {&__pyx_n_s_box_area, __pyx_k_box_area, sizeof(__pyx_k_box_area), 0, 0, 1, 1}, - {&__pyx_n_s_boxes, __pyx_k_boxes, sizeof(__pyx_k_boxes), 0, 0, 1, 1}, - {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, - {&__pyx_n_s_cpu_nms, __pyx_k_cpu_nms, sizeof(__pyx_k_cpu_nms), 0, 0, 1, 1}, - {&__pyx_n_s_cpu_soft_nms, __pyx_k_cpu_soft_nms, sizeof(__pyx_k_cpu_soft_nms), 0, 0, 1, 1}, - {&__pyx_n_s_dets, __pyx_k_dets, sizeof(__pyx_k_dets), 0, 0, 1, 1}, - {&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1}, - {&__pyx_n_s_exp, __pyx_k_exp, sizeof(__pyx_k_exp), 0, 0, 1, 1}, - {&__pyx_n_s_h, __pyx_k_h, sizeof(__pyx_k_h), 0, 0, 1, 1}, - {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1}, - {&__pyx_n_s_i_2, __pyx_k_i_2, sizeof(__pyx_k_i_2), 0, 0, 1, 1}, - {&__pyx_n_s_iarea, __pyx_k_iarea, sizeof(__pyx_k_iarea), 0, 0, 1, 1}, - {&__pyx_n_s_ih, __pyx_k_ih, sizeof(__pyx_k_ih), 0, 0, 1, 1}, - {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, - {&__pyx_n_s_int, __pyx_k_int, sizeof(__pyx_k_int), 0, 0, 1, 1}, - {&__pyx_n_s_inter, __pyx_k_inter, sizeof(__pyx_k_inter), 0, 0, 1, 1}, - {&__pyx_n_s_iw, __pyx_k_iw, sizeof(__pyx_k_iw), 0, 0, 1, 1}, - {&__pyx_n_s_ix1, __pyx_k_ix1, sizeof(__pyx_k_ix1), 0, 0, 1, 1}, - {&__pyx_n_s_ix2, __pyx_k_ix2, sizeof(__pyx_k_ix2), 0, 0, 1, 1}, - {&__pyx_n_s_iy1, __pyx_k_iy1, sizeof(__pyx_k_iy1), 0, 0, 1, 1}, - {&__pyx_n_s_iy2, __pyx_k_iy2, sizeof(__pyx_k_iy2), 0, 0, 
1, 1}, - {&__pyx_n_s_j, __pyx_k_j, sizeof(__pyx_k_j), 0, 0, 1, 1}, - {&__pyx_n_s_j_2, __pyx_k_j_2, sizeof(__pyx_k_j_2), 0, 0, 1, 1}, - {&__pyx_n_s_keep, __pyx_k_keep, sizeof(__pyx_k_keep), 0, 0, 1, 1}, - {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, - {&__pyx_n_s_maxpos, __pyx_k_maxpos, sizeof(__pyx_k_maxpos), 0, 0, 1, 1}, - {&__pyx_n_s_maxscore, __pyx_k_maxscore, sizeof(__pyx_k_maxscore), 0, 0, 1, 1}, - {&__pyx_n_s_method, __pyx_k_method, sizeof(__pyx_k_method), 0, 0, 1, 1}, - {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, - {&__pyx_n_s_ndets, __pyx_k_ndets, sizeof(__pyx_k_ndets), 0, 0, 1, 1}, - {&__pyx_n_s_nms_cpu_nms, __pyx_k_nms_cpu_nms, sizeof(__pyx_k_nms_cpu_nms), 0, 0, 1, 1}, - {&__pyx_kp_s_nms_cpu_nms_pyx, __pyx_k_nms_cpu_nms_pyx, sizeof(__pyx_k_nms_cpu_nms_pyx), 0, 0, 1, 0}, - {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1}, - {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, - {&__pyx_kp_s_numpy_core_multiarray_failed_to, __pyx_k_numpy_core_multiarray_failed_to, sizeof(__pyx_k_numpy_core_multiarray_failed_to), 0, 0, 1, 0}, - {&__pyx_kp_s_numpy_core_umath_failed_to_impor, __pyx_k_numpy_core_umath_failed_to_impor, sizeof(__pyx_k_numpy_core_umath_failed_to_impor), 0, 0, 1, 0}, - {&__pyx_n_s_order, __pyx_k_order, sizeof(__pyx_k_order), 0, 0, 1, 1}, - {&__pyx_n_s_ov, __pyx_k_ov, sizeof(__pyx_k_ov), 0, 0, 1, 1}, - {&__pyx_n_s_ovr, __pyx_k_ovr, sizeof(__pyx_k_ovr), 0, 0, 1, 1}, - {&__pyx_n_s_pos, __pyx_k_pos, sizeof(__pyx_k_pos), 0, 0, 1, 1}, - {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, - {&__pyx_n_s_s, __pyx_k_s, sizeof(__pyx_k_s), 0, 0, 1, 1}, - {&__pyx_n_s_scores, __pyx_k_scores, sizeof(__pyx_k_scores), 0, 0, 1, 1}, - {&__pyx_n_s_sigma, __pyx_k_sigma, sizeof(__pyx_k_sigma), 0, 0, 1, 1}, - {&__pyx_n_s_suppressed, __pyx_k_suppressed, sizeof(__pyx_k_suppressed), 0, 0, 1, 1}, - {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, - {&__pyx_n_s_thresh, 
__pyx_k_thresh, sizeof(__pyx_k_thresh), 0, 0, 1, 1}, - {&__pyx_n_s_threshold, __pyx_k_threshold, sizeof(__pyx_k_threshold), 0, 0, 1, 1}, - {&__pyx_n_s_ts, __pyx_k_ts, sizeof(__pyx_k_ts), 0, 0, 1, 1}, - {&__pyx_n_s_tx1, __pyx_k_tx1, sizeof(__pyx_k_tx1), 0, 0, 1, 1}, - {&__pyx_n_s_tx2, __pyx_k_tx2, sizeof(__pyx_k_tx2), 0, 0, 1, 1}, - {&__pyx_n_s_ty1, __pyx_k_ty1, sizeof(__pyx_k_ty1), 0, 0, 1, 1}, - {&__pyx_n_s_ty2, __pyx_k_ty2, sizeof(__pyx_k_ty2), 0, 0, 1, 1}, - {&__pyx_n_s_ua, __pyx_k_ua, sizeof(__pyx_k_ua), 0, 0, 1, 1}, - {&__pyx_n_s_w, __pyx_k_w, sizeof(__pyx_k_w), 0, 0, 1, 1}, - {&__pyx_n_s_weight, __pyx_k_weight, sizeof(__pyx_k_weight), 0, 0, 1, 1}, - {&__pyx_n_s_x1, __pyx_k_x1, sizeof(__pyx_k_x1), 0, 0, 1, 1}, - {&__pyx_n_s_x2, __pyx_k_x2, sizeof(__pyx_k_x2), 0, 0, 1, 1}, - {&__pyx_n_s_xx1, __pyx_k_xx1, sizeof(__pyx_k_xx1), 0, 0, 1, 1}, - {&__pyx_n_s_xx2, __pyx_k_xx2, sizeof(__pyx_k_xx2), 0, 0, 1, 1}, - {&__pyx_n_s_y1, __pyx_k_y1, sizeof(__pyx_k_y1), 0, 0, 1, 1}, - {&__pyx_n_s_y2, __pyx_k_y2, sizeof(__pyx_k_y2), 0, 0, 1, 1}, - {&__pyx_n_s_yy1, __pyx_k_yy1, sizeof(__pyx_k_yy1), 0, 0, 1, 1}, - {&__pyx_n_s_yy2, __pyx_k_yy2, sizeof(__pyx_k_yy2), 0, 0, 1, 1}, - {&__pyx_n_s_zeros, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1}, - {0, 0, 0, 0, 0, 0, 0} -}; -static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { - __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 43, __pyx_L1_error) - __pyx_builtin_ImportError = __Pyx_GetBuiltinName(__pyx_n_s_ImportError); if (!__pyx_builtin_ImportError) __PYX_ERR(1, 884, __pyx_L1_error) - return 0; - __pyx_L1_error:; - return -1; -} - -static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); - - /* "nms/cpu_nms.pyx":18 - * - * def cpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh): - * cdef np.ndarray[np.float32_t, ndim=1] x1 = dets[:, 0] # <<<<<<<<<<<<<< - * cdef 
np.ndarray[np.float32_t, ndim=1] y1 = dets[:, 1] - * cdef np.ndarray[np.float32_t, ndim=1] x2 = dets[:, 2] - */ - __pyx_slice_ = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice_)) __PYX_ERR(0, 18, __pyx_L1_error) - __Pyx_GOTREF(__pyx_slice_); - __Pyx_GIVEREF(__pyx_slice_); - __pyx_tuple__2 = PyTuple_Pack(2, __pyx_slice_, __pyx_int_0); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(0, 18, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__2); - __Pyx_GIVEREF(__pyx_tuple__2); - - /* "nms/cpu_nms.pyx":19 - * def cpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh): - * cdef np.ndarray[np.float32_t, ndim=1] x1 = dets[:, 0] - * cdef np.ndarray[np.float32_t, ndim=1] y1 = dets[:, 1] # <<<<<<<<<<<<<< - * cdef np.ndarray[np.float32_t, ndim=1] x2 = dets[:, 2] - * cdef np.ndarray[np.float32_t, ndim=1] y2 = dets[:, 3] - */ - __pyx_tuple__3 = PyTuple_Pack(2, __pyx_slice_, __pyx_int_1); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(0, 19, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__3); - __Pyx_GIVEREF(__pyx_tuple__3); - - /* "nms/cpu_nms.pyx":20 - * cdef np.ndarray[np.float32_t, ndim=1] x1 = dets[:, 0] - * cdef np.ndarray[np.float32_t, ndim=1] y1 = dets[:, 1] - * cdef np.ndarray[np.float32_t, ndim=1] x2 = dets[:, 2] # <<<<<<<<<<<<<< - * cdef np.ndarray[np.float32_t, ndim=1] y2 = dets[:, 3] - * cdef np.ndarray[np.float32_t, ndim=1] scores = dets[:, 4] - */ - __pyx_tuple__4 = PyTuple_Pack(2, __pyx_slice_, __pyx_int_2); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(0, 20, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__4); - __Pyx_GIVEREF(__pyx_tuple__4); - - /* "nms/cpu_nms.pyx":21 - * cdef np.ndarray[np.float32_t, ndim=1] y1 = dets[:, 1] - * cdef np.ndarray[np.float32_t, ndim=1] x2 = dets[:, 2] - * cdef np.ndarray[np.float32_t, ndim=1] y2 = dets[:, 3] # <<<<<<<<<<<<<< - * cdef np.ndarray[np.float32_t, ndim=1] scores = dets[:, 4] - * - */ - __pyx_tuple__5 = PyTuple_Pack(2, __pyx_slice_, __pyx_int_3); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(0, 21, __pyx_L1_error) - 
__Pyx_GOTREF(__pyx_tuple__5); - __Pyx_GIVEREF(__pyx_tuple__5); - - /* "nms/cpu_nms.pyx":22 - * cdef np.ndarray[np.float32_t, ndim=1] x2 = dets[:, 2] - * cdef np.ndarray[np.float32_t, ndim=1] y2 = dets[:, 3] - * cdef np.ndarray[np.float32_t, ndim=1] scores = dets[:, 4] # <<<<<<<<<<<<<< - * - * cdef np.ndarray[np.float32_t, ndim=1] areas = (x2 - x1 + 1) * (y2 - y1 + 1) - */ - __pyx_tuple__6 = PyTuple_Pack(2, __pyx_slice_, __pyx_int_4); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(0, 22, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__6); - __Pyx_GIVEREF(__pyx_tuple__6); - - /* "nms/cpu_nms.pyx":25 - * - * cdef np.ndarray[np.float32_t, ndim=1] areas = (x2 - x1 + 1) * (y2 - y1 + 1) - * cdef np.ndarray[np.int_t, ndim=1] order = scores.argsort()[::-1] # <<<<<<<<<<<<<< - * - * cdef int ndets = dets.shape[0] - */ - __pyx_slice__7 = PySlice_New(Py_None, Py_None, __pyx_int_neg_1); if (unlikely(!__pyx_slice__7)) __PYX_ERR(0, 25, __pyx_L1_error) - __Pyx_GOTREF(__pyx_slice__7); - __Pyx_GIVEREF(__pyx_slice__7); - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":884 - * __pyx_import_array() - * except Exception: - * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< - * - * cdef inline int import_umath() except -1: - */ - __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_multiarray_failed_to); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 884, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__8); - __Pyx_GIVEREF(__pyx_tuple__8); - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":890 - * _import_umath() - * except Exception: - * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< - * - * cdef inline int import_ufunc() except -1: - */ - __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 890, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__9); - __Pyx_GIVEREF(__pyx_tuple__9); - - /* "nms/cpu_nms.pyx":17 - 
* return a if a <= b else b - * - * def cpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh): # <<<<<<<<<<<<<< - * cdef np.ndarray[np.float32_t, ndim=1] x1 = dets[:, 0] - * cdef np.ndarray[np.float32_t, ndim=1] y1 = dets[:, 1] - */ - __pyx_tuple__10 = PyTuple_Pack(29, __pyx_n_s_dets, __pyx_n_s_thresh, __pyx_n_s_x1, __pyx_n_s_y1, __pyx_n_s_x2, __pyx_n_s_y2, __pyx_n_s_scores, __pyx_n_s_areas, __pyx_n_s_order, __pyx_n_s_ndets, __pyx_n_s_suppressed, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_i_2, __pyx_n_s_j_2, __pyx_n_s_ix1, __pyx_n_s_iy1, __pyx_n_s_ix2, __pyx_n_s_iy2, __pyx_n_s_iarea, __pyx_n_s_xx1, __pyx_n_s_yy1, __pyx_n_s_xx2, __pyx_n_s_yy2, __pyx_n_s_w, __pyx_n_s_h, __pyx_n_s_inter, __pyx_n_s_ovr, __pyx_n_s_keep); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(0, 17, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__10); - __Pyx_GIVEREF(__pyx_tuple__10); - __pyx_codeobj__11 = (PyObject*)__Pyx_PyCode_New(2, 0, 29, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__10, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_nms_cpu_nms_pyx, __pyx_n_s_cpu_nms, 17, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__11)) __PYX_ERR(0, 17, __pyx_L1_error) - - /* "nms/cpu_nms.pyx":70 - * return keep - * - * def cpu_soft_nms(np.ndarray[float, ndim=2] boxes, float sigma=0.5, float Nt=0.3, float threshold=0.001, unsigned int method=0): # <<<<<<<<<<<<<< - * cdef unsigned int N = boxes.shape[0] - * cdef float iw, ih, box_area - */ - __pyx_tuple__12 = PyTuple_Pack(28, __pyx_n_s_boxes, __pyx_n_s_sigma, __pyx_n_s_Nt, __pyx_n_s_threshold, __pyx_n_s_method, __pyx_n_s_N, __pyx_n_s_iw, __pyx_n_s_ih, __pyx_n_s_box_area, __pyx_n_s_ua, __pyx_n_s_pos, __pyx_n_s_maxscore, __pyx_n_s_maxpos, __pyx_n_s_x1, __pyx_n_s_x2, __pyx_n_s_y1, __pyx_n_s_y2, __pyx_n_s_tx1, __pyx_n_s_tx2, __pyx_n_s_ty1, __pyx_n_s_ty2, __pyx_n_s_ts, __pyx_n_s_area, __pyx_n_s_weight, __pyx_n_s_ov, __pyx_n_s_i_2, __pyx_n_s_s, __pyx_n_s_keep); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(0, 70, 
__pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__12); - __Pyx_GIVEREF(__pyx_tuple__12); - __pyx_codeobj__13 = (PyObject*)__Pyx_PyCode_New(5, 0, 28, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__12, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_nms_cpu_nms_pyx, __pyx_n_s_cpu_soft_nms, 70, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__13)) __PYX_ERR(0, 70, __pyx_L1_error) - __Pyx_RefNannyFinishContext(); - return 0; - __pyx_L1_error:; - __Pyx_RefNannyFinishContext(); - return -1; -} - -static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { - if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_2 = PyInt_FromLong(2); if (unlikely(!__pyx_int_2)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_3 = PyInt_FromLong(3); if (unlikely(!__pyx_int_3)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_4 = PyInt_FromLong(4); if (unlikely(!__pyx_int_4)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error) - return 0; - __pyx_L1_error:; - return -1; -} - -static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ - -static int __Pyx_modinit_global_init_code(void) { - __Pyx_RefNannyDeclarations - 
__Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); - /*--- Global init code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_variable_export_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); - /*--- Variable export code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_function_export_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); - /*--- Function export code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_type_init_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); - /*--- Type init code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_type_import_code(void) { - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); - /*--- Type import code ---*/ - __pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 9, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "type", - #if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000 - sizeof(PyTypeObject), - #else - sizeof(PyHeapTypeObject), - #endif - __Pyx_ImportType_CheckSize_Warn); - if (!__pyx_ptype_7cpython_4type_type) __PYX_ERR(2, 9, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyImport_ImportModule("numpy"); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 199, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_ptype_5numpy_dtype = __Pyx_ImportType(__pyx_t_1, "numpy", "dtype", sizeof(PyArray_Descr), __Pyx_ImportType_CheckSize_Ignore); - if (!__pyx_ptype_5numpy_dtype) __PYX_ERR(1, 199, 
__pyx_L1_error) - __pyx_ptype_5numpy_flatiter = __Pyx_ImportType(__pyx_t_1, "numpy", "flatiter", sizeof(PyArrayIterObject), __Pyx_ImportType_CheckSize_Ignore); - if (!__pyx_ptype_5numpy_flatiter) __PYX_ERR(1, 222, __pyx_L1_error) - __pyx_ptype_5numpy_broadcast = __Pyx_ImportType(__pyx_t_1, "numpy", "broadcast", sizeof(PyArrayMultiIterObject), __Pyx_ImportType_CheckSize_Ignore); - if (!__pyx_ptype_5numpy_broadcast) __PYX_ERR(1, 226, __pyx_L1_error) - __pyx_ptype_5numpy_ndarray = __Pyx_ImportType(__pyx_t_1, "numpy", "ndarray", sizeof(PyArrayObject), __Pyx_ImportType_CheckSize_Ignore); - if (!__pyx_ptype_5numpy_ndarray) __PYX_ERR(1, 238, __pyx_L1_error) - __pyx_ptype_5numpy_ufunc = __Pyx_ImportType(__pyx_t_1, "numpy", "ufunc", sizeof(PyUFuncObject), __Pyx_ImportType_CheckSize_Ignore); - if (!__pyx_ptype_5numpy_ufunc) __PYX_ERR(1, 764, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_RefNannyFinishContext(); - return 0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_RefNannyFinishContext(); - return -1; -} - -static int __Pyx_modinit_variable_import_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); - /*--- Variable import code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_function_import_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); - /*--- Function import code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - - -#ifndef CYTHON_NO_PYINIT_EXPORT -#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC -#elif PY_MAJOR_VERSION < 3 -#ifdef __cplusplus -#define __Pyx_PyMODINIT_FUNC extern "C" void -#else -#define __Pyx_PyMODINIT_FUNC void -#endif -#else -#ifdef __cplusplus -#define __Pyx_PyMODINIT_FUNC extern "C" PyObject * -#else -#define __Pyx_PyMODINIT_FUNC PyObject * -#endif -#endif - - -#if PY_MAJOR_VERSION < 3 -__Pyx_PyMODINIT_FUNC initcpu_nms(void) CYTHON_SMALL_CODE; /*proto*/ 
-__Pyx_PyMODINIT_FUNC initcpu_nms(void) -#else -__Pyx_PyMODINIT_FUNC PyInit_cpu_nms(void) CYTHON_SMALL_CODE; /*proto*/ -__Pyx_PyMODINIT_FUNC PyInit_cpu_nms(void) -#if CYTHON_PEP489_MULTI_PHASE_INIT -{ - return PyModuleDef_Init(&__pyx_moduledef); -} -static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { - #if PY_VERSION_HEX >= 0x030700A1 - static PY_INT64_T main_interpreter_id = -1; - PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); - if (main_interpreter_id == -1) { - main_interpreter_id = current_id; - return (unlikely(current_id == -1)) ? -1 : 0; - } else if (unlikely(main_interpreter_id != current_id)) - #else - static PyInterpreterState *main_interpreter = NULL; - PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; - if (!main_interpreter) { - main_interpreter = current_interpreter; - } else if (unlikely(main_interpreter != current_interpreter)) - #endif - { - PyErr_SetString( - PyExc_ImportError, - "Interpreter change detected - this module can only be loaded into one interpreter per process."); - return -1; - } - return 0; -} -static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) { - PyObject *value = PyObject_GetAttrString(spec, from_name); - int result = 0; - if (likely(value)) { - if (allow_none || value != Py_None) { - result = PyDict_SetItemString(moddict, to_name, value); - } - Py_DECREF(value); - } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { - PyErr_Clear(); - } else { - result = -1; - } - return result; -} -static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { - PyObject *module = NULL, *moddict, *modname; - if (__Pyx_check_single_interpreter()) - return NULL; - if (__pyx_m) - return __Pyx_NewRef(__pyx_m); - modname = PyObject_GetAttrString(spec, "name"); - if (unlikely(!modname)) goto bad; - module = PyModule_NewObject(modname); - 
Py_DECREF(modname); - if (unlikely(!module)) goto bad; - moddict = PyModule_GetDict(module); - if (unlikely(!moddict)) goto bad; - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; - return module; -bad: - Py_XDECREF(module); - return NULL; -} - - -static CYTHON_SMALL_CODE int __pyx_pymod_exec_cpu_nms(PyObject *__pyx_pyinit_module) -#endif -#endif -{ - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannyDeclarations - #if CYTHON_PEP489_MULTI_PHASE_INIT - if (__pyx_m) { - if (__pyx_m == __pyx_pyinit_module) return 0; - PyErr_SetString(PyExc_RuntimeError, "Module 'cpu_nms' has already been imported. 
Re-initialisation is not supported."); - return -1; - } - #elif PY_MAJOR_VERSION >= 3 - if (__pyx_m) return __Pyx_NewRef(__pyx_m); - #endif - #if CYTHON_REFNANNY -__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); -if (!__Pyx_RefNanny) { - PyErr_Clear(); - __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); - if (!__Pyx_RefNanny) - Py_FatalError("failed to import 'refnanny' module"); -} -#endif - __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_cpu_nms(void)", 0); - if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #ifdef __Pxy_PyFrame_Initialize_Offsets - __Pxy_PyFrame_Initialize_Offsets(); - #endif - __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) - #ifdef __Pyx_CyFunction_USED - if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_FusedFunction_USED - if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_Coroutine_USED - if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_Generator_USED - if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_AsyncGen_USED - if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_StopAsyncIteration_USED - if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - /*--- Library function declarations ---*/ - /*--- Threads initialization code ---*/ - #if defined(WITH_THREAD) && PY_VERSION_HEX < 0x030700F0 && defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS - PyEval_InitThreads(); - #endif - /*--- Module creation code ---*/ - #if CYTHON_PEP489_MULTI_PHASE_INIT - __pyx_m = 
__pyx_pyinit_module; - Py_INCREF(__pyx_m); - #else - #if PY_MAJOR_VERSION < 3 - __pyx_m = Py_InitModule4("cpu_nms", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); - #else - __pyx_m = PyModule_Create(&__pyx_moduledef); - #endif - if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) - Py_INCREF(__pyx_d); - __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) - Py_INCREF(__pyx_b); - __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) - Py_INCREF(__pyx_cython_runtime); - if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - /*--- Initialize various global constants etc. ---*/ - if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) - if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - if (__pyx_module_is_main_nms__cpu_nms) { - if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - } - #if PY_MAJOR_VERSION >= 3 - { - PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) - if (!PyDict_GetItemString(modules, "nms.cpu_nms")) { - if (unlikely(PyDict_SetItemString(modules, "nms.cpu_nms", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) - } - } - #endif - /*--- Builtin init code ---*/ - if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - /*--- Constants init code ---*/ - if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - /*--- Global type/function init code ---*/ - (void)__Pyx_modinit_global_init_code(); - (void)__Pyx_modinit_variable_export_code(); - 
(void)__Pyx_modinit_function_export_code(); - (void)__Pyx_modinit_type_init_code(); - if (unlikely(__Pyx_modinit_type_import_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error) - (void)__Pyx_modinit_variable_import_code(); - (void)__Pyx_modinit_function_import_code(); - /*--- Execution code ---*/ - #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) - if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - - /* "nms/cpu_nms.pyx":8 - * # -------------------------------------------------------- - * - * import numpy as np # <<<<<<<<<<<<<< - * cimport numpy as np - * - */ - __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 8, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "nms/cpu_nms.pyx":17 - * return a if a <= b else b - * - * def cpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh): # <<<<<<<<<<<<<< - * cdef np.ndarray[np.float32_t, ndim=1] x1 = dets[:, 0] - * cdef np.ndarray[np.float32_t, ndim=1] y1 = dets[:, 1] - */ - __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_3nms_7cpu_nms_1cpu_nms, NULL, __pyx_n_s_nms_cpu_nms); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_cpu_nms, __pyx_t_1) < 0) __PYX_ERR(0, 17, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "nms/cpu_nms.pyx":70 - * return keep - * - * def cpu_soft_nms(np.ndarray[float, ndim=2] boxes, float sigma=0.5, float Nt=0.3, float threshold=0.001, unsigned int method=0): # <<<<<<<<<<<<<< - * cdef unsigned int N = boxes.shape[0] - * cdef float iw, ih, box_area - */ - __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_3nms_7cpu_nms_3cpu_soft_nms, NULL, __pyx_n_s_nms_cpu_nms); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 70, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_cpu_soft_nms, __pyx_t_1) < 0) 
__PYX_ERR(0, 70, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "nms/cpu_nms.pyx":1 - * # -------------------------------------------------------- # <<<<<<<<<<<<<< - * # Fast R-CNN - * # Copyright (c) 2015 Microsoft - */ - __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":892 - * raise ImportError("numpy.core.umath failed to import") - * - * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< - * try: - * _import_umath() - */ - - /*--- Wrapped vars code ---*/ - - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - if (__pyx_m) { - if (__pyx_d) { - __Pyx_AddTraceback("init nms.cpu_nms", __pyx_clineno, __pyx_lineno, __pyx_filename); - } - Py_CLEAR(__pyx_m); - } else if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_ImportError, "init nms.cpu_nms"); - } - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - #if CYTHON_PEP489_MULTI_PHASE_INIT - return (__pyx_m != NULL) ? 
0 : -1; - #elif PY_MAJOR_VERSION >= 3 - return __pyx_m; - #else - return; - #endif -} - -/* --- Runtime support code --- */ -/* Refnanny */ -#if CYTHON_REFNANNY -static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { - PyObject *m = NULL, *p = NULL; - void *r = NULL; - m = PyImport_ImportModule(modname); - if (!m) goto end; - p = PyObject_GetAttrString(m, "RefNannyAPI"); - if (!p) goto end; - r = PyLong_AsVoidPtr(p); -end: - Py_XDECREF(p); - Py_XDECREF(m); - return (__Pyx_RefNannyAPIStruct *)r; -} -#endif - -/* PyObjectGetAttrStr */ -#if CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { - PyTypeObject* tp = Py_TYPE(obj); - if (likely(tp->tp_getattro)) - return tp->tp_getattro(obj, attr_name); -#if PY_MAJOR_VERSION < 3 - if (likely(tp->tp_getattr)) - return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); -#endif - return PyObject_GetAttr(obj, attr_name); -} -#endif - -/* GetBuiltinName */ -static PyObject *__Pyx_GetBuiltinName(PyObject *name) { - PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); - if (unlikely(!result)) { - PyErr_Format(PyExc_NameError, -#if PY_MAJOR_VERSION >= 3 - "name '%U' is not defined", name); -#else - "name '%.200s' is not defined", PyString_AS_STRING(name)); -#endif - } - return result; -} - -/* RaiseArgTupleInvalid */ -static void __Pyx_RaiseArgtupleInvalid( - const char* func_name, - int exact, - Py_ssize_t num_min, - Py_ssize_t num_max, - Py_ssize_t num_found) -{ - Py_ssize_t num_expected; - const char *more_or_less; - if (num_found < num_min) { - num_expected = num_min; - more_or_less = "at least"; - } else { - num_expected = num_max; - more_or_less = "at most"; - } - if (exact) { - more_or_less = "exactly"; - } - PyErr_Format(PyExc_TypeError, - "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", - func_name, more_or_less, num_expected, - (num_expected == 1) ? 
"" : "s", num_found); -} - -/* RaiseDoubleKeywords */ -static void __Pyx_RaiseDoubleKeywordsError( - const char* func_name, - PyObject* kw_name) -{ - PyErr_Format(PyExc_TypeError, - #if PY_MAJOR_VERSION >= 3 - "%s() got multiple values for keyword argument '%U'", func_name, kw_name); - #else - "%s() got multiple values for keyword argument '%s'", func_name, - PyString_AsString(kw_name)); - #endif -} - -/* ParseKeywords */ -static int __Pyx_ParseOptionalKeywords( - PyObject *kwds, - PyObject **argnames[], - PyObject *kwds2, - PyObject *values[], - Py_ssize_t num_pos_args, - const char* function_name) -{ - PyObject *key = 0, *value = 0; - Py_ssize_t pos = 0; - PyObject*** name; - PyObject*** first_kw_arg = argnames + num_pos_args; - while (PyDict_Next(kwds, &pos, &key, &value)) { - name = first_kw_arg; - while (*name && (**name != key)) name++; - if (*name) { - values[name-argnames] = value; - continue; - } - name = first_kw_arg; - #if PY_MAJOR_VERSION < 3 - if (likely(PyString_Check(key))) { - while (*name) { - if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) - && _PyString_Eq(**name, key)) { - values[name-argnames] = value; - break; - } - name++; - } - if (*name) continue; - else { - PyObject*** argname = argnames; - while (argname != first_kw_arg) { - if ((**argname == key) || ( - (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) - && _PyString_Eq(**argname, key))) { - goto arg_passed_twice; - } - argname++; - } - } - } else - #endif - if (likely(PyUnicode_Check(key))) { - while (*name) { - int cmp = (**name == key) ? 0 : - #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 - (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 
1 : - #endif - PyUnicode_Compare(**name, key); - if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; - if (cmp == 0) { - values[name-argnames] = value; - break; - } - name++; - } - if (*name) continue; - else { - PyObject*** argname = argnames; - while (argname != first_kw_arg) { - int cmp = (**argname == key) ? 0 : - #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 - (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : - #endif - PyUnicode_Compare(**argname, key); - if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; - if (cmp == 0) goto arg_passed_twice; - argname++; - } - } - } else - goto invalid_keyword_type; - if (kwds2) { - if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; - } else { - goto invalid_keyword; - } - } - return 0; -arg_passed_twice: - __Pyx_RaiseDoubleKeywordsError(function_name, key); - goto bad; -invalid_keyword_type: - PyErr_Format(PyExc_TypeError, - "%.200s() keywords must be strings", function_name); - goto bad; -invalid_keyword: - PyErr_Format(PyExc_TypeError, - #if PY_MAJOR_VERSION < 3 - "%.200s() got an unexpected keyword argument '%.200s'", - function_name, PyString_AsString(key)); - #else - "%s() got an unexpected keyword argument '%U'", - function_name, key); - #endif -bad: - return -1; -} - -/* ArgTypeTest */ -static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) -{ - if (unlikely(!type)) { - PyErr_SetString(PyExc_SystemError, "Missing type object"); - return 0; - } - else if (exact) { - #if PY_MAJOR_VERSION == 2 - if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; - #endif - } - else { - if (likely(__Pyx_TypeCheck(obj, type))) return 1; - } - PyErr_Format(PyExc_TypeError, - "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", - name, type->tp_name, Py_TYPE(obj)->tp_name); - return 0; -} - -/* IsLittleEndian */ -static CYTHON_INLINE int __Pyx_Is_Little_Endian(void) -{ - union { - uint32_t u32; - 
uint8_t u8[4]; - } S; - S.u32 = 0x01020304; - return S.u8[0] == 4; -} - -/* BufferFormatCheck */ -static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, - __Pyx_BufFmt_StackElem* stack, - __Pyx_TypeInfo* type) { - stack[0].field = &ctx->root; - stack[0].parent_offset = 0; - ctx->root.type = type; - ctx->root.name = "buffer dtype"; - ctx->root.offset = 0; - ctx->head = stack; - ctx->head->field = &ctx->root; - ctx->fmt_offset = 0; - ctx->head->parent_offset = 0; - ctx->new_packmode = '@'; - ctx->enc_packmode = '@'; - ctx->new_count = 1; - ctx->enc_count = 0; - ctx->enc_type = 0; - ctx->is_complex = 0; - ctx->is_valid_array = 0; - ctx->struct_alignment = 0; - while (type->typegroup == 'S') { - ++ctx->head; - ctx->head->field = type->fields; - ctx->head->parent_offset = 0; - type = type->fields->type; - } -} -static int __Pyx_BufFmt_ParseNumber(const char** ts) { - int count; - const char* t = *ts; - if (*t < '0' || *t > '9') { - return -1; - } else { - count = *t++ - '0'; - while (*t >= '0' && *t <= '9') { - count *= 10; - count += *t++ - '0'; - } - } - *ts = t; - return count; -} -static int __Pyx_BufFmt_ExpectNumber(const char **ts) { - int number = __Pyx_BufFmt_ParseNumber(ts); - if (number == -1) - PyErr_Format(PyExc_ValueError,\ - "Does not understand character buffer dtype format string ('%c')", **ts); - return number; -} -static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { - PyErr_Format(PyExc_ValueError, - "Unexpected format string character: '%c'", ch); -} -static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { - switch (ch) { - case '?': return "'bool'"; - case 'c': return "'char'"; - case 'b': return "'signed char'"; - case 'B': return "'unsigned char'"; - case 'h': return "'short'"; - case 'H': return "'unsigned short'"; - case 'i': return "'int'"; - case 'I': return "'unsigned int'"; - case 'l': return "'long'"; - case 'L': return "'unsigned long'"; - case 'q': return "'long long'"; - case 'Q': return "'unsigned long long'"; - 
case 'f': return (is_complex ? "'complex float'" : "'float'"); - case 'd': return (is_complex ? "'complex double'" : "'double'"); - case 'g': return (is_complex ? "'complex long double'" : "'long double'"); - case 'T': return "a struct"; - case 'O': return "Python object"; - case 'P': return "a pointer"; - case 's': case 'p': return "a string"; - case 0: return "end"; - default: return "unparseable format string"; - } -} -static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { - switch (ch) { - case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; - case 'h': case 'H': return 2; - case 'i': case 'I': case 'l': case 'L': return 4; - case 'q': case 'Q': return 8; - case 'f': return (is_complex ? 8 : 4); - case 'd': return (is_complex ? 16 : 8); - case 'g': { - PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); - return 0; - } - case 'O': case 'P': return sizeof(void*); - default: - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } -} -static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { - switch (ch) { - case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; - case 'h': case 'H': return sizeof(short); - case 'i': case 'I': return sizeof(int); - case 'l': case 'L': return sizeof(long); - #ifdef HAVE_LONG_LONG - case 'q': case 'Q': return sizeof(PY_LONG_LONG); - #endif - case 'f': return sizeof(float) * (is_complex ? 2 : 1); - case 'd': return sizeof(double) * (is_complex ? 2 : 1); - case 'g': return sizeof(long double) * (is_complex ? 
2 : 1); - case 'O': case 'P': return sizeof(void*); - default: { - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } - } -} -typedef struct { char c; short x; } __Pyx_st_short; -typedef struct { char c; int x; } __Pyx_st_int; -typedef struct { char c; long x; } __Pyx_st_long; -typedef struct { char c; float x; } __Pyx_st_float; -typedef struct { char c; double x; } __Pyx_st_double; -typedef struct { char c; long double x; } __Pyx_st_longdouble; -typedef struct { char c; void *x; } __Pyx_st_void_p; -#ifdef HAVE_LONG_LONG -typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; -#endif -static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { - switch (ch) { - case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; - case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); - case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); - case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); -#ifdef HAVE_LONG_LONG - case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); -#endif - case 'f': return sizeof(__Pyx_st_float) - sizeof(float); - case 'd': return sizeof(__Pyx_st_double) - sizeof(double); - case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); - case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); - default: - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } -} -/* These are for computing the padding at the end of the struct to align - on the first member of the struct. This will probably the same as above, - but we don't have any guarantees. 
- */ -typedef struct { short x; char c; } __Pyx_pad_short; -typedef struct { int x; char c; } __Pyx_pad_int; -typedef struct { long x; char c; } __Pyx_pad_long; -typedef struct { float x; char c; } __Pyx_pad_float; -typedef struct { double x; char c; } __Pyx_pad_double; -typedef struct { long double x; char c; } __Pyx_pad_longdouble; -typedef struct { void *x; char c; } __Pyx_pad_void_p; -#ifdef HAVE_LONG_LONG -typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; -#endif -static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { - switch (ch) { - case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; - case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); - case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); - case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); -#ifdef HAVE_LONG_LONG - case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); -#endif - case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); - case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); - case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); - case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); - default: - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } -} -static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { - switch (ch) { - case 'c': - return 'H'; - case 'b': case 'h': case 'i': - case 'l': case 'q': case 's': case 'p': - return 'I'; - case '?': case 'B': case 'H': case 'I': case 'L': case 'Q': - return 'U'; - case 'f': case 'd': case 'g': - return (is_complex ? 
'C' : 'R'); - case 'O': - return 'O'; - case 'P': - return 'P'; - default: { - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } - } -} -static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { - if (ctx->head == NULL || ctx->head->field == &ctx->root) { - const char* expected; - const char* quote; - if (ctx->head == NULL) { - expected = "end"; - quote = ""; - } else { - expected = ctx->head->field->type->name; - quote = "'"; - } - PyErr_Format(PyExc_ValueError, - "Buffer dtype mismatch, expected %s%s%s but got %s", - quote, expected, quote, - __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); - } else { - __Pyx_StructField* field = ctx->head->field; - __Pyx_StructField* parent = (ctx->head - 1)->field; - PyErr_Format(PyExc_ValueError, - "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", - field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), - parent->type->name, field->name); - } -} -static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { - char group; - size_t size, offset, arraysize = 1; - if (ctx->enc_type == 0) return 0; - if (ctx->head->field->type->arraysize[0]) { - int i, ndim = 0; - if (ctx->enc_type == 's' || ctx->enc_type == 'p') { - ctx->is_valid_array = ctx->head->field->type->ndim == 1; - ndim = 1; - if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { - PyErr_Format(PyExc_ValueError, - "Expected a dimension of size %zu, got %zu", - ctx->head->field->type->arraysize[0], ctx->enc_count); - return -1; - } - } - if (!ctx->is_valid_array) { - PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", - ctx->head->field->type->ndim, ndim); - return -1; - } - for (i = 0; i < ctx->head->field->type->ndim; i++) { - arraysize *= ctx->head->field->type->arraysize[i]; - } - ctx->is_valid_array = 0; - ctx->enc_count = 1; - } - group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); - do { - __Pyx_StructField* field = ctx->head->field; - __Pyx_TypeInfo* 
type = field->type; - if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { - size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); - } else { - size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); - } - if (ctx->enc_packmode == '@') { - size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); - size_t align_mod_offset; - if (align_at == 0) return -1; - align_mod_offset = ctx->fmt_offset % align_at; - if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; - if (ctx->struct_alignment == 0) - ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, - ctx->is_complex); - } - if (type->size != size || type->typegroup != group) { - if (type->typegroup == 'C' && type->fields != NULL) { - size_t parent_offset = ctx->head->parent_offset + field->offset; - ++ctx->head; - ctx->head->field = type->fields; - ctx->head->parent_offset = parent_offset; - continue; - } - if ((type->typegroup == 'H' || group == 'H') && type->size == size) { - } else { - __Pyx_BufFmt_RaiseExpected(ctx); - return -1; - } - } - offset = ctx->head->parent_offset + field->offset; - if (ctx->fmt_offset != offset) { - PyErr_Format(PyExc_ValueError, - "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", - (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); - return -1; - } - ctx->fmt_offset += size; - if (arraysize) - ctx->fmt_offset += (arraysize - 1) * size; - --ctx->enc_count; - while (1) { - if (field == &ctx->root) { - ctx->head = NULL; - if (ctx->enc_count != 0) { - __Pyx_BufFmt_RaiseExpected(ctx); - return -1; - } - break; - } - ctx->head->field = ++field; - if (field->type == NULL) { - --ctx->head; - field = ctx->head->field; - continue; - } else if (field->type->typegroup == 'S') { - size_t parent_offset = ctx->head->parent_offset + field->offset; - if (field->type->fields->type == NULL) continue; - field = field->type->fields; 
- ++ctx->head; - ctx->head->field = field; - ctx->head->parent_offset = parent_offset; - break; - } else { - break; - } - } - } while (ctx->enc_count); - ctx->enc_type = 0; - ctx->is_complex = 0; - return 0; -} -static PyObject * -__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) -{ - const char *ts = *tsp; - int i = 0, number, ndim; - ++ts; - if (ctx->new_count != 1) { - PyErr_SetString(PyExc_ValueError, - "Cannot handle repeated arrays in format string"); - return NULL; - } - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ndim = ctx->head->field->type->ndim; - while (*ts && *ts != ')') { - switch (*ts) { - case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; - default: break; - } - number = __Pyx_BufFmt_ExpectNumber(&ts); - if (number == -1) return NULL; - if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) - return PyErr_Format(PyExc_ValueError, - "Expected a dimension of size %zu, got %d", - ctx->head->field->type->arraysize[i], number); - if (*ts != ',' && *ts != ')') - return PyErr_Format(PyExc_ValueError, - "Expected a comma in format string, got '%c'", *ts); - if (*ts == ',') ts++; - i++; - } - if (i != ndim) - return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", - ctx->head->field->type->ndim, i); - if (!*ts) { - PyErr_SetString(PyExc_ValueError, - "Unexpected end of format string, expected ')'"); - return NULL; - } - ctx->is_valid_array = 1; - ctx->new_count = 1; - *tsp = ++ts; - return Py_None; -} -static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { - int got_Z = 0; - while (1) { - switch(*ts) { - case 0: - if (ctx->enc_type != 0 && ctx->head == NULL) { - __Pyx_BufFmt_RaiseExpected(ctx); - return NULL; - } - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - if (ctx->head != NULL) { - __Pyx_BufFmt_RaiseExpected(ctx); - return NULL; - } - return ts; - case ' ': - case '\r': - case '\n': - ++ts; - break; - case 
'<': - if (!__Pyx_Is_Little_Endian()) { - PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); - return NULL; - } - ctx->new_packmode = '='; - ++ts; - break; - case '>': - case '!': - if (__Pyx_Is_Little_Endian()) { - PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); - return NULL; - } - ctx->new_packmode = '='; - ++ts; - break; - case '=': - case '@': - case '^': - ctx->new_packmode = *ts++; - break; - case 'T': - { - const char* ts_after_sub; - size_t i, struct_count = ctx->new_count; - size_t struct_alignment = ctx->struct_alignment; - ctx->new_count = 1; - ++ts; - if (*ts != '{') { - PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); - return NULL; - } - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ctx->enc_type = 0; - ctx->enc_count = 0; - ctx->struct_alignment = 0; - ++ts; - ts_after_sub = ts; - for (i = 0; i != struct_count; ++i) { - ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); - if (!ts_after_sub) return NULL; - } - ts = ts_after_sub; - if (struct_alignment) ctx->struct_alignment = struct_alignment; - } - break; - case '}': - { - size_t alignment = ctx->struct_alignment; - ++ts; - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ctx->enc_type = 0; - if (alignment && ctx->fmt_offset % alignment) { - ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); - } - } - return ts; - case 'x': - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ctx->fmt_offset += ctx->new_count; - ctx->new_count = 1; - ctx->enc_count = 0; - ctx->enc_type = 0; - ctx->enc_packmode = ctx->new_packmode; - ++ts; - break; - case 'Z': - got_Z = 1; - ++ts; - if (*ts != 'f' && *ts != 'd' && *ts != 'g') { - __Pyx_BufFmt_RaiseUnexpectedChar('Z'); - return NULL; - } - CYTHON_FALLTHROUGH; - case '?': case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': - case 'l': case 'L': case 'q': case 'Q': - case 'f': case 
'd': case 'g': - case 'O': case 'p': - if ((ctx->enc_type == *ts) && (got_Z == ctx->is_complex) && - (ctx->enc_packmode == ctx->new_packmode) && (!ctx->is_valid_array)) { - ctx->enc_count += ctx->new_count; - ctx->new_count = 1; - got_Z = 0; - ++ts; - break; - } - CYTHON_FALLTHROUGH; - case 's': - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ctx->enc_count = ctx->new_count; - ctx->enc_packmode = ctx->new_packmode; - ctx->enc_type = *ts; - ctx->is_complex = got_Z; - ++ts; - ctx->new_count = 1; - got_Z = 0; - break; - case ':': - ++ts; - while(*ts != ':') ++ts; - ++ts; - break; - case '(': - if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; - break; - default: - { - int number = __Pyx_BufFmt_ExpectNumber(&ts); - if (number == -1) return NULL; - ctx->new_count = (size_t)number; - } - } - } -} - -/* BufferGetAndValidate */ - static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { - if (unlikely(info->buf == NULL)) return; - if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; - __Pyx_ReleaseBuffer(info); -} -static void __Pyx_ZeroBuffer(Py_buffer* buf) { - buf->buf = NULL; - buf->obj = NULL; - buf->strides = __Pyx_zeros; - buf->shape = __Pyx_zeros; - buf->suboffsets = __Pyx_minusones; -} -static int __Pyx__GetBufferAndValidate( - Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, - int nd, int cast, __Pyx_BufFmt_StackElem* stack) -{ - buf->buf = NULL; - if (unlikely(__Pyx_GetBuffer(obj, buf, flags) == -1)) { - __Pyx_ZeroBuffer(buf); - return -1; - } - if (unlikely(buf->ndim != nd)) { - PyErr_Format(PyExc_ValueError, - "Buffer has wrong number of dimensions (expected %d, got %d)", - nd, buf->ndim); - goto fail; - } - if (!cast) { - __Pyx_BufFmt_Context ctx; - __Pyx_BufFmt_Init(&ctx, stack, dtype); - if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; - } - if (unlikely((size_t)buf->itemsize != dtype->size)) { - PyErr_Format(PyExc_ValueError, - "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) 
does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)", - buf->itemsize, (buf->itemsize > 1) ? "s" : "", - dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : ""); - goto fail; - } - if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; - return 0; -fail:; - __Pyx_SafeReleaseBuffer(buf); - return -1; -} - -/* GetItemInt */ - static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { - PyObject *r; - if (!j) return NULL; - r = PyObject_GetItem(o, j); - Py_DECREF(j); - return r; -} -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, - CYTHON_NCP_UNUSED int wraparound, - CYTHON_NCP_UNUSED int boundscheck) { -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - Py_ssize_t wrapped_i = i; - if (wraparound & unlikely(i < 0)) { - wrapped_i += PyList_GET_SIZE(o); - } - if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) { - PyObject *r = PyList_GET_ITEM(o, wrapped_i); - Py_INCREF(r); - return r; - } - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -#else - return PySequence_GetItem(o, i); -#endif -} -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, - CYTHON_NCP_UNUSED int wraparound, - CYTHON_NCP_UNUSED int boundscheck) { -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - Py_ssize_t wrapped_i = i; - if (wraparound & unlikely(i < 0)) { - wrapped_i += PyTuple_GET_SIZE(o); - } - if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) { - PyObject *r = PyTuple_GET_ITEM(o, wrapped_i); - Py_INCREF(r); - return r; - } - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -#else - return PySequence_GetItem(o, i); -#endif -} -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, - CYTHON_NCP_UNUSED int wraparound, - CYTHON_NCP_UNUSED int boundscheck) { -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && 
CYTHON_USE_TYPE_SLOTS - if (is_list || PyList_CheckExact(o)) { - Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); - if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) { - PyObject *r = PyList_GET_ITEM(o, n); - Py_INCREF(r); - return r; - } - } - else if (PyTuple_CheckExact(o)) { - Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o); - if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) { - PyObject *r = PyTuple_GET_ITEM(o, n); - Py_INCREF(r); - return r; - } - } else { - PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; - if (likely(m && m->sq_item)) { - if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { - Py_ssize_t l = m->sq_length(o); - if (likely(l >= 0)) { - i += l; - } else { - if (!PyErr_ExceptionMatches(PyExc_OverflowError)) - return NULL; - PyErr_Clear(); - } - } - return m->sq_item(o, i); - } - } -#else - if (is_list || PySequence_Check(o)) { - return PySequence_GetItem(o, i); - } -#endif - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -} - -/* ObjectGetItem */ - #if CYTHON_USE_TYPE_SLOTS -static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) { - PyObject *runerr; - Py_ssize_t key_value; - PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence; - if (unlikely(!(m && m->sq_item))) { - PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name); - return NULL; - } - key_value = __Pyx_PyIndex_AsSsize_t(index); - if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) { - return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1); - } - if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) { - PyErr_Clear(); - PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name); - } - return NULL; -} -static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) { - PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping; - if (likely(m 
&& m->mp_subscript)) { - return m->mp_subscript(obj, key); - } - return __Pyx_PyObject_GetIndex(obj, key); -} -#endif - -/* ExtTypeTest */ - static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { - if (unlikely(!type)) { - PyErr_SetString(PyExc_SystemError, "Missing type object"); - return 0; - } - if (likely(__Pyx_TypeCheck(obj, type))) - return 1; - PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", - Py_TYPE(obj)->tp_name, type->tp_name); - return 0; -} - -/* PyIntBinop */ - #if !CYTHON_COMPILING_IN_PYPY -static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) { - (void)inplace; - (void)zerodivision_check; - #if PY_MAJOR_VERSION < 3 - if (likely(PyInt_CheckExact(op1))) { - const long b = intval; - long x; - long a = PyInt_AS_LONG(op1); - x = (long)((unsigned long)a + b); - if (likely((x^a) >= 0 || (x^b) >= 0)) - return PyInt_FromLong(x); - return PyLong_Type.tp_as_number->nb_add(op1, op2); - } - #endif - #if CYTHON_USE_PYLONG_INTERNALS - if (likely(PyLong_CheckExact(op1))) { - const long b = intval; - long a, x; -#ifdef HAVE_LONG_LONG - const PY_LONG_LONG llb = intval; - PY_LONG_LONG lla, llx; -#endif - const digit* digits = ((PyLongObject*)op1)->ob_digit; - const Py_ssize_t size = Py_SIZE(op1); - if (likely(__Pyx_sst_abs(size) <= 1)) { - a = likely(size) ? 
digits[0] : 0; - if (size == -1) a = -a; - } else { - switch (size) { - case -2: - if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; -#ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { - lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; -#endif - } - CYTHON_FALLTHROUGH; - case 2: - if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; -#ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { - lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; -#endif - } - CYTHON_FALLTHROUGH; - case -3: - if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; -#ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { - lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; -#endif - } - CYTHON_FALLTHROUGH; - case 3: - if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; -#ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { - lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; -#endif - } - CYTHON_FALLTHROUGH; - case -4: - if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { - a = 
-(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; -#ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { - lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; -#endif - } - CYTHON_FALLTHROUGH; - case 4: - if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { - a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; -#ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { - lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; -#endif - } - CYTHON_FALLTHROUGH; - default: return PyLong_Type.tp_as_number->nb_add(op1, op2); - } - } - x = a + b; - return PyLong_FromLong(x); -#ifdef HAVE_LONG_LONG - long_long: - llx = lla + llb; - return PyLong_FromLongLong(llx); -#endif - - - } - #endif - if (PyFloat_CheckExact(op1)) { - const long b = intval; - double a = PyFloat_AS_DOUBLE(op1); - double result; - PyFPE_START_PROTECT("add", return NULL) - result = ((double)a) + (double)b; - PyFPE_END_PROTECT(result) - return PyFloat_FromDouble(result); - } - return (inplace ? 
PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2); -} -#endif - -/* PyFunctionFastCall */ - #if CYTHON_FAST_PYCALL -static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, - PyObject *globals) { - PyFrameObject *f; - PyThreadState *tstate = __Pyx_PyThreadState_Current; - PyObject **fastlocals; - Py_ssize_t i; - PyObject *result; - assert(globals != NULL); - /* XXX Perhaps we should create a specialized - PyFrame_New() that doesn't take locals, but does - take builtins without sanity checking them. - */ - assert(tstate != NULL); - f = PyFrame_New(tstate, co, globals, NULL); - if (f == NULL) { - return NULL; - } - fastlocals = __Pyx_PyFrame_GetLocalsplus(f); - for (i = 0; i < na; i++) { - Py_INCREF(*args); - fastlocals[i] = *args++; - } - result = PyEval_EvalFrameEx(f,0); - ++tstate->recursion_depth; - Py_DECREF(f); - --tstate->recursion_depth; - return result; -} -#if 1 || PY_VERSION_HEX < 0x030600B1 -static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { - PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); - PyObject *globals = PyFunction_GET_GLOBALS(func); - PyObject *argdefs = PyFunction_GET_DEFAULTS(func); - PyObject *closure; -#if PY_MAJOR_VERSION >= 3 - PyObject *kwdefs; -#endif - PyObject *kwtuple, **k; - PyObject **d; - Py_ssize_t nd; - Py_ssize_t nk; - PyObject *result; - assert(kwargs == NULL || PyDict_Check(kwargs)); - nk = kwargs ? 
PyDict_Size(kwargs) : 0; - if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { - return NULL; - } - if ( -#if PY_MAJOR_VERSION >= 3 - co->co_kwonlyargcount == 0 && -#endif - likely(kwargs == NULL || nk == 0) && - co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { - if (argdefs == NULL && co->co_argcount == nargs) { - result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); - goto done; - } - else if (nargs == 0 && argdefs != NULL - && co->co_argcount == Py_SIZE(argdefs)) { - /* function called with no arguments, but all parameters have - a default value: use default values as arguments .*/ - args = &PyTuple_GET_ITEM(argdefs, 0); - result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); - goto done; - } - } - if (kwargs != NULL) { - Py_ssize_t pos, i; - kwtuple = PyTuple_New(2 * nk); - if (kwtuple == NULL) { - result = NULL; - goto done; - } - k = &PyTuple_GET_ITEM(kwtuple, 0); - pos = i = 0; - while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { - Py_INCREF(k[i]); - Py_INCREF(k[i+1]); - i += 2; - } - nk = i / 2; - } - else { - kwtuple = NULL; - k = NULL; - } - closure = PyFunction_GET_CLOSURE(func); -#if PY_MAJOR_VERSION >= 3 - kwdefs = PyFunction_GET_KW_DEFAULTS(func); -#endif - if (argdefs != NULL) { - d = &PyTuple_GET_ITEM(argdefs, 0); - nd = Py_SIZE(argdefs); - } - else { - d = NULL; - nd = 0; - } -#if PY_MAJOR_VERSION >= 3 - result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, - args, (int)nargs, - k, (int)nk, - d, (int)nd, kwdefs, closure); -#else - result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, - args, (int)nargs, - k, (int)nk, - d, (int)nd, closure); -#endif - Py_XDECREF(kwtuple); -done: - Py_LeaveRecursiveCall(); - return result; -} -#endif -#endif - -/* PyObjectCall */ - #if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { - PyObject *result; - ternaryfunc call = Py_TYPE(func)->tp_call; - if 
(unlikely(!call)) - return PyObject_Call(func, arg, kw); - if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) - return NULL; - result = (*call)(func, arg, kw); - Py_LeaveRecursiveCall(); - if (unlikely(!result) && unlikely(!PyErr_Occurred())) { - PyErr_SetString( - PyExc_SystemError, - "NULL result without error in PyObject_Call"); - } - return result; -} -#endif - -/* PyObjectCallMethO */ - #if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { - PyObject *self, *result; - PyCFunction cfunc; - cfunc = PyCFunction_GET_FUNCTION(func); - self = PyCFunction_GET_SELF(func); - if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) - return NULL; - result = cfunc(self, arg); - Py_LeaveRecursiveCall(); - if (unlikely(!result) && unlikely(!PyErr_Occurred())) { - PyErr_SetString( - PyExc_SystemError, - "NULL result without error in PyObject_Call"); - } - return result; -} -#endif - -/* PyObjectCallNoArg */ - #if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) { -#if CYTHON_FAST_PYCALL - if (PyFunction_Check(func)) { - return __Pyx_PyFunction_FastCall(func, NULL, 0); - } -#endif -#ifdef __Pyx_CyFunction_USED - if (likely(PyCFunction_Check(func) || __Pyx_CyFunction_Check(func))) -#else - if (likely(PyCFunction_Check(func))) -#endif - { - if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) { - return __Pyx_PyObject_CallMethO(func, NULL); - } - } - return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL); -} -#endif - -/* PyCFunctionFastCall */ - #if CYTHON_FAST_PYCCALL -static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { - PyCFunctionObject *func = (PyCFunctionObject*)func_obj; - PyCFunction meth = PyCFunction_GET_FUNCTION(func); - PyObject *self = PyCFunction_GET_SELF(func); - int flags = PyCFunction_GET_FLAGS(func); - 
assert(PyCFunction_Check(func)); - assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); - assert(nargs >= 0); - assert(nargs == 0 || args != NULL); - /* _PyCFunction_FastCallDict() must not be called with an exception set, - because it may clear it (directly or indirectly) and so the - caller loses its exception */ - assert(!PyErr_Occurred()); - if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { - return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); - } else { - return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); - } -} -#endif - -/* PyObjectCallOneArg */ - #if CYTHON_COMPILING_IN_CPYTHON -static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { - PyObject *result; - PyObject *args = PyTuple_New(1); - if (unlikely(!args)) return NULL; - Py_INCREF(arg); - PyTuple_SET_ITEM(args, 0, arg); - result = __Pyx_PyObject_Call(func, args, NULL); - Py_DECREF(args); - return result; -} -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { -#if CYTHON_FAST_PYCALL - if (PyFunction_Check(func)) { - return __Pyx_PyFunction_FastCall(func, &arg, 1); - } -#endif - if (likely(PyCFunction_Check(func))) { - if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { - return __Pyx_PyObject_CallMethO(func, arg); -#if CYTHON_FAST_PYCCALL - } else if (__Pyx_PyFastCFunction_Check(func)) { - return __Pyx_PyCFunction_FastCall(func, &arg, 1); -#endif - } - } - return __Pyx__PyObject_CallOneArg(func, arg); -} -#else -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { - PyObject *result; - PyObject *args = PyTuple_Pack(1, arg); - if (unlikely(!args)) return NULL; - result = __Pyx_PyObject_Call(func, args, NULL); - Py_DECREF(args); - return result; -} -#endif - -/* PyDictVersioning */ - #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PY_UINT64_T 
__Pyx_get_tp_dict_version(PyObject *obj) { - PyObject *dict = Py_TYPE(obj)->tp_dict; - return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; -} -static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { - PyObject **dictptr = NULL; - Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; - if (offset) { -#if CYTHON_COMPILING_IN_CPYTHON - dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); -#else - dictptr = _PyObject_GetDictPtr(obj); -#endif - } - return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0; -} -static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { - PyObject *dict = Py_TYPE(obj)->tp_dict; - if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) - return 0; - return obj_dict_version == __Pyx_get_object_dict_version(obj); -} -#endif - -/* GetModuleGlobalName */ - #if CYTHON_USE_DICT_VERSIONS -static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) -#else -static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) -#endif -{ - PyObject *result; -#if !CYTHON_AVOID_BORROWED_REFS -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 - result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); - __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) - if (likely(result)) { - return __Pyx_NewRef(result); - } else if (unlikely(PyErr_Occurred())) { - return NULL; - } -#else - result = PyDict_GetItem(__pyx_d, name); - __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) - if (likely(result)) { - return __Pyx_NewRef(result); - } -#endif -#else - result = PyObject_GetItem(__pyx_d, name); - __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) - if (likely(result)) { - return __Pyx_NewRef(result); - } - 
PyErr_Clear(); -#endif - return __Pyx_GetBuiltinName(name); -} - -/* BufferIndexError */ - static void __Pyx_RaiseBufferIndexError(int axis) { - PyErr_Format(PyExc_IndexError, - "Out of bounds on buffer access (axis %d)", axis); -} - -/* PyErrFetchRestore */ - #if CYTHON_FAST_THREAD_STATE -static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { - PyObject *tmp_type, *tmp_value, *tmp_tb; - tmp_type = tstate->curexc_type; - tmp_value = tstate->curexc_value; - tmp_tb = tstate->curexc_traceback; - tstate->curexc_type = type; - tstate->curexc_value = value; - tstate->curexc_traceback = tb; - Py_XDECREF(tmp_type); - Py_XDECREF(tmp_value); - Py_XDECREF(tmp_tb); -} -static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { - *type = tstate->curexc_type; - *value = tstate->curexc_value; - *tb = tstate->curexc_traceback; - tstate->curexc_type = 0; - tstate->curexc_value = 0; - tstate->curexc_traceback = 0; -} -#endif - -/* PyObjectCall2Args */ - static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) { - PyObject *args, *result = NULL; - #if CYTHON_FAST_PYCALL - if (PyFunction_Check(function)) { - PyObject *args[2] = {arg1, arg2}; - return __Pyx_PyFunction_FastCall(function, args, 2); - } - #endif - #if CYTHON_FAST_PYCCALL - if (__Pyx_PyFastCFunction_Check(function)) { - PyObject *args[2] = {arg1, arg2}; - return __Pyx_PyCFunction_FastCall(function, args, 2); - } - #endif - args = PyTuple_New(2); - if (unlikely(!args)) goto done; - Py_INCREF(arg1); - PyTuple_SET_ITEM(args, 0, arg1); - Py_INCREF(arg2); - PyTuple_SET_ITEM(args, 1, arg2); - Py_INCREF(function); - result = __Pyx_PyObject_Call(function, args, NULL); - Py_DECREF(args); - Py_DECREF(function); -done: - return result; -} - -/* GetTopmostException */ - #if CYTHON_USE_EXC_INFO_STACK -static _PyErr_StackItem * 
-__Pyx_PyErr_GetTopmostException(PyThreadState *tstate) -{ - _PyErr_StackItem *exc_info = tstate->exc_info; - while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) && - exc_info->previous_item != NULL) - { - exc_info = exc_info->previous_item; - } - return exc_info; -} -#endif - -/* SaveResetException */ - #if CYTHON_FAST_THREAD_STATE -static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { - #if CYTHON_USE_EXC_INFO_STACK - _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); - *type = exc_info->exc_type; - *value = exc_info->exc_value; - *tb = exc_info->exc_traceback; - #else - *type = tstate->exc_type; - *value = tstate->exc_value; - *tb = tstate->exc_traceback; - #endif - Py_XINCREF(*type); - Py_XINCREF(*value); - Py_XINCREF(*tb); -} -static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { - PyObject *tmp_type, *tmp_value, *tmp_tb; - #if CYTHON_USE_EXC_INFO_STACK - _PyErr_StackItem *exc_info = tstate->exc_info; - tmp_type = exc_info->exc_type; - tmp_value = exc_info->exc_value; - tmp_tb = exc_info->exc_traceback; - exc_info->exc_type = type; - exc_info->exc_value = value; - exc_info->exc_traceback = tb; - #else - tmp_type = tstate->exc_type; - tmp_value = tstate->exc_value; - tmp_tb = tstate->exc_traceback; - tstate->exc_type = type; - tstate->exc_value = value; - tstate->exc_traceback = tb; - #endif - Py_XDECREF(tmp_type); - Py_XDECREF(tmp_value); - Py_XDECREF(tmp_tb); -} -#endif - -/* PyErrExceptionMatches */ - #if CYTHON_FAST_THREAD_STATE -static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { - Py_ssize_t i, n; - n = PyTuple_GET_SIZE(tuple); -#if PY_MAJOR_VERSION >= 3 - for (i=0; icurexc_type; - if (exc_type == err) return 1; - if (unlikely(!exc_type)) return 0; - if (unlikely(PyTuple_Check(err))) - return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); - return 
__Pyx_PyErr_GivenExceptionMatches(exc_type, err); -} -#endif - -/* GetException */ - #if CYTHON_FAST_THREAD_STATE -static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) -#else -static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) -#endif -{ - PyObject *local_type, *local_value, *local_tb; -#if CYTHON_FAST_THREAD_STATE - PyObject *tmp_type, *tmp_value, *tmp_tb; - local_type = tstate->curexc_type; - local_value = tstate->curexc_value; - local_tb = tstate->curexc_traceback; - tstate->curexc_type = 0; - tstate->curexc_value = 0; - tstate->curexc_traceback = 0; -#else - PyErr_Fetch(&local_type, &local_value, &local_tb); -#endif - PyErr_NormalizeException(&local_type, &local_value, &local_tb); -#if CYTHON_FAST_THREAD_STATE - if (unlikely(tstate->curexc_type)) -#else - if (unlikely(PyErr_Occurred())) -#endif - goto bad; - #if PY_MAJOR_VERSION >= 3 - if (local_tb) { - if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) - goto bad; - } - #endif - Py_XINCREF(local_tb); - Py_XINCREF(local_type); - Py_XINCREF(local_value); - *type = local_type; - *value = local_value; - *tb = local_tb; -#if CYTHON_FAST_THREAD_STATE - #if CYTHON_USE_EXC_INFO_STACK - { - _PyErr_StackItem *exc_info = tstate->exc_info; - tmp_type = exc_info->exc_type; - tmp_value = exc_info->exc_value; - tmp_tb = exc_info->exc_traceback; - exc_info->exc_type = local_type; - exc_info->exc_value = local_value; - exc_info->exc_traceback = local_tb; - } - #else - tmp_type = tstate->exc_type; - tmp_value = tstate->exc_value; - tmp_tb = tstate->exc_traceback; - tstate->exc_type = local_type; - tstate->exc_value = local_value; - tstate->exc_traceback = local_tb; - #endif - Py_XDECREF(tmp_type); - Py_XDECREF(tmp_value); - Py_XDECREF(tmp_tb); -#else - PyErr_SetExcInfo(local_type, local_value, local_tb); -#endif - return 0; -bad: - *type = 0; - *value = 0; - *tb = 0; - Py_XDECREF(local_type); - Py_XDECREF(local_value); - 
Py_XDECREF(local_tb); - return -1; -} - -/* RaiseException */ - #if PY_MAJOR_VERSION < 3 -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, - CYTHON_UNUSED PyObject *cause) { - __Pyx_PyThreadState_declare - Py_XINCREF(type); - if (!value || value == Py_None) - value = NULL; - else - Py_INCREF(value); - if (!tb || tb == Py_None) - tb = NULL; - else { - Py_INCREF(tb); - if (!PyTraceBack_Check(tb)) { - PyErr_SetString(PyExc_TypeError, - "raise: arg 3 must be a traceback or None"); - goto raise_error; - } - } - if (PyType_Check(type)) { -#if CYTHON_COMPILING_IN_PYPY - if (!value) { - Py_INCREF(Py_None); - value = Py_None; - } -#endif - PyErr_NormalizeException(&type, &value, &tb); - } else { - if (value) { - PyErr_SetString(PyExc_TypeError, - "instance exception may not have a separate value"); - goto raise_error; - } - value = type; - type = (PyObject*) Py_TYPE(type); - Py_INCREF(type); - if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { - PyErr_SetString(PyExc_TypeError, - "raise: exception class must be a subclass of BaseException"); - goto raise_error; - } - } - __Pyx_PyThreadState_assign - __Pyx_ErrRestore(type, value, tb); - return; -raise_error: - Py_XDECREF(value); - Py_XDECREF(type); - Py_XDECREF(tb); - return; -} -#else -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { - PyObject* owned_instance = NULL; - if (tb == Py_None) { - tb = 0; - } else if (tb && !PyTraceBack_Check(tb)) { - PyErr_SetString(PyExc_TypeError, - "raise: arg 3 must be a traceback or None"); - goto bad; - } - if (value == Py_None) - value = 0; - if (PyExceptionInstance_Check(type)) { - if (value) { - PyErr_SetString(PyExc_TypeError, - "instance exception may not have a separate value"); - goto bad; - } - value = type; - type = (PyObject*) Py_TYPE(value); - } else if (PyExceptionClass_Check(type)) { - PyObject *instance_class = NULL; - if (value && PyExceptionInstance_Check(value)) { - 
instance_class = (PyObject*) Py_TYPE(value); - if (instance_class != type) { - int is_subclass = PyObject_IsSubclass(instance_class, type); - if (!is_subclass) { - instance_class = NULL; - } else if (unlikely(is_subclass == -1)) { - goto bad; - } else { - type = instance_class; - } - } - } - if (!instance_class) { - PyObject *args; - if (!value) - args = PyTuple_New(0); - else if (PyTuple_Check(value)) { - Py_INCREF(value); - args = value; - } else - args = PyTuple_Pack(1, value); - if (!args) - goto bad; - owned_instance = PyObject_Call(type, args, NULL); - Py_DECREF(args); - if (!owned_instance) - goto bad; - value = owned_instance; - if (!PyExceptionInstance_Check(value)) { - PyErr_Format(PyExc_TypeError, - "calling %R should have returned an instance of " - "BaseException, not %R", - type, Py_TYPE(value)); - goto bad; - } - } - } else { - PyErr_SetString(PyExc_TypeError, - "raise: exception class must be a subclass of BaseException"); - goto bad; - } - if (cause) { - PyObject *fixed_cause; - if (cause == Py_None) { - fixed_cause = NULL; - } else if (PyExceptionClass_Check(cause)) { - fixed_cause = PyObject_CallObject(cause, NULL); - if (fixed_cause == NULL) - goto bad; - } else if (PyExceptionInstance_Check(cause)) { - fixed_cause = cause; - Py_INCREF(fixed_cause); - } else { - PyErr_SetString(PyExc_TypeError, - "exception causes must derive from " - "BaseException"); - goto bad; - } - PyException_SetCause(value, fixed_cause); - } - PyErr_SetObject(type, value); - if (tb) { -#if CYTHON_COMPILING_IN_PYPY - PyObject *tmp_type, *tmp_value, *tmp_tb; - PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); - Py_INCREF(tb); - PyErr_Restore(tmp_type, tmp_value, tb); - Py_XDECREF(tmp_tb); -#else - PyThreadState *tstate = __Pyx_PyThreadState_Current; - PyObject* tmp_tb = tstate->curexc_traceback; - if (tb != tmp_tb) { - Py_INCREF(tb); - tstate->curexc_traceback = tb; - Py_XDECREF(tmp_tb); - } -#endif - } -bad: - Py_XDECREF(owned_instance); - return; -} -#endif - -/* TypeImport */ 
- #ifndef __PYX_HAVE_RT_ImportType -#define __PYX_HAVE_RT_ImportType -static PyTypeObject *__Pyx_ImportType(PyObject *module, const char *module_name, const char *class_name, - size_t size, enum __Pyx_ImportType_CheckSize check_size) -{ - PyObject *result = 0; - char warning[200]; - Py_ssize_t basicsize; -#ifdef Py_LIMITED_API - PyObject *py_basicsize; -#endif - result = PyObject_GetAttrString(module, class_name); - if (!result) - goto bad; - if (!PyType_Check(result)) { - PyErr_Format(PyExc_TypeError, - "%.200s.%.200s is not a type object", - module_name, class_name); - goto bad; - } -#ifndef Py_LIMITED_API - basicsize = ((PyTypeObject *)result)->tp_basicsize; -#else - py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); - if (!py_basicsize) - goto bad; - basicsize = PyLong_AsSsize_t(py_basicsize); - Py_DECREF(py_basicsize); - py_basicsize = 0; - if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) - goto bad; -#endif - if ((size_t)basicsize < size) { - PyErr_Format(PyExc_ValueError, - "%.200s.%.200s size changed, may indicate binary incompatibility. " - "Expected %zd from C header, got %zd from PyObject", - module_name, class_name, size, basicsize); - goto bad; - } - if (check_size == __Pyx_ImportType_CheckSize_Error && (size_t)basicsize != size) { - PyErr_Format(PyExc_ValueError, - "%.200s.%.200s size changed, may indicate binary incompatibility. " - "Expected %zd from C header, got %zd from PyObject", - module_name, class_name, size, basicsize); - goto bad; - } - else if (check_size == __Pyx_ImportType_CheckSize_Warn && (size_t)basicsize > size) { - PyOS_snprintf(warning, sizeof(warning), - "%s.%s size changed, may indicate binary incompatibility. 
" - "Expected %zd from C header, got %zd from PyObject", - module_name, class_name, size, basicsize); - if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; - } - return (PyTypeObject *)result; -bad: - Py_XDECREF(result); - return NULL; -} -#endif - -/* Import */ - static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { - PyObject *empty_list = 0; - PyObject *module = 0; - PyObject *global_dict = 0; - PyObject *empty_dict = 0; - PyObject *list; - #if PY_MAJOR_VERSION < 3 - PyObject *py_import; - py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); - if (!py_import) - goto bad; - #endif - if (from_list) - list = from_list; - else { - empty_list = PyList_New(0); - if (!empty_list) - goto bad; - list = empty_list; - } - global_dict = PyModule_GetDict(__pyx_m); - if (!global_dict) - goto bad; - empty_dict = PyDict_New(); - if (!empty_dict) - goto bad; - { - #if PY_MAJOR_VERSION >= 3 - if (level == -1) { - if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) { - module = PyImport_ImportModuleLevelObject( - name, global_dict, empty_dict, list, 1); - if (!module) { - if (!PyErr_ExceptionMatches(PyExc_ImportError)) - goto bad; - PyErr_Clear(); - } - } - level = 0; - } - #endif - if (!module) { - #if PY_MAJOR_VERSION < 3 - PyObject *py_level = PyInt_FromLong(level); - if (!py_level) - goto bad; - module = PyObject_CallFunctionObjArgs(py_import, - name, global_dict, empty_dict, list, py_level, (PyObject *)NULL); - Py_DECREF(py_level); - #else - module = PyImport_ImportModuleLevelObject( - name, global_dict, empty_dict, list, level); - #endif - } - } -bad: - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(py_import); - #endif - Py_XDECREF(empty_list); - Py_XDECREF(empty_dict); - return module; -} - -/* CLineInTraceback */ - #ifndef CYTHON_CLINE_IN_TRACEBACK -static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) { - PyObject *use_cline; - PyObject *ptype, *pvalue, *ptraceback; -#if CYTHON_COMPILING_IN_CPYTHON - PyObject 
**cython_runtime_dict; -#endif - if (unlikely(!__pyx_cython_runtime)) { - return c_line; - } - __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); -#if CYTHON_COMPILING_IN_CPYTHON - cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); - if (likely(cython_runtime_dict)) { - __PYX_PY_DICT_LOOKUP_IF_MODIFIED( - use_cline, *cython_runtime_dict, - __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) - } else -#endif - { - PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); - if (use_cline_obj) { - use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True; - Py_DECREF(use_cline_obj); - } else { - PyErr_Clear(); - use_cline = NULL; - } - } - if (!use_cline) { - c_line = 0; - PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); - } - else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { - c_line = 0; - } - __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); - return c_line; -} -#endif - -/* CodeObjectCache */ - static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { - int start = 0, mid = 0, end = count - 1; - if (end >= 0 && code_line > entries[end].code_line) { - return count; - } - while (start < end) { - mid = start + (end - start) / 2; - if (code_line < entries[mid].code_line) { - end = mid; - } else if (code_line > entries[mid].code_line) { - start = mid + 1; - } else { - return mid; - } - } - if (code_line <= entries[mid].code_line) { - return mid; - } else { - return mid + 1; - } -} -static PyCodeObject *__pyx_find_code_object(int code_line) { - PyCodeObject* code_object; - int pos; - if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { - return NULL; - } - pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); - if (unlikely(pos >= __pyx_code_cache.count) || 
unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { - return NULL; - } - code_object = __pyx_code_cache.entries[pos].code_object; - Py_INCREF(code_object); - return code_object; -} -static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { - int pos, i; - __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; - if (unlikely(!code_line)) { - return; - } - if (unlikely(!entries)) { - entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); - if (likely(entries)) { - __pyx_code_cache.entries = entries; - __pyx_code_cache.max_count = 64; - __pyx_code_cache.count = 1; - entries[0].code_line = code_line; - entries[0].code_object = code_object; - Py_INCREF(code_object); - } - return; - } - pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); - if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { - PyCodeObject* tmp = entries[pos].code_object; - entries[pos].code_object = code_object; - Py_DECREF(tmp); - return; - } - if (__pyx_code_cache.count == __pyx_code_cache.max_count) { - int new_max = __pyx_code_cache.max_count + 64; - entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( - __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry)); - if (unlikely(!entries)) { - return; - } - __pyx_code_cache.entries = entries; - __pyx_code_cache.max_count = new_max; - } - for (i=__pyx_code_cache.count; i>pos; i--) { - entries[i] = entries[i-1]; - } - entries[pos].code_line = code_line; - entries[pos].code_object = code_object; - __pyx_code_cache.count++; - Py_INCREF(code_object); -} - -/* AddTraceback */ - #include "compile.h" -#include "frameobject.h" -#include "traceback.h" -static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( - const char *funcname, int c_line, - int py_line, const char *filename) { - PyCodeObject *py_code = 0; - PyObject *py_srcfile = 0; - PyObject *py_funcname = 
0; - #if PY_MAJOR_VERSION < 3 - py_srcfile = PyString_FromString(filename); - #else - py_srcfile = PyUnicode_FromString(filename); - #endif - if (!py_srcfile) goto bad; - if (c_line) { - #if PY_MAJOR_VERSION < 3 - py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); - #else - py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); - #endif - } - else { - #if PY_MAJOR_VERSION < 3 - py_funcname = PyString_FromString(funcname); - #else - py_funcname = PyUnicode_FromString(funcname); - #endif - } - if (!py_funcname) goto bad; - py_code = __Pyx_PyCode_New( - 0, - 0, - 0, - 0, - 0, - __pyx_empty_bytes, /*PyObject *code,*/ - __pyx_empty_tuple, /*PyObject *consts,*/ - __pyx_empty_tuple, /*PyObject *names,*/ - __pyx_empty_tuple, /*PyObject *varnames,*/ - __pyx_empty_tuple, /*PyObject *freevars,*/ - __pyx_empty_tuple, /*PyObject *cellvars,*/ - py_srcfile, /*PyObject *filename,*/ - py_funcname, /*PyObject *name,*/ - py_line, - __pyx_empty_bytes /*PyObject *lnotab*/ - ); - Py_DECREF(py_srcfile); - Py_DECREF(py_funcname); - return py_code; -bad: - Py_XDECREF(py_srcfile); - Py_XDECREF(py_funcname); - return NULL; -} -static void __Pyx_AddTraceback(const char *funcname, int c_line, - int py_line, const char *filename) { - PyCodeObject *py_code = 0; - PyFrameObject *py_frame = 0; - PyThreadState *tstate = __Pyx_PyThreadState_Current; - if (c_line) { - c_line = __Pyx_CLineForTraceback(tstate, c_line); - } - py_code = __pyx_find_code_object(c_line ? -c_line : py_line); - if (!py_code) { - py_code = __Pyx_CreateCodeObjectForTraceback( - funcname, c_line, py_line, filename); - if (!py_code) goto bad; - __pyx_insert_code_object(c_line ? 
-c_line : py_line, py_code); - } - py_frame = PyFrame_New( - tstate, /*PyThreadState *tstate,*/ - py_code, /*PyCodeObject *code,*/ - __pyx_d, /*PyObject *globals,*/ - 0 /*PyObject *locals*/ - ); - if (!py_frame) goto bad; - __Pyx_PyFrame_SetLineNumber(py_frame, py_line); - PyTraceBack_Here(py_frame); -bad: - Py_XDECREF(py_code); - Py_XDECREF(py_frame); -} - -#if PY_MAJOR_VERSION < 3 -static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { - if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); - PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); - return -1; -} -static void __Pyx_ReleaseBuffer(Py_buffer *view) { - PyObject *obj = view->obj; - if (!obj) return; - if (PyObject_CheckBuffer(obj)) { - PyBuffer_Release(view); - return; - } - if ((0)) {} - view->obj = NULL; - Py_DECREF(obj); -} -#endif - - - /* CIntFromPyVerify */ - #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ - __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) -#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ - __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) -#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ - {\ - func_type value = func_value;\ - if (sizeof(target_type) < sizeof(func_type)) {\ - if (unlikely(value != (func_type) (target_type) value)) {\ - func_type zero = 0;\ - if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ - return (target_type) -1;\ - if (is_unsigned && unlikely(value < zero))\ - goto raise_neg_overflow;\ - else\ - goto raise_overflow;\ - }\ - }\ - return (target_type) value;\ - } - -/* Declarations */ - #if CYTHON_CCOMPLEX - #ifdef __cplusplus - static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { - return ::std::complex< float >(x, y); - } - #else - static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, 
float y) { - return x + y*(__pyx_t_float_complex)_Complex_I; - } - #endif -#else - static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { - __pyx_t_float_complex z; - z.real = x; - z.imag = y; - return z; - } -#endif - -/* Arithmetic */ - #if CYTHON_CCOMPLEX -#else - static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { - return (a.real == b.real) && (a.imag == b.imag); - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - z.real = a.real + b.real; - z.imag = a.imag + b.imag; - return z; - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - z.real = a.real - b.real; - z.imag = a.imag - b.imag; - return z; - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - z.real = a.real * b.real - a.imag * b.imag; - z.imag = a.real * b.imag + a.imag * b.real; - return z; - } - #if 1 - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { - if (b.imag == 0) { - return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real); - } else if (fabsf(b.real) >= fabsf(b.imag)) { - if (b.real == 0 && b.imag == 0) { - return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.imag); - } else { - float r = b.imag / b.real; - float s = (float)(1.0) / (b.real + b.imag * r); - return __pyx_t_float_complex_from_parts( - (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); - } - } else { - float r = b.real / b.imag; - float s = (float)(1.0) / (b.imag + b.real * r); - return __pyx_t_float_complex_from_parts( - (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); - } - } - #else - static CYTHON_INLINE __pyx_t_float_complex 
__Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { - if (b.imag == 0) { - return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real); - } else { - float denom = b.real * b.real + b.imag * b.imag; - return __pyx_t_float_complex_from_parts( - (a.real * b.real + a.imag * b.imag) / denom, - (a.imag * b.real - a.real * b.imag) / denom); - } - } - #endif - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex a) { - __pyx_t_float_complex z; - z.real = -a.real; - z.imag = -a.imag; - return z; - } - static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex a) { - return (a.real == 0) && (a.imag == 0); - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex a) { - __pyx_t_float_complex z; - z.real = a.real; - z.imag = -a.imag; - return z; - } - #if 1 - static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex z) { - #if !defined(HAVE_HYPOT) || defined(_MSC_VER) - return sqrtf(z.real*z.real + z.imag*z.imag); - #else - return hypotf(z.real, z.imag); - #endif - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - float r, lnr, theta, z_r, z_theta; - if (b.imag == 0 && b.real == (int)b.real) { - if (b.real < 0) { - float denom = a.real * a.real + a.imag * a.imag; - a.real = a.real / denom; - a.imag = -a.imag / denom; - b.real = -b.real; - } - switch ((int)b.real) { - case 0: - z.real = 1; - z.imag = 0; - return z; - case 1: - return a; - case 2: - return __Pyx_c_prod_float(a, a); - case 3: - z = __Pyx_c_prod_float(a, a); - return __Pyx_c_prod_float(z, a); - case 4: - z = __Pyx_c_prod_float(a, a); - return __Pyx_c_prod_float(z, z); - } - } - if (a.imag == 0) { - if (a.real == 0) { - return a; - } else if (b.imag == 0) { - z.real = powf(a.real, b.real); - z.imag = 0; - return z; - } else if (a.real > 0) { - r = a.real; - theta = 0; - } else { - r = -a.real; - theta = 
atan2f(0.0, -1.0); - } - } else { - r = __Pyx_c_abs_float(a); - theta = atan2f(a.imag, a.real); - } - lnr = logf(r); - z_r = expf(lnr * b.real - theta * b.imag); - z_theta = theta * b.real + lnr * b.imag; - z.real = z_r * cosf(z_theta); - z.imag = z_r * sinf(z_theta); - return z; - } - #endif -#endif - -/* Declarations */ - #if CYTHON_CCOMPLEX - #ifdef __cplusplus - static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { - return ::std::complex< double >(x, y); - } - #else - static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { - return x + y*(__pyx_t_double_complex)_Complex_I; - } - #endif -#else - static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { - __pyx_t_double_complex z; - z.real = x; - z.imag = y; - return z; - } -#endif - -/* Arithmetic */ - #if CYTHON_CCOMPLEX -#else - static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { - return (a.real == b.real) && (a.imag == b.imag); - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - z.real = a.real + b.real; - z.imag = a.imag + b.imag; - return z; - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - z.real = a.real - b.real; - z.imag = a.imag - b.imag; - return z; - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - z.real = a.real * b.real - a.imag * b.imag; - z.imag = a.real * b.imag + a.imag * b.real; - return z; - } - #if 1 - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { - if (b.imag == 0) { - return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / 
b.real); - } else if (fabs(b.real) >= fabs(b.imag)) { - if (b.real == 0 && b.imag == 0) { - return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.imag); - } else { - double r = b.imag / b.real; - double s = (double)(1.0) / (b.real + b.imag * r); - return __pyx_t_double_complex_from_parts( - (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); - } - } else { - double r = b.real / b.imag; - double s = (double)(1.0) / (b.imag + b.real * r); - return __pyx_t_double_complex_from_parts( - (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); - } - } - #else - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { - if (b.imag == 0) { - return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real); - } else { - double denom = b.real * b.real + b.imag * b.imag; - return __pyx_t_double_complex_from_parts( - (a.real * b.real + a.imag * b.imag) / denom, - (a.imag * b.real - a.real * b.imag) / denom); - } - } - #endif - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex a) { - __pyx_t_double_complex z; - z.real = -a.real; - z.imag = -a.imag; - return z; - } - static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex a) { - return (a.real == 0) && (a.imag == 0); - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex a) { - __pyx_t_double_complex z; - z.real = a.real; - z.imag = -a.imag; - return z; - } - #if 1 - static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex z) { - #if !defined(HAVE_HYPOT) || defined(_MSC_VER) - return sqrt(z.real*z.real + z.imag*z.imag); - #else - return hypot(z.real, z.imag); - #endif - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - double r, lnr, theta, z_r, z_theta; - if (b.imag == 0 && b.real == (int)b.real) { - if (b.real < 0) { - double denom = a.real * 
a.real + a.imag * a.imag; - a.real = a.real / denom; - a.imag = -a.imag / denom; - b.real = -b.real; - } - switch ((int)b.real) { - case 0: - z.real = 1; - z.imag = 0; - return z; - case 1: - return a; - case 2: - return __Pyx_c_prod_double(a, a); - case 3: - z = __Pyx_c_prod_double(a, a); - return __Pyx_c_prod_double(z, a); - case 4: - z = __Pyx_c_prod_double(a, a); - return __Pyx_c_prod_double(z, z); - } - } - if (a.imag == 0) { - if (a.real == 0) { - return a; - } else if (b.imag == 0) { - z.real = pow(a.real, b.real); - z.imag = 0; - return z; - } else if (a.real > 0) { - r = a.real; - theta = 0; - } else { - r = -a.real; - theta = atan2(0.0, -1.0); - } - } else { - r = __Pyx_c_abs_double(a); - theta = atan2(a.imag, a.real); - } - lnr = log(r); - z_r = exp(lnr * b.real - theta * b.imag); - z_theta = theta * b.real + lnr * b.imag; - z.real = z_r * cos(z_theta); - z.imag = z_r * sin(z_theta); - return z; - } - #endif -#endif - -/* CIntFromPy */ - static CYTHON_INLINE unsigned int __Pyx_PyInt_As_unsigned_int(PyObject *x) { -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wconversion" -#endif - const unsigned int neg_one = (unsigned int) -1, const_zero = (unsigned int) 0; -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic pop -#endif - const int is_unsigned = neg_one > const_zero; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_Check(x))) { - if (sizeof(unsigned int) < sizeof(long)) { - __PYX_VERIFY_RETURN_INT(unsigned int, long, PyInt_AS_LONG(x)) - } else { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - goto raise_neg_overflow; - } - return (unsigned int) val; - } - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (unsigned int) 0; - case 1: __PYX_VERIFY_RETURN_INT(unsigned int, digit, digits[0]) - case 2: - if (8 * sizeof(unsigned int) > 1 * 
PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(unsigned int) >= 2 * PyLong_SHIFT) { - return (unsigned int) (((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])); - } - } - break; - case 3: - if (8 * sizeof(unsigned int) > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(unsigned int) >= 3 * PyLong_SHIFT) { - return (unsigned int) (((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])); - } - } - break; - case 4: - if (8 * sizeof(unsigned int) > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(unsigned int) >= 4 * PyLong_SHIFT) { - return (unsigned int) (((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])); - } - } - break; - } -#endif -#if CYTHON_COMPILING_IN_CPYTHON - if (unlikely(Py_SIZE(x) < 0)) { - goto raise_neg_overflow; - } -#else - { - int result = PyObject_RichCompareBool(x, Py_False, Py_LT); - if (unlikely(result < 0)) - return (unsigned int) -1; - if (unlikely(result == 1)) - goto raise_neg_overflow; - } -#endif - if (sizeof(unsigned int) <= sizeof(unsigned long)) { - __PYX_VERIFY_RETURN_INT_EXC(unsigned int, unsigned long, PyLong_AsUnsignedLong(x)) -#ifdef HAVE_LONG_LONG - } else if 
(sizeof(unsigned int) <= sizeof(unsigned PY_LONG_LONG)) { - __PYX_VERIFY_RETURN_INT_EXC(unsigned int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) -#endif - } - } else { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (unsigned int) 0; - case -1: __PYX_VERIFY_RETURN_INT(unsigned int, sdigit, (sdigit) (-(sdigit)digits[0])) - case 1: __PYX_VERIFY_RETURN_INT(unsigned int, digit, +digits[0]) - case -2: - if (8 * sizeof(unsigned int) - 1 > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) { - return (unsigned int) (((unsigned int)-1)*(((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); - } - } - break; - case 2: - if (8 * sizeof(unsigned int) > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) { - return (unsigned int) ((((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); - } - } - break; - case -3: - if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) { - return (unsigned int) (((unsigned int)-1)*(((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); - } - } - break; - case 3: - if (8 * sizeof(unsigned int) > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned 
long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) { - return (unsigned int) ((((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); - } - } - break; - case -4: - if (8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(unsigned int) - 1 > 4 * PyLong_SHIFT) { - return (unsigned int) (((unsigned int)-1)*(((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); - } - } - break; - case 4: - if (8 * sizeof(unsigned int) > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(unsigned int) - 1 > 4 * PyLong_SHIFT) { - return (unsigned int) ((((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); - } - } - break; - } -#endif - if (sizeof(unsigned int) <= sizeof(long)) { - __PYX_VERIFY_RETURN_INT_EXC(unsigned int, long, PyLong_AsLong(x)) -#ifdef HAVE_LONG_LONG - } else if (sizeof(unsigned int) <= sizeof(PY_LONG_LONG)) { - __PYX_VERIFY_RETURN_INT_EXC(unsigned int, PY_LONG_LONG, PyLong_AsLongLong(x)) -#endif - } - } - 
{ -#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) - PyErr_SetString(PyExc_RuntimeError, - "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); -#else - unsigned int val; - PyObject *v = __Pyx_PyNumber_IntOrLong(x); - #if PY_MAJOR_VERSION < 3 - if (likely(v) && !PyLong_Check(v)) { - PyObject *tmp = v; - v = PyNumber_Long(tmp); - Py_DECREF(tmp); - } - #endif - if (likely(v)) { - int one = 1; int is_little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&val; - int ret = _PyLong_AsByteArray((PyLongObject *)v, - bytes, sizeof(val), - is_little, !is_unsigned); - Py_DECREF(v); - if (likely(!ret)) - return val; - } -#endif - return (unsigned int) -1; - } - } else { - unsigned int val; - PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); - if (!tmp) return (unsigned int) -1; - val = __Pyx_PyInt_As_unsigned_int(tmp); - Py_DECREF(tmp); - return val; - } -raise_overflow: - PyErr_SetString(PyExc_OverflowError, - "value too large to convert to unsigned int"); - return (unsigned int) -1; -raise_neg_overflow: - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to unsigned int"); - return (unsigned int) -1; -} - -/* CIntToPy */ - static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wconversion" -#endif - const int neg_one = (int) -1, const_zero = (int) 0; -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic pop -#endif - const int is_unsigned = neg_one > const_zero; - if (is_unsigned) { - if (sizeof(int) < sizeof(long)) { - return PyInt_FromLong((long) value); - } else if (sizeof(int) <= sizeof(unsigned long)) { - return PyLong_FromUnsignedLong((unsigned long) value); -#ifdef HAVE_LONG_LONG - } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { - return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); -#endif - } - } else { - if (sizeof(int) <= sizeof(long)) { - return 
PyInt_FromLong((long) value); -#ifdef HAVE_LONG_LONG - } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { - return PyLong_FromLongLong((PY_LONG_LONG) value); -#endif - } - } - { - int one = 1; int little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&value; - return _PyLong_FromByteArray(bytes, sizeof(int), - little, !is_unsigned); - } -} - -/* CIntFromPy */ - static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wconversion" -#endif - const int neg_one = (int) -1, const_zero = (int) 0; -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic pop -#endif - const int is_unsigned = neg_one > const_zero; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_Check(x))) { - if (sizeof(int) < sizeof(long)) { - __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) - } else { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - goto raise_neg_overflow; - } - return (int) val; - } - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (int) 0; - case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) - case 2: - if (8 * sizeof(int) > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { - return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); - } - } - break; - case 3: - if (8 * sizeof(int) > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { - return 
(int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); - } - } - break; - case 4: - if (8 * sizeof(int) > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { - return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); - } - } - break; - } -#endif -#if CYTHON_COMPILING_IN_CPYTHON - if (unlikely(Py_SIZE(x) < 0)) { - goto raise_neg_overflow; - } -#else - { - int result = PyObject_RichCompareBool(x, Py_False, Py_LT); - if (unlikely(result < 0)) - return (int) -1; - if (unlikely(result == 1)) - goto raise_neg_overflow; - } -#endif - if (sizeof(int) <= sizeof(unsigned long)) { - __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) -#ifdef HAVE_LONG_LONG - } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { - __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) -#endif - } - } else { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (int) 0; - case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) - case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) - case -2: - if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { - return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case 2: - if (8 * sizeof(int) > 1 * PyLong_SHIFT) { - if (8 * 
sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { - return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case -3: - if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { - return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case 3: - if (8 * sizeof(int) > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { - return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case -4: - if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { - return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case 4: - if (8 * sizeof(int) > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << 
PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { - return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - } -#endif - if (sizeof(int) <= sizeof(long)) { - __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) -#ifdef HAVE_LONG_LONG - } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { - __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) -#endif - } - } - { -#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) - PyErr_SetString(PyExc_RuntimeError, - "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); -#else - int val; - PyObject *v = __Pyx_PyNumber_IntOrLong(x); - #if PY_MAJOR_VERSION < 3 - if (likely(v) && !PyLong_Check(v)) { - PyObject *tmp = v; - v = PyNumber_Long(tmp); - Py_DECREF(tmp); - } - #endif - if (likely(v)) { - int one = 1; int is_little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&val; - int ret = _PyLong_AsByteArray((PyLongObject *)v, - bytes, sizeof(val), - is_little, !is_unsigned); - Py_DECREF(v); - if (likely(!ret)) - return val; - } -#endif - return (int) -1; - } - } else { - int val; - PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); - if (!tmp) return (int) -1; - val = __Pyx_PyInt_As_int(tmp); - Py_DECREF(tmp); - return val; - } -raise_overflow: - PyErr_SetString(PyExc_OverflowError, - "value too large to convert to int"); - return (int) -1; -raise_neg_overflow: - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to int"); - return (int) -1; -} - -/* CIntToPy */ - static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wconversion" -#endif - const long neg_one = (long) -1, const_zero = (long) 0; 
-#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic pop -#endif - const int is_unsigned = neg_one > const_zero; - if (is_unsigned) { - if (sizeof(long) < sizeof(long)) { - return PyInt_FromLong((long) value); - } else if (sizeof(long) <= sizeof(unsigned long)) { - return PyLong_FromUnsignedLong((unsigned long) value); -#ifdef HAVE_LONG_LONG - } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { - return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); -#endif - } - } else { - if (sizeof(long) <= sizeof(long)) { - return PyInt_FromLong((long) value); -#ifdef HAVE_LONG_LONG - } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { - return PyLong_FromLongLong((PY_LONG_LONG) value); -#endif - } - } - { - int one = 1; int little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&value; - return _PyLong_FromByteArray(bytes, sizeof(long), - little, !is_unsigned); - } -} - -/* CIntToPy */ - static CYTHON_INLINE PyObject* __Pyx_PyInt_From_unsigned_int(unsigned int value) { -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wconversion" -#endif - const unsigned int neg_one = (unsigned int) -1, const_zero = (unsigned int) 0; -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic pop -#endif - const int is_unsigned = neg_one > const_zero; - if (is_unsigned) { - if (sizeof(unsigned int) < sizeof(long)) { - return PyInt_FromLong((long) value); - } else if (sizeof(unsigned int) <= sizeof(unsigned long)) { - return PyLong_FromUnsignedLong((unsigned long) value); -#ifdef HAVE_LONG_LONG - } else if (sizeof(unsigned int) <= sizeof(unsigned PY_LONG_LONG)) { - return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); -#endif - } - } else { - if (sizeof(unsigned int) <= sizeof(long)) { - return PyInt_FromLong((long) value); -#ifdef HAVE_LONG_LONG - } else if (sizeof(unsigned int) <= sizeof(PY_LONG_LONG)) { - return PyLong_FromLongLong((PY_LONG_LONG) value); -#endif - } - } - { - int one = 
1; int little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&value; - return _PyLong_FromByteArray(bytes, sizeof(unsigned int), - little, !is_unsigned); - } -} - -/* CIntFromPy */ - static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wconversion" -#endif - const long neg_one = (long) -1, const_zero = (long) 0; -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic pop -#endif - const int is_unsigned = neg_one > const_zero; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_Check(x))) { - if (sizeof(long) < sizeof(long)) { - __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) - } else { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - goto raise_neg_overflow; - } - return (long) val; - } - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (long) 0; - case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) - case 2: - if (8 * sizeof(long) > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { - return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); - } - } - break; - case 3: - if (8 * sizeof(long) > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { - return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); - } - } - break; - case 4: - if (8 * sizeof(long) > 3 * 
PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { - return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); - } - } - break; - } -#endif -#if CYTHON_COMPILING_IN_CPYTHON - if (unlikely(Py_SIZE(x) < 0)) { - goto raise_neg_overflow; - } -#else - { - int result = PyObject_RichCompareBool(x, Py_False, Py_LT); - if (unlikely(result < 0)) - return (long) -1; - if (unlikely(result == 1)) - goto raise_neg_overflow; - } -#endif - if (sizeof(long) <= sizeof(unsigned long)) { - __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) -#ifdef HAVE_LONG_LONG - } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { - __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) -#endif - } - } else { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (long) 0; - case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) - case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) - case -2: - if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case 2: - if (8 * sizeof(long) > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << 
PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case -3: - if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case 3: - if (8 * sizeof(long) > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case -4: - if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { - return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case 4: - if (8 * sizeof(long) > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << 
PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { - return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - } -#endif - if (sizeof(long) <= sizeof(long)) { - __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) -#ifdef HAVE_LONG_LONG - } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { - __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) -#endif - } - } - { -#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) - PyErr_SetString(PyExc_RuntimeError, - "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); -#else - long val; - PyObject *v = __Pyx_PyNumber_IntOrLong(x); - #if PY_MAJOR_VERSION < 3 - if (likely(v) && !PyLong_Check(v)) { - PyObject *tmp = v; - v = PyNumber_Long(tmp); - Py_DECREF(tmp); - } - #endif - if (likely(v)) { - int one = 1; int is_little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&val; - int ret = _PyLong_AsByteArray((PyLongObject *)v, - bytes, sizeof(val), - is_little, !is_unsigned); - Py_DECREF(v); - if (likely(!ret)) - return val; - } -#endif - return (long) -1; - } - } else { - long val; - PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); - if (!tmp) return (long) -1; - val = __Pyx_PyInt_As_long(tmp); - Py_DECREF(tmp); - return val; - } -raise_overflow: - PyErr_SetString(PyExc_OverflowError, - "value too large to convert to long"); - return (long) -1; -raise_neg_overflow: - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to long"); - return (long) -1; -} - -/* FastTypeChecks */ - #if CYTHON_COMPILING_IN_CPYTHON -static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { - while (a) { - a = a->tp_base; - if (a == b) - return 1; - } - return b == &PyBaseObject_Type; -} -static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { - PyObject *mro; - if (a == b) return 
1; - mro = a->tp_mro; - if (likely(mro)) { - Py_ssize_t i, n; - n = PyTuple_GET_SIZE(mro); - for (i = 0; i < n; i++) { - if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) - return 1; - } - return 0; - } - return __Pyx_InBases(a, b); -} -#if PY_MAJOR_VERSION == 2 -static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { - PyObject *exception, *value, *tb; - int res; - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __Pyx_ErrFetch(&exception, &value, &tb); - res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; - if (unlikely(res == -1)) { - PyErr_WriteUnraisable(err); - res = 0; - } - if (!res) { - res = PyObject_IsSubclass(err, exc_type2); - if (unlikely(res == -1)) { - PyErr_WriteUnraisable(err); - res = 0; - } - } - __Pyx_ErrRestore(exception, value, tb); - return res; -} -#else -static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { - int res = exc_type1 ? 
__Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; - if (!res) { - res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); - } - return res; -} -#endif -static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { - Py_ssize_t i, n; - assert(PyExceptionClass_Check(exc_type)); - n = PyTuple_GET_SIZE(tuple); -#if PY_MAJOR_VERSION >= 3 - for (i=0; ip) { - #if PY_MAJOR_VERSION < 3 - if (t->is_unicode) { - *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); - } else if (t->intern) { - *t->p = PyString_InternFromString(t->s); - } else { - *t->p = PyString_FromStringAndSize(t->s, t->n - 1); - } - #else - if (t->is_unicode | t->is_str) { - if (t->intern) { - *t->p = PyUnicode_InternFromString(t->s); - } else if (t->encoding) { - *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); - } else { - *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); - } - } else { - *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); - } - #endif - if (!*t->p) - return -1; - if (PyObject_Hash(*t->p) == -1) - return -1; - ++t; - } - return 0; -} - -static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { - return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); -} -static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { - Py_ssize_t ignore; - return __Pyx_PyObject_AsStringAndSize(o, &ignore); -} -#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT -#if !CYTHON_PEP393_ENABLED -static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { - char* defenc_c; - PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); - if (!defenc) return NULL; - defenc_c = PyBytes_AS_STRING(defenc); -#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII - { - char* end = defenc_c + PyBytes_GET_SIZE(defenc); - char* c; - for (c = defenc_c; c < end; c++) { - if ((unsigned char) (*c) >= 128) { - PyUnicode_AsASCIIString(o); - return NULL; - } - } 
- } -#endif - *length = PyBytes_GET_SIZE(defenc); - return defenc_c; -} -#else -static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { - if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; -#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII - if (likely(PyUnicode_IS_ASCII(o))) { - *length = PyUnicode_GET_LENGTH(o); - return PyUnicode_AsUTF8(o); - } else { - PyUnicode_AsASCIIString(o); - return NULL; - } -#else - return PyUnicode_AsUTF8AndSize(o, length); -#endif -} -#endif -#endif -static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { -#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT - if ( -#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII - __Pyx_sys_getdefaultencoding_not_ascii && -#endif - PyUnicode_Check(o)) { - return __Pyx_PyUnicode_AsStringAndSize(o, length); - } else -#endif -#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) - if (PyByteArray_Check(o)) { - *length = PyByteArray_GET_SIZE(o); - return PyByteArray_AS_STRING(o); - } else -#endif - { - char* result; - int r = PyBytes_AsStringAndSize(o, &result, length); - if (unlikely(r < 0)) { - return NULL; - } else { - return result; - } - } -} -static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { - int is_true = x == Py_True; - if (is_true | (x == Py_False) | (x == Py_None)) return is_true; - else return PyObject_IsTrue(x); -} -static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { - int retval; - if (unlikely(!x)) return -1; - retval = __Pyx_PyObject_IsTrue(x); - Py_DECREF(x); - return retval; -} -static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { -#if PY_MAJOR_VERSION >= 3 - if (PyLong_Check(result)) { - if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, - "__int__ returned non-int (type %.200s). 
" - "The ability to return an instance of a strict subclass of int " - "is deprecated, and may be removed in a future version of Python.", - Py_TYPE(result)->tp_name)) { - Py_DECREF(result); - return NULL; - } - return result; - } -#endif - PyErr_Format(PyExc_TypeError, - "__%.4s__ returned non-%.4s (type %.200s)", - type_name, type_name, Py_TYPE(result)->tp_name); - Py_DECREF(result); - return NULL; -} -static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { -#if CYTHON_USE_TYPE_SLOTS - PyNumberMethods *m; -#endif - const char *name = NULL; - PyObject *res = NULL; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_Check(x) || PyLong_Check(x))) -#else - if (likely(PyLong_Check(x))) -#endif - return __Pyx_NewRef(x); -#if CYTHON_USE_TYPE_SLOTS - m = Py_TYPE(x)->tp_as_number; - #if PY_MAJOR_VERSION < 3 - if (m && m->nb_int) { - name = "int"; - res = m->nb_int(x); - } - else if (m && m->nb_long) { - name = "long"; - res = m->nb_long(x); - } - #else - if (likely(m && m->nb_int)) { - name = "int"; - res = m->nb_int(x); - } - #endif -#else - if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { - res = PyNumber_Int(x); - } -#endif - if (likely(res)) { -#if PY_MAJOR_VERSION < 3 - if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { -#else - if (unlikely(!PyLong_CheckExact(res))) { -#endif - return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); - } - } - else if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_TypeError, - "an integer is required"); - } - return res; -} -static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { - Py_ssize_t ival; - PyObject *x; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_CheckExact(b))) { - if (sizeof(Py_ssize_t) >= sizeof(long)) - return PyInt_AS_LONG(b); - else - return PyInt_AsSsize_t(b); - } -#endif - if (likely(PyLong_CheckExact(b))) { - #if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)b)->ob_digit; - const Py_ssize_t size = Py_SIZE(b); - if (likely(__Pyx_sst_abs(size) <= 1)) { - 
ival = likely(size) ? digits[0] : 0; - if (size == -1) ival = -ival; - return ival; - } else { - switch (size) { - case 2: - if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { - return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case -2: - if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { - return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case 3: - if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { - return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case -3: - if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { - return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case 4: - if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { - return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case -4: - if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { - return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - } - } - #endif - return PyLong_AsSsize_t(b); - } - x = PyNumber_Index(b); - if (!x) return -1; - ival = PyInt_AsSsize_t(x); - Py_DECREF(x); - return ival; -} -static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { - return b ? 
__Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); -} -static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { - return PyInt_FromSize_t(ival); -} - - -#endif /* Py_PYTHON_H */ diff --git a/face_recognition1/face_detect/utils/nms/cpu_nms.cpython-36m-x86_64-linux-gnu.so b/face_recognition1/face_detect/utils/nms/cpu_nms.cpython-36m-x86_64-linux-gnu.so deleted file mode 100644 index 008bb0946cc6a6ac1f76cd186f23fce4e4128709..0000000000000000000000000000000000000000 Binary files a/face_recognition1/face_detect/utils/nms/cpu_nms.cpython-36m-x86_64-linux-gnu.so and /dev/null differ diff --git a/face_recognition1/face_detect/utils/nms/cpu_nms.pyx b/face_recognition1/face_detect/utils/nms/cpu_nms.pyx deleted file mode 100644 index 44aaed85b6c47d822a86cf92838e86e5fdf05e90..0000000000000000000000000000000000000000 --- a/face_recognition1/face_detect/utils/nms/cpu_nms.pyx +++ /dev/null @@ -1,156 +0,0 @@ -import numpy as np -cimport numpy as np - -cdef inline np.float32_t max(np.float32_t a, np.float32_t b): - return a if a >= b else b - -cdef inline np.float32_t min(np.float32_t a, np.float32_t b): - return a if a <= b else b - -def cpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh): - cdef np.ndarray[np.float32_t, ndim=1] x1 = dets[:, 0] - cdef np.ndarray[np.float32_t, ndim=1] y1 = dets[:, 1] - cdef np.ndarray[np.float32_t, ndim=1] x2 = dets[:, 2] - cdef np.ndarray[np.float32_t, ndim=1] y2 = dets[:, 3] - cdef np.ndarray[np.float32_t, ndim=1] scores = dets[:, 4] - - cdef np.ndarray[np.float32_t, ndim=1] areas = (x2 - x1 + 1) * (y2 - y1 + 1) - cdef np.ndarray[np.int_t, ndim=1] order = scores.argsort()[::-1] - - cdef int ndets = dets.shape[0] - cdef np.ndarray[np.int_t, ndim=1] suppressed = \ - np.zeros((ndets), dtype=np.int) - - # nominal indices - cdef int _i, _j - # sorted indices - cdef int i, j - # temp variables for box i's (the box currently under consideration) - cdef np.float32_t ix1, iy1, ix2, iy2, iarea - # variables for computing overlap with 
box j (lower scoring box) - cdef np.float32_t xx1, yy1, xx2, yy2 - cdef np.float32_t w, h - cdef np.float32_t inter, ovr - - keep = [] - for _i in range(ndets): - i = order[_i] - if suppressed[i] == 1: - continue - keep.append(i) - ix1 = x1[i] - iy1 = y1[i] - ix2 = x2[i] - iy2 = y2[i] - iarea = areas[i] - for _j in range(_i + 1, ndets): - j = order[_j] - if suppressed[j] == 1: - continue - xx1 = max(ix1, x1[j]) - yy1 = max(iy1, y1[j]) - xx2 = min(ix2, x2[j]) - yy2 = min(iy2, y2[j]) - w = max(0.0, xx2 - xx1 + 1) - h = max(0.0, yy2 - yy1 + 1) - inter = w * h - ovr = inter / (iarea + areas[j] - inter) - if ovr >= thresh: - suppressed[j] = 1 - - return keep - -def cpu_soft_nms(np.ndarray[float, ndim=2] boxes, float sigma=0.5, float Nt=0.3, float threshold=0.001, unsigned int method=0): - cdef unsigned int N = boxes.shape[0] - cdef float iw, ih, box_area - cdef float ua - cdef int pos = 0 - cdef float maxscore = 0 - cdef int maxpos = 0 - cdef float x1,x2,y1,y2,tx1,tx2,ty1,ty2,ts,area,weight,ov - - for i in range(N): - maxscore = boxes[i, 4] - maxpos = i - - tx1 = boxes[i,0] - ty1 = boxes[i,1] - tx2 = boxes[i,2] - ty2 = boxes[i,3] - ts = boxes[i,4] - - pos = i + 1 - # get max box - while pos < N: - if maxscore < boxes[pos, 4]: - maxscore = boxes[pos, 4] - maxpos = pos - pos = pos + 1 - - # add max box as a detection - boxes[i,0] = boxes[maxpos,0] - boxes[i,1] = boxes[maxpos,1] - boxes[i,2] = boxes[maxpos,2] - boxes[i,3] = boxes[maxpos,3] - boxes[i,4] = boxes[maxpos,4] - - # swap ith box with position of max box - boxes[maxpos,0] = tx1 - boxes[maxpos,1] = ty1 - boxes[maxpos,2] = tx2 - boxes[maxpos,3] = ty2 - boxes[maxpos,4] = ts - - tx1 = boxes[i,0] - ty1 = boxes[i,1] - tx2 = boxes[i,2] - ty2 = boxes[i,3] - ts = boxes[i,4] - - pos = i + 1 - # NMS iterations, note that N changes if detection boxes fall below threshold - while pos < N: - x1 = boxes[pos, 0] - y1 = boxes[pos, 1] - x2 = boxes[pos, 2] - y2 = boxes[pos, 3] - s = boxes[pos, 4] - - area = (x2 - x1 + 1) * (y2 - y1 
+ 1) - iw = (min(tx2, x2) - max(tx1, x1) + 1) - if iw > 0: - ih = (min(ty2, y2) - max(ty1, y1) + 1) - if ih > 0: - ua = float((tx2 - tx1 + 1) * (ty2 - ty1 + 1) + area - iw * ih) - ov = iw * ih / ua #iou between max box and detection box - - if method == 1: # linear - if ov > Nt: - weight = 1 - ov - else: - weight = 1 - elif method == 2: # gaussian - weight = np.exp(-(ov * ov)/sigma) - else: # original NMS - if ov > Nt: - weight = 0 - else: - weight = 1 - - boxes[pos, 4] = weight*boxes[pos, 4] - - # if box score falls below threshold, discard the box by swapping with last box - # update N - if boxes[pos, 4] < threshold: - boxes[pos,0] = boxes[N-1, 0] - boxes[pos,1] = boxes[N-1, 1] - boxes[pos,2] = boxes[N-1, 2] - boxes[pos,3] = boxes[N-1, 3] - boxes[pos,4] = boxes[N-1, 4] - N = N - 1 - pos = pos - 1 - - pos = pos + 1 - - keep = [i for i in range(N)] - return keep diff --git a/face_recognition1/face_detect/utils/nms/gpu_nms.cpp b/face_recognition1/face_detect/utils/nms/gpu_nms.cpp deleted file mode 100644 index 871c3f7b29fa1997006a871c3b896654f2cabe21..0000000000000000000000000000000000000000 --- a/face_recognition1/face_detect/utils/nms/gpu_nms.cpp +++ /dev/null @@ -1,6899 +0,0 @@ -/* Generated by Cython 0.29.24 */ - -#ifndef PY_SSIZE_T_CLEAN -#define PY_SSIZE_T_CLEAN -#endif /* PY_SSIZE_T_CLEAN */ -#include "Python.h" -#ifndef Py_PYTHON_H - #error Python headers needed to compile C extensions, please install development version of Python. -#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) - #error Cython requires Python 2.6+ or Python 3.3+. 
-#else -#define CYTHON_ABI "0_29_24" -#define CYTHON_HEX_VERSION 0x001D18F0 -#define CYTHON_FUTURE_DIVISION 0 -#include -#ifndef offsetof - #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) -#endif -#if !defined(WIN32) && !defined(MS_WINDOWS) - #ifndef __stdcall - #define __stdcall - #endif - #ifndef __cdecl - #define __cdecl - #endif - #ifndef __fastcall - #define __fastcall - #endif -#endif -#ifndef DL_IMPORT - #define DL_IMPORT(t) t -#endif -#ifndef DL_EXPORT - #define DL_EXPORT(t) t -#endif -#define __PYX_COMMA , -#ifndef HAVE_LONG_LONG - #if PY_VERSION_HEX >= 0x02070000 - #define HAVE_LONG_LONG - #endif -#endif -#ifndef PY_LONG_LONG - #define PY_LONG_LONG LONG_LONG -#endif -#ifndef Py_HUGE_VAL - #define Py_HUGE_VAL HUGE_VAL -#endif -#ifdef PYPY_VERSION - #define CYTHON_COMPILING_IN_PYPY 1 - #define CYTHON_COMPILING_IN_PYSTON 0 - #define CYTHON_COMPILING_IN_CPYTHON 0 - #undef CYTHON_USE_TYPE_SLOTS - #define CYTHON_USE_TYPE_SLOTS 0 - #undef CYTHON_USE_PYTYPE_LOOKUP - #define CYTHON_USE_PYTYPE_LOOKUP 0 - #if PY_VERSION_HEX < 0x03050000 - #undef CYTHON_USE_ASYNC_SLOTS - #define CYTHON_USE_ASYNC_SLOTS 0 - #elif !defined(CYTHON_USE_ASYNC_SLOTS) - #define CYTHON_USE_ASYNC_SLOTS 1 - #endif - #undef CYTHON_USE_PYLIST_INTERNALS - #define CYTHON_USE_PYLIST_INTERNALS 0 - #undef CYTHON_USE_UNICODE_INTERNALS - #define CYTHON_USE_UNICODE_INTERNALS 0 - #undef CYTHON_USE_UNICODE_WRITER - #define CYTHON_USE_UNICODE_WRITER 0 - #undef CYTHON_USE_PYLONG_INTERNALS - #define CYTHON_USE_PYLONG_INTERNALS 0 - #undef CYTHON_AVOID_BORROWED_REFS - #define CYTHON_AVOID_BORROWED_REFS 1 - #undef CYTHON_ASSUME_SAFE_MACROS - #define CYTHON_ASSUME_SAFE_MACROS 0 - #undef CYTHON_UNPACK_METHODS - #define CYTHON_UNPACK_METHODS 0 - #undef CYTHON_FAST_THREAD_STATE - #define CYTHON_FAST_THREAD_STATE 0 - #undef CYTHON_FAST_PYCALL - #define CYTHON_FAST_PYCALL 0 - #undef CYTHON_PEP489_MULTI_PHASE_INIT - #define CYTHON_PEP489_MULTI_PHASE_INIT 0 - #undef CYTHON_USE_TP_FINALIZE - #define 
CYTHON_USE_TP_FINALIZE 0 - #undef CYTHON_USE_DICT_VERSIONS - #define CYTHON_USE_DICT_VERSIONS 0 - #undef CYTHON_USE_EXC_INFO_STACK - #define CYTHON_USE_EXC_INFO_STACK 0 -#elif defined(PYSTON_VERSION) - #define CYTHON_COMPILING_IN_PYPY 0 - #define CYTHON_COMPILING_IN_PYSTON 1 - #define CYTHON_COMPILING_IN_CPYTHON 0 - #ifndef CYTHON_USE_TYPE_SLOTS - #define CYTHON_USE_TYPE_SLOTS 1 - #endif - #undef CYTHON_USE_PYTYPE_LOOKUP - #define CYTHON_USE_PYTYPE_LOOKUP 0 - #undef CYTHON_USE_ASYNC_SLOTS - #define CYTHON_USE_ASYNC_SLOTS 0 - #undef CYTHON_USE_PYLIST_INTERNALS - #define CYTHON_USE_PYLIST_INTERNALS 0 - #ifndef CYTHON_USE_UNICODE_INTERNALS - #define CYTHON_USE_UNICODE_INTERNALS 1 - #endif - #undef CYTHON_USE_UNICODE_WRITER - #define CYTHON_USE_UNICODE_WRITER 0 - #undef CYTHON_USE_PYLONG_INTERNALS - #define CYTHON_USE_PYLONG_INTERNALS 0 - #ifndef CYTHON_AVOID_BORROWED_REFS - #define CYTHON_AVOID_BORROWED_REFS 0 - #endif - #ifndef CYTHON_ASSUME_SAFE_MACROS - #define CYTHON_ASSUME_SAFE_MACROS 1 - #endif - #ifndef CYTHON_UNPACK_METHODS - #define CYTHON_UNPACK_METHODS 1 - #endif - #undef CYTHON_FAST_THREAD_STATE - #define CYTHON_FAST_THREAD_STATE 0 - #undef CYTHON_FAST_PYCALL - #define CYTHON_FAST_PYCALL 0 - #undef CYTHON_PEP489_MULTI_PHASE_INIT - #define CYTHON_PEP489_MULTI_PHASE_INIT 0 - #undef CYTHON_USE_TP_FINALIZE - #define CYTHON_USE_TP_FINALIZE 0 - #undef CYTHON_USE_DICT_VERSIONS - #define CYTHON_USE_DICT_VERSIONS 0 - #undef CYTHON_USE_EXC_INFO_STACK - #define CYTHON_USE_EXC_INFO_STACK 0 -#else - #define CYTHON_COMPILING_IN_PYPY 0 - #define CYTHON_COMPILING_IN_PYSTON 0 - #define CYTHON_COMPILING_IN_CPYTHON 1 - #ifndef CYTHON_USE_TYPE_SLOTS - #define CYTHON_USE_TYPE_SLOTS 1 - #endif - #if PY_VERSION_HEX < 0x02070000 - #undef CYTHON_USE_PYTYPE_LOOKUP - #define CYTHON_USE_PYTYPE_LOOKUP 0 - #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) - #define CYTHON_USE_PYTYPE_LOOKUP 1 - #endif - #if PY_MAJOR_VERSION < 3 - #undef CYTHON_USE_ASYNC_SLOTS - #define CYTHON_USE_ASYNC_SLOTS 0 - 
#elif !defined(CYTHON_USE_ASYNC_SLOTS) - #define CYTHON_USE_ASYNC_SLOTS 1 - #endif - #if PY_VERSION_HEX < 0x02070000 - #undef CYTHON_USE_PYLONG_INTERNALS - #define CYTHON_USE_PYLONG_INTERNALS 0 - #elif !defined(CYTHON_USE_PYLONG_INTERNALS) - #define CYTHON_USE_PYLONG_INTERNALS 1 - #endif - #ifndef CYTHON_USE_PYLIST_INTERNALS - #define CYTHON_USE_PYLIST_INTERNALS 1 - #endif - #ifndef CYTHON_USE_UNICODE_INTERNALS - #define CYTHON_USE_UNICODE_INTERNALS 1 - #endif - #if PY_VERSION_HEX < 0x030300F0 - #undef CYTHON_USE_UNICODE_WRITER - #define CYTHON_USE_UNICODE_WRITER 0 - #elif !defined(CYTHON_USE_UNICODE_WRITER) - #define CYTHON_USE_UNICODE_WRITER 1 - #endif - #ifndef CYTHON_AVOID_BORROWED_REFS - #define CYTHON_AVOID_BORROWED_REFS 0 - #endif - #ifndef CYTHON_ASSUME_SAFE_MACROS - #define CYTHON_ASSUME_SAFE_MACROS 1 - #endif - #ifndef CYTHON_UNPACK_METHODS - #define CYTHON_UNPACK_METHODS 1 - #endif - #ifndef CYTHON_FAST_THREAD_STATE - #define CYTHON_FAST_THREAD_STATE 1 - #endif - #ifndef CYTHON_FAST_PYCALL - #define CYTHON_FAST_PYCALL 1 - #endif - #ifndef CYTHON_PEP489_MULTI_PHASE_INIT - #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000) - #endif - #ifndef CYTHON_USE_TP_FINALIZE - #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) - #endif - #ifndef CYTHON_USE_DICT_VERSIONS - #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1) - #endif - #ifndef CYTHON_USE_EXC_INFO_STACK - #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3) - #endif -#endif -#if !defined(CYTHON_FAST_PYCCALL) -#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) -#endif -#if CYTHON_USE_PYLONG_INTERNALS - #include "longintrepr.h" - #undef SHIFT - #undef BASE - #undef MASK - #ifdef SIZEOF_VOID_P - enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; - #endif -#endif -#ifndef __has_attribute - #define __has_attribute(x) 0 -#endif -#ifndef __has_cpp_attribute - #define __has_cpp_attribute(x) 0 -#endif 
-#ifndef CYTHON_RESTRICT - #if defined(__GNUC__) - #define CYTHON_RESTRICT __restrict__ - #elif defined(_MSC_VER) && _MSC_VER >= 1400 - #define CYTHON_RESTRICT __restrict - #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L - #define CYTHON_RESTRICT restrict - #else - #define CYTHON_RESTRICT - #endif -#endif -#ifndef CYTHON_UNUSED -# if defined(__GNUC__) -# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) -# define CYTHON_UNUSED __attribute__ ((__unused__)) -# else -# define CYTHON_UNUSED -# endif -# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) -# define CYTHON_UNUSED __attribute__ ((__unused__)) -# else -# define CYTHON_UNUSED -# endif -#endif -#ifndef CYTHON_MAYBE_UNUSED_VAR -# if defined(__cplusplus) - template void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } -# else -# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) -# endif -#endif -#ifndef CYTHON_NCP_UNUSED -# if CYTHON_COMPILING_IN_CPYTHON -# define CYTHON_NCP_UNUSED -# else -# define CYTHON_NCP_UNUSED CYTHON_UNUSED -# endif -#endif -#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) -#ifdef _MSC_VER - #ifndef _MSC_STDINT_H_ - #if _MSC_VER < 1300 - typedef unsigned char uint8_t; - typedef unsigned int uint32_t; - #else - typedef unsigned __int8 uint8_t; - typedef unsigned __int32 uint32_t; - #endif - #endif -#else - #include -#endif -#ifndef CYTHON_FALLTHROUGH - #if defined(__cplusplus) && __cplusplus >= 201103L - #if __has_cpp_attribute(fallthrough) - #define CYTHON_FALLTHROUGH [[fallthrough]] - #elif __has_cpp_attribute(clang::fallthrough) - #define CYTHON_FALLTHROUGH [[clang::fallthrough]] - #elif __has_cpp_attribute(gnu::fallthrough) - #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] - #endif - #endif - #ifndef CYTHON_FALLTHROUGH - #if __has_attribute(fallthrough) - #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) - #else - #define CYTHON_FALLTHROUGH - #endif - #endif - #if 
defined(__clang__ ) && defined(__apple_build_version__) - #if __apple_build_version__ < 7000000 - #undef CYTHON_FALLTHROUGH - #define CYTHON_FALLTHROUGH - #endif - #endif -#endif - -#ifndef __cplusplus - #error "Cython files generated with the C++ option must be compiled with a C++ compiler." -#endif -#ifndef CYTHON_INLINE - #if defined(__clang__) - #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) - #else - #define CYTHON_INLINE inline - #endif -#endif -template -void __Pyx_call_destructor(T& x) { - x.~T(); -} -template -class __Pyx_FakeReference { - public: - __Pyx_FakeReference() : ptr(NULL) { } - __Pyx_FakeReference(const T& ref) : ptr(const_cast(&ref)) { } - T *operator->() { return ptr; } - T *operator&() { return ptr; } - operator T&() { return *ptr; } - template bool operator ==(U other) { return *ptr == other; } - template bool operator !=(U other) { return *ptr != other; } - private: - T *ptr; -}; - -#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) - #define Py_OptimizeFlag 0 -#endif -#define __PYX_BUILD_PY_SSIZE_T "n" -#define CYTHON_FORMAT_SSIZE_T "z" -#if PY_MAJOR_VERSION < 3 - #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" - #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ - PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) - #define __Pyx_DefaultClassType PyClass_Type -#else - #define __Pyx_BUILTIN_MODULE_NAME "builtins" -#if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2 - #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ - PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) -#else - #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ - PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) -#endif - #define __Pyx_DefaultClassType PyType_Type -#endif -#ifndef Py_TPFLAGS_CHECKTYPES - #define 
Py_TPFLAGS_CHECKTYPES 0 -#endif -#ifndef Py_TPFLAGS_HAVE_INDEX - #define Py_TPFLAGS_HAVE_INDEX 0 -#endif -#ifndef Py_TPFLAGS_HAVE_NEWBUFFER - #define Py_TPFLAGS_HAVE_NEWBUFFER 0 -#endif -#ifndef Py_TPFLAGS_HAVE_FINALIZE - #define Py_TPFLAGS_HAVE_FINALIZE 0 -#endif -#ifndef METH_STACKLESS - #define METH_STACKLESS 0 -#endif -#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) - #ifndef METH_FASTCALL - #define METH_FASTCALL 0x80 - #endif - typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); - typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, - Py_ssize_t nargs, PyObject *kwnames); -#else - #define __Pyx_PyCFunctionFast _PyCFunctionFast - #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords -#endif -#if CYTHON_FAST_PYCCALL -#define __Pyx_PyFastCFunction_Check(func)\ - ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))))) -#else -#define __Pyx_PyFastCFunction_Check(func) 0 -#endif -#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) - #define PyObject_Malloc(s) PyMem_Malloc(s) - #define PyObject_Free(p) PyMem_Free(p) - #define PyObject_Realloc(p) PyMem_Realloc(p) -#endif -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1 - #define PyMem_RawMalloc(n) PyMem_Malloc(n) - #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) - #define PyMem_RawFree(p) PyMem_Free(p) -#endif -#if CYTHON_COMPILING_IN_PYSTON - #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) - #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) -#else - #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) - #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) -#endif -#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 - #define __Pyx_PyThreadState_Current PyThreadState_GET() -#elif 
PY_VERSION_HEX >= 0x03060000 - #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() -#elif PY_VERSION_HEX >= 0x03000000 - #define __Pyx_PyThreadState_Current PyThreadState_GET() -#else - #define __Pyx_PyThreadState_Current _PyThreadState_Current -#endif -#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) -#include "pythread.h" -#define Py_tss_NEEDS_INIT 0 -typedef int Py_tss_t; -static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { - *key = PyThread_create_key(); - return 0; -} -static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { - Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); - *key = Py_tss_NEEDS_INIT; - return key; -} -static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { - PyObject_Free(key); -} -static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { - return *key != Py_tss_NEEDS_INIT; -} -static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { - PyThread_delete_key(*key); - *key = Py_tss_NEEDS_INIT; -} -static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { - return PyThread_set_key_value(*key, value); -} -static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { - return PyThread_get_key_value(*key); -} -#endif -#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) -#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? 
PyDict_New() : _PyDict_NewPresized(n)) -#else -#define __Pyx_PyDict_NewPresized(n) PyDict_New() -#endif -#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION - #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) - #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) -#else - #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) - #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) -#endif -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS -#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) -#else -#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) -#endif -#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) - #define CYTHON_PEP393_ENABLED 1 - #if defined(PyUnicode_IS_READY) - #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ - 0 : _PyUnicode_Ready((PyObject *)(op))) - #else - #define __Pyx_PyUnicode_READY(op) (0) - #endif - #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) - #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) - #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) - #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) - #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) - #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) - #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) - #if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE) - #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03090000 - #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : ((PyCompactUnicodeObject *)(u))->wstr_length)) - #else - #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? 
PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) - #endif - #else - #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u)) - #endif -#else - #define CYTHON_PEP393_ENABLED 0 - #define PyUnicode_1BYTE_KIND 1 - #define PyUnicode_2BYTE_KIND 2 - #define PyUnicode_4BYTE_KIND 4 - #define __Pyx_PyUnicode_READY(op) (0) - #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) - #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) - #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111) - #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) - #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) - #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) - #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) - #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) -#endif -#if CYTHON_COMPILING_IN_PYPY - #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) - #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) -#else - #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) - #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ - PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) -#endif -#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) - #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) -#endif -#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) - #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) -#endif -#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) - #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) -#endif -#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? 
PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) -#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) -#if PY_MAJOR_VERSION >= 3 - #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) -#else - #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) -#endif -#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) - #define PyObject_ASCII(o) PyObject_Repr(o) -#endif -#if PY_MAJOR_VERSION >= 3 - #define PyBaseString_Type PyUnicode_Type - #define PyStringObject PyUnicodeObject - #define PyString_Type PyUnicode_Type - #define PyString_Check PyUnicode_Check - #define PyString_CheckExact PyUnicode_CheckExact -#ifndef PyObject_Unicode - #define PyObject_Unicode PyObject_Str -#endif -#endif -#if PY_MAJOR_VERSION >= 3 - #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) - #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) -#else - #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) - #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) -#endif -#ifndef PySet_CheckExact - #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) -#endif -#if PY_VERSION_HEX >= 0x030900A4 - #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt) - #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size) -#else - #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt) - #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size) -#endif -#if CYTHON_ASSUME_SAFE_MACROS - #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) -#else - #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) -#endif -#if PY_MAJOR_VERSION >= 3 - #define PyIntObject PyLongObject - #define PyInt_Type PyLong_Type - #define PyInt_Check(op) PyLong_Check(op) - #define PyInt_CheckExact(op) PyLong_CheckExact(op) - #define PyInt_FromString PyLong_FromString - #define PyInt_FromUnicode 
PyLong_FromUnicode - #define PyInt_FromLong PyLong_FromLong - #define PyInt_FromSize_t PyLong_FromSize_t - #define PyInt_FromSsize_t PyLong_FromSsize_t - #define PyInt_AsLong PyLong_AsLong - #define PyInt_AS_LONG PyLong_AS_LONG - #define PyInt_AsSsize_t PyLong_AsSsize_t - #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask - #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask - #define PyNumber_Int PyNumber_Long -#endif -#if PY_MAJOR_VERSION >= 3 - #define PyBoolObject PyLongObject -#endif -#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY - #ifndef PyUnicode_InternFromString - #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) - #endif -#endif -#if PY_VERSION_HEX < 0x030200A4 - typedef long Py_hash_t; - #define __Pyx_PyInt_FromHash_t PyInt_FromLong - #define __Pyx_PyInt_AsHash_t PyInt_AsLong -#else - #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t - #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t -#endif -#if PY_MAJOR_VERSION >= 3 - #define __Pyx_PyMethod_New(func, self, klass) ((self) ? 
((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func)) -#else - #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) -#endif -#if CYTHON_USE_ASYNC_SLOTS - #if PY_VERSION_HEX >= 0x030500B1 - #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods - #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) - #else - #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) - #endif -#else - #define __Pyx_PyType_AsAsync(obj) NULL -#endif -#ifndef __Pyx_PyAsyncMethodsStruct - typedef struct { - unaryfunc am_await; - unaryfunc am_aiter; - unaryfunc am_anext; - } __Pyx_PyAsyncMethodsStruct; -#endif - -#if defined(WIN32) || defined(MS_WINDOWS) - #define _USE_MATH_DEFINES -#endif -#include -#ifdef NAN -#define __PYX_NAN() ((float) NAN) -#else -static CYTHON_INLINE float __PYX_NAN() { - float value; - memset(&value, 0xFF, sizeof(value)); - return value; -} -#endif -#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) -#define __Pyx_truncl trunc -#else -#define __Pyx_truncl truncl -#endif - -#define __PYX_MARK_ERR_POS(f_index, lineno) \ - { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; } -#define __PYX_ERR(f_index, lineno, Ln_error) \ - { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; } - -#ifndef __PYX_EXTERN_C - #ifdef __cplusplus - #define __PYX_EXTERN_C extern "C" - #else - #define __PYX_EXTERN_C extern - #endif -#endif - -#define __PYX_HAVE__nms__gpu_nms -#define __PYX_HAVE_API__nms__gpu_nms -/* Early includes */ -#include -#include -#include "numpy/arrayobject.h" -#include "numpy/ufuncobject.h" - - /* NumPy API declarations from "numpy/__init__.pxd" */ - -#include "gpu_nms.hpp" -#ifdef _OPENMP -#include -#endif /* _OPENMP */ - -#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) -#define CYTHON_WITHOUT_ASSERTIONS -#endif - -typedef struct {PyObject **p; const char *s; const 
Py_ssize_t n; const char* encoding; - const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; - -#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 -#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0 -#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8) -#define __PYX_DEFAULT_STRING_ENCODING "" -#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString -#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize -#define __Pyx_uchar_cast(c) ((unsigned char)c) -#define __Pyx_long_cast(x) ((long)x) -#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ - (sizeof(type) < sizeof(Py_ssize_t)) ||\ - (sizeof(type) > sizeof(Py_ssize_t) &&\ - likely(v < (type)PY_SSIZE_T_MAX ||\ - v == (type)PY_SSIZE_T_MAX) &&\ - (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ - v == (type)PY_SSIZE_T_MIN))) ||\ - (sizeof(type) == sizeof(Py_ssize_t) &&\ - (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ - v == (type)PY_SSIZE_T_MAX))) ) -static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { - return (size_t) i < (size_t) limit; -} -#if defined (__cplusplus) && __cplusplus >= 201103L - #include - #define __Pyx_sst_abs(value) std::abs(value) -#elif SIZEOF_INT >= SIZEOF_SIZE_T - #define __Pyx_sst_abs(value) abs(value) -#elif SIZEOF_LONG >= SIZEOF_SIZE_T - #define __Pyx_sst_abs(value) labs(value) -#elif defined (_MSC_VER) - #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) -#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L - #define __Pyx_sst_abs(value) llabs(value) -#elif defined (__GNUC__) - #define __Pyx_sst_abs(value) __builtin_llabs(value) -#else - #define __Pyx_sst_abs(value) ((value<0) ? 
-value : value) -#endif -static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); -static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); -#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) -#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) -#define __Pyx_PyBytes_FromString PyBytes_FromString -#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize -static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); -#if PY_MAJOR_VERSION < 3 - #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString - #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize -#else - #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString - #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize -#endif -#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) -#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) -#define 
__Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) -#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) -#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) -static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { - const Py_UNICODE *u_end = u; - while (*u_end++) ; - return (size_t)(u_end - u - 1); -} -#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) -#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode -#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode -#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) -#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) -static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); -static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); -static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); -static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); -#define __Pyx_PySequence_Tuple(obj)\ - (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) -static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); -static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); -#if CYTHON_ASSUME_SAFE_MACROS -#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) -#else -#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) -#endif -#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) -#if PY_MAJOR_VERSION >= 3 -#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) -#else -#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) -#endif -#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? 
__Pyx_NewRef(x) : PyNumber_Float(x)) -#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII -static int __Pyx_sys_getdefaultencoding_not_ascii; -static int __Pyx_init_sys_getdefaultencoding_params(void) { - PyObject* sys; - PyObject* default_encoding = NULL; - PyObject* ascii_chars_u = NULL; - PyObject* ascii_chars_b = NULL; - const char* default_encoding_c; - sys = PyImport_ImportModule("sys"); - if (!sys) goto bad; - default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); - Py_DECREF(sys); - if (!default_encoding) goto bad; - default_encoding_c = PyBytes_AsString(default_encoding); - if (!default_encoding_c) goto bad; - if (strcmp(default_encoding_c, "ascii") == 0) { - __Pyx_sys_getdefaultencoding_not_ascii = 0; - } else { - char ascii_chars[128]; - int c; - for (c = 0; c < 128; c++) { - ascii_chars[c] = c; - } - __Pyx_sys_getdefaultencoding_not_ascii = 1; - ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); - if (!ascii_chars_u) goto bad; - ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); - if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { - PyErr_Format( - PyExc_ValueError, - "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", - default_encoding_c); - goto bad; - } - Py_DECREF(ascii_chars_u); - Py_DECREF(ascii_chars_b); - } - Py_DECREF(default_encoding); - return 0; -bad: - Py_XDECREF(default_encoding); - Py_XDECREF(ascii_chars_u); - Py_XDECREF(ascii_chars_b); - return -1; -} -#endif -#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 -#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) -#else -#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) -#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT -static char* 
__PYX_DEFAULT_STRING_ENCODING; -static int __Pyx_init_sys_getdefaultencoding_params(void) { - PyObject* sys; - PyObject* default_encoding = NULL; - char* default_encoding_c; - sys = PyImport_ImportModule("sys"); - if (!sys) goto bad; - default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); - Py_DECREF(sys); - if (!default_encoding) goto bad; - default_encoding_c = PyBytes_AsString(default_encoding); - if (!default_encoding_c) goto bad; - __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); - if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; - strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); - Py_DECREF(default_encoding); - return 0; -bad: - Py_XDECREF(default_encoding); - return -1; -} -#endif -#endif - - -/* Test for GCC > 2.95 */ -#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) - #define likely(x) __builtin_expect(!!(x), 1) - #define unlikely(x) __builtin_expect(!!(x), 0) -#else /* !__GNUC__ or GCC < 2.95 */ - #define likely(x) (x) - #define unlikely(x) (x) -#endif /* __GNUC__ */ -static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } - -static PyObject *__pyx_m = NULL; -static PyObject *__pyx_d; -static PyObject *__pyx_b; -static PyObject *__pyx_cython_runtime = NULL; -static PyObject *__pyx_empty_tuple; -static PyObject *__pyx_empty_bytes; -static PyObject *__pyx_empty_unicode; -static int __pyx_lineno; -static int __pyx_clineno = 0; -static const char * __pyx_cfilenm= __FILE__; -static const char *__pyx_filename; - -/* Header.proto */ -#if !defined(CYTHON_CCOMPLEX) - #if defined(__cplusplus) - #define CYTHON_CCOMPLEX 1 - #elif defined(_Complex_I) - #define CYTHON_CCOMPLEX 1 - #else - #define CYTHON_CCOMPLEX 0 - #endif -#endif -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - #include - #else - #include - #endif -#endif -#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) - #undef _Complex_I - #define 
_Complex_I 1.0fj -#endif - - -static const char *__pyx_f[] = { - "nms/gpu_nms.pyx", - "__init__.pxd", - "type.pxd", -}; -/* BufferFormatStructs.proto */ -#define IS_UNSIGNED(type) (((type) -1) > 0) -struct __Pyx_StructField_; -#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) -typedef struct { - const char* name; - struct __Pyx_StructField_* fields; - size_t size; - size_t arraysize[8]; - int ndim; - char typegroup; - char is_unsigned; - int flags; -} __Pyx_TypeInfo; -typedef struct __Pyx_StructField_ { - __Pyx_TypeInfo* type; - const char* name; - size_t offset; -} __Pyx_StructField; -typedef struct { - __Pyx_StructField* field; - size_t parent_offset; -} __Pyx_BufFmt_StackElem; -typedef struct { - __Pyx_StructField root; - __Pyx_BufFmt_StackElem* head; - size_t fmt_offset; - size_t new_count, enc_count; - size_t struct_alignment; - int is_complex; - char enc_type; - char new_packmode; - char enc_packmode; - char is_valid_array; -} __Pyx_BufFmt_Context; - - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":689 - * # in Cython to enable them only on the right systems. 
- * - * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< - * ctypedef npy_int16 int16_t - * ctypedef npy_int32 int32_t - */ -typedef npy_int8 __pyx_t_5numpy_int8_t; - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":690 - * - * ctypedef npy_int8 int8_t - * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< - * ctypedef npy_int32 int32_t - * ctypedef npy_int64 int64_t - */ -typedef npy_int16 __pyx_t_5numpy_int16_t; - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":691 - * ctypedef npy_int8 int8_t - * ctypedef npy_int16 int16_t - * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< - * ctypedef npy_int64 int64_t - * #ctypedef npy_int96 int96_t - */ -typedef npy_int32 __pyx_t_5numpy_int32_t; - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":692 - * ctypedef npy_int16 int16_t - * ctypedef npy_int32 int32_t - * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< - * #ctypedef npy_int96 int96_t - * #ctypedef npy_int128 int128_t - */ -typedef npy_int64 __pyx_t_5numpy_int64_t; - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":696 - * #ctypedef npy_int128 int128_t - * - * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< - * ctypedef npy_uint16 uint16_t - * ctypedef npy_uint32 uint32_t - */ -typedef npy_uint8 __pyx_t_5numpy_uint8_t; - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":697 - * - * ctypedef npy_uint8 uint8_t - * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< - * ctypedef npy_uint32 uint32_t - * ctypedef npy_uint64 uint64_t - */ -typedef npy_uint16 __pyx_t_5numpy_uint16_t; - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":698 - * ctypedef npy_uint8 uint8_t - * ctypedef npy_uint16 uint16_t - * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< - * ctypedef npy_uint64 uint64_t - * #ctypedef npy_uint96 uint96_t - */ -typedef npy_uint32 __pyx_t_5numpy_uint32_t; - -/* 
"../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":699 - * ctypedef npy_uint16 uint16_t - * ctypedef npy_uint32 uint32_t - * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< - * #ctypedef npy_uint96 uint96_t - * #ctypedef npy_uint128 uint128_t - */ -typedef npy_uint64 __pyx_t_5numpy_uint64_t; - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":703 - * #ctypedef npy_uint128 uint128_t - * - * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< - * ctypedef npy_float64 float64_t - * #ctypedef npy_float80 float80_t - */ -typedef npy_float32 __pyx_t_5numpy_float32_t; - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":704 - * - * ctypedef npy_float32 float32_t - * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< - * #ctypedef npy_float80 float80_t - * #ctypedef npy_float128 float128_t - */ -typedef npy_float64 __pyx_t_5numpy_float64_t; - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":713 - * # The int types are mapped a bit surprising -- - * # numpy.int corresponds to 'l' and numpy.long to 'q' - * ctypedef npy_long int_t # <<<<<<<<<<<<<< - * ctypedef npy_longlong long_t - * ctypedef npy_longlong longlong_t - */ -typedef npy_long __pyx_t_5numpy_int_t; - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":714 - * # numpy.int corresponds to 'l' and numpy.long to 'q' - * ctypedef npy_long int_t - * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< - * ctypedef npy_longlong longlong_t - * - */ -typedef npy_longlong __pyx_t_5numpy_long_t; - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":715 - * ctypedef npy_long int_t - * ctypedef npy_longlong long_t - * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< - * - * ctypedef npy_ulong uint_t - */ -typedef npy_longlong __pyx_t_5numpy_longlong_t; - -/* 
"../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":717 - * ctypedef npy_longlong longlong_t - * - * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< - * ctypedef npy_ulonglong ulong_t - * ctypedef npy_ulonglong ulonglong_t - */ -typedef npy_ulong __pyx_t_5numpy_uint_t; - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":718 - * - * ctypedef npy_ulong uint_t - * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< - * ctypedef npy_ulonglong ulonglong_t - * - */ -typedef npy_ulonglong __pyx_t_5numpy_ulong_t; - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":719 - * ctypedef npy_ulong uint_t - * ctypedef npy_ulonglong ulong_t - * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< - * - * ctypedef npy_intp intp_t - */ -typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":721 - * ctypedef npy_ulonglong ulonglong_t - * - * ctypedef npy_intp intp_t # <<<<<<<<<<<<<< - * ctypedef npy_uintp uintp_t - * - */ -typedef npy_intp __pyx_t_5numpy_intp_t; - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":722 - * - * ctypedef npy_intp intp_t - * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< - * - * ctypedef npy_double float_t - */ -typedef npy_uintp __pyx_t_5numpy_uintp_t; - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":724 - * ctypedef npy_uintp uintp_t - * - * ctypedef npy_double float_t # <<<<<<<<<<<<<< - * ctypedef npy_double double_t - * ctypedef npy_longdouble longdouble_t - */ -typedef npy_double __pyx_t_5numpy_float_t; - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":725 - * - * ctypedef npy_double float_t - * ctypedef npy_double double_t # <<<<<<<<<<<<<< - * ctypedef npy_longdouble longdouble_t - * - */ -typedef npy_double __pyx_t_5numpy_double_t; - -/* 
"../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":726 - * ctypedef npy_double float_t - * ctypedef npy_double double_t - * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< - * - * ctypedef npy_cfloat cfloat_t - */ -typedef npy_longdouble __pyx_t_5numpy_longdouble_t; -/* Declarations.proto */ -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - typedef ::std::complex< float > __pyx_t_float_complex; - #else - typedef float _Complex __pyx_t_float_complex; - #endif -#else - typedef struct { float real, imag; } __pyx_t_float_complex; -#endif -static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); - -/* Declarations.proto */ -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - typedef ::std::complex< double > __pyx_t_double_complex; - #else - typedef double _Complex __pyx_t_double_complex; - #endif -#else - typedef struct { double real, imag; } __pyx_t_double_complex; -#endif -static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); - - -/*--- Type declarations ---*/ - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":728 - * ctypedef npy_longdouble longdouble_t - * - * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< - * ctypedef npy_cdouble cdouble_t - * ctypedef npy_clongdouble clongdouble_t - */ -typedef npy_cfloat __pyx_t_5numpy_cfloat_t; - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":729 - * - * ctypedef npy_cfloat cfloat_t - * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< - * ctypedef npy_clongdouble clongdouble_t - * - */ -typedef npy_cdouble __pyx_t_5numpy_cdouble_t; - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":730 - * ctypedef npy_cfloat cfloat_t - * ctypedef npy_cdouble cdouble_t - * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< - * - * ctypedef npy_cdouble complex_t - */ -typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; - -/* 
"../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":732 - * ctypedef npy_clongdouble clongdouble_t - * - * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< - * - * cdef inline object PyArray_MultiIterNew1(a): - */ -typedef npy_cdouble __pyx_t_5numpy_complex_t; - -/* --- Runtime support code (head) --- */ -/* Refnanny.proto */ -#ifndef CYTHON_REFNANNY - #define CYTHON_REFNANNY 0 -#endif -#if CYTHON_REFNANNY - typedef struct { - void (*INCREF)(void*, PyObject*, int); - void (*DECREF)(void*, PyObject*, int); - void (*GOTREF)(void*, PyObject*, int); - void (*GIVEREF)(void*, PyObject*, int); - void* (*SetupContext)(const char*, int, const char*); - void (*FinishContext)(void**); - } __Pyx_RefNannyAPIStruct; - static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; - static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); - #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; -#ifdef WITH_THREAD - #define __Pyx_RefNannySetupContext(name, acquire_gil)\ - if (acquire_gil) {\ - PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ - __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ - PyGILState_Release(__pyx_gilstate_save);\ - } else {\ - __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ - } -#else - #define __Pyx_RefNannySetupContext(name, acquire_gil)\ - __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) -#endif - #define __Pyx_RefNannyFinishContext()\ - __Pyx_RefNanny->FinishContext(&__pyx_refnanny) - #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} 
while(0) - #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) - #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) - #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) -#else - #define __Pyx_RefNannyDeclarations - #define __Pyx_RefNannySetupContext(name, acquire_gil) - #define __Pyx_RefNannyFinishContext() - #define __Pyx_INCREF(r) Py_INCREF(r) - #define __Pyx_DECREF(r) Py_DECREF(r) - #define __Pyx_GOTREF(r) - #define __Pyx_GIVEREF(r) - #define __Pyx_XINCREF(r) Py_XINCREF(r) - #define __Pyx_XDECREF(r) Py_XDECREF(r) - #define __Pyx_XGOTREF(r) - #define __Pyx_XGIVEREF(r) -#endif -#define __Pyx_XDECREF_SET(r, v) do {\ - PyObject *tmp = (PyObject *) r;\ - r = v; __Pyx_XDECREF(tmp);\ - } while (0) -#define __Pyx_DECREF_SET(r, v) do {\ - PyObject *tmp = (PyObject *) r;\ - r = v; __Pyx_DECREF(tmp);\ - } while (0) -#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) -#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) - -/* RaiseArgTupleInvalid.proto */ -static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, - Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); - -/* RaiseDoubleKeywords.proto */ -static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); - -/* ParseKeywords.proto */ -static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ - PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ - const char* function_name); - -/* ArgTypeTest.proto */ -#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\ - ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 
1 :\ - __Pyx__ArgTypeTest(obj, type, name, exact)) -static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); - -/* IsLittleEndian.proto */ -static CYTHON_INLINE int __Pyx_Is_Little_Endian(void); - -/* BufferFormatCheck.proto */ -static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); -static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, - __Pyx_BufFmt_StackElem* stack, - __Pyx_TypeInfo* type); - -/* BufferGetAndValidate.proto */ -#define __Pyx_GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack)\ - ((obj == Py_None || obj == NULL) ?\ - (__Pyx_ZeroBuffer(buf), 0) :\ - __Pyx__GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack)) -static int __Pyx__GetBufferAndValidate(Py_buffer* buf, PyObject* obj, - __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); -static void __Pyx_ZeroBuffer(Py_buffer* buf); -static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); -static Py_ssize_t __Pyx_minusones[] = { -1, -1, -1, -1, -1, -1, -1, -1 }; -static Py_ssize_t __Pyx_zeros[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; - -/* PyObjectGetAttrStr.proto */ -#if CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); -#else -#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) -#endif - -/* GetBuiltinName.proto */ -static PyObject *__Pyx_GetBuiltinName(PyObject *name); - -/* PyDictVersioning.proto */ -#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS -#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) -#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) -#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ - (version_var) = __PYX_GET_DICT_VERSION(dict);\ - (cache_var) = (value); -#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ - static PY_UINT64_T __pyx_dict_version = 0;\ - static PyObject *__pyx_dict_cached_value = NULL;\ - if 
(likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ - (VAR) = __pyx_dict_cached_value;\ - } else {\ - (VAR) = __pyx_dict_cached_value = (LOOKUP);\ - __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ - }\ -} -static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); -static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); -static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); -#else -#define __PYX_GET_DICT_VERSION(dict) (0) -#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) -#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); -#endif - -/* GetModuleGlobalName.proto */ -#if CYTHON_USE_DICT_VERSIONS -#define __Pyx_GetModuleGlobalName(var, name) {\ - static PY_UINT64_T __pyx_dict_version = 0;\ - static PyObject *__pyx_dict_cached_value = NULL;\ - (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\ - (likely(__pyx_dict_cached_value) ? 
__Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ - __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ -} -#define __Pyx_GetModuleGlobalNameUncached(var, name) {\ - PY_UINT64_T __pyx_dict_version;\ - PyObject *__pyx_dict_cached_value;\ - (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ -} -static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); -#else -#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) -#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) -static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); -#endif - -/* PyObjectCall.proto */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); -#else -#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) -#endif - -/* ExtTypeTest.proto */ -static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); - -/* GetItemInt.proto */ -#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ - (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ - __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ - (is_list ? 
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ - __Pyx_GetItemInt_Generic(o, to_py_func(i)))) -#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ - (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ - __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ - (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, - int wraparound, int boundscheck); -#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ - (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ - __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ - (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, - int wraparound, int boundscheck); -static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, - int is_list, int wraparound, int boundscheck); - -/* ObjectGetItem.proto */ -#if CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key); -#else -#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key) -#endif - -/* PyFunctionFastCall.proto */ -#if CYTHON_FAST_PYCALL -#define __Pyx_PyFunction_FastCall(func, args, nargs)\ - __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) -#if 1 || PY_VERSION_HEX < 0x030600B1 -static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); -#else -#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) -#endif -#define __Pyx_BUILD_ASSERT_EXPR(cond)\ - (sizeof(char [1 - 2*!(cond)]) - 1) -#ifndef Py_MEMBER_SIZE -#define 
Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) -#endif - static size_t __pyx_pyframe_localsplus_offset = 0; - #include "frameobject.h" - #define __Pxy_PyFrame_Initialize_Offsets()\ - ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\ - (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) - #define __Pyx_PyFrame_GetLocalsplus(frame)\ - (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) -#endif - -/* PyObjectCallMethO.proto */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); -#endif - -/* PyObjectCallNoArg.proto */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func); -#else -#define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL) -#endif - -/* PyCFunctionFastCall.proto */ -#if CYTHON_FAST_PYCCALL -static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); -#else -#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) -#endif - -/* PyObjectCallOneArg.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); - -/* BufferIndexError.proto */ -static void __Pyx_RaiseBufferIndexError(int axis); - -#define __Pyx_BufPtrStrided1d(type, buf, i0, s0) (type)((char*)buf + i0 * s0) -#define __Pyx_BufPtrStrided2d(type, buf, i0, s0, i1, s1) (type)((char*)buf + i0 * s0 + i1 * s1) -/* BufferFallbackError.proto */ -static void __Pyx_RaiseBufferFallbackError(void); - -/* PyThreadStateGet.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; -#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; -#define 
__Pyx_PyErr_Occurred() __pyx_tstate->curexc_type -#else -#define __Pyx_PyThreadState_declare -#define __Pyx_PyThreadState_assign -#define __Pyx_PyErr_Occurred() PyErr_Occurred() -#endif - -/* PyErrFetchRestore.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) -#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) -#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) -#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) -#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) -static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); -static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); -#if CYTHON_COMPILING_IN_CPYTHON -#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) -#else -#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) -#endif -#else -#define __Pyx_PyErr_Clear() PyErr_Clear() -#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) -#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) -#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) -#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) -#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) -#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) -#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) -#endif - -/* GetTopmostException.proto */ -#if CYTHON_USE_EXC_INFO_STACK -static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate); -#endif - -/* SaveResetException.proto */ -#if CYTHON_FAST_THREAD_STATE -#define 
__Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) -static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); -#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) -static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); -#else -#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) -#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) -#endif - -/* PyErrExceptionMatches.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) -static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); -#else -#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) -#endif - -/* GetException.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) -static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); -#else -static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); -#endif - -/* RaiseException.proto */ -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); - -/* TypeImport.proto */ -#ifndef __PYX_HAVE_RT_ImportType_proto -#define __PYX_HAVE_RT_ImportType_proto -enum __Pyx_ImportType_CheckSize { - __Pyx_ImportType_CheckSize_Error = 0, - __Pyx_ImportType_CheckSize_Warn = 1, - __Pyx_ImportType_CheckSize_Ignore = 2 -}; -static PyTypeObject *__Pyx_ImportType(PyObject* module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size); -#endif - -/* Import.proto */ -static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); - -/* 
CLineInTraceback.proto */ -#ifdef CYTHON_CLINE_IN_TRACEBACK -#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0) -#else -static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); -#endif - -/* CodeObjectCache.proto */ -typedef struct { - PyCodeObject* code_object; - int code_line; -} __Pyx_CodeObjectCacheEntry; -struct __Pyx_CodeObjectCache { - int count; - int max_count; - __Pyx_CodeObjectCacheEntry* entries; -}; -static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; -static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); -static PyCodeObject *__pyx_find_code_object(int code_line); -static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); - -/* AddTraceback.proto */ -static void __Pyx_AddTraceback(const char *funcname, int c_line, - int py_line, const char *filename); - -/* BufferStructDeclare.proto */ -typedef struct { - Py_ssize_t shape, strides, suboffsets; -} __Pyx_Buf_DimInfo; -typedef struct { - size_t refcount; - Py_buffer pybuffer; -} __Pyx_Buffer; -typedef struct { - __Pyx_Buffer *rcbuffer; - char *data; - __Pyx_Buf_DimInfo diminfo[8]; -} __Pyx_LocalBuf_ND; - -#if PY_MAJOR_VERSION < 3 - static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); - static void __Pyx_ReleaseBuffer(Py_buffer *view); -#else - #define __Pyx_GetBuffer PyObject_GetBuffer - #define __Pyx_ReleaseBuffer PyBuffer_Release -#endif - - -/* GCCDiagnostics.proto */ -#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) -#define __Pyx_HAS_GCC_DIAGNOSTIC -#endif - -/* RealImag.proto */ -#if CYTHON_CCOMPLEX - #ifdef __cplusplus - #define __Pyx_CREAL(z) ((z).real()) - #define __Pyx_CIMAG(z) ((z).imag()) - #else - #define __Pyx_CREAL(z) (__real__(z)) - #define __Pyx_CIMAG(z) (__imag__(z)) - #endif -#else - #define __Pyx_CREAL(z) ((z).real) - #define __Pyx_CIMAG(z) ((z).imag) -#endif -#if defined(__cplusplus) && 
CYTHON_CCOMPLEX\ - && (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103) - #define __Pyx_SET_CREAL(z,x) ((z).real(x)) - #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) -#else - #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) - #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) -#endif - -/* Arithmetic.proto */ -#if CYTHON_CCOMPLEX - #define __Pyx_c_eq_float(a, b) ((a)==(b)) - #define __Pyx_c_sum_float(a, b) ((a)+(b)) - #define __Pyx_c_diff_float(a, b) ((a)-(b)) - #define __Pyx_c_prod_float(a, b) ((a)*(b)) - #define __Pyx_c_quot_float(a, b) ((a)/(b)) - #define __Pyx_c_neg_float(a) (-(a)) - #ifdef __cplusplus - #define __Pyx_c_is_zero_float(z) ((z)==(float)0) - #define __Pyx_c_conj_float(z) (::std::conj(z)) - #if 1 - #define __Pyx_c_abs_float(z) (::std::abs(z)) - #define __Pyx_c_pow_float(a, b) (::std::pow(a, b)) - #endif - #else - #define __Pyx_c_is_zero_float(z) ((z)==0) - #define __Pyx_c_conj_float(z) (conjf(z)) - #if 1 - #define __Pyx_c_abs_float(z) (cabsf(z)) - #define __Pyx_c_pow_float(a, b) (cpowf(a, b)) - #endif - #endif -#else - static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex, __pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex); - static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex); - #if 1 - static CYTHON_INLINE float 
__Pyx_c_abs_float(__pyx_t_float_complex); - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex, __pyx_t_float_complex); - #endif -#endif - -/* Arithmetic.proto */ -#if CYTHON_CCOMPLEX - #define __Pyx_c_eq_double(a, b) ((a)==(b)) - #define __Pyx_c_sum_double(a, b) ((a)+(b)) - #define __Pyx_c_diff_double(a, b) ((a)-(b)) - #define __Pyx_c_prod_double(a, b) ((a)*(b)) - #define __Pyx_c_quot_double(a, b) ((a)/(b)) - #define __Pyx_c_neg_double(a) (-(a)) - #ifdef __cplusplus - #define __Pyx_c_is_zero_double(z) ((z)==(double)0) - #define __Pyx_c_conj_double(z) (::std::conj(z)) - #if 1 - #define __Pyx_c_abs_double(z) (::std::abs(z)) - #define __Pyx_c_pow_double(a, b) (::std::pow(a, b)) - #endif - #else - #define __Pyx_c_is_zero_double(z) ((z)==0) - #define __Pyx_c_conj_double(z) (conj(z)) - #if 1 - #define __Pyx_c_abs_double(z) (cabs(z)) - #define __Pyx_c_pow_double(a, b) (cpow(a, b)) - #endif - #endif -#else - static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex, __pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex); - static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex); - #if 1 - static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex); - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex, __pyx_t_double_complex); - #endif -#endif - -/* CIntFromPy.proto */ 
-static CYTHON_INLINE npy_int32 __Pyx_PyInt_As_npy_int32(PyObject *); - -/* CIntToPy.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); - -/* CIntToPy.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); - -/* CIntFromPy.proto */ -static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); - -/* CIntFromPy.proto */ -static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); - -/* FastTypeChecks.proto */ -#if CYTHON_COMPILING_IN_CPYTHON -#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) -static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); -static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); -static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); -#else -#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) -#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) -#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) -#endif -#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) - -/* CheckBinaryVersion.proto */ -static int __Pyx_check_binary_version(void); - -/* InitStrings.proto */ -static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); - - -/* Module declarations from 'cpython.buffer' */ - -/* Module declarations from 'libc.string' */ - -/* Module declarations from 'libc.stdio' */ - -/* Module declarations from '__builtin__' */ - -/* Module declarations from 'cpython.type' */ -static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; - -/* Module declarations from 'cpython' */ - -/* Module declarations from 'cpython.object' */ - -/* Module declarations from 'cpython.ref' */ - -/* Module declarations from 'cpython.mem' */ - -/* Module declarations from 'numpy' */ - -/* Module declarations from 
'numpy' */ -static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; -static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; -static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; -static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; -static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; - -/* Module declarations from 'nms.gpu_nms' */ -static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_float32_t = { "float32_t", NULL, sizeof(__pyx_t_5numpy_float32_t), { 0 }, 0, 'R', 0, 0 }; -static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_int32_t = { "int32_t", NULL, sizeof(__pyx_t_5numpy_int32_t), { 0 }, 0, IS_UNSIGNED(__pyx_t_5numpy_int32_t) ? 'U' : 'I', IS_UNSIGNED(__pyx_t_5numpy_int32_t), 0 }; -static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_int_t = { "int_t", NULL, sizeof(__pyx_t_5numpy_int_t), { 0 }, 0, IS_UNSIGNED(__pyx_t_5numpy_int_t) ? 'U' : 'I', IS_UNSIGNED(__pyx_t_5numpy_int_t), 0 }; -#define __Pyx_MODULE_NAME "nms.gpu_nms" -extern int __pyx_module_is_main_nms__gpu_nms; -int __pyx_module_is_main_nms__gpu_nms = 0; - -/* Implementation of 'nms.gpu_nms' */ -static PyObject *__pyx_builtin_ImportError; -static const char __pyx_k_np[] = "np"; -static const char __pyx_k_dets[] = "dets"; -static const char __pyx_k_keep[] = "keep"; -static const char __pyx_k_main[] = "__main__"; -static const char __pyx_k_name[] = "__name__"; -static const char __pyx_k_test[] = "__test__"; -static const char __pyx_k_dtype[] = "dtype"; -static const char __pyx_k_int32[] = "int32"; -static const char __pyx_k_numpy[] = "numpy"; -static const char __pyx_k_order[] = "order"; -static const char __pyx_k_zeros[] = "zeros"; -static const char __pyx_k_import[] = "__import__"; -static const char __pyx_k_scores[] = "scores"; -static const char __pyx_k_thresh[] = "thresh"; -static const char __pyx_k_argsort[] = "argsort"; -static const char __pyx_k_gpu_nms[] = "gpu_nms"; -static const char __pyx_k_num_out[] = "num_out"; -static const char __pyx_k_boxes_dim[] = "boxes_dim"; -static const char 
__pyx_k_boxes_num[] = "boxes_num"; -static const char __pyx_k_device_id[] = "device_id"; -static const char __pyx_k_ImportError[] = "ImportError"; -static const char __pyx_k_nms_gpu_nms[] = "nms.gpu_nms"; -static const char __pyx_k_sorted_dets[] = "sorted_dets"; -static const char __pyx_k_nms_gpu_nms_pyx[] = "nms/gpu_nms.pyx"; -static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; -static const char __pyx_k_numpy_core_multiarray_failed_to[] = "numpy.core.multiarray failed to import"; -static const char __pyx_k_numpy_core_umath_failed_to_impor[] = "numpy.core.umath failed to import"; -static PyObject *__pyx_n_s_ImportError; -static PyObject *__pyx_n_s_argsort; -static PyObject *__pyx_n_s_boxes_dim; -static PyObject *__pyx_n_s_boxes_num; -static PyObject *__pyx_n_s_cline_in_traceback; -static PyObject *__pyx_n_s_dets; -static PyObject *__pyx_n_s_device_id; -static PyObject *__pyx_n_s_dtype; -static PyObject *__pyx_n_s_gpu_nms; -static PyObject *__pyx_n_s_import; -static PyObject *__pyx_n_s_int32; -static PyObject *__pyx_n_s_keep; -static PyObject *__pyx_n_s_main; -static PyObject *__pyx_n_s_name; -static PyObject *__pyx_n_s_nms_gpu_nms; -static PyObject *__pyx_kp_s_nms_gpu_nms_pyx; -static PyObject *__pyx_n_s_np; -static PyObject *__pyx_n_s_num_out; -static PyObject *__pyx_n_s_numpy; -static PyObject *__pyx_kp_s_numpy_core_multiarray_failed_to; -static PyObject *__pyx_kp_s_numpy_core_umath_failed_to_impor; -static PyObject *__pyx_n_s_order; -static PyObject *__pyx_n_s_scores; -static PyObject *__pyx_n_s_sorted_dets; -static PyObject *__pyx_n_s_test; -static PyObject *__pyx_n_s_thresh; -static PyObject *__pyx_n_s_zeros; -static PyObject *__pyx_pf_3nms_7gpu_nms_gpu_nms(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_dets, PyObject *__pyx_v_thresh, __pyx_t_5numpy_int32_t __pyx_v_device_id); /* proto */ -static PyObject *__pyx_int_4; -static PyObject *__pyx_int_neg_1; -static PyObject *__pyx_slice_; -static PyObject *__pyx_slice__3; -static 
PyObject *__pyx_tuple__2; -static PyObject *__pyx_tuple__4; -static PyObject *__pyx_tuple__5; -static PyObject *__pyx_tuple__6; -static PyObject *__pyx_codeobj__7; -/* Late includes */ - -/* "nms/gpu_nms.pyx":16 - * void _nms(np.int32_t*, int*, np.float32_t*, int, int, float, int) - * - * def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh, # <<<<<<<<<<<<<< - * np.int32_t device_id=0): - * cdef int boxes_num = dets.shape[0] - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_3nms_7gpu_nms_1gpu_nms(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static PyMethodDef __pyx_mdef_3nms_7gpu_nms_1gpu_nms = {"gpu_nms", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_3nms_7gpu_nms_1gpu_nms, METH_VARARGS|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_3nms_7gpu_nms_1gpu_nms(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyArrayObject *__pyx_v_dets = 0; - PyObject *__pyx_v_thresh = 0; - __pyx_t_5numpy_int32_t __pyx_v_device_id; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("gpu_nms (wrapper)", 0); - { - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_dets,&__pyx_n_s_thresh,&__pyx_n_s_device_id,0}; - PyObject* values[3] = {0,0,0}; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args; - const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); - switch (pos_args) { - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = PyDict_Size(__pyx_kwds); - switch (pos_args) { - case 0: - if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dets)) != 0)) kw_args--; - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: 
- if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_thresh)) != 0)) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("gpu_nms", 0, 2, 3, 1); __PYX_ERR(0, 16, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 2: - if (kw_args > 0) { - PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_device_id); - if (value) { values[2] = value; kw_args--; } - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gpu_nms") < 0)) __PYX_ERR(0, 16, __pyx_L3_error) - } - } else { - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - __pyx_v_dets = ((PyArrayObject *)values[0]); - __pyx_v_thresh = ((PyObject*)values[1]); - if (values[2]) { - __pyx_v_device_id = __Pyx_PyInt_As_npy_int32(values[2]); if (unlikely((__pyx_v_device_id == ((npy_int32)-1)) && PyErr_Occurred())) __PYX_ERR(0, 17, __pyx_L3_error) - } else { - __pyx_v_device_id = ((__pyx_t_5numpy_int32_t)0); - } - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("gpu_nms", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 16, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("nms.gpu_nms.gpu_nms", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_dets), __pyx_ptype_5numpy_ndarray, 1, "dets", 0))) __PYX_ERR(0, 16, __pyx_L1_error) - if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_thresh), (&PyFloat_Type), 1, "thresh", 1))) __PYX_ERR(0, 16, __pyx_L1_error) - __pyx_r = __pyx_pf_3nms_7gpu_nms_gpu_nms(__pyx_self, __pyx_v_dets, __pyx_v_thresh, __pyx_v_device_id); - - /* function exit code */ - goto __pyx_L0; - 
__pyx_L1_error:; - __pyx_r = NULL; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_3nms_7gpu_nms_gpu_nms(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_dets, PyObject *__pyx_v_thresh, __pyx_t_5numpy_int32_t __pyx_v_device_id) { - int __pyx_v_boxes_num; - int __pyx_v_boxes_dim; - int __pyx_v_num_out; - PyArrayObject *__pyx_v_keep = 0; - PyArrayObject *__pyx_v_scores = 0; - PyArrayObject *__pyx_v_order = 0; - PyArrayObject *__pyx_v_sorted_dets = 0; - __Pyx_LocalBuf_ND __pyx_pybuffernd_dets; - __Pyx_Buffer __pyx_pybuffer_dets; - __Pyx_LocalBuf_ND __pyx_pybuffernd_keep; - __Pyx_Buffer __pyx_pybuffer_keep; - __Pyx_LocalBuf_ND __pyx_pybuffernd_order; - __Pyx_Buffer __pyx_pybuffer_order; - __Pyx_LocalBuf_ND __pyx_pybuffernd_scores; - __Pyx_Buffer __pyx_pybuffer_scores; - __Pyx_LocalBuf_ND __pyx_pybuffernd_sorted_dets; - __Pyx_Buffer __pyx_pybuffer_sorted_dets; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - PyArrayObject *__pyx_t_6 = NULL; - PyArrayObject *__pyx_t_7 = NULL; - PyArrayObject *__pyx_t_8 = NULL; - PyArrayObject *__pyx_t_9 = NULL; - Py_ssize_t __pyx_t_10; - int __pyx_t_11; - Py_ssize_t __pyx_t_12; - Py_ssize_t __pyx_t_13; - float __pyx_t_14; - PyObject *__pyx_t_15 = NULL; - PyObject *__pyx_t_16 = NULL; - PyObject *__pyx_t_17 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("gpu_nms", 0); - __pyx_pybuffer_keep.pybuffer.buf = NULL; - __pyx_pybuffer_keep.refcount = 0; - __pyx_pybuffernd_keep.data = NULL; - __pyx_pybuffernd_keep.rcbuffer = &__pyx_pybuffer_keep; - __pyx_pybuffer_scores.pybuffer.buf = NULL; - __pyx_pybuffer_scores.refcount = 0; - __pyx_pybuffernd_scores.data = NULL; - __pyx_pybuffernd_scores.rcbuffer = &__pyx_pybuffer_scores; - 
__pyx_pybuffer_order.pybuffer.buf = NULL; - __pyx_pybuffer_order.refcount = 0; - __pyx_pybuffernd_order.data = NULL; - __pyx_pybuffernd_order.rcbuffer = &__pyx_pybuffer_order; - __pyx_pybuffer_sorted_dets.pybuffer.buf = NULL; - __pyx_pybuffer_sorted_dets.refcount = 0; - __pyx_pybuffernd_sorted_dets.data = NULL; - __pyx_pybuffernd_sorted_dets.rcbuffer = &__pyx_pybuffer_sorted_dets; - __pyx_pybuffer_dets.pybuffer.buf = NULL; - __pyx_pybuffer_dets.refcount = 0; - __pyx_pybuffernd_dets.data = NULL; - __pyx_pybuffernd_dets.rcbuffer = &__pyx_pybuffer_dets; - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_dets.rcbuffer->pybuffer, (PyObject*)__pyx_v_dets, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float32_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 16, __pyx_L1_error) - } - __pyx_pybuffernd_dets.diminfo[0].strides = __pyx_pybuffernd_dets.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_dets.diminfo[0].shape = __pyx_pybuffernd_dets.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_dets.diminfo[1].strides = __pyx_pybuffernd_dets.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_dets.diminfo[1].shape = __pyx_pybuffernd_dets.rcbuffer->pybuffer.shape[1]; - - /* "nms/gpu_nms.pyx":18 - * def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh, - * np.int32_t device_id=0): - * cdef int boxes_num = dets.shape[0] # <<<<<<<<<<<<<< - * cdef int boxes_dim = dets.shape[1] - * cdef int num_out - */ - __pyx_v_boxes_num = (__pyx_v_dets->dimensions[0]); - - /* "nms/gpu_nms.pyx":19 - * np.int32_t device_id=0): - * cdef int boxes_num = dets.shape[0] - * cdef int boxes_dim = dets.shape[1] # <<<<<<<<<<<<<< - * cdef int num_out - * cdef np.ndarray[np.int32_t, ndim=1] \ - */ - __pyx_v_boxes_dim = (__pyx_v_dets->dimensions[1]); - - /* "nms/gpu_nms.pyx":22 - * cdef int num_out - * cdef np.ndarray[np.int32_t, ndim=1] \ - * keep = np.zeros(boxes_num, dtype=np.int32) # <<<<<<<<<<<<<< - * cdef np.ndarray[np.float32_t, 
ndim=1] \ - * scores = dets[:, 4] - */ - __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 22, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_boxes_num); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); - __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 22, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_int32); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 22, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 22, __pyx_L1_error) - __pyx_t_6 = ((PyArrayObject *)__pyx_t_5); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_keep.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int32_t, 
PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { - __pyx_v_keep = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_keep.rcbuffer->pybuffer.buf = NULL; - __PYX_ERR(0, 21, __pyx_L1_error) - } else {__pyx_pybuffernd_keep.diminfo[0].strides = __pyx_pybuffernd_keep.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_keep.diminfo[0].shape = __pyx_pybuffernd_keep.rcbuffer->pybuffer.shape[0]; - } - } - __pyx_t_6 = 0; - __pyx_v_keep = ((PyArrayObject *)__pyx_t_5); - __pyx_t_5 = 0; - - /* "nms/gpu_nms.pyx":24 - * keep = np.zeros(boxes_num, dtype=np.int32) - * cdef np.ndarray[np.float32_t, ndim=1] \ - * scores = dets[:, 4] # <<<<<<<<<<<<<< - * cdef np.ndarray[np.int_t, ndim=1] \ - * order = scores.argsort()[::-1] - */ - __pyx_t_5 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_dets), __pyx_tuple__2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 24, __pyx_L1_error) - __pyx_t_7 = ((PyArrayObject *)__pyx_t_5); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_scores.rcbuffer->pybuffer, (PyObject*)__pyx_t_7, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float32_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { - __pyx_v_scores = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_scores.rcbuffer->pybuffer.buf = NULL; - __PYX_ERR(0, 23, __pyx_L1_error) - } else {__pyx_pybuffernd_scores.diminfo[0].strides = __pyx_pybuffernd_scores.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_scores.diminfo[0].shape = __pyx_pybuffernd_scores.rcbuffer->pybuffer.shape[0]; - } - } - __pyx_t_7 = 0; - __pyx_v_scores = ((PyArrayObject *)__pyx_t_5); - __pyx_t_5 = 0; - - /* "nms/gpu_nms.pyx":26 - * scores = dets[:, 4] - * cdef np.ndarray[np.int_t, ndim=1] \ - * order = scores.argsort()[::-1] # <<<<<<<<<<<<<< - * cdef np.ndarray[np.float32_t, ndim=2] \ - * 
sorted_dets = dets[order, :] - */ - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_scores), __pyx_n_s_argsort); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = NULL; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_1); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_1, function); - } - } - __pyx_t_5 = (__pyx_t_3) ? __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_3) : __Pyx_PyObject_CallNoArg(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_slice__3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 26, __pyx_L1_error) - __pyx_t_8 = ((PyArrayObject *)__pyx_t_1); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_order.rcbuffer->pybuffer, (PyObject*)__pyx_t_8, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { - __pyx_v_order = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_order.rcbuffer->pybuffer.buf = NULL; - __PYX_ERR(0, 25, __pyx_L1_error) - } else {__pyx_pybuffernd_order.diminfo[0].strides = __pyx_pybuffernd_order.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_order.diminfo[0].shape = __pyx_pybuffernd_order.rcbuffer->pybuffer.shape[0]; - } - } - __pyx_t_8 = 0; - __pyx_v_order = ((PyArrayObject *)__pyx_t_1); - __pyx_t_1 = 0; - - /* "nms/gpu_nms.pyx":28 - * order = scores.argsort()[::-1] - * cdef np.ndarray[np.float32_t, 
ndim=2] \ - * sorted_dets = dets[order, :] # <<<<<<<<<<<<<< - * _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id) - * keep = keep[:num_out] - */ - __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 28, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_INCREF(((PyObject *)__pyx_v_order)); - __Pyx_GIVEREF(((PyObject *)__pyx_v_order)); - PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_v_order)); - __Pyx_INCREF(__pyx_slice_); - __Pyx_GIVEREF(__pyx_slice_); - PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_slice_); - __pyx_t_5 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_dets), __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 28, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 28, __pyx_L1_error) - __pyx_t_9 = ((PyArrayObject *)__pyx_t_5); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer, (PyObject*)__pyx_t_9, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float32_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { - __pyx_v_sorted_dets = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer.buf = NULL; - __PYX_ERR(0, 27, __pyx_L1_error) - } else {__pyx_pybuffernd_sorted_dets.diminfo[0].strides = __pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_sorted_dets.diminfo[0].shape = __pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_sorted_dets.diminfo[1].strides = __pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_sorted_dets.diminfo[1].shape = __pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer.shape[1]; - } - } - __pyx_t_9 = 0; - __pyx_v_sorted_dets = ((PyArrayObject *)__pyx_t_5); - __pyx_t_5 = 0; - - /* "nms/gpu_nms.pyx":29 - * cdef np.ndarray[np.float32_t, ndim=2] \ - * 
sorted_dets = dets[order, :] - * _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id) # <<<<<<<<<<<<<< - * keep = keep[:num_out] - * return list(order[keep]) - */ - __pyx_t_10 = 0; - __pyx_t_11 = -1; - if (__pyx_t_10 < 0) { - __pyx_t_10 += __pyx_pybuffernd_keep.diminfo[0].shape; - if (unlikely(__pyx_t_10 < 0)) __pyx_t_11 = 0; - } else if (unlikely(__pyx_t_10 >= __pyx_pybuffernd_keep.diminfo[0].shape)) __pyx_t_11 = 0; - if (unlikely(__pyx_t_11 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_11); - __PYX_ERR(0, 29, __pyx_L1_error) - } - __pyx_t_12 = 0; - __pyx_t_13 = 0; - __pyx_t_11 = -1; - if (__pyx_t_12 < 0) { - __pyx_t_12 += __pyx_pybuffernd_sorted_dets.diminfo[0].shape; - if (unlikely(__pyx_t_12 < 0)) __pyx_t_11 = 0; - } else if (unlikely(__pyx_t_12 >= __pyx_pybuffernd_sorted_dets.diminfo[0].shape)) __pyx_t_11 = 0; - if (__pyx_t_13 < 0) { - __pyx_t_13 += __pyx_pybuffernd_sorted_dets.diminfo[1].shape; - if (unlikely(__pyx_t_13 < 0)) __pyx_t_11 = 1; - } else if (unlikely(__pyx_t_13 >= __pyx_pybuffernd_sorted_dets.diminfo[1].shape)) __pyx_t_11 = 1; - if (unlikely(__pyx_t_11 != -1)) { - __Pyx_RaiseBufferIndexError(__pyx_t_11); - __PYX_ERR(0, 29, __pyx_L1_error) - } - __pyx_t_14 = __pyx_PyFloat_AsFloat(__pyx_v_thresh); if (unlikely((__pyx_t_14 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 29, __pyx_L1_error) - _nms((&(*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_int32_t *, __pyx_pybuffernd_keep.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_keep.diminfo[0].strides))), (&__pyx_v_num_out), (&(*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float32_t *, __pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_sorted_dets.diminfo[0].strides, __pyx_t_13, __pyx_pybuffernd_sorted_dets.diminfo[1].strides))), __pyx_v_boxes_num, __pyx_v_boxes_dim, __pyx_t_14, __pyx_v_device_id); - - /* "nms/gpu_nms.pyx":30 - * sorted_dets = dets[order, :] - * _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id) - 
* keep = keep[:num_out] # <<<<<<<<<<<<<< - * return list(order[keep]) - */ - __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_num_out); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 30, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_1 = PySlice_New(Py_None, __pyx_t_5, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 30, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_keep), __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 30, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 30, __pyx_L1_error) - __pyx_t_6 = ((PyArrayObject *)__pyx_t_5); - { - __Pyx_BufFmt_StackElem __pyx_stack[1]; - __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_keep.rcbuffer->pybuffer); - __pyx_t_11 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_keep.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int32_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); - if (unlikely(__pyx_t_11 < 0)) { - PyErr_Fetch(&__pyx_t_15, &__pyx_t_16, &__pyx_t_17); - if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_keep.rcbuffer->pybuffer, (PyObject*)__pyx_v_keep, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int32_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { - Py_XDECREF(__pyx_t_15); Py_XDECREF(__pyx_t_16); Py_XDECREF(__pyx_t_17); - __Pyx_RaiseBufferFallbackError(); - } else { - PyErr_Restore(__pyx_t_15, __pyx_t_16, __pyx_t_17); - } - __pyx_t_15 = __pyx_t_16 = __pyx_t_17 = 0; - } - __pyx_pybuffernd_keep.diminfo[0].strides = __pyx_pybuffernd_keep.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_keep.diminfo[0].shape = __pyx_pybuffernd_keep.rcbuffer->pybuffer.shape[0]; - if (unlikely(__pyx_t_11 < 0)) __PYX_ERR(0, 30, __pyx_L1_error) - } - __pyx_t_6 = 0; - __Pyx_DECREF_SET(__pyx_v_keep, ((PyArrayObject *)__pyx_t_5)); - __pyx_t_5 = 0; - - /* 
"nms/gpu_nms.pyx":31 - * _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id) - * keep = keep[:num_out] - * return list(order[keep]) # <<<<<<<<<<<<<< - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_5 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_order), ((PyObject *)__pyx_v_keep)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 31, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_1 = PySequence_List(__pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 31, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "nms/gpu_nms.pyx":16 - * void _nms(np.int32_t*, int*, np.float32_t*, int, int, float, int) - * - * def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh, # <<<<<<<<<<<<<< - * np.int32_t device_id=0): - * cdef int boxes_num = dets.shape[0] - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); - __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_dets.rcbuffer->pybuffer); - __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_keep.rcbuffer->pybuffer); - __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_order.rcbuffer->pybuffer); - __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_scores.rcbuffer->pybuffer); - __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer); - __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} - __Pyx_AddTraceback("nms.gpu_nms.gpu_nms", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - goto __pyx_L2; - __pyx_L0:; - __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_dets.rcbuffer->pybuffer); - __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_keep.rcbuffer->pybuffer); - 
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_order.rcbuffer->pybuffer); - __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_scores.rcbuffer->pybuffer); - __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer); - __pyx_L2:; - __Pyx_XDECREF((PyObject *)__pyx_v_keep); - __Pyx_XDECREF((PyObject *)__pyx_v_scores); - __Pyx_XDECREF((PyObject *)__pyx_v_order); - __Pyx_XDECREF((PyObject *)__pyx_v_sorted_dets); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":734 - * ctypedef npy_cdouble complex_t - * - * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(1, a) - * - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":735 - * - * cdef inline object PyArray_MultiIterNew1(a): - * return PyArray_MultiIterNew(1, a) # <<<<<<<<<<<<<< - * - * cdef inline object PyArray_MultiIterNew2(a, b): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 735, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":734 - * ctypedef npy_cdouble complex_t - * - * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(1, a) - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - 
__pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":737 - * return PyArray_MultiIterNew(1, a) - * - * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(2, a, b) - * - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":738 - * - * cdef inline object PyArray_MultiIterNew2(a, b): - * return PyArray_MultiIterNew(2, a, b) # <<<<<<<<<<<<<< - * - * cdef inline object PyArray_MultiIterNew3(a, b, c): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 738, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":737 - * return PyArray_MultiIterNew(1, a) - * - * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(2, a, b) - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":740 - * return PyArray_MultiIterNew(2, a, b) - * - * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(3, 
a, b, c) - * - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":741 - * - * cdef inline object PyArray_MultiIterNew3(a, b, c): - * return PyArray_MultiIterNew(3, a, b, c) # <<<<<<<<<<<<<< - * - * cdef inline object PyArray_MultiIterNew4(a, b, c, d): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 741, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":740 - * return PyArray_MultiIterNew(2, a, b) - * - * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(3, a, b, c) - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":743 - * return PyArray_MultiIterNew(3, a, b, c) - * - * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(4, a, b, c, d) - * - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = 
NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":744 - * - * cdef inline object PyArray_MultiIterNew4(a, b, c, d): - * return PyArray_MultiIterNew(4, a, b, c, d) # <<<<<<<<<<<<<< - * - * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 744, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":743 - * return PyArray_MultiIterNew(3, a, b, c) - * - * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(4, a, b, c, d) - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":746 - * return PyArray_MultiIterNew(4, a, b, c, d) - * - * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(5, a, b, c, d, e) - * - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); - - /* 
"../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":747 - * - * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): - * return PyArray_MultiIterNew(5, a, b, c, d, e) # <<<<<<<<<<<<<< - * - * cdef inline tuple PyDataType_SHAPE(dtype d): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 747, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":746 - * return PyArray_MultiIterNew(4, a, b, c, d) - * - * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< - * return PyArray_MultiIterNew(5, a, b, c, d, e) - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":749 - * return PyArray_MultiIterNew(5, a, b, c, d, e) - * - * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<< - * if PyDataType_HASSUBARRAY(d): - * return d.subarray.shape - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyDataType_SHAPE(PyArray_Descr *__pyx_v_d) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - __Pyx_RefNannySetupContext("PyDataType_SHAPE", 0); - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":750 - * - * cdef inline tuple PyDataType_SHAPE(dtype d): - * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<< - * return d.subarray.shape - * else: - */ - __pyx_t_1 = (PyDataType_HASSUBARRAY(__pyx_v_d) != 0); - if (__pyx_t_1) { - - /* 
"../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":751 - * cdef inline tuple PyDataType_SHAPE(dtype d): - * if PyDataType_HASSUBARRAY(d): - * return d.subarray.shape # <<<<<<<<<<<<<< - * else: - * return () - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(((PyObject*)__pyx_v_d->subarray->shape)); - __pyx_r = ((PyObject*)__pyx_v_d->subarray->shape); - goto __pyx_L0; - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":750 - * - * cdef inline tuple PyDataType_SHAPE(dtype d): - * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<< - * return d.subarray.shape - * else: - */ - } - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":753 - * return d.subarray.shape - * else: - * return () # <<<<<<<<<<<<<< - * - * - */ - /*else*/ { - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_empty_tuple); - __pyx_r = __pyx_empty_tuple; - goto __pyx_L0; - } - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":749 - * return PyArray_MultiIterNew(5, a, b, c, d, e) - * - * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<< - * if PyDataType_HASSUBARRAY(d): - * return d.subarray.shape - */ - - /* function exit code */ - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":868 - * int _import_umath() except -1 - * - * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< - * Py_INCREF(base) # important to do this before stealing the reference below! 
- * PyArray_SetBaseObject(arr, base) - */ - -static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("set_array_base", 0); - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":869 - * - * cdef inline void set_array_base(ndarray arr, object base): - * Py_INCREF(base) # important to do this before stealing the reference below! # <<<<<<<<<<<<<< - * PyArray_SetBaseObject(arr, base) - * - */ - Py_INCREF(__pyx_v_base); - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":870 - * cdef inline void set_array_base(ndarray arr, object base): - * Py_INCREF(base) # important to do this before stealing the reference below! - * PyArray_SetBaseObject(arr, base) # <<<<<<<<<<<<<< - * - * cdef inline object get_array_base(ndarray arr): - */ - (void)(PyArray_SetBaseObject(__pyx_v_arr, __pyx_v_base)); - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":868 - * int _import_umath() except -1 - * - * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< - * Py_INCREF(base) # important to do this before stealing the reference below! 
- * PyArray_SetBaseObject(arr, base) - */ - - /* function exit code */ - __Pyx_RefNannyFinishContext(); -} - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":872 - * PyArray_SetBaseObject(arr, base) - * - * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< - * base = PyArray_BASE(arr) - * if base is NULL: - */ - -static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { - PyObject *__pyx_v_base; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - __Pyx_RefNannySetupContext("get_array_base", 0); - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":873 - * - * cdef inline object get_array_base(ndarray arr): - * base = PyArray_BASE(arr) # <<<<<<<<<<<<<< - * if base is NULL: - * return None - */ - __pyx_v_base = PyArray_BASE(__pyx_v_arr); - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":874 - * cdef inline object get_array_base(ndarray arr): - * base = PyArray_BASE(arr) - * if base is NULL: # <<<<<<<<<<<<<< - * return None - * return base - */ - __pyx_t_1 = ((__pyx_v_base == NULL) != 0); - if (__pyx_t_1) { - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":875 - * base = PyArray_BASE(arr) - * if base is NULL: - * return None # <<<<<<<<<<<<<< - * return base - * - */ - __Pyx_XDECREF(__pyx_r); - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":874 - * cdef inline object get_array_base(ndarray arr): - * base = PyArray_BASE(arr) - * if base is NULL: # <<<<<<<<<<<<<< - * return None - * return base - */ - } - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":876 - * if base is NULL: - * return None - * return base # <<<<<<<<<<<<<< - * - * # Versions of the import_* functions which are more suitable for - */ - 
__Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(((PyObject *)__pyx_v_base)); - __pyx_r = ((PyObject *)__pyx_v_base); - goto __pyx_L0; - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":872 - * PyArray_SetBaseObject(arr, base) - * - * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< - * base = PyArray_BASE(arr) - * if base is NULL: - */ - - /* function exit code */ - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":880 - * # Versions of the import_* functions which are more suitable for - * # Cython code. - * cdef inline int import_array() except -1: # <<<<<<<<<<<<<< - * try: - * __pyx_import_array() - */ - -static CYTHON_INLINE int __pyx_f_5numpy_import_array(void) { - int __pyx_r; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - PyObject *__pyx_t_7 = NULL; - PyObject *__pyx_t_8 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("import_array", 0); - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":881 - * # Cython code. 
- * cdef inline int import_array() except -1: - * try: # <<<<<<<<<<<<<< - * __pyx_import_array() - * except Exception: - */ - { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); - __Pyx_XGOTREF(__pyx_t_1); - __Pyx_XGOTREF(__pyx_t_2); - __Pyx_XGOTREF(__pyx_t_3); - /*try:*/ { - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":882 - * cdef inline int import_array() except -1: - * try: - * __pyx_import_array() # <<<<<<<<<<<<<< - * except Exception: - * raise ImportError("numpy.core.multiarray failed to import") - */ - __pyx_t_4 = _import_array(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 882, __pyx_L3_error) - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":881 - * # Cython code. - * cdef inline int import_array() except -1: - * try: # <<<<<<<<<<<<<< - * __pyx_import_array() - * except Exception: - */ - } - __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - goto __pyx_L8_try_end; - __pyx_L3_error:; - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":883 - * try: - * __pyx_import_array() - * except Exception: # <<<<<<<<<<<<<< - * raise ImportError("numpy.core.multiarray failed to import") - * - */ - __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); - if (__pyx_t_4) { - __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename); - if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 883, __pyx_L5_except_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_GOTREF(__pyx_t_6); - __Pyx_GOTREF(__pyx_t_7); - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":884 - * __pyx_import_array() - * except Exception: - * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< - 
* - * cdef inline int import_umath() except -1: - */ - __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 884, __pyx_L5_except_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_Raise(__pyx_t_8, 0, 0, 0); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __PYX_ERR(1, 884, __pyx_L5_except_error) - } - goto __pyx_L5_except_error; - __pyx_L5_except_error:; - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":881 - * # Cython code. - * cdef inline int import_array() except -1: - * try: # <<<<<<<<<<<<<< - * __pyx_import_array() - * except Exception: - */ - __Pyx_XGIVEREF(__pyx_t_1); - __Pyx_XGIVEREF(__pyx_t_2); - __Pyx_XGIVEREF(__pyx_t_3); - __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); - goto __pyx_L1_error; - __pyx_L8_try_end:; - } - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":880 - * # Versions of the import_* functions which are more suitable for - * # Cython code. 
- * cdef inline int import_array() except -1: # <<<<<<<<<<<<<< - * try: - * __pyx_import_array() - */ - - /* function exit code */ - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_XDECREF(__pyx_t_8); - __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":886 - * raise ImportError("numpy.core.multiarray failed to import") - * - * cdef inline int import_umath() except -1: # <<<<<<<<<<<<<< - * try: - * _import_umath() - */ - -static CYTHON_INLINE int __pyx_f_5numpy_import_umath(void) { - int __pyx_r; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - PyObject *__pyx_t_7 = NULL; - PyObject *__pyx_t_8 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("import_umath", 0); - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":887 - * - * cdef inline int import_umath() except -1: - * try: # <<<<<<<<<<<<<< - * _import_umath() - * except Exception: - */ - { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); - __Pyx_XGOTREF(__pyx_t_1); - __Pyx_XGOTREF(__pyx_t_2); - __Pyx_XGOTREF(__pyx_t_3); - /*try:*/ { - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":888 - * cdef inline int import_umath() except -1: - * try: - * _import_umath() # <<<<<<<<<<<<<< - * except Exception: - * raise ImportError("numpy.core.umath failed to import") - */ - __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 888, 
__pyx_L3_error) - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":887 - * - * cdef inline int import_umath() except -1: - * try: # <<<<<<<<<<<<<< - * _import_umath() - * except Exception: - */ - } - __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - goto __pyx_L8_try_end; - __pyx_L3_error:; - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":889 - * try: - * _import_umath() - * except Exception: # <<<<<<<<<<<<<< - * raise ImportError("numpy.core.umath failed to import") - * - */ - __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); - if (__pyx_t_4) { - __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename); - if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 889, __pyx_L5_except_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_GOTREF(__pyx_t_6); - __Pyx_GOTREF(__pyx_t_7); - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":890 - * _import_umath() - * except Exception: - * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< - * - * cdef inline int import_ufunc() except -1: - */ - __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 890, __pyx_L5_except_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_Raise(__pyx_t_8, 0, 0, 0); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __PYX_ERR(1, 890, __pyx_L5_except_error) - } - goto __pyx_L5_except_error; - __pyx_L5_except_error:; - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":887 - * - * cdef inline int import_umath() except -1: - * try: # <<<<<<<<<<<<<< - * _import_umath() - * except Exception: - */ - __Pyx_XGIVEREF(__pyx_t_1); - __Pyx_XGIVEREF(__pyx_t_2); - __Pyx_XGIVEREF(__pyx_t_3); - __Pyx_ExceptionReset(__pyx_t_1, 
__pyx_t_2, __pyx_t_3); - goto __pyx_L1_error; - __pyx_L8_try_end:; - } - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":886 - * raise ImportError("numpy.core.multiarray failed to import") - * - * cdef inline int import_umath() except -1: # <<<<<<<<<<<<<< - * try: - * _import_umath() - */ - - /* function exit code */ - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_XDECREF(__pyx_t_8); - __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":892 - * raise ImportError("numpy.core.umath failed to import") - * - * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< - * try: - * _import_umath() - */ - -static CYTHON_INLINE int __pyx_f_5numpy_import_ufunc(void) { - int __pyx_r; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - PyObject *__pyx_t_7 = NULL; - PyObject *__pyx_t_8 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("import_ufunc", 0); - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":893 - * - * cdef inline int import_ufunc() except -1: - * try: # <<<<<<<<<<<<<< - * _import_umath() - * except Exception: - */ - { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); - __Pyx_XGOTREF(__pyx_t_1); - __Pyx_XGOTREF(__pyx_t_2); - __Pyx_XGOTREF(__pyx_t_3); - /*try:*/ { - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":894 - * cdef inline int import_ufunc() except -1: - * 
try: - * _import_umath() # <<<<<<<<<<<<<< - * except Exception: - * raise ImportError("numpy.core.umath failed to import") - */ - __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 894, __pyx_L3_error) - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":893 - * - * cdef inline int import_ufunc() except -1: - * try: # <<<<<<<<<<<<<< - * _import_umath() - * except Exception: - */ - } - __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - goto __pyx_L8_try_end; - __pyx_L3_error:; - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":895 - * try: - * _import_umath() - * except Exception: # <<<<<<<<<<<<<< - * raise ImportError("numpy.core.umath failed to import") - * - */ - __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); - if (__pyx_t_4) { - __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); - if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 895, __pyx_L5_except_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_GOTREF(__pyx_t_6); - __Pyx_GOTREF(__pyx_t_7); - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":896 - * _import_umath() - * except Exception: - * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< - * - * cdef extern from *: - */ - __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 896, __pyx_L5_except_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_Raise(__pyx_t_8, 0, 0, 0); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __PYX_ERR(1, 896, __pyx_L5_except_error) - } - goto __pyx_L5_except_error; - __pyx_L5_except_error:; - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":893 - * - * cdef inline int import_ufunc() except -1: - * 
try: # <<<<<<<<<<<<<< - * _import_umath() - * except Exception: - */ - __Pyx_XGIVEREF(__pyx_t_1); - __Pyx_XGIVEREF(__pyx_t_2); - __Pyx_XGIVEREF(__pyx_t_3); - __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); - goto __pyx_L1_error; - __pyx_L8_try_end:; - } - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":892 - * raise ImportError("numpy.core.umath failed to import") - * - * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< - * try: - * _import_umath() - */ - - /* function exit code */ - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_XDECREF(__pyx_t_8); - __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyMethodDef __pyx_methods[] = { - {0, 0, 0, 0} -}; - -#if PY_MAJOR_VERSION >= 3 -#if CYTHON_PEP489_MULTI_PHASE_INIT -static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ -static int __pyx_pymod_exec_gpu_nms(PyObject* module); /*proto*/ -static PyModuleDef_Slot __pyx_moduledef_slots[] = { - {Py_mod_create, (void*)__pyx_pymod_create}, - {Py_mod_exec, (void*)__pyx_pymod_exec_gpu_nms}, - {0, NULL} -}; -#endif - -static struct PyModuleDef __pyx_moduledef = { - PyModuleDef_HEAD_INIT, - "gpu_nms", - 0, /* m_doc */ - #if CYTHON_PEP489_MULTI_PHASE_INIT - 0, /* m_size */ - #else - -1, /* m_size */ - #endif - __pyx_methods /* m_methods */, - #if CYTHON_PEP489_MULTI_PHASE_INIT - __pyx_moduledef_slots, /* m_slots */ - #else - NULL, /* m_reload */ - #endif - NULL, /* m_traverse */ - NULL, /* m_clear */ - NULL /* m_free */ -}; -#endif -#ifndef CYTHON_SMALL_CODE -#if defined(__clang__) - #define CYTHON_SMALL_CODE -#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) - #define CYTHON_SMALL_CODE __attribute__((cold)) -#else - #define CYTHON_SMALL_CODE 
-#endif -#endif - -static __Pyx_StringTabEntry __pyx_string_tab[] = { - {&__pyx_n_s_ImportError, __pyx_k_ImportError, sizeof(__pyx_k_ImportError), 0, 0, 1, 1}, - {&__pyx_n_s_argsort, __pyx_k_argsort, sizeof(__pyx_k_argsort), 0, 0, 1, 1}, - {&__pyx_n_s_boxes_dim, __pyx_k_boxes_dim, sizeof(__pyx_k_boxes_dim), 0, 0, 1, 1}, - {&__pyx_n_s_boxes_num, __pyx_k_boxes_num, sizeof(__pyx_k_boxes_num), 0, 0, 1, 1}, - {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, - {&__pyx_n_s_dets, __pyx_k_dets, sizeof(__pyx_k_dets), 0, 0, 1, 1}, - {&__pyx_n_s_device_id, __pyx_k_device_id, sizeof(__pyx_k_device_id), 0, 0, 1, 1}, - {&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1}, - {&__pyx_n_s_gpu_nms, __pyx_k_gpu_nms, sizeof(__pyx_k_gpu_nms), 0, 0, 1, 1}, - {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, - {&__pyx_n_s_int32, __pyx_k_int32, sizeof(__pyx_k_int32), 0, 0, 1, 1}, - {&__pyx_n_s_keep, __pyx_k_keep, sizeof(__pyx_k_keep), 0, 0, 1, 1}, - {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, - {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, - {&__pyx_n_s_nms_gpu_nms, __pyx_k_nms_gpu_nms, sizeof(__pyx_k_nms_gpu_nms), 0, 0, 1, 1}, - {&__pyx_kp_s_nms_gpu_nms_pyx, __pyx_k_nms_gpu_nms_pyx, sizeof(__pyx_k_nms_gpu_nms_pyx), 0, 0, 1, 0}, - {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1}, - {&__pyx_n_s_num_out, __pyx_k_num_out, sizeof(__pyx_k_num_out), 0, 0, 1, 1}, - {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, - {&__pyx_kp_s_numpy_core_multiarray_failed_to, __pyx_k_numpy_core_multiarray_failed_to, sizeof(__pyx_k_numpy_core_multiarray_failed_to), 0, 0, 1, 0}, - {&__pyx_kp_s_numpy_core_umath_failed_to_impor, __pyx_k_numpy_core_umath_failed_to_impor, sizeof(__pyx_k_numpy_core_umath_failed_to_impor), 0, 0, 1, 0}, - {&__pyx_n_s_order, __pyx_k_order, sizeof(__pyx_k_order), 0, 0, 1, 1}, - {&__pyx_n_s_scores, __pyx_k_scores, 
sizeof(__pyx_k_scores), 0, 0, 1, 1}, - {&__pyx_n_s_sorted_dets, __pyx_k_sorted_dets, sizeof(__pyx_k_sorted_dets), 0, 0, 1, 1}, - {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, - {&__pyx_n_s_thresh, __pyx_k_thresh, sizeof(__pyx_k_thresh), 0, 0, 1, 1}, - {&__pyx_n_s_zeros, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1}, - {0, 0, 0, 0, 0, 0, 0} -}; -static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { - __pyx_builtin_ImportError = __Pyx_GetBuiltinName(__pyx_n_s_ImportError); if (!__pyx_builtin_ImportError) __PYX_ERR(1, 884, __pyx_L1_error) - return 0; - __pyx_L1_error:; - return -1; -} - -static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); - - /* "nms/gpu_nms.pyx":24 - * keep = np.zeros(boxes_num, dtype=np.int32) - * cdef np.ndarray[np.float32_t, ndim=1] \ - * scores = dets[:, 4] # <<<<<<<<<<<<<< - * cdef np.ndarray[np.int_t, ndim=1] \ - * order = scores.argsort()[::-1] - */ - __pyx_slice_ = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice_)) __PYX_ERR(0, 24, __pyx_L1_error) - __Pyx_GOTREF(__pyx_slice_); - __Pyx_GIVEREF(__pyx_slice_); - __pyx_tuple__2 = PyTuple_Pack(2, __pyx_slice_, __pyx_int_4); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(0, 24, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__2); - __Pyx_GIVEREF(__pyx_tuple__2); - - /* "nms/gpu_nms.pyx":26 - * scores = dets[:, 4] - * cdef np.ndarray[np.int_t, ndim=1] \ - * order = scores.argsort()[::-1] # <<<<<<<<<<<<<< - * cdef np.ndarray[np.float32_t, ndim=2] \ - * sorted_dets = dets[order, :] - */ - __pyx_slice__3 = PySlice_New(Py_None, Py_None, __pyx_int_neg_1); if (unlikely(!__pyx_slice__3)) __PYX_ERR(0, 26, __pyx_L1_error) - __Pyx_GOTREF(__pyx_slice__3); - __Pyx_GIVEREF(__pyx_slice__3); - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":884 - * __pyx_import_array() - * except Exception: - * raise ImportError("numpy.core.multiarray 
failed to import") # <<<<<<<<<<<<<< - * - * cdef inline int import_umath() except -1: - */ - __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_multiarray_failed_to); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 884, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__4); - __Pyx_GIVEREF(__pyx_tuple__4); - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":890 - * _import_umath() - * except Exception: - * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< - * - * cdef inline int import_ufunc() except -1: - */ - __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 890, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__5); - __Pyx_GIVEREF(__pyx_tuple__5); - - /* "nms/gpu_nms.pyx":16 - * void _nms(np.int32_t*, int*, np.float32_t*, int, int, float, int) - * - * def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh, # <<<<<<<<<<<<<< - * np.int32_t device_id=0): - * cdef int boxes_num = dets.shape[0] - */ - __pyx_tuple__6 = PyTuple_Pack(10, __pyx_n_s_dets, __pyx_n_s_thresh, __pyx_n_s_device_id, __pyx_n_s_boxes_num, __pyx_n_s_boxes_dim, __pyx_n_s_num_out, __pyx_n_s_keep, __pyx_n_s_scores, __pyx_n_s_order, __pyx_n_s_sorted_dets); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(0, 16, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__6); - __Pyx_GIVEREF(__pyx_tuple__6); - __pyx_codeobj__7 = (PyObject*)__Pyx_PyCode_New(3, 0, 10, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__6, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_nms_gpu_nms_pyx, __pyx_n_s_gpu_nms, 16, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__7)) __PYX_ERR(0, 16, __pyx_L1_error) - __Pyx_RefNannyFinishContext(); - return 0; - __pyx_L1_error:; - __Pyx_RefNannyFinishContext(); - return -1; -} - -static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { - if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - 
__pyx_int_4 = PyInt_FromLong(4); if (unlikely(!__pyx_int_4)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error) - return 0; - __pyx_L1_error:; - return -1; -} - -static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ - -static int __Pyx_modinit_global_init_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); - /*--- Global init code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_variable_export_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); - /*--- Variable export code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_function_export_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); - /*--- Function export code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_type_init_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); - /*--- Type init code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_type_import_code(void) { - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 
0); - /*--- Type import code ---*/ - __pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 9, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "type", - #if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000 - sizeof(PyTypeObject), - #else - sizeof(PyHeapTypeObject), - #endif - __Pyx_ImportType_CheckSize_Warn); - if (!__pyx_ptype_7cpython_4type_type) __PYX_ERR(2, 9, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyImport_ImportModule("numpy"); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 199, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_ptype_5numpy_dtype = __Pyx_ImportType(__pyx_t_1, "numpy", "dtype", sizeof(PyArray_Descr), __Pyx_ImportType_CheckSize_Ignore); - if (!__pyx_ptype_5numpy_dtype) __PYX_ERR(1, 199, __pyx_L1_error) - __pyx_ptype_5numpy_flatiter = __Pyx_ImportType(__pyx_t_1, "numpy", "flatiter", sizeof(PyArrayIterObject), __Pyx_ImportType_CheckSize_Ignore); - if (!__pyx_ptype_5numpy_flatiter) __PYX_ERR(1, 222, __pyx_L1_error) - __pyx_ptype_5numpy_broadcast = __Pyx_ImportType(__pyx_t_1, "numpy", "broadcast", sizeof(PyArrayMultiIterObject), __Pyx_ImportType_CheckSize_Ignore); - if (!__pyx_ptype_5numpy_broadcast) __PYX_ERR(1, 226, __pyx_L1_error) - __pyx_ptype_5numpy_ndarray = __Pyx_ImportType(__pyx_t_1, "numpy", "ndarray", sizeof(PyArrayObject), __Pyx_ImportType_CheckSize_Ignore); - if (!__pyx_ptype_5numpy_ndarray) __PYX_ERR(1, 238, __pyx_L1_error) - __pyx_ptype_5numpy_ufunc = __Pyx_ImportType(__pyx_t_1, "numpy", "ufunc", sizeof(PyUFuncObject), __Pyx_ImportType_CheckSize_Ignore); - if (!__pyx_ptype_5numpy_ufunc) __PYX_ERR(1, 764, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_RefNannyFinishContext(); - return 0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_RefNannyFinishContext(); - return -1; -} - -static int __Pyx_modinit_variable_import_code(void) { - 
__Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); - /*--- Variable import code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_function_import_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); - /*--- Function import code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - - -#ifndef CYTHON_NO_PYINIT_EXPORT -#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC -#elif PY_MAJOR_VERSION < 3 -#ifdef __cplusplus -#define __Pyx_PyMODINIT_FUNC extern "C" void -#else -#define __Pyx_PyMODINIT_FUNC void -#endif -#else -#ifdef __cplusplus -#define __Pyx_PyMODINIT_FUNC extern "C" PyObject * -#else -#define __Pyx_PyMODINIT_FUNC PyObject * -#endif -#endif - - -#if PY_MAJOR_VERSION < 3 -__Pyx_PyMODINIT_FUNC initgpu_nms(void) CYTHON_SMALL_CODE; /*proto*/ -__Pyx_PyMODINIT_FUNC initgpu_nms(void) -#else -__Pyx_PyMODINIT_FUNC PyInit_gpu_nms(void) CYTHON_SMALL_CODE; /*proto*/ -__Pyx_PyMODINIT_FUNC PyInit_gpu_nms(void) -#if CYTHON_PEP489_MULTI_PHASE_INIT -{ - return PyModuleDef_Init(&__pyx_moduledef); -} -static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { - #if PY_VERSION_HEX >= 0x030700A1 - static PY_INT64_T main_interpreter_id = -1; - PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); - if (main_interpreter_id == -1) { - main_interpreter_id = current_id; - return (unlikely(current_id == -1)) ? 
-1 : 0; - } else if (unlikely(main_interpreter_id != current_id)) - #else - static PyInterpreterState *main_interpreter = NULL; - PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; - if (!main_interpreter) { - main_interpreter = current_interpreter; - } else if (unlikely(main_interpreter != current_interpreter)) - #endif - { - PyErr_SetString( - PyExc_ImportError, - "Interpreter change detected - this module can only be loaded into one interpreter per process."); - return -1; - } - return 0; -} -static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) { - PyObject *value = PyObject_GetAttrString(spec, from_name); - int result = 0; - if (likely(value)) { - if (allow_none || value != Py_None) { - result = PyDict_SetItemString(moddict, to_name, value); - } - Py_DECREF(value); - } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { - PyErr_Clear(); - } else { - result = -1; - } - return result; -} -static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { - PyObject *module = NULL, *moddict, *modname; - if (__Pyx_check_single_interpreter()) - return NULL; - if (__pyx_m) - return __Pyx_NewRef(__pyx_m); - modname = PyObject_GetAttrString(spec, "name"); - if (unlikely(!modname)) goto bad; - module = PyModule_NewObject(modname); - Py_DECREF(modname); - if (unlikely(!module)) goto bad; - moddict = PyModule_GetDict(module); - if (unlikely(!moddict)) goto bad; - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; - return module; -bad: - Py_XDECREF(module); - 
return NULL; -} - - -static CYTHON_SMALL_CODE int __pyx_pymod_exec_gpu_nms(PyObject *__pyx_pyinit_module) -#endif -#endif -{ - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannyDeclarations - #if CYTHON_PEP489_MULTI_PHASE_INIT - if (__pyx_m) { - if (__pyx_m == __pyx_pyinit_module) return 0; - PyErr_SetString(PyExc_RuntimeError, "Module 'gpu_nms' has already been imported. Re-initialisation is not supported."); - return -1; - } - #elif PY_MAJOR_VERSION >= 3 - if (__pyx_m) return __Pyx_NewRef(__pyx_m); - #endif - #if CYTHON_REFNANNY -__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); -if (!__Pyx_RefNanny) { - PyErr_Clear(); - __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); - if (!__Pyx_RefNanny) - Py_FatalError("failed to import 'refnanny' module"); -} -#endif - __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_gpu_nms(void)", 0); - if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #ifdef __Pxy_PyFrame_Initialize_Offsets - __Pxy_PyFrame_Initialize_Offsets(); - #endif - __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) - #ifdef __Pyx_CyFunction_USED - if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_FusedFunction_USED - if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_Coroutine_USED - if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_Generator_USED - if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_AsyncGen_USED - if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif 
- #ifdef __Pyx_StopAsyncIteration_USED - if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - /*--- Library function declarations ---*/ - /*--- Threads initialization code ---*/ - #if defined(WITH_THREAD) && PY_VERSION_HEX < 0x030700F0 && defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS - PyEval_InitThreads(); - #endif - /*--- Module creation code ---*/ - #if CYTHON_PEP489_MULTI_PHASE_INIT - __pyx_m = __pyx_pyinit_module; - Py_INCREF(__pyx_m); - #else - #if PY_MAJOR_VERSION < 3 - __pyx_m = Py_InitModule4("gpu_nms", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); - #else - __pyx_m = PyModule_Create(&__pyx_moduledef); - #endif - if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) - Py_INCREF(__pyx_d); - __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) - Py_INCREF(__pyx_b); - __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) - Py_INCREF(__pyx_cython_runtime); - if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - /*--- Initialize various global constants etc. 
---*/ - if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) - if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - if (__pyx_module_is_main_nms__gpu_nms) { - if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - } - #if PY_MAJOR_VERSION >= 3 - { - PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) - if (!PyDict_GetItemString(modules, "nms.gpu_nms")) { - if (unlikely(PyDict_SetItemString(modules, "nms.gpu_nms", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) - } - } - #endif - /*--- Builtin init code ---*/ - if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - /*--- Constants init code ---*/ - if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - /*--- Global type/function init code ---*/ - (void)__Pyx_modinit_global_init_code(); - (void)__Pyx_modinit_variable_export_code(); - (void)__Pyx_modinit_function_export_code(); - (void)__Pyx_modinit_type_init_code(); - if (unlikely(__Pyx_modinit_type_import_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error) - (void)__Pyx_modinit_variable_import_code(); - (void)__Pyx_modinit_function_import_code(); - /*--- Execution code ---*/ - #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) - if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - - /* "nms/gpu_nms.pyx":8 - * # -------------------------------------------------------- - * - * import numpy as np # <<<<<<<<<<<<<< - * cimport numpy as np - * - */ - __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 8, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "nms/gpu_nms.pyx":11 - * cimport numpy 
as np - * - * assert sizeof(int) == sizeof(np.int32_t) # <<<<<<<<<<<<<< - * - * cdef extern from "gpu_nms.hpp": - */ - #ifndef CYTHON_WITHOUT_ASSERTIONS - if (unlikely(!Py_OptimizeFlag)) { - if (unlikely(!(((sizeof(int)) == (sizeof(__pyx_t_5numpy_int32_t))) != 0))) { - PyErr_SetNone(PyExc_AssertionError); - __PYX_ERR(0, 11, __pyx_L1_error) - } - } - #endif - - /* "nms/gpu_nms.pyx":16 - * void _nms(np.int32_t*, int*, np.float32_t*, int, int, float, int) - * - * def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh, # <<<<<<<<<<<<<< - * np.int32_t device_id=0): - * cdef int boxes_num = dets.shape[0] - */ - __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_3nms_7gpu_nms_1gpu_nms, NULL, __pyx_n_s_nms_gpu_nms); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_gpu_nms, __pyx_t_1) < 0) __PYX_ERR(0, 16, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "nms/gpu_nms.pyx":1 - * # -------------------------------------------------------- # <<<<<<<<<<<<<< - * # Faster R-CNN - * # Copyright (c) 2015 Microsoft - */ - __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "../../../anaconda3/envs/DrillAndy/lib/python3.6/site-packages/numpy/__init__.pxd":892 - * raise ImportError("numpy.core.umath failed to import") - * - * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< - * try: - * _import_umath() - */ - - /*--- Wrapped vars code ---*/ - - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - if (__pyx_m) { - if (__pyx_d) { - __Pyx_AddTraceback("init nms.gpu_nms", __pyx_clineno, __pyx_lineno, __pyx_filename); - } - Py_CLEAR(__pyx_m); - } else if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_ImportError, "init nms.gpu_nms"); - } - __pyx_L0:; - 
__Pyx_RefNannyFinishContext(); - #if CYTHON_PEP489_MULTI_PHASE_INIT - return (__pyx_m != NULL) ? 0 : -1; - #elif PY_MAJOR_VERSION >= 3 - return __pyx_m; - #else - return; - #endif -} - -/* --- Runtime support code --- */ -/* Refnanny */ -#if CYTHON_REFNANNY -static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { - PyObject *m = NULL, *p = NULL; - void *r = NULL; - m = PyImport_ImportModule(modname); - if (!m) goto end; - p = PyObject_GetAttrString(m, "RefNannyAPI"); - if (!p) goto end; - r = PyLong_AsVoidPtr(p); -end: - Py_XDECREF(p); - Py_XDECREF(m); - return (__Pyx_RefNannyAPIStruct *)r; -} -#endif - -/* RaiseArgTupleInvalid */ -static void __Pyx_RaiseArgtupleInvalid( - const char* func_name, - int exact, - Py_ssize_t num_min, - Py_ssize_t num_max, - Py_ssize_t num_found) -{ - Py_ssize_t num_expected; - const char *more_or_less; - if (num_found < num_min) { - num_expected = num_min; - more_or_less = "at least"; - } else { - num_expected = num_max; - more_or_less = "at most"; - } - if (exact) { - more_or_less = "exactly"; - } - PyErr_Format(PyExc_TypeError, - "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", - func_name, more_or_less, num_expected, - (num_expected == 1) ? 
"" : "s", num_found); -} - -/* RaiseDoubleKeywords */ -static void __Pyx_RaiseDoubleKeywordsError( - const char* func_name, - PyObject* kw_name) -{ - PyErr_Format(PyExc_TypeError, - #if PY_MAJOR_VERSION >= 3 - "%s() got multiple values for keyword argument '%U'", func_name, kw_name); - #else - "%s() got multiple values for keyword argument '%s'", func_name, - PyString_AsString(kw_name)); - #endif -} - -/* ParseKeywords */ -static int __Pyx_ParseOptionalKeywords( - PyObject *kwds, - PyObject **argnames[], - PyObject *kwds2, - PyObject *values[], - Py_ssize_t num_pos_args, - const char* function_name) -{ - PyObject *key = 0, *value = 0; - Py_ssize_t pos = 0; - PyObject*** name; - PyObject*** first_kw_arg = argnames + num_pos_args; - while (PyDict_Next(kwds, &pos, &key, &value)) { - name = first_kw_arg; - while (*name && (**name != key)) name++; - if (*name) { - values[name-argnames] = value; - continue; - } - name = first_kw_arg; - #if PY_MAJOR_VERSION < 3 - if (likely(PyString_Check(key))) { - while (*name) { - if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) - && _PyString_Eq(**name, key)) { - values[name-argnames] = value; - break; - } - name++; - } - if (*name) continue; - else { - PyObject*** argname = argnames; - while (argname != first_kw_arg) { - if ((**argname == key) || ( - (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) - && _PyString_Eq(**argname, key))) { - goto arg_passed_twice; - } - argname++; - } - } - } else - #endif - if (likely(PyUnicode_Check(key))) { - while (*name) { - int cmp = (**name == key) ? 0 : - #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 - (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 
1 : - #endif - PyUnicode_Compare(**name, key); - if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; - if (cmp == 0) { - values[name-argnames] = value; - break; - } - name++; - } - if (*name) continue; - else { - PyObject*** argname = argnames; - while (argname != first_kw_arg) { - int cmp = (**argname == key) ? 0 : - #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 - (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : - #endif - PyUnicode_Compare(**argname, key); - if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; - if (cmp == 0) goto arg_passed_twice; - argname++; - } - } - } else - goto invalid_keyword_type; - if (kwds2) { - if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; - } else { - goto invalid_keyword; - } - } - return 0; -arg_passed_twice: - __Pyx_RaiseDoubleKeywordsError(function_name, key); - goto bad; -invalid_keyword_type: - PyErr_Format(PyExc_TypeError, - "%.200s() keywords must be strings", function_name); - goto bad; -invalid_keyword: - PyErr_Format(PyExc_TypeError, - #if PY_MAJOR_VERSION < 3 - "%.200s() got an unexpected keyword argument '%.200s'", - function_name, PyString_AsString(key)); - #else - "%s() got an unexpected keyword argument '%U'", - function_name, key); - #endif -bad: - return -1; -} - -/* ArgTypeTest */ -static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) -{ - if (unlikely(!type)) { - PyErr_SetString(PyExc_SystemError, "Missing type object"); - return 0; - } - else if (exact) { - #if PY_MAJOR_VERSION == 2 - if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; - #endif - } - else { - if (likely(__Pyx_TypeCheck(obj, type))) return 1; - } - PyErr_Format(PyExc_TypeError, - "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", - name, type->tp_name, Py_TYPE(obj)->tp_name); - return 0; -} - -/* IsLittleEndian */ -static CYTHON_INLINE int __Pyx_Is_Little_Endian(void) -{ - union { - uint32_t u32; - 
uint8_t u8[4]; - } S; - S.u32 = 0x01020304; - return S.u8[0] == 4; -} - -/* BufferFormatCheck */ -static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, - __Pyx_BufFmt_StackElem* stack, - __Pyx_TypeInfo* type) { - stack[0].field = &ctx->root; - stack[0].parent_offset = 0; - ctx->root.type = type; - ctx->root.name = "buffer dtype"; - ctx->root.offset = 0; - ctx->head = stack; - ctx->head->field = &ctx->root; - ctx->fmt_offset = 0; - ctx->head->parent_offset = 0; - ctx->new_packmode = '@'; - ctx->enc_packmode = '@'; - ctx->new_count = 1; - ctx->enc_count = 0; - ctx->enc_type = 0; - ctx->is_complex = 0; - ctx->is_valid_array = 0; - ctx->struct_alignment = 0; - while (type->typegroup == 'S') { - ++ctx->head; - ctx->head->field = type->fields; - ctx->head->parent_offset = 0; - type = type->fields->type; - } -} -static int __Pyx_BufFmt_ParseNumber(const char** ts) { - int count; - const char* t = *ts; - if (*t < '0' || *t > '9') { - return -1; - } else { - count = *t++ - '0'; - while (*t >= '0' && *t <= '9') { - count *= 10; - count += *t++ - '0'; - } - } - *ts = t; - return count; -} -static int __Pyx_BufFmt_ExpectNumber(const char **ts) { - int number = __Pyx_BufFmt_ParseNumber(ts); - if (number == -1) - PyErr_Format(PyExc_ValueError,\ - "Does not understand character buffer dtype format string ('%c')", **ts); - return number; -} -static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { - PyErr_Format(PyExc_ValueError, - "Unexpected format string character: '%c'", ch); -} -static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { - switch (ch) { - case '?': return "'bool'"; - case 'c': return "'char'"; - case 'b': return "'signed char'"; - case 'B': return "'unsigned char'"; - case 'h': return "'short'"; - case 'H': return "'unsigned short'"; - case 'i': return "'int'"; - case 'I': return "'unsigned int'"; - case 'l': return "'long'"; - case 'L': return "'unsigned long'"; - case 'q': return "'long long'"; - case 'Q': return "'unsigned long long'"; - 
case 'f': return (is_complex ? "'complex float'" : "'float'"); - case 'd': return (is_complex ? "'complex double'" : "'double'"); - case 'g': return (is_complex ? "'complex long double'" : "'long double'"); - case 'T': return "a struct"; - case 'O': return "Python object"; - case 'P': return "a pointer"; - case 's': case 'p': return "a string"; - case 0: return "end"; - default: return "unparseable format string"; - } -} -static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { - switch (ch) { - case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; - case 'h': case 'H': return 2; - case 'i': case 'I': case 'l': case 'L': return 4; - case 'q': case 'Q': return 8; - case 'f': return (is_complex ? 8 : 4); - case 'd': return (is_complex ? 16 : 8); - case 'g': { - PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); - return 0; - } - case 'O': case 'P': return sizeof(void*); - default: - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } -} -static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { - switch (ch) { - case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; - case 'h': case 'H': return sizeof(short); - case 'i': case 'I': return sizeof(int); - case 'l': case 'L': return sizeof(long); - #ifdef HAVE_LONG_LONG - case 'q': case 'Q': return sizeof(PY_LONG_LONG); - #endif - case 'f': return sizeof(float) * (is_complex ? 2 : 1); - case 'd': return sizeof(double) * (is_complex ? 2 : 1); - case 'g': return sizeof(long double) * (is_complex ? 
2 : 1); - case 'O': case 'P': return sizeof(void*); - default: { - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } - } -} -typedef struct { char c; short x; } __Pyx_st_short; -typedef struct { char c; int x; } __Pyx_st_int; -typedef struct { char c; long x; } __Pyx_st_long; -typedef struct { char c; float x; } __Pyx_st_float; -typedef struct { char c; double x; } __Pyx_st_double; -typedef struct { char c; long double x; } __Pyx_st_longdouble; -typedef struct { char c; void *x; } __Pyx_st_void_p; -#ifdef HAVE_LONG_LONG -typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; -#endif -static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { - switch (ch) { - case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; - case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); - case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); - case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); -#ifdef HAVE_LONG_LONG - case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); -#endif - case 'f': return sizeof(__Pyx_st_float) - sizeof(float); - case 'd': return sizeof(__Pyx_st_double) - sizeof(double); - case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); - case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); - default: - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } -} -/* These are for computing the padding at the end of the struct to align - on the first member of the struct. This will probably the same as above, - but we don't have any guarantees. 
- */ -typedef struct { short x; char c; } __Pyx_pad_short; -typedef struct { int x; char c; } __Pyx_pad_int; -typedef struct { long x; char c; } __Pyx_pad_long; -typedef struct { float x; char c; } __Pyx_pad_float; -typedef struct { double x; char c; } __Pyx_pad_double; -typedef struct { long double x; char c; } __Pyx_pad_longdouble; -typedef struct { void *x; char c; } __Pyx_pad_void_p; -#ifdef HAVE_LONG_LONG -typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; -#endif -static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { - switch (ch) { - case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; - case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); - case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); - case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); -#ifdef HAVE_LONG_LONG - case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); -#endif - case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); - case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); - case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); - case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); - default: - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } -} -static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { - switch (ch) { - case 'c': - return 'H'; - case 'b': case 'h': case 'i': - case 'l': case 'q': case 's': case 'p': - return 'I'; - case '?': case 'B': case 'H': case 'I': case 'L': case 'Q': - return 'U'; - case 'f': case 'd': case 'g': - return (is_complex ? 
'C' : 'R'); - case 'O': - return 'O'; - case 'P': - return 'P'; - default: { - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } - } -} -static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { - if (ctx->head == NULL || ctx->head->field == &ctx->root) { - const char* expected; - const char* quote; - if (ctx->head == NULL) { - expected = "end"; - quote = ""; - } else { - expected = ctx->head->field->type->name; - quote = "'"; - } - PyErr_Format(PyExc_ValueError, - "Buffer dtype mismatch, expected %s%s%s but got %s", - quote, expected, quote, - __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); - } else { - __Pyx_StructField* field = ctx->head->field; - __Pyx_StructField* parent = (ctx->head - 1)->field; - PyErr_Format(PyExc_ValueError, - "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", - field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), - parent->type->name, field->name); - } -} -static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { - char group; - size_t size, offset, arraysize = 1; - if (ctx->enc_type == 0) return 0; - if (ctx->head->field->type->arraysize[0]) { - int i, ndim = 0; - if (ctx->enc_type == 's' || ctx->enc_type == 'p') { - ctx->is_valid_array = ctx->head->field->type->ndim == 1; - ndim = 1; - if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { - PyErr_Format(PyExc_ValueError, - "Expected a dimension of size %zu, got %zu", - ctx->head->field->type->arraysize[0], ctx->enc_count); - return -1; - } - } - if (!ctx->is_valid_array) { - PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", - ctx->head->field->type->ndim, ndim); - return -1; - } - for (i = 0; i < ctx->head->field->type->ndim; i++) { - arraysize *= ctx->head->field->type->arraysize[i]; - } - ctx->is_valid_array = 0; - ctx->enc_count = 1; - } - group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); - do { - __Pyx_StructField* field = ctx->head->field; - __Pyx_TypeInfo* 
type = field->type; - if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { - size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); - } else { - size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); - } - if (ctx->enc_packmode == '@') { - size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); - size_t align_mod_offset; - if (align_at == 0) return -1; - align_mod_offset = ctx->fmt_offset % align_at; - if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; - if (ctx->struct_alignment == 0) - ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, - ctx->is_complex); - } - if (type->size != size || type->typegroup != group) { - if (type->typegroup == 'C' && type->fields != NULL) { - size_t parent_offset = ctx->head->parent_offset + field->offset; - ++ctx->head; - ctx->head->field = type->fields; - ctx->head->parent_offset = parent_offset; - continue; - } - if ((type->typegroup == 'H' || group == 'H') && type->size == size) { - } else { - __Pyx_BufFmt_RaiseExpected(ctx); - return -1; - } - } - offset = ctx->head->parent_offset + field->offset; - if (ctx->fmt_offset != offset) { - PyErr_Format(PyExc_ValueError, - "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", - (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); - return -1; - } - ctx->fmt_offset += size; - if (arraysize) - ctx->fmt_offset += (arraysize - 1) * size; - --ctx->enc_count; - while (1) { - if (field == &ctx->root) { - ctx->head = NULL; - if (ctx->enc_count != 0) { - __Pyx_BufFmt_RaiseExpected(ctx); - return -1; - } - break; - } - ctx->head->field = ++field; - if (field->type == NULL) { - --ctx->head; - field = ctx->head->field; - continue; - } else if (field->type->typegroup == 'S') { - size_t parent_offset = ctx->head->parent_offset + field->offset; - if (field->type->fields->type == NULL) continue; - field = field->type->fields; 
- ++ctx->head; - ctx->head->field = field; - ctx->head->parent_offset = parent_offset; - break; - } else { - break; - } - } - } while (ctx->enc_count); - ctx->enc_type = 0; - ctx->is_complex = 0; - return 0; -} -static PyObject * -__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) -{ - const char *ts = *tsp; - int i = 0, number, ndim; - ++ts; - if (ctx->new_count != 1) { - PyErr_SetString(PyExc_ValueError, - "Cannot handle repeated arrays in format string"); - return NULL; - } - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ndim = ctx->head->field->type->ndim; - while (*ts && *ts != ')') { - switch (*ts) { - case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; - default: break; - } - number = __Pyx_BufFmt_ExpectNumber(&ts); - if (number == -1) return NULL; - if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) - return PyErr_Format(PyExc_ValueError, - "Expected a dimension of size %zu, got %d", - ctx->head->field->type->arraysize[i], number); - if (*ts != ',' && *ts != ')') - return PyErr_Format(PyExc_ValueError, - "Expected a comma in format string, got '%c'", *ts); - if (*ts == ',') ts++; - i++; - } - if (i != ndim) - return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", - ctx->head->field->type->ndim, i); - if (!*ts) { - PyErr_SetString(PyExc_ValueError, - "Unexpected end of format string, expected ')'"); - return NULL; - } - ctx->is_valid_array = 1; - ctx->new_count = 1; - *tsp = ++ts; - return Py_None; -} -static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { - int got_Z = 0; - while (1) { - switch(*ts) { - case 0: - if (ctx->enc_type != 0 && ctx->head == NULL) { - __Pyx_BufFmt_RaiseExpected(ctx); - return NULL; - } - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - if (ctx->head != NULL) { - __Pyx_BufFmt_RaiseExpected(ctx); - return NULL; - } - return ts; - case ' ': - case '\r': - case '\n': - ++ts; - break; - case 
'<': - if (!__Pyx_Is_Little_Endian()) { - PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); - return NULL; - } - ctx->new_packmode = '='; - ++ts; - break; - case '>': - case '!': - if (__Pyx_Is_Little_Endian()) { - PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); - return NULL; - } - ctx->new_packmode = '='; - ++ts; - break; - case '=': - case '@': - case '^': - ctx->new_packmode = *ts++; - break; - case 'T': - { - const char* ts_after_sub; - size_t i, struct_count = ctx->new_count; - size_t struct_alignment = ctx->struct_alignment; - ctx->new_count = 1; - ++ts; - if (*ts != '{') { - PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); - return NULL; - } - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ctx->enc_type = 0; - ctx->enc_count = 0; - ctx->struct_alignment = 0; - ++ts; - ts_after_sub = ts; - for (i = 0; i != struct_count; ++i) { - ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); - if (!ts_after_sub) return NULL; - } - ts = ts_after_sub; - if (struct_alignment) ctx->struct_alignment = struct_alignment; - } - break; - case '}': - { - size_t alignment = ctx->struct_alignment; - ++ts; - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ctx->enc_type = 0; - if (alignment && ctx->fmt_offset % alignment) { - ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); - } - } - return ts; - case 'x': - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ctx->fmt_offset += ctx->new_count; - ctx->new_count = 1; - ctx->enc_count = 0; - ctx->enc_type = 0; - ctx->enc_packmode = ctx->new_packmode; - ++ts; - break; - case 'Z': - got_Z = 1; - ++ts; - if (*ts != 'f' && *ts != 'd' && *ts != 'g') { - __Pyx_BufFmt_RaiseUnexpectedChar('Z'); - return NULL; - } - CYTHON_FALLTHROUGH; - case '?': case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': - case 'l': case 'L': case 'q': case 'Q': - case 'f': case 
'd': case 'g': - case 'O': case 'p': - if ((ctx->enc_type == *ts) && (got_Z == ctx->is_complex) && - (ctx->enc_packmode == ctx->new_packmode) && (!ctx->is_valid_array)) { - ctx->enc_count += ctx->new_count; - ctx->new_count = 1; - got_Z = 0; - ++ts; - break; - } - CYTHON_FALLTHROUGH; - case 's': - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ctx->enc_count = ctx->new_count; - ctx->enc_packmode = ctx->new_packmode; - ctx->enc_type = *ts; - ctx->is_complex = got_Z; - ++ts; - ctx->new_count = 1; - got_Z = 0; - break; - case ':': - ++ts; - while(*ts != ':') ++ts; - ++ts; - break; - case '(': - if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; - break; - default: - { - int number = __Pyx_BufFmt_ExpectNumber(&ts); - if (number == -1) return NULL; - ctx->new_count = (size_t)number; - } - } - } -} - -/* BufferGetAndValidate */ - static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { - if (unlikely(info->buf == NULL)) return; - if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; - __Pyx_ReleaseBuffer(info); -} -static void __Pyx_ZeroBuffer(Py_buffer* buf) { - buf->buf = NULL; - buf->obj = NULL; - buf->strides = __Pyx_zeros; - buf->shape = __Pyx_zeros; - buf->suboffsets = __Pyx_minusones; -} -static int __Pyx__GetBufferAndValidate( - Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, - int nd, int cast, __Pyx_BufFmt_StackElem* stack) -{ - buf->buf = NULL; - if (unlikely(__Pyx_GetBuffer(obj, buf, flags) == -1)) { - __Pyx_ZeroBuffer(buf); - return -1; - } - if (unlikely(buf->ndim != nd)) { - PyErr_Format(PyExc_ValueError, - "Buffer has wrong number of dimensions (expected %d, got %d)", - nd, buf->ndim); - goto fail; - } - if (!cast) { - __Pyx_BufFmt_Context ctx; - __Pyx_BufFmt_Init(&ctx, stack, dtype); - if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; - } - if (unlikely((size_t)buf->itemsize != dtype->size)) { - PyErr_Format(PyExc_ValueError, - "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) 
does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)", - buf->itemsize, (buf->itemsize > 1) ? "s" : "", - dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : ""); - goto fail; - } - if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; - return 0; -fail:; - __Pyx_SafeReleaseBuffer(buf); - return -1; -} - -/* PyObjectGetAttrStr */ - #if CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { - PyTypeObject* tp = Py_TYPE(obj); - if (likely(tp->tp_getattro)) - return tp->tp_getattro(obj, attr_name); -#if PY_MAJOR_VERSION < 3 - if (likely(tp->tp_getattr)) - return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); -#endif - return PyObject_GetAttr(obj, attr_name); -} -#endif - -/* GetBuiltinName */ - static PyObject *__Pyx_GetBuiltinName(PyObject *name) { - PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); - if (unlikely(!result)) { - PyErr_Format(PyExc_NameError, -#if PY_MAJOR_VERSION >= 3 - "name '%U' is not defined", name); -#else - "name '%.200s' is not defined", PyString_AS_STRING(name)); -#endif - } - return result; -} - -/* PyDictVersioning */ - #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { - PyObject *dict = Py_TYPE(obj)->tp_dict; - return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; -} -static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { - PyObject **dictptr = NULL; - Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; - if (offset) { -#if CYTHON_COMPILING_IN_CPYTHON - dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); -#else - dictptr = _PyObject_GetDictPtr(obj); -#endif - } - return (dictptr && *dictptr) ? 
__PYX_GET_DICT_VERSION(*dictptr) : 0; -} -static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { - PyObject *dict = Py_TYPE(obj)->tp_dict; - if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) - return 0; - return obj_dict_version == __Pyx_get_object_dict_version(obj); -} -#endif - -/* GetModuleGlobalName */ - #if CYTHON_USE_DICT_VERSIONS -static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) -#else -static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) -#endif -{ - PyObject *result; -#if !CYTHON_AVOID_BORROWED_REFS -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 - result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); - __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) - if (likely(result)) { - return __Pyx_NewRef(result); - } else if (unlikely(PyErr_Occurred())) { - return NULL; - } -#else - result = PyDict_GetItem(__pyx_d, name); - __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) - if (likely(result)) { - return __Pyx_NewRef(result); - } -#endif -#else - result = PyObject_GetItem(__pyx_d, name); - __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) - if (likely(result)) { - return __Pyx_NewRef(result); - } - PyErr_Clear(); -#endif - return __Pyx_GetBuiltinName(name); -} - -/* PyObjectCall */ - #if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { - PyObject *result; - ternaryfunc call = Py_TYPE(func)->tp_call; - if (unlikely(!call)) - return PyObject_Call(func, arg, kw); - if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) - return NULL; - result = (*call)(func, arg, kw); - Py_LeaveRecursiveCall(); - if (unlikely(!result) && 
unlikely(!PyErr_Occurred())) { - PyErr_SetString( - PyExc_SystemError, - "NULL result without error in PyObject_Call"); - } - return result; -} -#endif - -/* ExtTypeTest */ - static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { - if (unlikely(!type)) { - PyErr_SetString(PyExc_SystemError, "Missing type object"); - return 0; - } - if (likely(__Pyx_TypeCheck(obj, type))) - return 1; - PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", - Py_TYPE(obj)->tp_name, type->tp_name); - return 0; -} - -/* GetItemInt */ - static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { - PyObject *r; - if (!j) return NULL; - r = PyObject_GetItem(o, j); - Py_DECREF(j); - return r; -} -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, - CYTHON_NCP_UNUSED int wraparound, - CYTHON_NCP_UNUSED int boundscheck) { -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - Py_ssize_t wrapped_i = i; - if (wraparound & unlikely(i < 0)) { - wrapped_i += PyList_GET_SIZE(o); - } - if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) { - PyObject *r = PyList_GET_ITEM(o, wrapped_i); - Py_INCREF(r); - return r; - } - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -#else - return PySequence_GetItem(o, i); -#endif -} -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, - CYTHON_NCP_UNUSED int wraparound, - CYTHON_NCP_UNUSED int boundscheck) { -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - Py_ssize_t wrapped_i = i; - if (wraparound & unlikely(i < 0)) { - wrapped_i += PyTuple_GET_SIZE(o); - } - if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) { - PyObject *r = PyTuple_GET_ITEM(o, wrapped_i); - Py_INCREF(r); - return r; - } - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -#else - return PySequence_GetItem(o, i); -#endif -} -static CYTHON_INLINE PyObject 
*__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, - CYTHON_NCP_UNUSED int wraparound, - CYTHON_NCP_UNUSED int boundscheck) { -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS - if (is_list || PyList_CheckExact(o)) { - Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); - if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) { - PyObject *r = PyList_GET_ITEM(o, n); - Py_INCREF(r); - return r; - } - } - else if (PyTuple_CheckExact(o)) { - Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o); - if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) { - PyObject *r = PyTuple_GET_ITEM(o, n); - Py_INCREF(r); - return r; - } - } else { - PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; - if (likely(m && m->sq_item)) { - if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { - Py_ssize_t l = m->sq_length(o); - if (likely(l >= 0)) { - i += l; - } else { - if (!PyErr_ExceptionMatches(PyExc_OverflowError)) - return NULL; - PyErr_Clear(); - } - } - return m->sq_item(o, i); - } - } -#else - if (is_list || PySequence_Check(o)) { - return PySequence_GetItem(o, i); - } -#endif - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -} - -/* ObjectGetItem */ - #if CYTHON_USE_TYPE_SLOTS -static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) { - PyObject *runerr; - Py_ssize_t key_value; - PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence; - if (unlikely(!(m && m->sq_item))) { - PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name); - return NULL; - } - key_value = __Pyx_PyIndex_AsSsize_t(index); - if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) { - return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1); - } - if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) { - PyErr_Clear(); - PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an 
index-sized integer", Py_TYPE(index)->tp_name); - } - return NULL; -} -static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) { - PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping; - if (likely(m && m->mp_subscript)) { - return m->mp_subscript(obj, key); - } - return __Pyx_PyObject_GetIndex(obj, key); -} -#endif - -/* PyFunctionFastCall */ - #if CYTHON_FAST_PYCALL -static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, - PyObject *globals) { - PyFrameObject *f; - PyThreadState *tstate = __Pyx_PyThreadState_Current; - PyObject **fastlocals; - Py_ssize_t i; - PyObject *result; - assert(globals != NULL); - /* XXX Perhaps we should create a specialized - PyFrame_New() that doesn't take locals, but does - take builtins without sanity checking them. - */ - assert(tstate != NULL); - f = PyFrame_New(tstate, co, globals, NULL); - if (f == NULL) { - return NULL; - } - fastlocals = __Pyx_PyFrame_GetLocalsplus(f); - for (i = 0; i < na; i++) { - Py_INCREF(*args); - fastlocals[i] = *args++; - } - result = PyEval_EvalFrameEx(f,0); - ++tstate->recursion_depth; - Py_DECREF(f); - --tstate->recursion_depth; - return result; -} -#if 1 || PY_VERSION_HEX < 0x030600B1 -static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { - PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); - PyObject *globals = PyFunction_GET_GLOBALS(func); - PyObject *argdefs = PyFunction_GET_DEFAULTS(func); - PyObject *closure; -#if PY_MAJOR_VERSION >= 3 - PyObject *kwdefs; -#endif - PyObject *kwtuple, **k; - PyObject **d; - Py_ssize_t nd; - Py_ssize_t nk; - PyObject *result; - assert(kwargs == NULL || PyDict_Check(kwargs)); - nk = kwargs ? 
PyDict_Size(kwargs) : 0; - if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { - return NULL; - } - if ( -#if PY_MAJOR_VERSION >= 3 - co->co_kwonlyargcount == 0 && -#endif - likely(kwargs == NULL || nk == 0) && - co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { - if (argdefs == NULL && co->co_argcount == nargs) { - result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); - goto done; - } - else if (nargs == 0 && argdefs != NULL - && co->co_argcount == Py_SIZE(argdefs)) { - /* function called with no arguments, but all parameters have - a default value: use default values as arguments .*/ - args = &PyTuple_GET_ITEM(argdefs, 0); - result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); - goto done; - } - } - if (kwargs != NULL) { - Py_ssize_t pos, i; - kwtuple = PyTuple_New(2 * nk); - if (kwtuple == NULL) { - result = NULL; - goto done; - } - k = &PyTuple_GET_ITEM(kwtuple, 0); - pos = i = 0; - while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { - Py_INCREF(k[i]); - Py_INCREF(k[i+1]); - i += 2; - } - nk = i / 2; - } - else { - kwtuple = NULL; - k = NULL; - } - closure = PyFunction_GET_CLOSURE(func); -#if PY_MAJOR_VERSION >= 3 - kwdefs = PyFunction_GET_KW_DEFAULTS(func); -#endif - if (argdefs != NULL) { - d = &PyTuple_GET_ITEM(argdefs, 0); - nd = Py_SIZE(argdefs); - } - else { - d = NULL; - nd = 0; - } -#if PY_MAJOR_VERSION >= 3 - result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, - args, (int)nargs, - k, (int)nk, - d, (int)nd, kwdefs, closure); -#else - result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, - args, (int)nargs, - k, (int)nk, - d, (int)nd, closure); -#endif - Py_XDECREF(kwtuple); -done: - Py_LeaveRecursiveCall(); - return result; -} -#endif -#endif - -/* PyObjectCallMethO */ - #if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { - PyObject *self, *result; - PyCFunction cfunc; - cfunc = 
PyCFunction_GET_FUNCTION(func); - self = PyCFunction_GET_SELF(func); - if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) - return NULL; - result = cfunc(self, arg); - Py_LeaveRecursiveCall(); - if (unlikely(!result) && unlikely(!PyErr_Occurred())) { - PyErr_SetString( - PyExc_SystemError, - "NULL result without error in PyObject_Call"); - } - return result; -} -#endif - -/* PyObjectCallNoArg */ - #if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) { -#if CYTHON_FAST_PYCALL - if (PyFunction_Check(func)) { - return __Pyx_PyFunction_FastCall(func, NULL, 0); - } -#endif -#ifdef __Pyx_CyFunction_USED - if (likely(PyCFunction_Check(func) || __Pyx_CyFunction_Check(func))) -#else - if (likely(PyCFunction_Check(func))) -#endif - { - if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) { - return __Pyx_PyObject_CallMethO(func, NULL); - } - } - return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL); -} -#endif - -/* PyCFunctionFastCall */ - #if CYTHON_FAST_PYCCALL -static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { - PyCFunctionObject *func = (PyCFunctionObject*)func_obj; - PyCFunction meth = PyCFunction_GET_FUNCTION(func); - PyObject *self = PyCFunction_GET_SELF(func); - int flags = PyCFunction_GET_FLAGS(func); - assert(PyCFunction_Check(func)); - assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); - assert(nargs >= 0); - assert(nargs == 0 || args != NULL); - /* _PyCFunction_FastCallDict() must not be called with an exception set, - because it may clear it (directly or indirectly) and so the - caller loses its exception */ - assert(!PyErr_Occurred()); - if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { - return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); - } else { - return 
(*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); - } -} -#endif - -/* PyObjectCallOneArg */ - #if CYTHON_COMPILING_IN_CPYTHON -static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { - PyObject *result; - PyObject *args = PyTuple_New(1); - if (unlikely(!args)) return NULL; - Py_INCREF(arg); - PyTuple_SET_ITEM(args, 0, arg); - result = __Pyx_PyObject_Call(func, args, NULL); - Py_DECREF(args); - return result; -} -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { -#if CYTHON_FAST_PYCALL - if (PyFunction_Check(func)) { - return __Pyx_PyFunction_FastCall(func, &arg, 1); - } -#endif - if (likely(PyCFunction_Check(func))) { - if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { - return __Pyx_PyObject_CallMethO(func, arg); -#if CYTHON_FAST_PYCCALL - } else if (__Pyx_PyFastCFunction_Check(func)) { - return __Pyx_PyCFunction_FastCall(func, &arg, 1); -#endif - } - } - return __Pyx__PyObject_CallOneArg(func, arg); -} -#else -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { - PyObject *result; - PyObject *args = PyTuple_Pack(1, arg); - if (unlikely(!args)) return NULL; - result = __Pyx_PyObject_Call(func, args, NULL); - Py_DECREF(args); - return result; -} -#endif - -/* BufferIndexError */ - static void __Pyx_RaiseBufferIndexError(int axis) { - PyErr_Format(PyExc_IndexError, - "Out of bounds on buffer access (axis %d)", axis); -} - -/* BufferFallbackError */ - static void __Pyx_RaiseBufferFallbackError(void) { - PyErr_SetString(PyExc_ValueError, - "Buffer acquisition failed on assignment; and then reacquiring the old buffer failed too!"); -} - -/* PyErrFetchRestore */ - #if CYTHON_FAST_THREAD_STATE -static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { - PyObject *tmp_type, *tmp_value, *tmp_tb; - tmp_type = tstate->curexc_type; - tmp_value = tstate->curexc_value; - tmp_tb = 
tstate->curexc_traceback; - tstate->curexc_type = type; - tstate->curexc_value = value; - tstate->curexc_traceback = tb; - Py_XDECREF(tmp_type); - Py_XDECREF(tmp_value); - Py_XDECREF(tmp_tb); -} -static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { - *type = tstate->curexc_type; - *value = tstate->curexc_value; - *tb = tstate->curexc_traceback; - tstate->curexc_type = 0; - tstate->curexc_value = 0; - tstate->curexc_traceback = 0; -} -#endif - -/* GetTopmostException */ - #if CYTHON_USE_EXC_INFO_STACK -static _PyErr_StackItem * -__Pyx_PyErr_GetTopmostException(PyThreadState *tstate) -{ - _PyErr_StackItem *exc_info = tstate->exc_info; - while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) && - exc_info->previous_item != NULL) - { - exc_info = exc_info->previous_item; - } - return exc_info; -} -#endif - -/* SaveResetException */ - #if CYTHON_FAST_THREAD_STATE -static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { - #if CYTHON_USE_EXC_INFO_STACK - _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); - *type = exc_info->exc_type; - *value = exc_info->exc_value; - *tb = exc_info->exc_traceback; - #else - *type = tstate->exc_type; - *value = tstate->exc_value; - *tb = tstate->exc_traceback; - #endif - Py_XINCREF(*type); - Py_XINCREF(*value); - Py_XINCREF(*tb); -} -static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { - PyObject *tmp_type, *tmp_value, *tmp_tb; - #if CYTHON_USE_EXC_INFO_STACK - _PyErr_StackItem *exc_info = tstate->exc_info; - tmp_type = exc_info->exc_type; - tmp_value = exc_info->exc_value; - tmp_tb = exc_info->exc_traceback; - exc_info->exc_type = type; - exc_info->exc_value = value; - exc_info->exc_traceback = tb; - #else - tmp_type = tstate->exc_type; - tmp_value = tstate->exc_value; - tmp_tb = tstate->exc_traceback; - 
tstate->exc_type = type; - tstate->exc_value = value; - tstate->exc_traceback = tb; - #endif - Py_XDECREF(tmp_type); - Py_XDECREF(tmp_value); - Py_XDECREF(tmp_tb); -} -#endif - -/* PyErrExceptionMatches */ - #if CYTHON_FAST_THREAD_STATE -static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { - Py_ssize_t i, n; - n = PyTuple_GET_SIZE(tuple); -#if PY_MAJOR_VERSION >= 3 - for (i=0; icurexc_type; - if (exc_type == err) return 1; - if (unlikely(!exc_type)) return 0; - if (unlikely(PyTuple_Check(err))) - return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); - return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); -} -#endif - -/* GetException */ - #if CYTHON_FAST_THREAD_STATE -static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) -#else -static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) -#endif -{ - PyObject *local_type, *local_value, *local_tb; -#if CYTHON_FAST_THREAD_STATE - PyObject *tmp_type, *tmp_value, *tmp_tb; - local_type = tstate->curexc_type; - local_value = tstate->curexc_value; - local_tb = tstate->curexc_traceback; - tstate->curexc_type = 0; - tstate->curexc_value = 0; - tstate->curexc_traceback = 0; -#else - PyErr_Fetch(&local_type, &local_value, &local_tb); -#endif - PyErr_NormalizeException(&local_type, &local_value, &local_tb); -#if CYTHON_FAST_THREAD_STATE - if (unlikely(tstate->curexc_type)) -#else - if (unlikely(PyErr_Occurred())) -#endif - goto bad; - #if PY_MAJOR_VERSION >= 3 - if (local_tb) { - if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) - goto bad; - } - #endif - Py_XINCREF(local_tb); - Py_XINCREF(local_type); - Py_XINCREF(local_value); - *type = local_type; - *value = local_value; - *tb = local_tb; -#if CYTHON_FAST_THREAD_STATE - #if CYTHON_USE_EXC_INFO_STACK - { - _PyErr_StackItem *exc_info = tstate->exc_info; - tmp_type = exc_info->exc_type; - tmp_value = exc_info->exc_value; - tmp_tb = 
exc_info->exc_traceback; - exc_info->exc_type = local_type; - exc_info->exc_value = local_value; - exc_info->exc_traceback = local_tb; - } - #else - tmp_type = tstate->exc_type; - tmp_value = tstate->exc_value; - tmp_tb = tstate->exc_traceback; - tstate->exc_type = local_type; - tstate->exc_value = local_value; - tstate->exc_traceback = local_tb; - #endif - Py_XDECREF(tmp_type); - Py_XDECREF(tmp_value); - Py_XDECREF(tmp_tb); -#else - PyErr_SetExcInfo(local_type, local_value, local_tb); -#endif - return 0; -bad: - *type = 0; - *value = 0; - *tb = 0; - Py_XDECREF(local_type); - Py_XDECREF(local_value); - Py_XDECREF(local_tb); - return -1; -} - -/* RaiseException */ - #if PY_MAJOR_VERSION < 3 -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, - CYTHON_UNUSED PyObject *cause) { - __Pyx_PyThreadState_declare - Py_XINCREF(type); - if (!value || value == Py_None) - value = NULL; - else - Py_INCREF(value); - if (!tb || tb == Py_None) - tb = NULL; - else { - Py_INCREF(tb); - if (!PyTraceBack_Check(tb)) { - PyErr_SetString(PyExc_TypeError, - "raise: arg 3 must be a traceback or None"); - goto raise_error; - } - } - if (PyType_Check(type)) { -#if CYTHON_COMPILING_IN_PYPY - if (!value) { - Py_INCREF(Py_None); - value = Py_None; - } -#endif - PyErr_NormalizeException(&type, &value, &tb); - } else { - if (value) { - PyErr_SetString(PyExc_TypeError, - "instance exception may not have a separate value"); - goto raise_error; - } - value = type; - type = (PyObject*) Py_TYPE(type); - Py_INCREF(type); - if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { - PyErr_SetString(PyExc_TypeError, - "raise: exception class must be a subclass of BaseException"); - goto raise_error; - } - } - __Pyx_PyThreadState_assign - __Pyx_ErrRestore(type, value, tb); - return; -raise_error: - Py_XDECREF(value); - Py_XDECREF(type); - Py_XDECREF(tb); - return; -} -#else -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) 
{ - PyObject* owned_instance = NULL; - if (tb == Py_None) { - tb = 0; - } else if (tb && !PyTraceBack_Check(tb)) { - PyErr_SetString(PyExc_TypeError, - "raise: arg 3 must be a traceback or None"); - goto bad; - } - if (value == Py_None) - value = 0; - if (PyExceptionInstance_Check(type)) { - if (value) { - PyErr_SetString(PyExc_TypeError, - "instance exception may not have a separate value"); - goto bad; - } - value = type; - type = (PyObject*) Py_TYPE(value); - } else if (PyExceptionClass_Check(type)) { - PyObject *instance_class = NULL; - if (value && PyExceptionInstance_Check(value)) { - instance_class = (PyObject*) Py_TYPE(value); - if (instance_class != type) { - int is_subclass = PyObject_IsSubclass(instance_class, type); - if (!is_subclass) { - instance_class = NULL; - } else if (unlikely(is_subclass == -1)) { - goto bad; - } else { - type = instance_class; - } - } - } - if (!instance_class) { - PyObject *args; - if (!value) - args = PyTuple_New(0); - else if (PyTuple_Check(value)) { - Py_INCREF(value); - args = value; - } else - args = PyTuple_Pack(1, value); - if (!args) - goto bad; - owned_instance = PyObject_Call(type, args, NULL); - Py_DECREF(args); - if (!owned_instance) - goto bad; - value = owned_instance; - if (!PyExceptionInstance_Check(value)) { - PyErr_Format(PyExc_TypeError, - "calling %R should have returned an instance of " - "BaseException, not %R", - type, Py_TYPE(value)); - goto bad; - } - } - } else { - PyErr_SetString(PyExc_TypeError, - "raise: exception class must be a subclass of BaseException"); - goto bad; - } - if (cause) { - PyObject *fixed_cause; - if (cause == Py_None) { - fixed_cause = NULL; - } else if (PyExceptionClass_Check(cause)) { - fixed_cause = PyObject_CallObject(cause, NULL); - if (fixed_cause == NULL) - goto bad; - } else if (PyExceptionInstance_Check(cause)) { - fixed_cause = cause; - Py_INCREF(fixed_cause); - } else { - PyErr_SetString(PyExc_TypeError, - "exception causes must derive from " - "BaseException"); - goto 
bad; - } - PyException_SetCause(value, fixed_cause); - } - PyErr_SetObject(type, value); - if (tb) { -#if CYTHON_COMPILING_IN_PYPY - PyObject *tmp_type, *tmp_value, *tmp_tb; - PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); - Py_INCREF(tb); - PyErr_Restore(tmp_type, tmp_value, tb); - Py_XDECREF(tmp_tb); -#else - PyThreadState *tstate = __Pyx_PyThreadState_Current; - PyObject* tmp_tb = tstate->curexc_traceback; - if (tb != tmp_tb) { - Py_INCREF(tb); - tstate->curexc_traceback = tb; - Py_XDECREF(tmp_tb); - } -#endif - } -bad: - Py_XDECREF(owned_instance); - return; -} -#endif - -/* TypeImport */ - #ifndef __PYX_HAVE_RT_ImportType -#define __PYX_HAVE_RT_ImportType -static PyTypeObject *__Pyx_ImportType(PyObject *module, const char *module_name, const char *class_name, - size_t size, enum __Pyx_ImportType_CheckSize check_size) -{ - PyObject *result = 0; - char warning[200]; - Py_ssize_t basicsize; -#ifdef Py_LIMITED_API - PyObject *py_basicsize; -#endif - result = PyObject_GetAttrString(module, class_name); - if (!result) - goto bad; - if (!PyType_Check(result)) { - PyErr_Format(PyExc_TypeError, - "%.200s.%.200s is not a type object", - module_name, class_name); - goto bad; - } -#ifndef Py_LIMITED_API - basicsize = ((PyTypeObject *)result)->tp_basicsize; -#else - py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); - if (!py_basicsize) - goto bad; - basicsize = PyLong_AsSsize_t(py_basicsize); - Py_DECREF(py_basicsize); - py_basicsize = 0; - if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) - goto bad; -#endif - if ((size_t)basicsize < size) { - PyErr_Format(PyExc_ValueError, - "%.200s.%.200s size changed, may indicate binary incompatibility. " - "Expected %zd from C header, got %zd from PyObject", - module_name, class_name, size, basicsize); - goto bad; - } - if (check_size == __Pyx_ImportType_CheckSize_Error && (size_t)basicsize != size) { - PyErr_Format(PyExc_ValueError, - "%.200s.%.200s size changed, may indicate binary incompatibility. 
" - "Expected %zd from C header, got %zd from PyObject", - module_name, class_name, size, basicsize); - goto bad; - } - else if (check_size == __Pyx_ImportType_CheckSize_Warn && (size_t)basicsize > size) { - PyOS_snprintf(warning, sizeof(warning), - "%s.%s size changed, may indicate binary incompatibility. " - "Expected %zd from C header, got %zd from PyObject", - module_name, class_name, size, basicsize); - if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; - } - return (PyTypeObject *)result; -bad: - Py_XDECREF(result); - return NULL; -} -#endif - -/* Import */ - static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { - PyObject *empty_list = 0; - PyObject *module = 0; - PyObject *global_dict = 0; - PyObject *empty_dict = 0; - PyObject *list; - #if PY_MAJOR_VERSION < 3 - PyObject *py_import; - py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); - if (!py_import) - goto bad; - #endif - if (from_list) - list = from_list; - else { - empty_list = PyList_New(0); - if (!empty_list) - goto bad; - list = empty_list; - } - global_dict = PyModule_GetDict(__pyx_m); - if (!global_dict) - goto bad; - empty_dict = PyDict_New(); - if (!empty_dict) - goto bad; - { - #if PY_MAJOR_VERSION >= 3 - if (level == -1) { - if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) { - module = PyImport_ImportModuleLevelObject( - name, global_dict, empty_dict, list, 1); - if (!module) { - if (!PyErr_ExceptionMatches(PyExc_ImportError)) - goto bad; - PyErr_Clear(); - } - } - level = 0; - } - #endif - if (!module) { - #if PY_MAJOR_VERSION < 3 - PyObject *py_level = PyInt_FromLong(level); - if (!py_level) - goto bad; - module = PyObject_CallFunctionObjArgs(py_import, - name, global_dict, empty_dict, list, py_level, (PyObject *)NULL); - Py_DECREF(py_level); - #else - module = PyImport_ImportModuleLevelObject( - name, global_dict, empty_dict, list, level); - #endif - } - } -bad: - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(py_import); - #endif - Py_XDECREF(empty_list); - 
Py_XDECREF(empty_dict); - return module; -} - -/* CLineInTraceback */ - #ifndef CYTHON_CLINE_IN_TRACEBACK -static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) { - PyObject *use_cline; - PyObject *ptype, *pvalue, *ptraceback; -#if CYTHON_COMPILING_IN_CPYTHON - PyObject **cython_runtime_dict; -#endif - if (unlikely(!__pyx_cython_runtime)) { - return c_line; - } - __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); -#if CYTHON_COMPILING_IN_CPYTHON - cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); - if (likely(cython_runtime_dict)) { - __PYX_PY_DICT_LOOKUP_IF_MODIFIED( - use_cline, *cython_runtime_dict, - __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) - } else -#endif - { - PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); - if (use_cline_obj) { - use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True; - Py_DECREF(use_cline_obj); - } else { - PyErr_Clear(); - use_cline = NULL; - } - } - if (!use_cline) { - c_line = 0; - PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); - } - else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { - c_line = 0; - } - __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); - return c_line; -} -#endif - -/* CodeObjectCache */ - static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { - int start = 0, mid = 0, end = count - 1; - if (end >= 0 && code_line > entries[end].code_line) { - return count; - } - while (start < end) { - mid = start + (end - start) / 2; - if (code_line < entries[mid].code_line) { - end = mid; - } else if (code_line > entries[mid].code_line) { - start = mid + 1; - } else { - return mid; - } - } - if (code_line <= entries[mid].code_line) { - return mid; - } else { - return mid + 1; - } -} -static PyCodeObject *__pyx_find_code_object(int code_line) { - 
PyCodeObject* code_object; - int pos; - if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { - return NULL; - } - pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); - if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { - return NULL; - } - code_object = __pyx_code_cache.entries[pos].code_object; - Py_INCREF(code_object); - return code_object; -} -static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { - int pos, i; - __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; - if (unlikely(!code_line)) { - return; - } - if (unlikely(!entries)) { - entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); - if (likely(entries)) { - __pyx_code_cache.entries = entries; - __pyx_code_cache.max_count = 64; - __pyx_code_cache.count = 1; - entries[0].code_line = code_line; - entries[0].code_object = code_object; - Py_INCREF(code_object); - } - return; - } - pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); - if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { - PyCodeObject* tmp = entries[pos].code_object; - entries[pos].code_object = code_object; - Py_DECREF(tmp); - return; - } - if (__pyx_code_cache.count == __pyx_code_cache.max_count) { - int new_max = __pyx_code_cache.max_count + 64; - entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( - __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry)); - if (unlikely(!entries)) { - return; - } - __pyx_code_cache.entries = entries; - __pyx_code_cache.max_count = new_max; - } - for (i=__pyx_code_cache.count; i>pos; i--) { - entries[i] = entries[i-1]; - } - entries[pos].code_line = code_line; - entries[pos].code_object = code_object; - __pyx_code_cache.count++; - Py_INCREF(code_object); -} - -/* AddTraceback */ - #include 
"compile.h" -#include "frameobject.h" -#include "traceback.h" -static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( - const char *funcname, int c_line, - int py_line, const char *filename) { - PyCodeObject *py_code = 0; - PyObject *py_srcfile = 0; - PyObject *py_funcname = 0; - #if PY_MAJOR_VERSION < 3 - py_srcfile = PyString_FromString(filename); - #else - py_srcfile = PyUnicode_FromString(filename); - #endif - if (!py_srcfile) goto bad; - if (c_line) { - #if PY_MAJOR_VERSION < 3 - py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); - #else - py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); - #endif - } - else { - #if PY_MAJOR_VERSION < 3 - py_funcname = PyString_FromString(funcname); - #else - py_funcname = PyUnicode_FromString(funcname); - #endif - } - if (!py_funcname) goto bad; - py_code = __Pyx_PyCode_New( - 0, - 0, - 0, - 0, - 0, - __pyx_empty_bytes, /*PyObject *code,*/ - __pyx_empty_tuple, /*PyObject *consts,*/ - __pyx_empty_tuple, /*PyObject *names,*/ - __pyx_empty_tuple, /*PyObject *varnames,*/ - __pyx_empty_tuple, /*PyObject *freevars,*/ - __pyx_empty_tuple, /*PyObject *cellvars,*/ - py_srcfile, /*PyObject *filename,*/ - py_funcname, /*PyObject *name,*/ - py_line, - __pyx_empty_bytes /*PyObject *lnotab*/ - ); - Py_DECREF(py_srcfile); - Py_DECREF(py_funcname); - return py_code; -bad: - Py_XDECREF(py_srcfile); - Py_XDECREF(py_funcname); - return NULL; -} -static void __Pyx_AddTraceback(const char *funcname, int c_line, - int py_line, const char *filename) { - PyCodeObject *py_code = 0; - PyFrameObject *py_frame = 0; - PyThreadState *tstate = __Pyx_PyThreadState_Current; - if (c_line) { - c_line = __Pyx_CLineForTraceback(tstate, c_line); - } - py_code = __pyx_find_code_object(c_line ? -c_line : py_line); - if (!py_code) { - py_code = __Pyx_CreateCodeObjectForTraceback( - funcname, c_line, py_line, filename); - if (!py_code) goto bad; - __pyx_insert_code_object(c_line ? 
-c_line : py_line, py_code); - } - py_frame = PyFrame_New( - tstate, /*PyThreadState *tstate,*/ - py_code, /*PyCodeObject *code,*/ - __pyx_d, /*PyObject *globals,*/ - 0 /*PyObject *locals*/ - ); - if (!py_frame) goto bad; - __Pyx_PyFrame_SetLineNumber(py_frame, py_line); - PyTraceBack_Here(py_frame); -bad: - Py_XDECREF(py_code); - Py_XDECREF(py_frame); -} - -#if PY_MAJOR_VERSION < 3 -static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { - if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); - PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); - return -1; -} -static void __Pyx_ReleaseBuffer(Py_buffer *view) { - PyObject *obj = view->obj; - if (!obj) return; - if (PyObject_CheckBuffer(obj)) { - PyBuffer_Release(view); - return; - } - if ((0)) {} - view->obj = NULL; - Py_DECREF(obj); -} -#endif - - - /* CIntFromPyVerify */ - #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ - __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) -#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ - __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) -#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ - {\ - func_type value = func_value;\ - if (sizeof(target_type) < sizeof(func_type)) {\ - if (unlikely(value != (func_type) (target_type) value)) {\ - func_type zero = 0;\ - if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ - return (target_type) -1;\ - if (is_unsigned && unlikely(value < zero))\ - goto raise_neg_overflow;\ - else\ - goto raise_overflow;\ - }\ - }\ - return (target_type) value;\ - } - -/* Declarations */ - #if CYTHON_CCOMPLEX - #ifdef __cplusplus - static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { - return ::std::complex< float >(x, y); - } - #else - static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, 
float y) { - return x + y*(__pyx_t_float_complex)_Complex_I; - } - #endif -#else - static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { - __pyx_t_float_complex z; - z.real = x; - z.imag = y; - return z; - } -#endif - -/* Arithmetic */ - #if CYTHON_CCOMPLEX -#else - static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { - return (a.real == b.real) && (a.imag == b.imag); - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - z.real = a.real + b.real; - z.imag = a.imag + b.imag; - return z; - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - z.real = a.real - b.real; - z.imag = a.imag - b.imag; - return z; - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - z.real = a.real * b.real - a.imag * b.imag; - z.imag = a.real * b.imag + a.imag * b.real; - return z; - } - #if 1 - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { - if (b.imag == 0) { - return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real); - } else if (fabsf(b.real) >= fabsf(b.imag)) { - if (b.real == 0 && b.imag == 0) { - return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.imag); - } else { - float r = b.imag / b.real; - float s = (float)(1.0) / (b.real + b.imag * r); - return __pyx_t_float_complex_from_parts( - (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); - } - } else { - float r = b.real / b.imag; - float s = (float)(1.0) / (b.imag + b.real * r); - return __pyx_t_float_complex_from_parts( - (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); - } - } - #else - static CYTHON_INLINE __pyx_t_float_complex 
__Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { - if (b.imag == 0) { - return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real); - } else { - float denom = b.real * b.real + b.imag * b.imag; - return __pyx_t_float_complex_from_parts( - (a.real * b.real + a.imag * b.imag) / denom, - (a.imag * b.real - a.real * b.imag) / denom); - } - } - #endif - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex a) { - __pyx_t_float_complex z; - z.real = -a.real; - z.imag = -a.imag; - return z; - } - static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex a) { - return (a.real == 0) && (a.imag == 0); - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex a) { - __pyx_t_float_complex z; - z.real = a.real; - z.imag = -a.imag; - return z; - } - #if 1 - static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex z) { - #if !defined(HAVE_HYPOT) || defined(_MSC_VER) - return sqrtf(z.real*z.real + z.imag*z.imag); - #else - return hypotf(z.real, z.imag); - #endif - } - static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { - __pyx_t_float_complex z; - float r, lnr, theta, z_r, z_theta; - if (b.imag == 0 && b.real == (int)b.real) { - if (b.real < 0) { - float denom = a.real * a.real + a.imag * a.imag; - a.real = a.real / denom; - a.imag = -a.imag / denom; - b.real = -b.real; - } - switch ((int)b.real) { - case 0: - z.real = 1; - z.imag = 0; - return z; - case 1: - return a; - case 2: - return __Pyx_c_prod_float(a, a); - case 3: - z = __Pyx_c_prod_float(a, a); - return __Pyx_c_prod_float(z, a); - case 4: - z = __Pyx_c_prod_float(a, a); - return __Pyx_c_prod_float(z, z); - } - } - if (a.imag == 0) { - if (a.real == 0) { - return a; - } else if (b.imag == 0) { - z.real = powf(a.real, b.real); - z.imag = 0; - return z; - } else if (a.real > 0) { - r = a.real; - theta = 0; - } else { - r = -a.real; - theta = 
atan2f(0.0, -1.0); - } - } else { - r = __Pyx_c_abs_float(a); - theta = atan2f(a.imag, a.real); - } - lnr = logf(r); - z_r = expf(lnr * b.real - theta * b.imag); - z_theta = theta * b.real + lnr * b.imag; - z.real = z_r * cosf(z_theta); - z.imag = z_r * sinf(z_theta); - return z; - } - #endif -#endif - -/* Declarations */ - #if CYTHON_CCOMPLEX - #ifdef __cplusplus - static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { - return ::std::complex< double >(x, y); - } - #else - static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { - return x + y*(__pyx_t_double_complex)_Complex_I; - } - #endif -#else - static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { - __pyx_t_double_complex z; - z.real = x; - z.imag = y; - return z; - } -#endif - -/* Arithmetic */ - #if CYTHON_CCOMPLEX -#else - static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { - return (a.real == b.real) && (a.imag == b.imag); - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - z.real = a.real + b.real; - z.imag = a.imag + b.imag; - return z; - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - z.real = a.real - b.real; - z.imag = a.imag - b.imag; - return z; - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - z.real = a.real * b.real - a.imag * b.imag; - z.imag = a.real * b.imag + a.imag * b.real; - return z; - } - #if 1 - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { - if (b.imag == 0) { - return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / 
b.real); - } else if (fabs(b.real) >= fabs(b.imag)) { - if (b.real == 0 && b.imag == 0) { - return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.imag); - } else { - double r = b.imag / b.real; - double s = (double)(1.0) / (b.real + b.imag * r); - return __pyx_t_double_complex_from_parts( - (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); - } - } else { - double r = b.real / b.imag; - double s = (double)(1.0) / (b.imag + b.real * r); - return __pyx_t_double_complex_from_parts( - (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); - } - } - #else - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { - if (b.imag == 0) { - return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real); - } else { - double denom = b.real * b.real + b.imag * b.imag; - return __pyx_t_double_complex_from_parts( - (a.real * b.real + a.imag * b.imag) / denom, - (a.imag * b.real - a.real * b.imag) / denom); - } - } - #endif - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex a) { - __pyx_t_double_complex z; - z.real = -a.real; - z.imag = -a.imag; - return z; - } - static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex a) { - return (a.real == 0) && (a.imag == 0); - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex a) { - __pyx_t_double_complex z; - z.real = a.real; - z.imag = -a.imag; - return z; - } - #if 1 - static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex z) { - #if !defined(HAVE_HYPOT) || defined(_MSC_VER) - return sqrt(z.real*z.real + z.imag*z.imag); - #else - return hypot(z.real, z.imag); - #endif - } - static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { - __pyx_t_double_complex z; - double r, lnr, theta, z_r, z_theta; - if (b.imag == 0 && b.real == (int)b.real) { - if (b.real < 0) { - double denom = a.real * 
a.real + a.imag * a.imag; - a.real = a.real / denom; - a.imag = -a.imag / denom; - b.real = -b.real; - } - switch ((int)b.real) { - case 0: - z.real = 1; - z.imag = 0; - return z; - case 1: - return a; - case 2: - return __Pyx_c_prod_double(a, a); - case 3: - z = __Pyx_c_prod_double(a, a); - return __Pyx_c_prod_double(z, a); - case 4: - z = __Pyx_c_prod_double(a, a); - return __Pyx_c_prod_double(z, z); - } - } - if (a.imag == 0) { - if (a.real == 0) { - return a; - } else if (b.imag == 0) { - z.real = pow(a.real, b.real); - z.imag = 0; - return z; - } else if (a.real > 0) { - r = a.real; - theta = 0; - } else { - r = -a.real; - theta = atan2(0.0, -1.0); - } - } else { - r = __Pyx_c_abs_double(a); - theta = atan2(a.imag, a.real); - } - lnr = log(r); - z_r = exp(lnr * b.real - theta * b.imag); - z_theta = theta * b.real + lnr * b.imag; - z.real = z_r * cos(z_theta); - z.imag = z_r * sin(z_theta); - return z; - } - #endif -#endif - -/* CIntFromPy */ - static CYTHON_INLINE npy_int32 __Pyx_PyInt_As_npy_int32(PyObject *x) { -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wconversion" -#endif - const npy_int32 neg_one = (npy_int32) -1, const_zero = (npy_int32) 0; -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic pop -#endif - const int is_unsigned = neg_one > const_zero; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_Check(x))) { - if (sizeof(npy_int32) < sizeof(long)) { - __PYX_VERIFY_RETURN_INT(npy_int32, long, PyInt_AS_LONG(x)) - } else { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - goto raise_neg_overflow; - } - return (npy_int32) val; - } - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (npy_int32) 0; - case 1: __PYX_VERIFY_RETURN_INT(npy_int32, digit, digits[0]) - case 2: - if (8 * sizeof(npy_int32) > 1 * PyLong_SHIFT) { - if (8 * 
sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(npy_int32) >= 2 * PyLong_SHIFT) { - return (npy_int32) (((((npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0])); - } - } - break; - case 3: - if (8 * sizeof(npy_int32) > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(npy_int32) >= 3 * PyLong_SHIFT) { - return (npy_int32) (((((((npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0])); - } - } - break; - case 4: - if (8 * sizeof(npy_int32) > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(npy_int32) >= 4 * PyLong_SHIFT) { - return (npy_int32) (((((((((npy_int32)digits[3]) << PyLong_SHIFT) | (npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0])); - } - } - break; - } -#endif -#if CYTHON_COMPILING_IN_CPYTHON - if (unlikely(Py_SIZE(x) < 0)) { - goto raise_neg_overflow; - } -#else - { - int result = PyObject_RichCompareBool(x, Py_False, Py_LT); - if (unlikely(result < 0)) - return (npy_int32) -1; - if (unlikely(result == 1)) - goto raise_neg_overflow; - } -#endif - if (sizeof(npy_int32) <= sizeof(unsigned long)) { - __PYX_VERIFY_RETURN_INT_EXC(npy_int32, unsigned long, PyLong_AsUnsignedLong(x)) -#ifdef HAVE_LONG_LONG - } else if (sizeof(npy_int32) <= sizeof(unsigned PY_LONG_LONG)) { - __PYX_VERIFY_RETURN_INT_EXC(npy_int32, 
unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) -#endif - } - } else { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (npy_int32) 0; - case -1: __PYX_VERIFY_RETURN_INT(npy_int32, sdigit, (sdigit) (-(sdigit)digits[0])) - case 1: __PYX_VERIFY_RETURN_INT(npy_int32, digit, +digits[0]) - case -2: - if (8 * sizeof(npy_int32) - 1 > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(npy_int32, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(npy_int32) - 1 > 2 * PyLong_SHIFT) { - return (npy_int32) (((npy_int32)-1)*(((((npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0]))); - } - } - break; - case 2: - if (8 * sizeof(npy_int32) > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(npy_int32) - 1 > 2 * PyLong_SHIFT) { - return (npy_int32) ((((((npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0]))); - } - } - break; - case -3: - if (8 * sizeof(npy_int32) - 1 > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(npy_int32, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(npy_int32) - 1 > 3 * PyLong_SHIFT) { - return (npy_int32) (((npy_int32)-1)*(((((((npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0]))); - } - } - break; - case 3: - if (8 * sizeof(npy_int32) > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | 
(unsigned long)digits[0]))) - } else if (8 * sizeof(npy_int32) - 1 > 3 * PyLong_SHIFT) { - return (npy_int32) ((((((((npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0]))); - } - } - break; - case -4: - if (8 * sizeof(npy_int32) - 1 > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(npy_int32, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(npy_int32) - 1 > 4 * PyLong_SHIFT) { - return (npy_int32) (((npy_int32)-1)*(((((((((npy_int32)digits[3]) << PyLong_SHIFT) | (npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0]))); - } - } - break; - case 4: - if (8 * sizeof(npy_int32) > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(npy_int32) - 1 > 4 * PyLong_SHIFT) { - return (npy_int32) ((((((((((npy_int32)digits[3]) << PyLong_SHIFT) | (npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0]))); - } - } - break; - } -#endif - if (sizeof(npy_int32) <= sizeof(long)) { - __PYX_VERIFY_RETURN_INT_EXC(npy_int32, long, PyLong_AsLong(x)) -#ifdef HAVE_LONG_LONG - } else if (sizeof(npy_int32) <= sizeof(PY_LONG_LONG)) { - __PYX_VERIFY_RETURN_INT_EXC(npy_int32, PY_LONG_LONG, PyLong_AsLongLong(x)) -#endif - } - } - { -#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) - PyErr_SetString(PyExc_RuntimeError, - "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); -#else - npy_int32 val; - PyObject *v = __Pyx_PyNumber_IntOrLong(x); - 
#if PY_MAJOR_VERSION < 3 - if (likely(v) && !PyLong_Check(v)) { - PyObject *tmp = v; - v = PyNumber_Long(tmp); - Py_DECREF(tmp); - } - #endif - if (likely(v)) { - int one = 1; int is_little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&val; - int ret = _PyLong_AsByteArray((PyLongObject *)v, - bytes, sizeof(val), - is_little, !is_unsigned); - Py_DECREF(v); - if (likely(!ret)) - return val; - } -#endif - return (npy_int32) -1; - } - } else { - npy_int32 val; - PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); - if (!tmp) return (npy_int32) -1; - val = __Pyx_PyInt_As_npy_int32(tmp); - Py_DECREF(tmp); - return val; - } -raise_overflow: - PyErr_SetString(PyExc_OverflowError, - "value too large to convert to npy_int32"); - return (npy_int32) -1; -raise_neg_overflow: - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to npy_int32"); - return (npy_int32) -1; -} - -/* CIntToPy */ - static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wconversion" -#endif - const int neg_one = (int) -1, const_zero = (int) 0; -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic pop -#endif - const int is_unsigned = neg_one > const_zero; - if (is_unsigned) { - if (sizeof(int) < sizeof(long)) { - return PyInt_FromLong((long) value); - } else if (sizeof(int) <= sizeof(unsigned long)) { - return PyLong_FromUnsignedLong((unsigned long) value); -#ifdef HAVE_LONG_LONG - } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { - return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); -#endif - } - } else { - if (sizeof(int) <= sizeof(long)) { - return PyInt_FromLong((long) value); -#ifdef HAVE_LONG_LONG - } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { - return PyLong_FromLongLong((PY_LONG_LONG) value); -#endif - } - } - { - int one = 1; int little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&value; - 
return _PyLong_FromByteArray(bytes, sizeof(int), - little, !is_unsigned); - } -} - -/* CIntToPy */ - static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wconversion" -#endif - const long neg_one = (long) -1, const_zero = (long) 0; -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic pop -#endif - const int is_unsigned = neg_one > const_zero; - if (is_unsigned) { - if (sizeof(long) < sizeof(long)) { - return PyInt_FromLong((long) value); - } else if (sizeof(long) <= sizeof(unsigned long)) { - return PyLong_FromUnsignedLong((unsigned long) value); -#ifdef HAVE_LONG_LONG - } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { - return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); -#endif - } - } else { - if (sizeof(long) <= sizeof(long)) { - return PyInt_FromLong((long) value); -#ifdef HAVE_LONG_LONG - } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { - return PyLong_FromLongLong((PY_LONG_LONG) value); -#endif - } - } - { - int one = 1; int little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&value; - return _PyLong_FromByteArray(bytes, sizeof(long), - little, !is_unsigned); - } -} - -/* CIntFromPy */ - static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wconversion" -#endif - const long neg_one = (long) -1, const_zero = (long) 0; -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic pop -#endif - const int is_unsigned = neg_one > const_zero; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_Check(x))) { - if (sizeof(long) < sizeof(long)) { - __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) - } else { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - goto raise_neg_overflow; - } - return (long) val; - } - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { 
-#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (long) 0; - case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) - case 2: - if (8 * sizeof(long) > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { - return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); - } - } - break; - case 3: - if (8 * sizeof(long) > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { - return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); - } - } - break; - case 4: - if (8 * sizeof(long) > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { - return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); - } - } - break; - } -#endif -#if CYTHON_COMPILING_IN_CPYTHON - if (unlikely(Py_SIZE(x) < 0)) { - goto raise_neg_overflow; - } -#else - { - int result = PyObject_RichCompareBool(x, Py_False, Py_LT); - if (unlikely(result < 0)) - return (long) -1; - if (unlikely(result == 1)) - goto raise_neg_overflow; - } -#endif - if (sizeof(long) <= sizeof(unsigned long)) { - __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, 
PyLong_AsUnsignedLong(x)) -#ifdef HAVE_LONG_LONG - } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { - __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) -#endif - } - } else { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (long) 0; - case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) - case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) - case -2: - if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case 2: - if (8 * sizeof(long) > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case -3: - if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case 3: - if (8 * sizeof(long) > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << 
PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case -4: - if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { - return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case 4: - if (8 * sizeof(long) > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { - return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - } -#endif - if (sizeof(long) <= sizeof(long)) { - __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) -#ifdef HAVE_LONG_LONG - } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { - __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) -#endif - } - } - { -#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) - PyErr_SetString(PyExc_RuntimeError, - "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); -#else - long val; - PyObject *v = __Pyx_PyNumber_IntOrLong(x); - #if PY_MAJOR_VERSION < 3 - if (likely(v) && !PyLong_Check(v)) { - PyObject *tmp = v; - v = PyNumber_Long(tmp); - 
Py_DECREF(tmp); - } - #endif - if (likely(v)) { - int one = 1; int is_little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&val; - int ret = _PyLong_AsByteArray((PyLongObject *)v, - bytes, sizeof(val), - is_little, !is_unsigned); - Py_DECREF(v); - if (likely(!ret)) - return val; - } -#endif - return (long) -1; - } - } else { - long val; - PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); - if (!tmp) return (long) -1; - val = __Pyx_PyInt_As_long(tmp); - Py_DECREF(tmp); - return val; - } -raise_overflow: - PyErr_SetString(PyExc_OverflowError, - "value too large to convert to long"); - return (long) -1; -raise_neg_overflow: - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to long"); - return (long) -1; -} - -/* CIntFromPy */ - static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wconversion" -#endif - const int neg_one = (int) -1, const_zero = (int) 0; -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic pop -#endif - const int is_unsigned = neg_one > const_zero; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_Check(x))) { - if (sizeof(int) < sizeof(long)) { - __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) - } else { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - goto raise_neg_overflow; - } - return (int) val; - } - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (int) 0; - case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) - case 2: - if (8 * sizeof(int) > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { - return (int) (((((int)digits[1]) << 
PyLong_SHIFT) | (int)digits[0])); - } - } - break; - case 3: - if (8 * sizeof(int) > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { - return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); - } - } - break; - case 4: - if (8 * sizeof(int) > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { - return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); - } - } - break; - } -#endif -#if CYTHON_COMPILING_IN_CPYTHON - if (unlikely(Py_SIZE(x) < 0)) { - goto raise_neg_overflow; - } -#else - { - int result = PyObject_RichCompareBool(x, Py_False, Py_LT); - if (unlikely(result < 0)) - return (int) -1; - if (unlikely(result == 1)) - goto raise_neg_overflow; - } -#endif - if (sizeof(int) <= sizeof(unsigned long)) { - __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) -#ifdef HAVE_LONG_LONG - } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { - __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) -#endif - } - } else { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (int) 0; - case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) - case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) - case -2: - if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { - if 
(8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { - return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case 2: - if (8 * sizeof(int) > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { - return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case -3: - if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { - return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case 3: - if (8 * sizeof(int) > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { - return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case -4: - if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) - 
1 > 4 * PyLong_SHIFT) { - return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case 4: - if (8 * sizeof(int) > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { - return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - } -#endif - if (sizeof(int) <= sizeof(long)) { - __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) -#ifdef HAVE_LONG_LONG - } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { - __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) -#endif - } - } - { -#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) - PyErr_SetString(PyExc_RuntimeError, - "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); -#else - int val; - PyObject *v = __Pyx_PyNumber_IntOrLong(x); - #if PY_MAJOR_VERSION < 3 - if (likely(v) && !PyLong_Check(v)) { - PyObject *tmp = v; - v = PyNumber_Long(tmp); - Py_DECREF(tmp); - } - #endif - if (likely(v)) { - int one = 1; int is_little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&val; - int ret = _PyLong_AsByteArray((PyLongObject *)v, - bytes, sizeof(val), - is_little, !is_unsigned); - Py_DECREF(v); - if (likely(!ret)) - return val; - } -#endif - return (int) -1; - } - } else { - int val; - PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); - if (!tmp) return (int) -1; - val = __Pyx_PyInt_As_int(tmp); - Py_DECREF(tmp); - return val; - } -raise_overflow: - PyErr_SetString(PyExc_OverflowError, - "value too large to convert to int"); - return (int) 
-1; -raise_neg_overflow: - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to int"); - return (int) -1; -} - -/* FastTypeChecks */ - #if CYTHON_COMPILING_IN_CPYTHON -static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { - while (a) { - a = a->tp_base; - if (a == b) - return 1; - } - return b == &PyBaseObject_Type; -} -static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { - PyObject *mro; - if (a == b) return 1; - mro = a->tp_mro; - if (likely(mro)) { - Py_ssize_t i, n; - n = PyTuple_GET_SIZE(mro); - for (i = 0; i < n; i++) { - if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) - return 1; - } - return 0; - } - return __Pyx_InBases(a, b); -} -#if PY_MAJOR_VERSION == 2 -static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { - PyObject *exception, *value, *tb; - int res; - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __Pyx_ErrFetch(&exception, &value, &tb); - res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; - if (unlikely(res == -1)) { - PyErr_WriteUnraisable(err); - res = 0; - } - if (!res) { - res = PyObject_IsSubclass(err, exc_type2); - if (unlikely(res == -1)) { - PyErr_WriteUnraisable(err); - res = 0; - } - } - __Pyx_ErrRestore(exception, value, tb); - return res; -} -#else -static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { - int res = exc_type1 ? 
__Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; - if (!res) { - res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); - } - return res; -} -#endif -static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { - Py_ssize_t i, n; - assert(PyExceptionClass_Check(exc_type)); - n = PyTuple_GET_SIZE(tuple); -#if PY_MAJOR_VERSION >= 3 - for (i=0; ip) { - #if PY_MAJOR_VERSION < 3 - if (t->is_unicode) { - *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); - } else if (t->intern) { - *t->p = PyString_InternFromString(t->s); - } else { - *t->p = PyString_FromStringAndSize(t->s, t->n - 1); - } - #else - if (t->is_unicode | t->is_str) { - if (t->intern) { - *t->p = PyUnicode_InternFromString(t->s); - } else if (t->encoding) { - *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); - } else { - *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); - } - } else { - *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); - } - #endif - if (!*t->p) - return -1; - if (PyObject_Hash(*t->p) == -1) - return -1; - ++t; - } - return 0; -} - -static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { - return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); -} -static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { - Py_ssize_t ignore; - return __Pyx_PyObject_AsStringAndSize(o, &ignore); -} -#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT -#if !CYTHON_PEP393_ENABLED -static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { - char* defenc_c; - PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); - if (!defenc) return NULL; - defenc_c = PyBytes_AS_STRING(defenc); -#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII - { - char* end = defenc_c + PyBytes_GET_SIZE(defenc); - char* c; - for (c = defenc_c; c < end; c++) { - if ((unsigned char) (*c) >= 128) { - PyUnicode_AsASCIIString(o); - return NULL; - } - } 
- } -#endif - *length = PyBytes_GET_SIZE(defenc); - return defenc_c; -} -#else -static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { - if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; -#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII - if (likely(PyUnicode_IS_ASCII(o))) { - *length = PyUnicode_GET_LENGTH(o); - return PyUnicode_AsUTF8(o); - } else { - PyUnicode_AsASCIIString(o); - return NULL; - } -#else - return PyUnicode_AsUTF8AndSize(o, length); -#endif -} -#endif -#endif -static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { -#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT - if ( -#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII - __Pyx_sys_getdefaultencoding_not_ascii && -#endif - PyUnicode_Check(o)) { - return __Pyx_PyUnicode_AsStringAndSize(o, length); - } else -#endif -#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) - if (PyByteArray_Check(o)) { - *length = PyByteArray_GET_SIZE(o); - return PyByteArray_AS_STRING(o); - } else -#endif - { - char* result; - int r = PyBytes_AsStringAndSize(o, &result, length); - if (unlikely(r < 0)) { - return NULL; - } else { - return result; - } - } -} -static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { - int is_true = x == Py_True; - if (is_true | (x == Py_False) | (x == Py_None)) return is_true; - else return PyObject_IsTrue(x); -} -static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { - int retval; - if (unlikely(!x)) return -1; - retval = __Pyx_PyObject_IsTrue(x); - Py_DECREF(x); - return retval; -} -static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { -#if PY_MAJOR_VERSION >= 3 - if (PyLong_Check(result)) { - if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, - "__int__ returned non-int (type %.200s). 
" - "The ability to return an instance of a strict subclass of int " - "is deprecated, and may be removed in a future version of Python.", - Py_TYPE(result)->tp_name)) { - Py_DECREF(result); - return NULL; - } - return result; - } -#endif - PyErr_Format(PyExc_TypeError, - "__%.4s__ returned non-%.4s (type %.200s)", - type_name, type_name, Py_TYPE(result)->tp_name); - Py_DECREF(result); - return NULL; -} -static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { -#if CYTHON_USE_TYPE_SLOTS - PyNumberMethods *m; -#endif - const char *name = NULL; - PyObject *res = NULL; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_Check(x) || PyLong_Check(x))) -#else - if (likely(PyLong_Check(x))) -#endif - return __Pyx_NewRef(x); -#if CYTHON_USE_TYPE_SLOTS - m = Py_TYPE(x)->tp_as_number; - #if PY_MAJOR_VERSION < 3 - if (m && m->nb_int) { - name = "int"; - res = m->nb_int(x); - } - else if (m && m->nb_long) { - name = "long"; - res = m->nb_long(x); - } - #else - if (likely(m && m->nb_int)) { - name = "int"; - res = m->nb_int(x); - } - #endif -#else - if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { - res = PyNumber_Int(x); - } -#endif - if (likely(res)) { -#if PY_MAJOR_VERSION < 3 - if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { -#else - if (unlikely(!PyLong_CheckExact(res))) { -#endif - return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); - } - } - else if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_TypeError, - "an integer is required"); - } - return res; -} -static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { - Py_ssize_t ival; - PyObject *x; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_CheckExact(b))) { - if (sizeof(Py_ssize_t) >= sizeof(long)) - return PyInt_AS_LONG(b); - else - return PyInt_AsSsize_t(b); - } -#endif - if (likely(PyLong_CheckExact(b))) { - #if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)b)->ob_digit; - const Py_ssize_t size = Py_SIZE(b); - if (likely(__Pyx_sst_abs(size) <= 1)) { - 
ival = likely(size) ? digits[0] : 0; - if (size == -1) ival = -ival; - return ival; - } else { - switch (size) { - case 2: - if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { - return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case -2: - if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { - return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case 3: - if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { - return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case -3: - if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { - return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case 4: - if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { - return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case -4: - if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { - return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - } - } - #endif - return PyLong_AsSsize_t(b); - } - x = PyNumber_Index(b); - if (!x) return -1; - ival = PyInt_AsSsize_t(x); - Py_DECREF(x); - return ival; -} -static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { - return b ? 
__Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); -} -static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { - return PyInt_FromSize_t(ival); -} - - -#endif /* Py_PYTHON_H */ diff --git a/face_recognition1/face_detect/utils/nms/gpu_nms.cpython-36m-x86_64-linux-gnu.so b/face_recognition1/face_detect/utils/nms/gpu_nms.cpython-36m-x86_64-linux-gnu.so deleted file mode 100644 index 59d01c79dd454d9d57a3d439a25c0ca585624f1a..0000000000000000000000000000000000000000 Binary files a/face_recognition1/face_detect/utils/nms/gpu_nms.cpython-36m-x86_64-linux-gnu.so and /dev/null differ diff --git a/face_recognition1/face_detect/utils/nms/gpu_nms.hpp b/face_recognition1/face_detect/utils/nms/gpu_nms.hpp deleted file mode 100644 index 68b6d42cd88b59496b22a9e77919abe529b09014..0000000000000000000000000000000000000000 --- a/face_recognition1/face_detect/utils/nms/gpu_nms.hpp +++ /dev/null @@ -1,2 +0,0 @@ -void _nms(int* keep_out, int* num_out, const float* boxes_host, int boxes_num, - int boxes_dim, float nms_overlap_thresh, int device_id); diff --git a/face_recognition1/face_detect/utils/nms/gpu_nms.pyx b/face_recognition1/face_detect/utils/nms/gpu_nms.pyx deleted file mode 100644 index cb963e83cd37e7ba991e5675e73e6b1533ed8570..0000000000000000000000000000000000000000 --- a/face_recognition1/face_detect/utils/nms/gpu_nms.pyx +++ /dev/null @@ -1,24 +0,0 @@ -import numpy as np -cimport numpy as np - -assert sizeof(int) == sizeof(np.int32_t) - -cdef extern from "gpu_nms.hpp": - void _nms(np.int32_t*, int*, np.float32_t*, int, int, float, int) - -def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh, - np.int32_t device_id=0): - cdef int boxes_num = dets.shape[0] - cdef int boxes_dim = dets.shape[1] - cdef int num_out - cdef np.ndarray[np.int32_t, ndim=1] \ - keep = np.zeros(boxes_num, dtype=np.int32) - cdef np.ndarray[np.float32_t, ndim=1] \ - scores = dets[:, 4] - cdef np.ndarray[np.int_t, ndim=1] \ - order = scores.argsort()[::-1] - cdef 
np.ndarray[np.float32_t, ndim=2] \ - sorted_dets = dets[order, :] - _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id) - keep = keep[:num_out] - return list(order[keep]) diff --git a/face_recognition1/face_detect/utils/nms/nms_kernel.cu b/face_recognition1/face_detect/utils/nms/nms_kernel.cu deleted file mode 100644 index 038a59012f60ebdf1182ecb778eb3b01a69bc5ed..0000000000000000000000000000000000000000 --- a/face_recognition1/face_detect/utils/nms/nms_kernel.cu +++ /dev/null @@ -1,144 +0,0 @@ -// ------------------------------------------------------------------ -// Faster R-CNN -// Copyright (c) 2015 Microsoft -// Licensed under The MIT License [see fast-rcnn/LICENSE for details] -// Written by Shaoqing Ren -// ------------------------------------------------------------------ - -#include "gpu_nms.hpp" -#include -#include - -#define CUDA_CHECK(condition) \ - /* Code block avoids redefinition of cudaError_t error */ \ - do { \ - cudaError_t error = condition; \ - if (error != cudaSuccess) { \ - std::cout << cudaGetErrorString(error) << std::endl; \ - } \ - } while (0) - -#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) -int const threadsPerBlock = sizeof(unsigned long long) * 8; - -__device__ inline float devIoU(float const * const a, float const * const b) { - float left = max(a[0], b[0]), right = min(a[2], b[2]); - float top = max(a[1], b[1]), bottom = min(a[3], b[3]); - float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f); - float interS = width * height; - float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1); - float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1); - return interS / (Sa + Sb - interS); -} - -__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh, - const float *dev_boxes, unsigned long long *dev_mask) { - const int row_start = blockIdx.y; - const int col_start = blockIdx.x; - - // if (row_start > col_start) return; - - const int row_size = - min(n_boxes - row_start * 
threadsPerBlock, threadsPerBlock); - const int col_size = - min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); - - __shared__ float block_boxes[threadsPerBlock * 5]; - if (threadIdx.x < col_size) { - block_boxes[threadIdx.x * 5 + 0] = - dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0]; - block_boxes[threadIdx.x * 5 + 1] = - dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1]; - block_boxes[threadIdx.x * 5 + 2] = - dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2]; - block_boxes[threadIdx.x * 5 + 3] = - dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3]; - block_boxes[threadIdx.x * 5 + 4] = - dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4]; - } - __syncthreads(); - - if (threadIdx.x < row_size) { - const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; - const float *cur_box = dev_boxes + cur_box_idx * 5; - int i = 0; - unsigned long long t = 0; - int start = 0; - if (row_start == col_start) { - start = threadIdx.x + 1; - } - for (i = start; i < col_size; i++) { - if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) { - t |= 1ULL << i; - } - } - const int col_blocks = DIVUP(n_boxes, threadsPerBlock); - dev_mask[cur_box_idx * col_blocks + col_start] = t; - } -} - -void _set_device(int device_id) { - int current_device; - CUDA_CHECK(cudaGetDevice(¤t_device)); - if (current_device == device_id) { - return; - } - // The call to cudaSetDevice must come before any calls to Get, which - // may perform initialization using the GPU. 
- CUDA_CHECK(cudaSetDevice(device_id)); -} - -void _nms(int* keep_out, int* num_out, const float* boxes_host, int boxes_num, - int boxes_dim, float nms_overlap_thresh, int device_id) { - _set_device(device_id); - - float* boxes_dev = NULL; - unsigned long long* mask_dev = NULL; - - const int col_blocks = DIVUP(boxes_num, threadsPerBlock); - - CUDA_CHECK(cudaMalloc(&boxes_dev, - boxes_num * boxes_dim * sizeof(float))); - CUDA_CHECK(cudaMemcpy(boxes_dev, - boxes_host, - boxes_num * boxes_dim * sizeof(float), - cudaMemcpyHostToDevice)); - - CUDA_CHECK(cudaMalloc(&mask_dev, - boxes_num * col_blocks * sizeof(unsigned long long))); - - dim3 blocks(DIVUP(boxes_num, threadsPerBlock), - DIVUP(boxes_num, threadsPerBlock)); - dim3 threads(threadsPerBlock); - nms_kernel<<>>(boxes_num, - nms_overlap_thresh, - boxes_dev, - mask_dev); - - std::vector mask_host(boxes_num * col_blocks); - CUDA_CHECK(cudaMemcpy(&mask_host[0], - mask_dev, - sizeof(unsigned long long) * boxes_num * col_blocks, - cudaMemcpyDeviceToHost)); - - std::vector remv(col_blocks); - memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); - - int num_to_keep = 0; - for (int i = 0; i < boxes_num; i++) { - int nblock = i / threadsPerBlock; - int inblock = i % threadsPerBlock; - - if (!(remv[nblock] & (1ULL << inblock))) { - keep_out[num_to_keep++] = i; - unsigned long long *p = &mask_host[0] + i * col_blocks; - for (int j = nblock; j < col_blocks; j++) { - remv[j] |= p[j]; - } - } - } - *num_out = num_to_keep; - - CUDA_CHECK(cudaFree(boxes_dev)); - CUDA_CHECK(cudaFree(mask_dev)); -} diff --git a/face_recognition1/face_detect/utils/nms/py_cpu_nms.py b/face_recognition1/face_detect/utils/nms/py_cpu_nms.py deleted file mode 100644 index d8d6ab6dda05e0b667839498aeeece844f636e2d..0000000000000000000000000000000000000000 --- a/face_recognition1/face_detect/utils/nms/py_cpu_nms.py +++ /dev/null @@ -1,32 +0,0 @@ -import numpy as np - - -def py_cpu_nms(dets, thresh): - """Pure Python NMS baseline.""" - x1 = dets[:, 
0] - y1 = dets[:, 1] - x2 = dets[:, 2] - y2 = dets[:, 3] - scores = dets[:, 4] - - areas = (x2 - x1 + 1) * (y2 - y1 + 1) - order = scores.argsort()[::-1] - - keep = [] - while order.size > 0: - i = order[0] - keep.append(i) - xx1 = np.maximum(x1[i], x1[order[1:]]) - yy1 = np.maximum(y1[i], y1[order[1:]]) - xx2 = np.minimum(x2[i], x2[order[1:]]) - yy2 = np.minimum(y2[i], y2[order[1:]]) - - w = np.maximum(0.0, xx2 - xx1 + 1) - h = np.maximum(0.0, yy2 - yy1 + 1) - inter = w * h - ovr = inter / (areas[i] + areas[order[1:]] - inter) - - inds = np.where(ovr <= thresh)[0] - order = order[inds + 1] - - return keep diff --git a/face_recognition1/face_detect/utils/nms_wrapper.py b/face_recognition1/face_detect/utils/nms_wrapper.py deleted file mode 100644 index a181cdb021da4df59ca2c4863797e6179d04fdbf..0000000000000000000000000000000000000000 --- a/face_recognition1/face_detect/utils/nms_wrapper.py +++ /dev/null @@ -1,29 +0,0 @@ -import os -import sys -sys.path.append(os.path.dirname(__file__)) - -# from nms.cpu_nms import cpu_nms, cpu_soft_nms -# from utils.nms.gpu_nms import gpu_nms -from nms.py_cpu_nms import py_cpu_nms - - -# def nms(dets, thresh, force_cpu=False): -# """Dispatch to either CPU or GPU NMS implementations.""" -# -# if dets.shape[0] == 0: -# return [] -# if cfg.USE_GPU_NMS and not force_cpu: -# return gpu_nms(dets, thresh, device_id=cfg.GPU_ID) -# else: -# return cpu_nms(dets, thresh) - - -def nms(dets, thresh, force_cpu=False): - """Dispatch to either CPU or GPU NMS implementations.""" - - if dets.shape[0] == 0: - return [] - # if force_cpu: - #return cpu_soft_nms(dets, thresh, method = 0) - return py_cpu_nms(dets, thresh) - # return gpu_nms(dets, thresh) diff --git a/face_recognition1/face_detect/utils/timer.py b/face_recognition1/face_detect/utils/timer.py deleted file mode 100644 index 581fbc536790c2cbfa137fe5772a56448c8ebbea..0000000000000000000000000000000000000000 --- a/face_recognition1/face_detect/utils/timer.py +++ /dev/null @@ -1,33 +0,0 @@ 
-import time - - -class Timer(object): - """A simple timer.""" - def __init__(self): - self.total_time = 0. - self.calls = 0 - self.start_time = 0. - self.diff = 0. - self.average_time = 0. - - def tic(self): - # using time.time instead of time.clock because time time.clock - # does not normalize for multithreading - self.start_time = time.time() - - def toc(self, average=True): - self.diff = time.time() - self.start_time - self.total_time += self.diff - self.calls += 1 - self.average_time = self.total_time / self.calls - if average: - return self.average_time - else: - return self.diff - - def clear(self): - self.total_time = 0. - self.calls = 0 - self.start_time = 0. - self.diff = 0. - self.average_time = 0. diff --git a/face_recognition1/face_feature/__init__.py b/face_recognition1/face_feature/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/face_recognition1/face_feature/checkpoints/feat_net.ckpt b/face_recognition1/face_feature/checkpoints/feat_net.ckpt deleted file mode 100644 index 24522124312a0b79a6fc71a2bde2d9fe56b7cd03..0000000000000000000000000000000000000000 --- a/face_recognition1/face_feature/checkpoints/feat_net.ckpt +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:cbe719b05e516bfc7a3692f4edc448b4eac368e536676a2278cb4facafb8b3d0 -size 299884972 diff --git a/face_recognition1/face_feature/dataloader/__init__.py b/face_recognition1/face_feature/dataloader/__init__.py deleted file mode 100644 index 7b53b7d8e6981d9e7e9dbb03cab143237cf9234b..0000000000000000000000000000000000000000 --- a/face_recognition1/face_feature/dataloader/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -#!/usr/bin/env python -# encoding: utf-8 -''' -@author: MingDong -@file: __init__.py.py -@desc: -''' \ No newline at end of file diff --git a/face_recognition1/face_feature/dataloader/agedb.py b/face_recognition1/face_feature/dataloader/agedb.py deleted file mode 
100644 index d934e02ba05d1700d93eea55b686a1fc8baf0d4d..0000000000000000000000000000000000000000 --- a/face_recognition1/face_feature/dataloader/agedb.py +++ /dev/null @@ -1,88 +0,0 @@ -#!/usr/bin/env python -# encoding: utf-8 -''' -@author: MingDong -@file: agedb.py.py -@desc: AgeDB-30 test data loader, agedb test protocol is the same with lfw -''' - -import os -import numpy as np -import cv2 -import torch.utils.data as data - -import torch -import torchvision.transforms as transforms - - -def img_loader(path): - try: - img = cv2.imread(path) - if len(img.shape) == 2: - img = np.stack([img] * 3, 2) - return img - except IOError: - print('Cannot load image ' + path) - return None - - -class AgeDB30(data.Dataset): - def __init__(self, root, file_list, transform=None, loader=img_loader): - super().__init__() - self.root = root - self.file_list = file_list - self.transform = transform - self.loader = loader - self.nameLs = [] - self.nameRs = [] - self.folds = [] - self.flags = [] - - with open(file_list) as f: - pairs = f.read().splitlines() - for i, p in enumerate(pairs): - p = p.split(' ') - nameL = p[0] - nameR = p[1] - fold = i // 600 - flag = int(p[2]) - - self.nameLs.append(nameL) - self.nameRs.append(nameR) - self.folds.append(fold) - self.flags.append(flag) - - def __getitem__(self, index): - - img_l = self.loader(os.path.join(self.root, self.nameLs[index])) - img_r = self.loader(os.path.join(self.root, self.nameRs[index])) - imglist = [img_l, cv2.flip(img_l, 1), img_r, cv2.flip(img_r, 1)] - - if self.transform is not None: - for _, i in enumerate(imglist): - imglist[i] = self.transform(imglist[i]) - - imgs = imglist - return imgs - else: - imgs = [torch.from_numpy(i) for i in imglist] - return imgs - - def __len__(self): - return len(self.nameLs) - - -if __name__ == '__main__': - root = '/media/sda/AgeDB-30/agedb30_align_112' - file_list = '/media/sda/AgeDB-30/agedb_30_pair.txt' - - transform = transforms.Compose([ - transforms.ToTensor(), # range [0, 255] -> 
[0.0,1.0] - transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) # range [0.0, 1.0] -> [-1.0,1.0] - ]) - - dataset = AgeDB30(root, file_list, transform=transform) - trainloader = data.DataLoader(dataset, batch_size=64, shuffle=False, num_workers=2, drop_last=False) - for data in trainloader: - for d in data: - print(d[0].shape) diff --git a/face_recognition1/face_feature/dataloader/casia_webface.py b/face_recognition1/face_feature/dataloader/casia_webface.py deleted file mode 100644 index b45b528f28423143a080cc43f6ac4ac2b6d5f454..0000000000000000000000000000000000000000 --- a/face_recognition1/face_feature/dataloader/casia_webface.py +++ /dev/null @@ -1,124 +0,0 @@ -#!/usr/bin/env python -# encoding: utf-8 -''' -@author: MingDong -@file: casia_webface.py -@desc: CASIA-WebFace dataloader loader -''' - -import os -import torch -import torch.utils.data as data -import torchvision.transforms as transforms -import numpy as np -import cv2 - - -def img_loader(path): - try: - img = cv2.imread(path) - if len(img.shape) == 2: - img = np.stack([img] * 3, 2) - return img - except IOError: - print('Cannot load image ' + path) - return None - - -class CASIAWebFace(data.Dataset): - def __init__(self, root, file_list, transform=None, loader=img_loader): - super().__init__() - self.root = root - self.transform = transform - self.loader = loader - - image_list = [] - label_list = [] - with open(file_list) as f: - img_label_list = f.read().splitlines() - for info in img_label_list: - image_path, label_name = info.split(' ') - image_list.append(image_path) - label_list.append(int(label_name)) - - self.image_list = image_list - self.label_list = label_list - self.class_nums = len(np.unique(self.label_list)) - print("dataloader size: ", len(self.image_list), '/', self.class_nums) - - def __getitem__(self, index): - img_path = self.image_list[index] - label = self.label_list[index] - - img = self.loader(os.path.join(self.root, img_path)) - - # random flip with ratio of 0.5 - 
flip = np.random.choice(2) * 2 - 1 - if flip == 1: - img = cv2.flip(img, 1) - - if self.transform is not None: - img = self.transform(img) - else: - img = torch.from_numpy(img) - - return img, label - - def __len__(self): - return len(self.image_list) - - -class CASIAWebFaceDataset: - def __init__(self, root, file_list, loader=img_loader): - super().__init__() - self.root = root - self.loader = loader - - image_list = [] - label_list = [] - with open(file_list) as f: - img_label_list = f.read().splitlines() - for i in range(0, len(img_label_list), 10): - info = img_label_list[i] - image_path, label_name = info.split(' ') - image_list.append(image_path) - label_list.append(int(label_name)) - - self.image_list = image_list - self.label_list = label_list - self.class_nums = len(np.unique(self.label_list)) - print("dataloader size: ", len(self.image_list), '/', self.class_nums) - - def __getitem__(self, index): - img_path = self.image_list[index] - label = self.label_list[index] - - img = self.loader(os.path.join(self.root, img_path)) - - # random flip with ratio of 0.5 - flip = np.random.choice(2) * 2 - 1 - if flip == 1: - img = cv2.flip(img, 1) - - img = img.astype(np.float32).transpose((2, 1, 0)) - img = (img - 127.5) / 127.5 - - return img, label - - def __len__(self): - return len(self.image_list) - - -if __name__ == '__main__': - root = 'D:/data/webface_align_112' - file_list = 'D:/data/webface_align_train.list' - - transform = transforms.Compose([ - transforms.ToTensor(), # range [0, 255] -> [0.0,1.0] - transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) # range [0.0, 1.0] -> [-1.0,1.0] - ]) - dataset = CASIAWebFace(root, file_list, transform=transform) - trainloader = data.DataLoader(dataset, batch_size=64, shuffle=True, num_workers=2, drop_last=False) - print(len(dataset)) - for data in trainloader: - print(data[0].shape) diff --git a/face_recognition1/face_feature/dataloader/cfp.py b/face_recognition1/face_feature/dataloader/cfp.py deleted file 
mode 100644 index 75a7931fa65ec122fcbb857c4b5d3d934e5b9272..0000000000000000000000000000000000000000 --- a/face_recognition1/face_feature/dataloader/cfp.py +++ /dev/null @@ -1,88 +0,0 @@ -#!/usr/bin/env python -# encoding: utf-8 -''' -@author: MingDong -@file: cfp.py -@desc: the CFP-FP test dataloader loader, it's similar with lfw and adedb, except that it has 700 pairs every fold -''' - - -import os -import numpy as np -import cv2 -import torch.utils.data as data -import torch -import torchvision.transforms as transforms - - -def img_loader(path): - try: - img = cv2.imread(path) - if len(img.shape) == 2: - img = np.stack([img] * 3, 2) - return img - except IOError: - print('Cannot load image ' + path) - return None - - -class CFP_FP(data.Dataset): - def __init__(self, root, file_list, transform=None, loader=img_loader): - super().__init__() - self.root = root - self.file_list = file_list - self.transform = transform - self.loader = loader - self.nameLs = [] - self.nameRs = [] - self.folds = [] - self.flags = [] - - with open(file_list) as f: - pairs = f.read().splitlines() - for i, p in enumerate(pairs): - p = p.split(' ') - nameL = p[0] - nameR = p[1] - fold = i // 700 - flag = int(p[2]) - - self.nameLs.append(nameL) - self.nameRs.append(nameR) - self.folds.append(fold) - self.flags.append(flag) - - def __getitem__(self, index): - - img_l = self.loader(os.path.join(self.root, self.nameLs[index])) - img_r = self.loader(os.path.join(self.root, self.nameRs[index])) - imglist = [img_l, cv2.flip(img_l, 1), img_r, cv2.flip(img_r, 1)] - - if self.transform is not None: - for _, i in enumerate(imglist): - imglist[i] = self.transform(imglist[i]) - - imgs = imglist - return imgs - else: - imgs = [torch.from_numpy(i) for i in imglist] - return imgs - - def __len__(self): - return len(self.nameLs) - - -if __name__ == '__main__': - root = '/media/sda/CFP-FP/CFP_FP_aligned_112' - file_list = '/media/sda/CFP-FP/cfp-fp-pair.txt' - - transform = transforms.Compose([ - 
transforms.ToTensor(), # range [0, 255] -> [0.0,1.0] - transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) # range [0.0, 1.0] -> [-1.0,1.0] - ]) - - dataset = CFP_FP(root, file_list, transform=transform) - trainloader = data.DataLoader(dataset, batch_size=64, shuffle=False, num_workers=2, drop_last=False) - for data in trainloader: - for d in data: - print(d[0].shape) diff --git a/face_recognition1/face_feature/dataloader/lfw.py b/face_recognition1/face_feature/dataloader/lfw.py deleted file mode 100644 index d0a984c76e5cc1e9d9799cb810737a4e2e760c45..0000000000000000000000000000000000000000 --- a/face_recognition1/face_feature/dataloader/lfw.py +++ /dev/null @@ -1,141 +0,0 @@ -#!/usr/bin/env python -# encoding: utf-8 -''' -@author: MingDong -@file: lfw.py.py -@desc: lfw dataloader loader -''' - -import os -import numpy as np -import cv2 -import torch -import torch.utils.data as data -import torchvision.transforms as transforms - - -def img_loader(path): - try: - img = cv2.imread(path) - if len(img.shape) == 2: - img = np.stack([img] * 3, 2) - return img - except IOError: - print('Cannot load image ' + path) - return None - - -class LFW(data.Dataset): - def __init__(self, root, file_list, transform=None, loader=img_loader): - super().__init__() - self.root = root - self.file_list = file_list - self.transform = transform - self.loader = loader - self.nameLs = [] - self.nameRs = [] - self.folds = [] - self.flags = [] - - with open(file_list) as f: - pairs = f.read().splitlines()[1:] - for i, p in enumerate(pairs): - p = p.split(' ') - nameL = p[0] - nameR = p[1] - fold = i // 600 - flag = int(p[2]) - - self.nameLs.append(nameL) - self.nameRs.append(nameR) - self.folds.append(fold) - self.flags.append(flag) - - def __getitem__(self, index): - - img_l = self.loader(os.path.join(self.root, self.nameLs[index])) - img_r = self.loader(os.path.join(self.root, self.nameRs[index])) - imglist = [img_l, cv2.flip(img_l, 1), img_r, cv2.flip(img_r, 1)] - - if 
self.transform is not None: - for _, i in enumerate(imglist): - imglist[i] = self.transform(imglist[i]) - - imgs = imglist - return imgs - else: - imgs = [torch.from_numpy(i) for i in imglist] - return imgs - - def __len__(self): - return len(self.nameLs) - - -class LFWDataset: - def __init__(self, root, file_list, loader=img_loader): - super().__init__() - self.root = root - self.file_list = file_list - self.loader = loader - self.nameLs = [] - self.nameRs = [] - self.folds = [] - self.flags = [] - - with open(file_list) as f: - pairs = f.read().splitlines()[1:] - for i, p in enumerate(pairs): - p = p.split(' ') - nameL = p[0] - nameR = p[1] - fold = i // 600 - flag = int(p[2]) - - self.nameLs.append(nameL) - self.nameRs.append(nameR) - self.folds.append(fold) - self.flags.append(flag) - - self.idx = 0 - self.len = len(self.nameLs) - - def __next__(self, index): - if self.idx > self.len: - raise StopIteration - - img_l = self.loader(os.path.join(self.root, self.nameLs[self.idx])) - img_r = self.loader(os.path.join(self.root, self.nameRs[self.idx])) - imglist = [img_l, cv2.flip(img_l, 1), img_r, cv2.flip(img_r, 1)] - - outputs = [] - image_mean = np.array([127.5, 127.5, 127.5]) - for _, image in enumerate(imglist): - img = (image - image_mean) / 127.5 - img = img.astype(np.float32).transpose((2, 0, 1)) - img = np.expand_dims(img, axis=0) - - outputs.append(img) - - self.idx += 1 - return outputs - - def __iter__(self): - return self - - -if __name__ == '__main__': - root = 'D:/data/lfw_align_112' - file_list = 'D:/data/pairs.txt' - - transform = transforms.Compose([ - transforms.ToTensor(), # range [0, 255] -> [0.0,1.0]+ - transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) # range [0.0, 1.0] -> [-1.0,1.0] - ]) - - dataset = LFW(root, file_list, transform=transform) - #dataloader = LFW(root, file_list) - trainloader = data.DataLoader(dataset, batch_size=64, shuffle=False, num_workers=2, drop_last=False) - print(len(dataset)) - for data in trainloader: - 
for d in data: - print(d[0].shape) diff --git a/face_recognition1/face_feature/dataloader/lfw_2.py b/face_recognition1/face_feature/dataloader/lfw_2.py deleted file mode 100644 index f302a9b9026835cce765f4bc58433685f528066c..0000000000000000000000000000000000000000 --- a/face_recognition1/face_feature/dataloader/lfw_2.py +++ /dev/null @@ -1,88 +0,0 @@ -#!/usr/bin/env python -# encoding: utf-8 -''' -@author: MingDong -@file: lfw_2.py -@desc: lfw dataloader from insightface ,just like agedb and cfp-fp -''' - -import os - -import torch -import torch.utils.data as data -import torchvision.transforms as transforms -import numpy as np -import cv2 - - -def img_loader(path): - try: - img = cv2.imread(path) - if len(img.shape) == 2: - img = np.stack([img] * 3, 2) - return img - except IOError: - print('Cannot load image ' + path) - return None - - -class LFW_2(data.Dataset): - def __init__(self, root, file_list, transform=None, loader=img_loader): - super().__init__() - self.root = root - self.file_list = file_list - self.transform = transform - self.loader = loader - self.nameLs = [] - self.nameRs = [] - self.folds = [] - self.flags = [] - - with open(file_list) as f: - pairs = f.read().splitlines() - for i, p in enumerate(pairs): - p = p.split(' ') - nameL = p[0] - nameR = p[1] - fold = i // 600 - flag = int(p[2]) - - self.nameLs.append(nameL) - self.nameRs.append(nameR) - self.folds.append(fold) - self.flags.append(flag) - - def __getitem__(self, index): - - img_l = self.loader(os.path.join(self.root, self.nameLs[index])) - img_r = self.loader(os.path.join(self.root, self.nameRs[index])) - imglist = [img_l, cv2.flip(img_l, 1), img_r, cv2.flip(img_r, 1)] - - if self.transform is not None: - for _, i in enumerate(imglist): - imglist[i] = self.transform(imglist[i]) - - imgs = imglist - return imgs - else: - imgs = [torch.from_numpy(i) for i in imglist] - return imgs - - def __len__(self): - return len(self.nameLs) - - -if __name__ == '__main__': - root = 
'/media/sda/insightface_emore/lfw' - file_list = '/media/sda/insightface_emore/pair_lfw.txt' - - transform = transforms.Compose([ - transforms.ToTensor(), # range [0, 255] -> [0.0,1.0] - transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) # range [0.0, 1.0] -> [-1.0,1.0] - ]) - - dataset = LFW_2(root, file_list, transform=transform) - trainloader = data.DataLoader(dataset, batch_size=64, shuffle=False, num_workers=2, drop_last=False) - for data in trainloader: - for d in data: - print(d[0].shape) diff --git a/face_recognition1/face_feature/dataloader/megaface.py b/face_recognition1/face_feature/dataloader/megaface.py deleted file mode 100644 index 882a73774021e464fe9ae88b37690a9ec42b7fd7..0000000000000000000000000000000000000000 --- a/face_recognition1/face_feature/dataloader/megaface.py +++ /dev/null @@ -1,81 +0,0 @@ -#!/usr/bin/env python -# encoding: utf-8 -''' -@author: MingDong -@file: megaface.py -@desc: -''' - -import os -import numpy as np -import cv2 -import torch -import torch.utils.data as data -import torchvision.transforms as transforms - - -def img_loader(path): - try: - img = cv2.imread(path) - if len(img.shape) == 2: - img = np.stack([img] * 3, 2) - return img - except IOError: - print('Cannot load image ' + path) - return None - - -class MegaFace(data.Dataset): - def __init__(self, facescrub_dir, megaface_dir, transform=None, loader=img_loader): - super().__init__() - self.transform = transform - self.loader = loader - - test_image_file_list = [] - print('Scanning files under facescrub and megaface...') - for root, _, files in os.walk(facescrub_dir): - for e in files: - filename = os.path.join(root, e) - ext = os.path.splitext(filename)[1].lower() - if ext in ('.png', '.bmp', '.jpg', '.jpeg'): - test_image_file_list.append(filename) - for root, _, files in os.walk(megaface_dir): - for e in files: - filename = os.path.join(root, e) - ext = os.path.splitext(filename)[1].lower() - if ext in ('.png', '.bmp', '.jpg', '.jpeg'): - 
test_image_file_list.append(filename) - - self.image_list = test_image_file_list - - def __getitem__(self, index): - img_path = self.image_list[index] - img = self.loader(img_path) - - #水平翻转图像 - #img = cv2.flip(img, 1) - - if self.transform is not None: - img = self.transform(img) - else: - img = torch.from_numpy(img) - - return img, img_path - - def __len__(self): - return len(self.image_list) - - -if __name__ == '__main__': - facescrub = '/media/sda/megaface_test_kit/facescrub_align_112/' - megaface = '/media/sda/megaface_test_kit/megaface_align_112/' - - transform = transforms.Compose([ - transforms.ToTensor(), # range [0, 255] -> [0.0,1.0] - transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) # range [0.0, 1.0] -> [-1.0,1.0] - ]) - dataset = MegaFace(facescrub, megaface, transform=transform) - trainloader = data.DataLoader(dataset, batch_size=64, shuffle=False, num_workers=2, drop_last=False) - print(len(dataset)) - for data in trainloader: - print(data.shape) diff --git a/face_recognition1/face_feature/datasets/__init__.py b/face_recognition1/face_feature/datasets/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/face_recognition1/face_feature/datasets/prepare_dataset.py b/face_recognition1/face_feature/datasets/prepare_dataset.py deleted file mode 100644 index aae65b9054cb8616cc9917850492ddd73208a8e5..0000000000000000000000000000000000000000 --- a/face_recognition1/face_feature/datasets/prepare_dataset.py +++ /dev/null @@ -1,111 +0,0 @@ -""" -@author: MingDong -@file: prepare_dataset.py -@desc: merge the face align images (112x112) -""" -import sys - -import cv2 - -sys.path.append('../..') - -import os -import argparse -from tqdm import tqdm -from face_liveness.datasets.prepare_dataset import get_file_names -from face_detect.test import get_bbox -from face_pose.test import get_pose -from feature_api import align - -parser = 
argparse.ArgumentParser(description='split or merge') -parser.add_argument('--file_name', default='./glink360k/train.rec', help='source file name') -parser.add_argument('--start_no', type=int, default=1, help='start number for merge') -parser.add_argument('--end_no', type=int, default=34, help='end number for merge') -parser.add_argument('--remove', default=False, help='Flag for Remove') -parser.add_argument('--split_size', type=int, default=80000000, help='split file size') -parser.add_argument('--db_path', default='/datasets/public2/upload/faces_emore_images', help='source file name') -parser.add_argument('--label_file', default='/datasets/public2/upload/faces_emore/faces_emore.list', help='source file name') - -args = parser.parse_args() - - -def merge_files(args): - """ merge the split files in Azure """ - with open(args.file_name, 'ab') as f: - for i in range(args.start_no, args.end_no + 1): - fn = args.file_name + str(i) + '.rar' - with open(fn, 'rb') as chunk_file: - f.write(chunk_file.read()) - if args.remove: - os.remove(fn) - print(fn) - - print('ok') - - -def split_files(args): - file_number = 1 - with open(args.file_name, 'rb') as f: - chunk = f.read(args.split_size) - while chunk: - with open(args.file_name + str(file_number) + '.rar', 'wb') as chunk_file: - chunk_file.write(chunk) - file_number += 1 - chunk = f.read(args.split_size) - - print('ok') - - -def generate_train_label_file(args): - label_list = [] - file_list = get_file_names(args.db_path) - class_idx = -1 - dir_list = [] - for file_path in tqdm(file_list): - dirname = os.path.basename(os.path.dirname(file_path)) - if dirname not in dir_list: - dir_list.append(dirname) - class_idx += 1 - - label_list.append(f'{file_path} {class_idx}\n') - - with open(args.label_file, 'w') as f: - f.writelines(label_list) - - -def align_files(args): - """ align face images from the indian dataset and use it as the training dataset for feature extraction """ - file_list = get_file_names(args.db_path) - for 
path in tqdm(file_list): - image = cv2.imread(path) - - face_bbox = get_bbox(image) - if face_bbox is None: - continue - - yaw, pitch, roll = get_pose(image, face_bbox) - if abs(yaw.item()) > 25 or abs(pitch.item()) > 25 or abs(roll.item()) > 25: - continue - - face_image = align(image, output_size=(112, 112)) - dst_path = path.replace('indian_images', 'indian_align_images') - if not os.path.exists(os.path.dirname(dst_path)): - os.makedirs(os.path.dirname(dst_path)) - - if face_image is not None: - cv2.imwrite(dst_path, face_image) - - -def rename_umd(args): - """ rename folder name for umd dataset """ - folders = os.listdir(args.db_path) - for folder in folders: - os.rename(os.path.join(args.db_path, folder), f'{args.db_path}/umd{folder}') - - -if __name__ == '__main__': - # merge_files(args) - # split_files(args) - # align_files(args) - # generate_train_label_file(args) - rename_umd(args) diff --git a/face_recognition1/face_feature/eval_agedb30.py b/face_recognition1/face_feature/eval_agedb30.py deleted file mode 100644 index fa2e2aee57692d5c428b385c5a4c4b112623dbc4..0000000000000000000000000000000000000000 --- a/face_recognition1/face_feature/eval_agedb30.py +++ /dev/null @@ -1,146 +0,0 @@ -#!/usr/bin/env python -# encoding: utf-8 -''' -@author: MingDong -@file: eval_agedb30.py -@desc: The AgeDB-30 test protocol is same with LFW, so I just copy the code from eval_lfw.py -''' - - -import os -import argparse -import numpy as np -import scipy.io -import torch.utils.data -import torchvision.transforms as transforms -from torch.nn import DataParallel -from model import mobilefacenet, cbam -from dataloader.agedb import AgeDB30 - - -def getAccuracy(scores, flags, threshold): - p = np.sum(scores[flags == 1] > threshold) - n = np.sum(scores[flags == -1] < threshold) - return 1.0 * (p + n) / len(scores) - -def getThreshold(scores, flags, thrNum): - accuracys = np.zeros((2 * thrNum + 1, 1)) - thresholds = np.arange(-thrNum, thrNum + 1) * 1.0 / thrNum - for i in range(2 * 
thrNum + 1): - accuracys[i] = getAccuracy(scores, flags, thresholds[i]) - max_index = np.squeeze(accuracys == np.max(accuracys)) - bestThreshold = np.mean(thresholds[max_index]) - return bestThreshold - -def evaluation_10_fold(feature_path='./result/cur_epoch_agedb_result.mat'): - ACCs = np.zeros(10) - result = scipy.io.loadmat(feature_path) - for i in range(10): - fold = result['fold'] - flags = result['flag'] - featureLs = result['fl'] - featureRs = result['fr'] - - valFold = fold != i - testFold = fold == i - flags = np.squeeze(flags) - - mu = np.mean(np.concatenate((featureLs[valFold[0], :], featureRs[valFold[0], :]), 0), 0) - mu = np.expand_dims(mu, 0) - featureLs = featureLs - mu - featureRs = featureRs - mu - featureLs = featureLs / np.expand_dims(np.sqrt(np.sum(np.power(featureLs, 2), 1)), 1) - featureRs = featureRs / np.expand_dims(np.sqrt(np.sum(np.power(featureRs, 2), 1)), 1) - - scores = np.sum(np.multiply(featureLs, featureRs), 1) - threshold = getThreshold(scores[valFold[0]], flags[valFold[0]], 10000) - ACCs[i] = getAccuracy(scores[testFold[0]], flags[testFold[0]], threshold) - - return ACCs - -def loadModel(data_root, file_list, backbone_net, gpus='0', resume=None): - - if backbone_net == 'MobileFace': - net = mobilefacenet.MobileFaceNet() - elif backbone_net == 'CBAM_50': - net = cbam.CBAMResNet(50, feature_dim=args.feature_dim, mode='ir') - elif backbone_net == 'CBAM_50_SE': - net = cbam.CBAMResNet(50, feature_dim=args.feature_dim, mode='ir_se') - elif backbone_net == 'CBAM_100': - net = cbam.CBAMResNet(100, feature_dim=args.feature_dim, mode='ir') - elif backbone_net == 'CBAM_100_SE': - net = cbam.CBAMResNet(100, feature_dim=args.feature_dim, mode='ir_se') - else: - print(backbone_net, ' is not available!') - - # gpu init - multi_gpus = False - if len(gpus.split(',')) > 1: - multi_gpus = True - os.environ['CUDA_VISIBLE_DEVICES'] = gpus - device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - - 
net.load_state_dict(torch.load(resume)['net_state_dict']) - - if multi_gpus: - net = DataParallel(net).to(device) - else: - net = net.to(device) - - transform = transforms.Compose([ - transforms.ToTensor(), # range [0, 255] -> [0.0,1.0] - transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) # range [0.0, 1.0] -> [-1.0,1.0] - ]) - agedb_dataset = AgeDB30(data_root, file_list, transform=transform) - agedb_loader = torch.utils.data.DataLoader(agedb_dataset, batch_size=128, - shuffle=False, num_workers=2, drop_last=False) - - return net.eval(), device, agedb_dataset, agedb_loader - -def getFeatureFromTorch(feature_save_dir, net, device, data_set, data_loader): - featureLs = None - featureRs = None - count = 0 - for data in data_loader: - for _, i in enumerate(data): - data[i] = data[i].to(device) - count += data[0].size(0) - #print('extracing deep features from the face pair {}...'.format(count)) - with torch.no_grad(): - res = [net(d).data.cpu().numpy() for d in data] - featureL = np.concatenate((res[0], res[1]), 1) - featureR = np.concatenate((res[2], res[3]), 1) - # print(featureL.shape, featureR.shape) - if featureLs is None: - featureLs = featureL - else: - featureLs = np.concatenate((featureLs, featureL), 0) - if featureRs is None: - featureRs = featureR - else: - featureRs = np.concatenate((featureRs, featureR), 0) - # print(featureLs.shape, featureRs.shape) - - result = {'fl': featureLs, 'fr': featureRs, 'fold': data_set.folds, 'flag': data_set.flags} - scipy.io.savemat(feature_save_dir, result) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Testing') - parser.add_argument('--root', type=str, default='/media/sda/AgeDB-30/agedb30_align_112', help='The path of lfw data') - parser.add_argument('--file_list', type=str, default='/media/sda/AgeDB-30/agedb_30_pair.txt', help='The path of lfw data') - parser.add_argument('--resume', type=str, default='./model/SERES100_SERES100_IR_20190528_132635/Iter_342000_net.ckpt', 
help='The path pf save model') - parser.add_argument('--backbone_net', type=str, default='CBAM_100_SE', help='MobileFace, CBAM_50, CBAM_50_SE, CBAM_100, CBAM_100_SE') - parser.add_argument('--feature_dim', type=int, default=512, help='feature dimension') - parser.add_argument('--feature_save_path', type=str, default='./result/cur_epoch_agedb_result.mat', - help='The path of the extract features save, must be .mat file') - parser.add_argument('--gpus', type=str, default='2,3', help='gpu list') - args = parser.parse_args() - - net, device, agedb_dataset, agedb_loader = loadModel(args.root, args.file_list, args.backbone_net, args.gpus, args.resume) - getFeatureFromTorch(args.feature_save_path, net, device, agedb_dataset, agedb_loader) - ACCs = evaluation_10_fold(args.feature_save_path) - for _, i in enumerate(ACCs): - print(f'{i + 1} {ACCs[i] * 100:.2f}') - print('--------') - print(f'AVE {np.mean(ACCs) * 100:.4f}') diff --git a/face_recognition1/face_feature/eval_cfp.py b/face_recognition1/face_feature/eval_cfp.py deleted file mode 100644 index d2434e64c307453449116f4d8cd6dc7fd6fb4479..0000000000000000000000000000000000000000 --- a/face_recognition1/face_feature/eval_cfp.py +++ /dev/null @@ -1,143 +0,0 @@ -""" -@author: MingDong -@file: eval_cfp.py -@desc: this code is very similar with eval_lfw.py and eval_agedb30.py -""" -import os -import argparse -import numpy as np -import scipy.io -import torch.utils.data -import torchvision.transforms as transforms -from torch.nn import DataParallel -from model import mobilefacenet, cbam -from dataloader.cfp import CFP_FP - - -def getAccuracy(scores, flags, threshold): - p = np.sum(scores[flags == 1] > threshold) - n = np.sum(scores[flags == -1] < threshold) - return 1.0 * (p + n) / len(scores) - - -def getThreshold(scores, flags, thrNum): - accuracys = np.zeros((2 * thrNum + 1, 1)) - thresholds = np.arange(-thrNum, thrNum + 1) * 1.0 / thrNum - for i in range(2 * thrNum + 1): - accuracys[i] = getAccuracy(scores, flags, 
thresholds[i]) - max_index = np.squeeze(accuracys == np.max(accuracys)) - bestThreshold = np.mean(thresholds[max_index]) - return bestThreshold - -def evaluation_10_fold(feature_path='./result/cur_epoch_cfp_result.mat'): - ACCs = np.zeros(10) - result = scipy.io.loadmat(feature_path) - for i in range(10): - fold = result['fold'] - flags = result['flag'] - featureLs = result['fl'] - featureRs = result['fr'] - - valFold = fold != i - testFold = fold == i - flags = np.squeeze(flags) - - mu = np.mean(np.concatenate((featureLs[valFold[0], :], featureRs[valFold[0], :]), 0), 0) - mu = np.expand_dims(mu, 0) - featureLs = featureLs - mu - featureRs = featureRs - mu - featureLs = featureLs / np.expand_dims(np.sqrt(np.sum(np.power(featureLs, 2), 1)), 1) - featureRs = featureRs / np.expand_dims(np.sqrt(np.sum(np.power(featureRs, 2), 1)), 1) - - scores = np.sum(np.multiply(featureLs, featureRs), 1) - threshold = getThreshold(scores[valFold[0]], flags[valFold[0]], 10000) - ACCs[i] = getAccuracy(scores[testFold[0]], flags[testFold[0]], threshold) - - return ACCs - -def loadModel(data_root, file_list, backbone_net, gpus='0', resume=None): - - if backbone_net == 'MobileFace': - net = mobilefacenet.MobileFaceNet() - elif backbone_net == 'CBAM_50': - net = cbam.CBAMResNet(50, feature_dim=args.feature_dim, mode='ir') - elif backbone_net == 'CBAM_50_SE': - net = cbam.CBAMResNet(50, feature_dim=args.feature_dim, mode='ir_se') - elif backbone_net == 'CBAM_100': - net = cbam.CBAMResNet(100, feature_dim=args.feature_dim, mode='ir') - elif backbone_net == 'CBAM_100_SE': - net = cbam.CBAMResNet(100, feature_dim=args.feature_dim, mode='ir_se') - else: - print(backbone_net, ' is not available!') - - # gpu init - multi_gpus = False - if len(gpus.split(',')) > 1: - multi_gpus = True - os.environ['CUDA_VISIBLE_DEVICES'] = gpus - device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - - net.load_state_dict(torch.load(resume)['net_state_dict']) - - if multi_gpus: - net = 
DataParallel(net).to(device) - else: - net = net.to(device) - - transform = transforms.Compose([ - transforms.ToTensor(), # range [0, 255] -> [0.0,1.0] - transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) # range [0.0, 1.0] -> [-1.0,1.0] - ]) - cfp_dataset = CFP_FP(data_root, file_list, transform=transform) - cfp_loader = torch.utils.data.DataLoader(cfp_dataset, batch_size=128, - shuffle=False, num_workers=4, drop_last=False) - - return net.eval(), device, cfp_dataset, cfp_loader - -def getFeatureFromTorch(feature_save_dir, net, device, data_set, data_loader): - featureLs = None - featureRs = None - count = 0 - for data in data_loader: - for _, i in enumerate(data): - data[i] = data[i].to(device) - count += data[0].size(0) - #print('extracing deep features from the face pair {}...'.format(count)) - with torch.no_grad(): - res = [net(d).data.cpu().numpy() for d in data] - featureL = np.concatenate((res[0], res[1]), 1) - featureR = np.concatenate((res[2], res[3]), 1) - # print(featureL.shape, featureR.shape) - if featureLs is None: - featureLs = featureL - else: - featureLs = np.concatenate((featureLs, featureL), 0) - if featureRs is None: - featureRs = featureR - else: - featureRs = np.concatenate((featureRs, featureR), 0) - # print(featureLs.shape, featureRs.shape) - - result = {'fl': featureLs, 'fr': featureRs, 'fold': data_set.folds, 'flag': data_set.flags} - scipy.io.savemat(feature_save_dir, result) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Testing') - parser.add_argument('--root', type=str, default='/media/sda/CFP-FP/cfp_fp_aligned_112', help='The path of lfw data') - parser.add_argument('--file_list', type=str, default='/media/sda/CFP-FP/cfp_fp_pair.txt', help='The path of lfw data') - parser.add_argument('--resume', type=str, default='./checkpoints/SERES100_SERES100_IR_20190528_132635/Iter_342000_net.ckpt', help='The path pf save checkpoints') - parser.add_argument('--backbone_net', type=str, 
default='CBAM_100_SE', help='MobileFace, CBAM_50, CBAM_50_SE, CBAM_100, CBAM_100_SE') - parser.add_argument('--feature_dim', type=int, default=512, help='feature dimension') - parser.add_argument('--feature_save_path', type=str, default='./result/cur_epoch_cfp_result.mat', - help='The path of the extract features save, must be .mat file') - parser.add_argument('--gpus', type=str, default='2,3', help='gpu list') - args = parser.parse_args() - - net, device, agedb_dataset, agedb_loader = loadModel(args.root, args.file_list, args.backbone_net, args.gpus, args.resume) - getFeatureFromTorch(args.feature_save_path, net, device, agedb_dataset, agedb_loader) - ACCs = evaluation_10_fold(args.feature_save_path) - for _, i in enumerate(ACCs): - print(f'{i + 1} {ACCs[i] * 100:.2f}') - print('--------') - print(f'AVE {np.mean(ACCs) * 100:.4f}') diff --git a/face_recognition1/face_feature/eval_deepglint_merge.py b/face_recognition1/face_feature/eval_deepglint_merge.py deleted file mode 100644 index 26909bdc378b17ea57a28cbed4f718df55ae2c4e..0000000000000000000000000000000000000000 --- a/face_recognition1/face_feature/eval_deepglint_merge.py +++ /dev/null @@ -1,101 +0,0 @@ -#!/usr/bin/env python -# encoding: utf-8 -""" -@author: MingDong -@file: eval_deepglint_merge.py.py -@desc: merge the feature of deepglint test data to one file. original deepglint feature is generated by the protocol of megaface. - -We use the same format as Megaface(http://megaface.cs.washington.edu) -except that we merge all files into a single binary file. 
- -for examples: - -when megaface: N * (512, 1) -while deepglint:(N, 512) - -""" -import os -import argparse -import struct -import numpy as np - -cv_type_to_dtype = { - 5: np.dtype('float32') -} - -dtype_to_cv_type = {v: k for k, v in cv_type_to_dtype.items()} - - -def write_mat(f, m): - """Write mat m to file f""" - if len(m.shape) == 1: - rows = m.shape[0] - cols = 1 - else: - rows, cols = m.shape - header = struct.pack('iiii', rows, cols, cols * 4, dtype_to_cv_type[m.dtype]) - f.write(header) - f.write(m.data) - - -def read_mat(f): - """ - Reads an OpenCV mat from the given file opened in binary mode - """ - rows, cols, stride, type_ = struct.unpack('iiii', f.read(4 * 4)) - mat = np.fromstring(f.read(rows * stride), dtype=cv_type_to_dtype[type_]) - return mat.reshape(rows, cols) - - -def load_mat(filename): - """ - Reads a OpenCV Mat from the given filename - """ - return read_mat(open(filename, 'rb')) - - -def save_mat(filename, m): - """Saves mat m to the given filename""" - return write_mat(open(filename, 'wb'), m) - - - -def main(args): - - deepglint_features = args.deepglint_features_path - # merge all features into one file - total_feature = [] - total_files = [] - for root, _, files in os.walk(deepglint_features): - for file in files: - filename = os.path.join(root, file) - ext = os.path.splitext(filename)[1] - ext = ext.lower() - if ext in ('.feat'): - total_files.append(filename) - - assert len(total_files) == 1862120 - total_files.sort() # important - - for _, i in enumerate(total_files): - filename = total_files[i] - tmp_feature = load_mat(filename) - # print(filename) - # print(tmp_feature.shape) - tmp_feature = tmp_feature.T - total_feature.append(tmp_feature) - print(i + 1, tmp_feature.shape) - # write_mat(feature_path_out, feature_fusion) - - print('total feature number: ', len(total_feature)) - total_feature = np.array(total_feature).squeeze() - print(total_feature.shape, total_feature.dtype, type(total_feature)) - 
save_mat('deepglint_test_feature.bin', total_feature) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument("--deepglint_features_path", type=str, default="/home/mingdong/deepglint/deepglint_feature_ir+ws/") - args = parser.parse_args() - - main(args) diff --git a/face_recognition1/face_feature/eval_lfw.py b/face_recognition1/face_feature/eval_lfw.py deleted file mode 100644 index 6ddac17d9a4fe768c8d845f3a96116a345fd8ba4..0000000000000000000000000000000000000000 --- a/face_recognition1/face_feature/eval_lfw.py +++ /dev/null @@ -1,195 +0,0 @@ -#!/usr/bin/env python -# encoding: utf-8 -""" -@author: MingDong -@file: eval_lfw.py -@desc: -""" - -import os -import argparse -import numpy as np -import scipy.io -import onnxruntime as ort -import torch.utils.data -import torchvision.transforms as transforms -from torch.nn import DataParallel -from model import mobilefacenet, resnet, cbam -from dataloader.lfw import LFW, LFWDataset - - -def getAccuracy(scores, flags, threshold): - p = np.sum(scores[flags == 1] > threshold) - n = np.sum(scores[flags == -1] < threshold) - return 1.0 * (p + n) / len(scores) - -def getThreshold(scores, flags, thrNum): - accuracys = np.zeros((2 * thrNum + 1, 1)) - thresholds = np.arange(-thrNum, thrNum + 1) * 1.0 / thrNum - for i in range(2 * thrNum + 1): - accuracys[i] = getAccuracy(scores, flags, thresholds[i]) - max_index = np.squeeze(accuracys == np.max(accuracys)) - bestThreshold = np.mean(thresholds[max_index]) - return bestThreshold - -def evaluation_10_fold(feature_path='./result/cur_epoch_result.mat'): - ACCs = np.zeros(10) - result = scipy.io.loadmat(feature_path) - for i in range(10): - fold = result['fold'] - flags = result['flag'] - featureLs = result['fl'] - featureRs = result['fr'] - - valFold = fold != i - testFold = fold == i - flags = np.squeeze(flags) - - mu = np.mean(np.concatenate((featureLs[valFold[0], :], featureRs[valFold[0], :]), 0), 0) - mu = np.expand_dims(mu, 0) - featureLs = 
featureLs - mu - featureRs = featureRs - mu - featureLs = featureLs / np.expand_dims(np.sqrt(np.sum(np.power(featureLs, 2), 1)), 1) - featureRs = featureRs / np.expand_dims(np.sqrt(np.sum(np.power(featureRs, 2), 1)), 1) - - scores = np.sum(np.multiply(featureLs, featureRs), 1) - threshold = getThreshold(scores[valFold[0]], flags[valFold[0]], 10000) - ACCs[i] = getAccuracy(scores[testFold[0]], flags[testFold[0]], threshold) - - return ACCs - - -def load_model(data_root, file_list, backbone_net, gpus='0', resume=None): - - if backbone_net == 'MobileFace': - net = mobilefacenet.MobileFaceNet() - elif backbone_net == 'Res50': - net = resnet.ResNet50() - elif backbone_net == 'CBAM_50': - net = cbam.CBAMResNet(50, feature_dim=args.feature_dim, mode='ir') - elif backbone_net == 'CBAM_50_SE': - net = cbam.CBAMResNet(50, feature_dim=args.feature_dim, mode='ir_se') - elif backbone_net == 'CBAM_100': - net = cbam.CBAMResNet(100, feature_dim=args.feature_dim, mode='ir') - elif backbone_net == 'CBAM_100_SE': - net = cbam.CBAMResNet(100, feature_dim=args.feature_dim, mode='ir_se') - else: - print(backbone_net, ' is not available!') - - # gpu init - multi_gpus = False - if len(gpus.split(',')) > 1: - multi_gpus = True - os.environ['CUDA_VISIBLE_DEVICES'] = gpus - device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - - net.load_state_dict(torch.load(resume)['net_state_dict']) - - if multi_gpus: - net = DataParallel(net).to(device) - else: - net = net.to(device) - - transform = transforms.Compose([ - transforms.ToTensor(), # range [0, 255] -> [0.0,1.0] - transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) # range [0.0, 1.0] -> [-1.0,1.0] - ]) - lfw_dataset = LFW(data_root, file_list, transform=transform) - lfw_loader = torch.utils.data.DataLoader(lfw_dataset, batch_size=128, - shuffle=False, num_workers=2, drop_last=False) - - return net.eval(), device, lfw_dataset, lfw_loader - - -def load_onnx_model(data_root, file_list): - ort_session = 
ort.InferenceSession('checkpoints/resnet50_Quant.onnx') - - lfw_dataset = LFWDataset(data_root, file_list) - - return ort_session, lfw_dataset - - -def getFeatureFromTorch(feature_save_dir, net, device, data_set, data_loader): - featureLs = None - featureRs = None - count = 0 - for data in data_loader: - for i, _ in enumerate(data): - data[i] = data[i].to(device) - count += data[0].size(0) - #print('extracing deep features from the face pair {}...'.format(count)) - with torch.no_grad(): - res = [net(d).data.cpu().numpy() for d in data] - featureL = np.concatenate((res[0], res[1]), 1) - featureR = np.concatenate((res[2], res[3]), 1) - # print(featureL.shape, featureR.shape) - if featureLs is None: - featureLs = featureL - else: - featureLs = np.concatenate((featureLs, featureL), 0) - if featureRs is None: - featureRs = featureR - else: - featureRs = np.concatenate((featureRs, featureR), 0) - # print(featureLs.shape, featureRs.shape) - - result = {'fl': featureLs, 'fr': featureRs, 'fold': data_set.folds, 'flag': data_set.flags} - scipy.io.savemat(feature_save_dir, result) - - -def getFeatureFromOnnx(feature_save_dir, net, data_set): - featureLs = None - featureRs = None - count = 0 - - for data in data_set: - res = [] - for _, i in enumerate(data): - feat = net.run(None, {"input": data[i]}) - res.append(feat) - count += data[0].size(0) - - featureL = np.concatenate((res[0], res[1]), 1) - featureR = np.concatenate((res[2], res[3]), 1) - # print(featureL.shape, featureR.shape) - if featureLs is None: - featureLs = featureL - else: - featureLs = np.concatenate((featureLs, featureL), 0) - if featureRs is None: - featureRs = featureR - else: - featureRs = np.concatenate((featureRs, featureR), 0) - # print(featureLs.shape, featureRs.shape) - - result = {'fl': featureLs, 'fr': featureRs, 'fold': data_set.folds, 'flag': data_set.flags} - scipy.io.savemat(feature_save_dir, result) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Testing') - 
parser.add_argument('--root', type=str, default='/datasets/public1/upload/datasets/lfw', help='The path of lfw data') - parser.add_argument('--file_list', type=str, default='/datasets/public1/upload/datasets/lfw_pair.txt', help='The path of lfw data') - parser.add_argument('--backbone_net', type=str, default='Res50', help='MobileFace, Res50, CBAM_50, CBAM_50_SE, CBAM_100, CBAM_100_SE') - parser.add_argument('--feature_dim', type=int, default=512, help='feature dimension') - parser.add_argument('--resume', type=str, default='./checkpoints/Res50_RES50_20210711_091848/Iter_066000_net.ckpt', - help='The path pf save checkpoints') - parser.add_argument('--feature_save_path', type=str, default='./result/cur_epoch_lfw_result.mat', - help='The path of the extract features save, must be .mat file') - parser.add_argument('--gpus', type=str, default='0', help='gpu list') - args = parser.parse_args() - - # inference by torch - # net, device, lfw_dataset, lfw_loader = load_model(args.root, args.file_list, args.backbone_net, args.gpus, args.resume) - # getFeatureFromTorch(args.feature_save_path, net, device, lfw_dataset, lfw_loader) - # ACCs = evaluation_10_fold(args.feature_save_path) - - # inference by onnx - net, lfw_dataset = load_onnx_model(args.root, args.file_list) - getFeatureFromOnnx(args.feature_save_path, net, lfw_dataset) - ACCs = evaluation_10_fold(args.feature_save_path) - - for _, i in enumerate(ACCs): - print(f'{i + 1} {ACCs[i] * 100:.2f}') - print('--------') - print(f'AVE {np.mean(ACCs) * 100:.4f}') diff --git a/face_recognition1/face_feature/eval_lfw_blufr.py b/face_recognition1/face_feature/eval_lfw_blufr.py deleted file mode 100644 index fff2d7e73715e7e45d05a3e31a03b683d5c2a6f2..0000000000000000000000000000000000000000 --- a/face_recognition1/face_feature/eval_lfw_blufr.py +++ /dev/null @@ -1,63 +0,0 @@ -#!/usr/bin/env python -# encoding: utf-8 -""" -@author: MingDong -@file: eval_lfw_blufr.py -@desc: test lfw accuracy on blufr protocol -""" - -import 
argparse -import scipy.io as sio - -def readName(file='pairs.txt'): - name_list = [] - f = open(file, 'r') - lines = f.readlines() - - for line in lines[1:]: - line_split = line.rstrip().split() - if len(line_split) == 3: - name_list.append(line_split[0]) - elif len(line_split) == 4: - name_list.append(line_split[0]) - name_list.append(line_split[2]) - else: - print('wrong file, please check again') - - return list(set(name_list)) - - -def main(args): - blufr_info = sio.loadmat(args.lfw_blufr_file) - #print(blufr_info) - name_list = readName() - - image = blufr_info['imageList'] - missing_files = [] - for i in range(image.shape[0]): - name = image[i][0][0] - index = name.rfind('_') - name = name[0:index] - if name not in name_list: - print(name) - missing_files.append(name) - print('lfw pairs.txt total persons: ', len(name_list)) - print('blufr_mat_missing persons: ', len(missing_files)) - - # Some of the missing file: - # Zdravko_Mucic - # Zelma_Novelo - # Zeng_Qinghong - # Zumrati_Juma - # lfw pairs.txt total persons: 4281 - # blufr_mat_missing persons: 1549 - - -if __name__ == '__main__': - parser = argparse.ArgumentParser(description='lfw blufr test') - parser.add_argument('--lfw_blufr_file', type=str, default='./blufr_lfw_config.mat', help='feature dimension') - parser.add_argument('--lfw_pairs.txt', type=str, default='./pairs.txt', help='feature dimension') - parser.add_argument('--gpus', type=str, default='2,3', help='gpu list') - args = parser.parse_args() - - main(args) diff --git a/face_recognition1/face_feature/eval_megaface.py b/face_recognition1/face_feature/eval_megaface.py deleted file mode 100644 index c9f83f407b480d5c75bbcae443f2a8eb536a2182..0000000000000000000000000000000000000000 --- a/face_recognition1/face_feature/eval_megaface.py +++ /dev/null @@ -1,114 +0,0 @@ -#!/usr/bin/env python -# encoding: utf-8 -''' -@author: MingDong -@file: eval_megaface.py -@desc: megaface feature extractor -''' -import os -import argparse -import struct -import 
numpy as np -import torch.utils.data -import torchvision.transforms as transforms -from torch.nn import DataParallel -from model import mobilefacenet, cbam -from dataloader.megaface import MegaFace - -cv_type_to_dtype = {5: np.dtype('float32'), 6: np.dtype('float64')} -dtype_to_cv_type = {v: k for k, v in cv_type_to_dtype.items()} - - -def write_mat(filename, m): - """Write mat m to file f""" - if len(m.shape) == 1: - rows = m.shape[0] - cols = 1 - else: - rows, cols = m.shape - header = struct.pack('iiii', rows, cols, cols * 4, dtype_to_cv_type[m.dtype]) - - with open(filename, 'wb') as outfile: - outfile.write(header) - outfile.write(m.data) - - -def read_mat(filename): - """ - Reads an OpenCV mat from the given file opened in binary mode - """ - with open(filename, 'rb') as fin: - rows, cols, stride, type_ = struct.unpack('iiii', fin.read(4 * 4)) - mat = np.fromstring(str(fin.read(rows * stride)), dtype=cv_type_to_dtype[type_]) - return mat.reshape(rows, cols) - - -def extract_feature(model_path, backbone_net, face_scrub_path, megaface_path, batch_size=32, gpus='0', do_norm=False): - - if backbone_net == 'MobileFace': - net = mobilefacenet.MobileFaceNet() - elif backbone_net == 'CBAM_50': - net = cbam.CBAMResNet(50, feature_dim=args.feature_dim, mode='ir') - elif backbone_net == 'CBAM_50_SE': - net = cbam.CBAMResNet(50, feature_dim=args.feature_dim, mode='ir_se') - elif backbone_net == 'CBAM_100': - net = cbam.CBAMResNet(100, feature_dim=args.feature_dim, mode='ir') - elif backbone_net == 'CBAM_100_SE': - net = cbam.CBAMResNet(100, feature_dim=args.feature_dim, mode='ir_se') - else: - print(args.backbone, ' is not available!') - - multi_gpus = False - if len(gpus.split(',')) > 1: - multi_gpus = True - os.environ['CUDA_VISIBLE_DEVICES'] = gpus - device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - - net.load_state_dict(torch.load(model_path)['net_state_dict']) - if multi_gpus: - net = DataParallel(net).to(device) - else: - net = net.to(device) 
- net.eval() - - transform = transforms.Compose([ - transforms.ToTensor(), # range [0, 255] -> [0.0,1.0] - transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) # range [0.0, 1.0] -> [-1.0,1.0] - ]) - megaface_dataset = MegaFace(face_scrub_path, megaface_path, transform=transform) - megaface_loader = torch.utils.data.DataLoader(megaface_dataset, batch_size=batch_size, - shuffle=False, num_workers=12, drop_last=False) - - for data in megaface_loader: - img, img_path= data[0].to(device), data[1] - with torch.no_grad(): - output = net(img).data.cpu().numpy() - - if do_norm is False: - for _, i in enumerate(img_path): - abs_path = img_path[i] + '.feat' - write_mat(abs_path, output[i]) - print('extract 1 batch...without feature normalization') - else: - for _, i in enumerate(img_path): - abs_path = img_path[i] + '.feat' - feat = output[i] - feat = feat / np.sqrt((np.dot(feat, feat))) - write_mat(abs_path, feat) - print('extract 1 batch...with feature normalization') - print('all images have been processed!') - - -if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Testing') - parser.add_argument('--model_path', type=str, default='./checkpoints/RES100_RES100_IR_20190423_100728/Iter_333000_net.ckpt', help='The path of trained checkpoints') - parser.add_argument('--backbone_net', type=str, default='CBAM_100', help='MobileFace, CBAM_50, CBAM_50_SE, CBAM_100, CBAM_100_SE') - parser.add_argument('--facescrub_dir', type=str, default='/media/sda/megaface_test_kit/facescrub_align_112/', help='facescrub data') - parser.add_argument('--megaface_dir', type=str, default='/media/sda/megaface_test_kit/megaface_align_112/', help='megaface data') - parser.add_argument('--batch_size', type=int, default=1024, help='batch size') - parser.add_argument('--feature_dim', type=int, default=512, help='feature dimension') - parser.add_argument('--gpus', type=str, default='0,1,2,3', help='gpu list') - parser.add_argument("--do_norm", type=int, default=1, help="1 if 
normalize feature, 0 do nothing(Default case)") - args = parser.parse_args() - - extract_feature(args.model_path, args.backbone_net, args.facescrub_dir, args.megaface_dir, args.batch_size, args.gpus, args.do_norm) diff --git a/face_recognition1/face_feature/log.log b/face_recognition1/face_feature/log.log deleted file mode 100644 index f1e7798cc12b265e53ad3f471fa0ebea0adecec2..0000000000000000000000000000000000000000 --- a/face_recognition1/face_feature/log.log +++ /dev/null @@ -1,8614 +0,0 @@ -20220630-16:38:42 Setting up a new session... -20220630-16:38:42 Visdom successfully connected to server -20220630-16:38:42 Train Epoch: 1/18 ... -20220630-16:40:49 Iters: 000100/[01], loss: 20.0625, train_accuracy: 0.0000, time: 1.26 s/iter, learning rate: 0.05 -20220630-16:42:56 Iters: 000200/[01], loss: 19.2577, train_accuracy: 0.0000, time: 1.27 s/iter, learning rate: 0.05 -20220630-16:45:05 Iters: 000300/[01], loss: 19.5273, train_accuracy: 0.0000, time: 1.29 s/iter, learning rate: 0.05 -20220630-16:47:11 Iters: 000400/[01], loss: 20.2498, train_accuracy: 0.0000, time: 1.27 s/iter, learning rate: 0.05 -20220630-16:49:17 Iters: 000500/[01], loss: 20.0103, train_accuracy: 0.0000, time: 1.25 s/iter, learning rate: 0.05 -20220630-16:51:23 Iters: 000600/[01], loss: 20.6090, train_accuracy: 0.0000, time: 1.26 s/iter, learning rate: 0.05 -20220630-16:53:27 Iters: 000700/[01], loss: 20.1846, train_accuracy: 0.0078, time: 1.25 s/iter, learning rate: 0.05 -20220630-16:55:32 Iters: 000800/[01], loss: 20.5324, train_accuracy: 0.0000, time: 1.25 s/iter, learning rate: 0.05 -20220630-16:57:37 Iters: 000900/[01], loss: 20.7694, train_accuracy: 0.0000, time: 1.25 s/iter, learning rate: 0.05 -20220630-16:59:44 Iters: 001000/[01], loss: 19.8633, train_accuracy: 0.0078, time: 1.27 s/iter, learning rate: 0.05 -20220630-17:01:47 Iters: 001100/[01], loss: 19.4772, train_accuracy: 0.0000, time: 1.23 s/iter, learning rate: 0.05 -20220630-17:03:51 Iters: 001200/[01], loss: 20.0557, 
train_accuracy: 0.0000, time: 1.24 s/iter, learning rate: 0.05 -20220630-17:05:59 Iters: 001300/[01], loss: 19.5078, train_accuracy: 0.0000, time: 1.27 s/iter, learning rate: 0.05 -20220630-17:08:05 Iters: 001400/[01], loss: 19.1199, train_accuracy: 0.0000, time: 1.26 s/iter, learning rate: 0.05 -20220630-17:10:11 Iters: 001500/[01], loss: 18.8668, train_accuracy: 0.0000, time: 1.26 s/iter, learning rate: 0.05 -20220630-17:12:17 Iters: 001600/[01], loss: 17.4952, train_accuracy: 0.0000, time: 1.27 s/iter, learning rate: 0.05 -20220630-17:14:23 Iters: 001700/[01], loss: 18.5381, train_accuracy: 0.0000, time: 1.26 s/iter, learning rate: 0.05 -20220630-17:16:29 Iters: 001800/[01], loss: 17.8031, train_accuracy: 0.0078, time: 1.26 s/iter, learning rate: 0.05 -20220630-17:18:35 Iters: 001900/[01], loss: 17.3867, train_accuracy: 0.0078, time: 1.26 s/iter, learning rate: 0.05 -20220630-17:20:41 Iters: 002000/[01], loss: 16.7205, train_accuracy: 0.0000, time: 1.26 s/iter, learning rate: 0.05 -20220630-17:22:48 Iters: 002100/[01], loss: 16.1001, train_accuracy: 0.0078, time: 1.26 s/iter, learning rate: 0.05 -20220630-17:24:54 Iters: 002200/[01], loss: 16.4960, train_accuracy: 0.0000, time: 1.27 s/iter, learning rate: 0.05 -20220630-17:26:59 Iters: 002300/[01], loss: 16.0385, train_accuracy: 0.0000, time: 1.25 s/iter, learning rate: 0.05 -20220630-17:29:05 Iters: 002400/[01], loss: 16.1072, train_accuracy: 0.0000, time: 1.26 s/iter, learning rate: 0.05 -20220630-17:31:08 Iters: 002500/[01], loss: 16.1451, train_accuracy: 0.0000, time: 1.23 s/iter, learning rate: 0.05 -20220630-17:33:13 Iters: 002600/[01], loss: 15.9080, train_accuracy: 0.0156, time: 1.25 s/iter, learning rate: 0.05 -20220630-17:35:17 Iters: 002700/[01], loss: 15.9791, train_accuracy: 0.0000, time: 1.25 s/iter, learning rate: 0.05 -20220630-17:37:20 Iters: 002800/[01], loss: 15.5529, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220630-17:39:27 Iters: 002900/[01], loss: 13.3363, 
train_accuracy: 0.0156, time: 1.26 s/iter, learning rate: 0.05 -20220630-17:41:31 Iters: 003000/[01], loss: 13.8127, train_accuracy: 0.0000, time: 1.25 s/iter, learning rate: 0.05 -20220630-17:43:37 Iters: 003100/[01], loss: 13.8907, train_accuracy: 0.0078, time: 1.26 s/iter, learning rate: 0.05 -20220630-17:45:44 Iters: 003200/[01], loss: 14.3198, train_accuracy: 0.0156, time: 1.26 s/iter, learning rate: 0.05 -20220630-17:47:50 Iters: 003300/[01], loss: 13.3309, train_accuracy: 0.0312, time: 1.26 s/iter, learning rate: 0.05 -20220630-17:49:56 Iters: 003400/[01], loss: 13.3648, train_accuracy: 0.0156, time: 1.26 s/iter, learning rate: 0.05 -20220630-17:52:02 Iters: 003500/[01], loss: 13.2147, train_accuracy: 0.0312, time: 1.26 s/iter, learning rate: 0.05 -20220630-17:54:08 Iters: 003600/[01], loss: 14.2585, train_accuracy: 0.0000, time: 1.26 s/iter, learning rate: 0.05 -20220630-17:56:15 Iters: 003700/[01], loss: 13.3423, train_accuracy: 0.0234, time: 1.27 s/iter, learning rate: 0.05 -20220630-17:58:21 Iters: 003800/[01], loss: 13.5289, train_accuracy: 0.0312, time: 1.26 s/iter, learning rate: 0.05 -20220630-18:00:27 Iters: 003900/[01], loss: 12.6478, train_accuracy: 0.0234, time: 1.26 s/iter, learning rate: 0.05 -20220630-18:02:33 Iters: 004000/[01], loss: 12.4929, train_accuracy: 0.0156, time: 1.26 s/iter, learning rate: 0.05 -20220630-18:04:39 Iters: 004100/[01], loss: 12.5916, train_accuracy: 0.0078, time: 1.26 s/iter, learning rate: 0.05 -20220630-18:06:45 Iters: 004200/[01], loss: 12.7267, train_accuracy: 0.0234, time: 1.26 s/iter, learning rate: 0.05 -20220630-18:08:50 Iters: 004300/[01], loss: 13.3752, train_accuracy: 0.0078, time: 1.25 s/iter, learning rate: 0.05 -20220630-18:10:56 Iters: 004400/[01], loss: 11.8711, train_accuracy: 0.0156, time: 1.26 s/iter, learning rate: 0.05 -20220630-18:13:02 Iters: 004500/[01], loss: 12.3482, train_accuracy: 0.0078, time: 1.26 s/iter, learning rate: 0.05 -20220630-18:15:08 Iters: 004600/[01], loss: 13.1396, 
train_accuracy: 0.0078, time: 1.27 s/iter, learning rate: 0.05 -20220630-18:17:14 Iters: 004700/[01], loss: 12.7013, train_accuracy: 0.0234, time: 1.26 s/iter, learning rate: 0.05 -20220630-18:19:20 Iters: 004800/[01], loss: 12.2291, train_accuracy: 0.0078, time: 1.26 s/iter, learning rate: 0.05 -20220630-18:21:25 Iters: 004900/[01], loss: 12.5359, train_accuracy: 0.0312, time: 1.26 s/iter, learning rate: 0.05 -20220630-18:23:32 Iters: 005000/[01], loss: 12.5428, train_accuracy: 0.0078, time: 1.27 s/iter, learning rate: 0.05 -20220630-18:25:38 Iters: 005100/[01], loss: 13.0179, train_accuracy: 0.0312, time: 1.26 s/iter, learning rate: 0.05 -20220630-18:27:42 Iters: 005200/[01], loss: 12.0935, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220630-18:29:49 Iters: 005300/[01], loss: 11.5881, train_accuracy: 0.0234, time: 1.27 s/iter, learning rate: 0.05 -20220630-18:31:53 Iters: 005400/[01], loss: 12.4582, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220630-18:34:00 Iters: 005500/[01], loss: 11.6767, train_accuracy: 0.0469, time: 1.26 s/iter, learning rate: 0.05 -20220630-18:36:05 Iters: 005600/[01], loss: 12.7326, train_accuracy: 0.0156, time: 1.25 s/iter, learning rate: 0.05 -20220630-18:38:10 Iters: 005700/[01], loss: 12.9674, train_accuracy: 0.0312, time: 1.26 s/iter, learning rate: 0.05 -20220630-18:40:16 Iters: 005800/[01], loss: 11.7937, train_accuracy: 0.0312, time: 1.25 s/iter, learning rate: 0.05 -20220630-18:42:20 Iters: 005900/[01], loss: 12.8238, train_accuracy: 0.0234, time: 1.25 s/iter, learning rate: 0.05 -20220630-18:44:26 Iters: 006000/[01], loss: 11.8075, train_accuracy: 0.0391, time: 1.26 s/iter, learning rate: 0.05 -20220630-18:46:32 Iters: 006100/[01], loss: 12.5232, train_accuracy: 0.0234, time: 1.26 s/iter, learning rate: 0.05 -20220630-18:48:38 Iters: 006200/[01], loss: 11.9104, train_accuracy: 0.0156, time: 1.26 s/iter, learning rate: 0.05 -20220630-18:50:44 Iters: 006300/[01], loss: 11.8216, 
train_accuracy: 0.0156, time: 1.26 s/iter, learning rate: 0.05 -20220630-18:52:50 Iters: 006400/[01], loss: 11.9315, train_accuracy: 0.0234, time: 1.26 s/iter, learning rate: 0.05 -20220630-18:54:54 Iters: 006500/[01], loss: 11.9345, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220630-18:56:59 Iters: 006600/[01], loss: 12.2135, train_accuracy: 0.0312, time: 1.26 s/iter, learning rate: 0.05 -20220630-18:59:06 Iters: 006700/[01], loss: 11.7133, train_accuracy: 0.0156, time: 1.27 s/iter, learning rate: 0.05 -20220630-19:01:12 Iters: 006800/[01], loss: 11.9283, train_accuracy: 0.0156, time: 1.26 s/iter, learning rate: 0.05 -20220630-19:03:17 Iters: 006900/[01], loss: 11.7598, train_accuracy: 0.0078, time: 1.26 s/iter, learning rate: 0.05 -20220630-19:05:23 Iters: 007000/[01], loss: 11.8255, train_accuracy: 0.0312, time: 1.26 s/iter, learning rate: 0.05 -20220630-19:07:28 Iters: 007100/[01], loss: 11.9760, train_accuracy: 0.0000, time: 1.25 s/iter, learning rate: 0.05 -20220630-19:09:34 Iters: 007200/[01], loss: 12.3674, train_accuracy: 0.0391, time: 1.25 s/iter, learning rate: 0.05 -20220630-19:11:39 Iters: 007300/[01], loss: 11.9335, train_accuracy: 0.0391, time: 1.25 s/iter, learning rate: 0.05 -20220630-19:13:43 Iters: 007400/[01], loss: 11.2701, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220630-19:15:49 Iters: 007500/[01], loss: 11.3916, train_accuracy: 0.0391, time: 1.26 s/iter, learning rate: 0.05 -20220630-19:17:53 Iters: 007600/[01], loss: 11.9061, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220630-19:19:58 Iters: 007700/[01], loss: 11.1933, train_accuracy: 0.0469, time: 1.25 s/iter, learning rate: 0.05 -20220630-19:22:04 Iters: 007800/[01], loss: 11.1660, train_accuracy: 0.0469, time: 1.26 s/iter, learning rate: 0.05 -20220630-19:24:10 Iters: 007900/[01], loss: 11.8091, train_accuracy: 0.0547, time: 1.26 s/iter, learning rate: 0.05 -20220630-19:26:15 Iters: 008000/[01], loss: 11.6959, 
train_accuracy: 0.0625, time: 1.25 s/iter, learning rate: 0.05 -20220630-19:28:21 Iters: 008100/[01], loss: 11.1003, train_accuracy: 0.0391, time: 1.25 s/iter, learning rate: 0.05 -20220630-19:30:27 Iters: 008200/[01], loss: 12.0242, train_accuracy: 0.0234, time: 1.26 s/iter, learning rate: 0.05 -20220630-19:32:32 Iters: 008300/[01], loss: 11.3920, train_accuracy: 0.0312, time: 1.25 s/iter, learning rate: 0.05 -20220630-19:34:38 Iters: 008400/[01], loss: 12.1156, train_accuracy: 0.0078, time: 1.26 s/iter, learning rate: 0.05 -20220630-19:36:44 Iters: 008500/[01], loss: 12.4508, train_accuracy: 0.0078, time: 1.26 s/iter, learning rate: 0.05 -20220630-19:38:49 Iters: 008600/[01], loss: 11.8956, train_accuracy: 0.0391, time: 1.26 s/iter, learning rate: 0.05 -20220630-19:40:55 Iters: 008700/[01], loss: 12.1465, train_accuracy: 0.0000, time: 1.26 s/iter, learning rate: 0.05 -20220630-19:42:59 Iters: 008800/[01], loss: 11.9722, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220630-19:45:06 Iters: 008900/[01], loss: 11.7257, train_accuracy: 0.0156, time: 1.26 s/iter, learning rate: 0.05 -20220630-19:47:10 Iters: 009000/[01], loss: 11.5601, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220630-19:49:15 Iters: 009100/[01], loss: 11.8287, train_accuracy: 0.0234, time: 1.25 s/iter, learning rate: 0.05 -20220630-19:51:20 Iters: 009200/[01], loss: 13.1837, train_accuracy: 0.0078, time: 1.25 s/iter, learning rate: 0.05 -20220630-19:53:26 Iters: 009300/[01], loss: 12.1132, train_accuracy: 0.0469, time: 1.25 s/iter, learning rate: 0.05 -20220630-19:55:31 Iters: 009400/[01], loss: 11.3624, train_accuracy: 0.0312, time: 1.26 s/iter, learning rate: 0.05 -20220630-19:57:37 Iters: 009500/[01], loss: 12.4750, train_accuracy: 0.0156, time: 1.26 s/iter, learning rate: 0.05 -20220630-19:59:43 Iters: 009600/[01], loss: 11.3134, train_accuracy: 0.0469, time: 1.26 s/iter, learning rate: 0.05 -20220630-20:01:48 Iters: 009700/[01], loss: 12.4367, 
train_accuracy: 0.0312, time: 1.25 s/iter, learning rate: 0.05 -20220630-20:03:53 Iters: 009800/[01], loss: 11.4048, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220630-20:05:59 Iters: 009900/[01], loss: 11.2492, train_accuracy: 0.0469, time: 1.26 s/iter, learning rate: 0.05 -20220630-20:08:04 Iters: 010000/[01], loss: 11.2687, train_accuracy: 0.0391, time: 1.25 s/iter, learning rate: 0.05 -20220630-20:08:04 Saving checkpoint: 10000 -20220630-20:09:20 LFW Ave Accuracy: 98.7165 -20220630-20:10:35 AgeDB-30 Ave Accuracy: 93.0167 -20220630-20:12:02 CFP-FP Ave Accuracy: 86.9000 -20220630-20:12:02 Current Best Accuracy: LFW: 98.7165 in iters: 10000, AgeDB-30: 93.0167 in iters: 10000 and CFP-FP: 86.9000 in iters: 10000 -20220630-20:14:07 Iters: 010100/[01], loss: 11.1374, train_accuracy: 0.0547, time: 3.63 s/iter, learning rate: 0.05 -20220630-20:16:12 Iters: 010200/[01], loss: 11.0682, train_accuracy: 0.0156, time: 1.26 s/iter, learning rate: 0.05 -20220630-20:18:17 Iters: 010300/[01], loss: 11.2882, train_accuracy: 0.0312, time: 1.25 s/iter, learning rate: 0.05 -20220630-20:20:23 Iters: 010400/[01], loss: 12.6716, train_accuracy: 0.0156, time: 1.25 s/iter, learning rate: 0.05 -20220630-20:22:28 Iters: 010500/[01], loss: 11.6022, train_accuracy: 0.0312, time: 1.25 s/iter, learning rate: 0.05 -20220630-20:24:33 Iters: 010600/[01], loss: 10.2224, train_accuracy: 0.0469, time: 1.25 s/iter, learning rate: 0.05 -20220630-20:26:39 Iters: 010700/[01], loss: 11.9506, train_accuracy: 0.0078, time: 1.26 s/iter, learning rate: 0.05 -20220630-20:28:44 Iters: 010800/[01], loss: 11.7347, train_accuracy: 0.0391, time: 1.25 s/iter, learning rate: 0.05 -20220630-20:30:49 Iters: 010900/[01], loss: 11.4881, train_accuracy: 0.0156, time: 1.25 s/iter, learning rate: 0.05 -20220630-20:32:54 Iters: 011000/[01], loss: 11.6399, train_accuracy: 0.0312, time: 1.25 s/iter, learning rate: 0.05 -20220630-20:35:00 Iters: 011100/[01], loss: 11.7817, train_accuracy: 0.0312, time: 
1.26 s/iter, learning rate: 0.05 -20220630-20:37:04 Iters: 011200/[01], loss: 11.7797, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220630-20:39:09 Iters: 011300/[01], loss: 12.1715, train_accuracy: 0.0234, time: 1.26 s/iter, learning rate: 0.05 -20220630-20:41:14 Iters: 011400/[01], loss: 11.9413, train_accuracy: 0.0312, time: 1.25 s/iter, learning rate: 0.05 -20220630-20:43:19 Iters: 011500/[01], loss: 12.0871, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220630-20:45:24 Iters: 011600/[01], loss: 12.0167, train_accuracy: 0.0156, time: 1.25 s/iter, learning rate: 0.05 -20220630-20:47:29 Iters: 011700/[01], loss: 12.6158, train_accuracy: 0.0156, time: 1.25 s/iter, learning rate: 0.05 -20220630-20:49:32 Iters: 011800/[01], loss: 10.8940, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220630-20:51:37 Iters: 011900/[01], loss: 12.2561, train_accuracy: 0.0156, time: 1.25 s/iter, learning rate: 0.05 -20220630-20:53:41 Iters: 012000/[01], loss: 12.1594, train_accuracy: 0.0469, time: 1.25 s/iter, learning rate: 0.05 -20220630-20:55:46 Iters: 012100/[01], loss: 12.0740, train_accuracy: 0.0469, time: 1.25 s/iter, learning rate: 0.05 -20220630-20:57:51 Iters: 012200/[01], loss: 11.3215, train_accuracy: 0.0312, time: 1.25 s/iter, learning rate: 0.05 -20220630-20:59:54 Iters: 012300/[01], loss: 11.4316, train_accuracy: 0.0078, time: 1.24 s/iter, learning rate: 0.05 -20220630-21:02:00 Iters: 012400/[01], loss: 11.9833, train_accuracy: 0.0391, time: 1.25 s/iter, learning rate: 0.05 -20220630-21:04:04 Iters: 012500/[01], loss: 12.5649, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220630-21:06:08 Iters: 012600/[01], loss: 12.1056, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220630-21:08:13 Iters: 012700/[01], loss: 12.4893, train_accuracy: 0.0391, time: 1.25 s/iter, learning rate: 0.05 -20220630-21:10:18 Iters: 012800/[01], loss: 11.5958, train_accuracy: 0.0078, time: 1.25 
s/iter, learning rate: 0.05 -20220630-21:12:23 Iters: 012900/[01], loss: 11.2983, train_accuracy: 0.0312, time: 1.25 s/iter, learning rate: 0.05 -20220630-21:14:28 Iters: 013000/[01], loss: 11.9740, train_accuracy: 0.0000, time: 1.25 s/iter, learning rate: 0.05 -20220630-21:16:34 Iters: 013100/[01], loss: 11.8278, train_accuracy: 0.0078, time: 1.26 s/iter, learning rate: 0.05 -20220630-21:18:38 Iters: 013200/[01], loss: 12.1489, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220630-21:20:43 Iters: 013300/[01], loss: 11.2506, train_accuracy: 0.0391, time: 1.25 s/iter, learning rate: 0.05 -20220630-21:22:47 Iters: 013400/[01], loss: 11.9923, train_accuracy: 0.0078, time: 1.24 s/iter, learning rate: 0.05 -20220630-21:24:50 Iters: 013500/[01], loss: 11.7996, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220630-21:26:55 Iters: 013600/[01], loss: 11.7998, train_accuracy: 0.0234, time: 1.25 s/iter, learning rate: 0.05 -20220630-21:28:59 Iters: 013700/[01], loss: 12.8082, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220630-21:31:04 Iters: 013800/[01], loss: 10.8490, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220630-21:33:09 Iters: 013900/[01], loss: 12.6718, train_accuracy: 0.0156, time: 1.25 s/iter, learning rate: 0.05 -20220630-21:35:13 Iters: 014000/[01], loss: 12.1663, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220630-21:37:17 Iters: 014100/[01], loss: 11.1713, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220630-21:39:22 Iters: 014200/[01], loss: 10.7771, train_accuracy: 0.0547, time: 1.24 s/iter, learning rate: 0.05 -20220630-21:41:26 Iters: 014300/[01], loss: 11.3951, train_accuracy: 0.0312, time: 1.25 s/iter, learning rate: 0.05 -20220630-21:43:31 Iters: 014400/[01], loss: 11.5433, train_accuracy: 0.0312, time: 1.25 s/iter, learning rate: 0.05 -20220630-21:45:36 Iters: 014500/[01], loss: 11.6415, train_accuracy: 0.0234, time: 1.24 s/iter, 
learning rate: 0.05 -20220630-21:47:41 Iters: 014600/[01], loss: 12.2698, train_accuracy: 0.0312, time: 1.25 s/iter, learning rate: 0.05 -20220630-21:49:46 Iters: 014700/[01], loss: 11.7846, train_accuracy: 0.0469, time: 1.25 s/iter, learning rate: 0.05 -20220630-21:51:50 Iters: 014800/[01], loss: 11.8545, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220630-21:53:55 Iters: 014900/[01], loss: 12.2453, train_accuracy: 0.0234, time: 1.25 s/iter, learning rate: 0.05 -20220630-21:55:59 Iters: 015000/[01], loss: 12.2990, train_accuracy: 0.0000, time: 1.25 s/iter, learning rate: 0.05 -20220630-21:58:04 Iters: 015100/[01], loss: 12.3534, train_accuracy: 0.0312, time: 1.25 s/iter, learning rate: 0.05 -20220630-22:00:08 Iters: 015200/[01], loss: 11.7781, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220630-22:02:13 Iters: 015300/[01], loss: 12.0465, train_accuracy: 0.0234, time: 1.25 s/iter, learning rate: 0.05 -20220630-22:04:18 Iters: 015400/[01], loss: 12.2721, train_accuracy: 0.0391, time: 1.25 s/iter, learning rate: 0.05 -20220630-22:06:22 Iters: 015500/[01], loss: 10.7219, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220630-22:08:25 Iters: 015600/[01], loss: 11.3623, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220630-22:10:31 Iters: 015700/[01], loss: 10.7974, train_accuracy: 0.0391, time: 1.25 s/iter, learning rate: 0.05 -20220630-22:12:34 Iters: 015800/[01], loss: 11.6881, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220630-22:14:39 Iters: 015900/[01], loss: 12.1142, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220630-22:16:44 Iters: 016000/[01], loss: 11.1876, train_accuracy: 0.0391, time: 1.25 s/iter, learning rate: 0.05 -20220630-22:18:48 Iters: 016100/[01], loss: 11.0110, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220630-22:20:53 Iters: 016200/[01], loss: 12.3639, train_accuracy: 0.0312, time: 1.25 s/iter, learning 
rate: 0.05 -20220630-22:22:57 Iters: 016300/[01], loss: 11.0680, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220630-22:25:02 Iters: 016400/[01], loss: 11.6412, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220630-22:27:07 Iters: 016500/[01], loss: 11.1368, train_accuracy: 0.0547, time: 1.25 s/iter, learning rate: 0.05 -20220630-22:29:12 Iters: 016600/[01], loss: 11.5297, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220630-22:31:16 Iters: 016700/[01], loss: 12.7750, train_accuracy: 0.0078, time: 1.25 s/iter, learning rate: 0.05 -20220630-22:33:21 Iters: 016800/[01], loss: 11.4092, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220630-22:35:25 Iters: 016900/[01], loss: 12.1012, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220630-22:37:31 Iters: 017000/[01], loss: 11.6980, train_accuracy: 0.0312, time: 1.26 s/iter, learning rate: 0.05 -20220630-22:39:35 Iters: 017100/[01], loss: 11.4866, train_accuracy: 0.0078, time: 1.24 s/iter, learning rate: 0.05 -20220630-22:41:39 Iters: 017200/[01], loss: 11.2509, train_accuracy: 0.0234, time: 1.25 s/iter, learning rate: 0.05 -20220630-22:43:45 Iters: 017300/[01], loss: 11.7655, train_accuracy: 0.0234, time: 1.25 s/iter, learning rate: 0.05 -20220630-22:45:49 Iters: 017400/[01], loss: 12.3935, train_accuracy: 0.0547, time: 1.24 s/iter, learning rate: 0.05 -20220630-22:47:54 Iters: 017500/[01], loss: 11.3054, train_accuracy: 0.0391, time: 1.25 s/iter, learning rate: 0.05 -20220630-22:49:59 Iters: 017600/[01], loss: 12.3600, train_accuracy: 0.0312, time: 1.25 s/iter, learning rate: 0.05 -20220630-22:52:02 Iters: 017700/[01], loss: 12.1139, train_accuracy: 0.0078, time: 1.24 s/iter, learning rate: 0.05 -20220630-22:54:08 Iters: 017800/[01], loss: 11.8162, train_accuracy: 0.0312, time: 1.25 s/iter, learning rate: 0.05 -20220630-22:56:12 Iters: 017900/[01], loss: 11.0721, train_accuracy: 0.0312, time: 1.25 s/iter, learning rate: 0.05 
-20220630-22:58:17 Iters: 018000/[01], loss: 10.6487, train_accuracy: 0.0703, time: 1.25 s/iter, learning rate: 0.05 -20220630-23:00:21 Iters: 018100/[01], loss: 11.7918, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220630-23:02:25 Iters: 018200/[01], loss: 11.0600, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220630-23:04:31 Iters: 018300/[01], loss: 11.6251, train_accuracy: 0.0391, time: 1.25 s/iter, learning rate: 0.05 -20220630-23:06:35 Iters: 018400/[01], loss: 11.9639, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220630-23:08:40 Iters: 018500/[01], loss: 11.0882, train_accuracy: 0.0312, time: 1.25 s/iter, learning rate: 0.05 -20220630-23:10:44 Iters: 018600/[01], loss: 11.6982, train_accuracy: 0.0312, time: 1.25 s/iter, learning rate: 0.05 -20220630-23:12:48 Iters: 018700/[01], loss: 12.2908, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220630-23:14:53 Iters: 018800/[01], loss: 11.1374, train_accuracy: 0.0234, time: 1.25 s/iter, learning rate: 0.05 -20220630-23:16:58 Iters: 018900/[01], loss: 11.3076, train_accuracy: 0.0156, time: 1.25 s/iter, learning rate: 0.05 -20220630-23:19:02 Iters: 019000/[01], loss: 11.8681, train_accuracy: 0.0312, time: 1.25 s/iter, learning rate: 0.05 -20220630-23:21:06 Iters: 019100/[01], loss: 11.0559, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220630-23:23:09 Iters: 019200/[01], loss: 11.7909, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220630-23:25:15 Iters: 019300/[01], loss: 11.0974, train_accuracy: 0.0469, time: 1.25 s/iter, learning rate: 0.05 -20220630-23:27:18 Iters: 019400/[01], loss: 11.0624, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220630-23:29:23 Iters: 019500/[01], loss: 10.7418, train_accuracy: 0.0469, time: 1.25 s/iter, learning rate: 0.05 -20220630-23:31:27 Iters: 019600/[01], loss: 11.1713, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 
-20220630-23:33:31 Iters: 019700/[01], loss: 11.3269, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220630-23:35:36 Iters: 019800/[01], loss: 11.5654, train_accuracy: 0.0156, time: 1.25 s/iter, learning rate: 0.05 -20220630-23:37:40 Iters: 019900/[01], loss: 12.0226, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220630-23:39:44 Iters: 020000/[01], loss: 11.0354, train_accuracy: 0.0781, time: 1.24 s/iter, learning rate: 0.05 -20220630-23:39:44 Saving checkpoint: 20000 -20220630-23:41:00 LFW Ave Accuracy: 99.0999 -20220630-23:42:15 AgeDB-30 Ave Accuracy: 93.4667 -20220630-23:43:41 CFP-FP Ave Accuracy: 87.1286 -20220630-23:43:41 Current Best Accuracy: LFW: 99.0999 in iters: 20000, AgeDB-30: 93.4667 in iters: 20000 and CFP-FP: 87.1286 in iters: 20000 -20220630-23:45:45 Iters: 020100/[01], loss: 11.5514, train_accuracy: 0.0469, time: 3.61 s/iter, learning rate: 0.05 -20220630-23:47:50 Iters: 020200/[01], loss: 11.5414, train_accuracy: 0.0312, time: 1.25 s/iter, learning rate: 0.05 -20220630-23:49:53 Iters: 020300/[01], loss: 10.8997, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220630-23:51:57 Iters: 020400/[01], loss: 10.9668, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220630-23:54:01 Iters: 020500/[01], loss: 12.0623, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220630-23:56:05 Iters: 020600/[01], loss: 11.0889, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220630-23:58:09 Iters: 020700/[01], loss: 12.2413, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220701-00:00:13 Iters: 020800/[01], loss: 11.5123, train_accuracy: 0.0625, time: 1.24 s/iter, learning rate: 0.05 -20220701-00:02:17 Iters: 020900/[01], loss: 11.6141, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220701-00:04:22 Iters: 021000/[01], loss: 11.0537, train_accuracy: 0.0469, time: 1.25 s/iter, learning rate: 0.05 -20220701-00:06:25 Iters: 
021100/[01], loss: 12.3202, train_accuracy: 0.0078, time: 1.24 s/iter, learning rate: 0.05 -20220701-00:08:30 Iters: 021200/[01], loss: 11.1309, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220701-00:10:34 Iters: 021300/[01], loss: 11.8202, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220701-00:12:39 Iters: 021400/[01], loss: 11.0946, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220701-00:14:42 Iters: 021500/[01], loss: 11.6951, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220701-00:16:47 Iters: 021600/[01], loss: 11.8300, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220701-00:18:51 Iters: 021700/[01], loss: 11.1428, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220701-00:20:55 Iters: 021800/[01], loss: 11.9226, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220701-00:22:59 Iters: 021900/[01], loss: 11.4334, train_accuracy: 0.0625, time: 1.24 s/iter, learning rate: 0.05 -20220701-00:25:04 Iters: 022000/[01], loss: 11.2505, train_accuracy: 0.0156, time: 1.25 s/iter, learning rate: 0.05 -20220701-00:27:08 Iters: 022100/[01], loss: 11.5022, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220701-00:29:12 Iters: 022200/[01], loss: 11.4580, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220701-00:31:15 Iters: 022300/[01], loss: 10.9963, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220701-00:33:20 Iters: 022400/[01], loss: 12.5162, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220701-00:35:24 Iters: 022500/[01], loss: 11.8478, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220701-00:37:28 Iters: 022600/[01], loss: 10.8928, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220701-00:39:32 Iters: 022700/[01], loss: 11.7991, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220701-00:41:37 Iters: 
022800/[01], loss: 11.4535, train_accuracy: 0.0234, time: 1.25 s/iter, learning rate: 0.05 -20220701-00:43:42 Iters: 022900/[01], loss: 11.4141, train_accuracy: 0.0312, time: 1.25 s/iter, learning rate: 0.05 -20220701-00:45:45 Iters: 023000/[01], loss: 11.9053, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220701-00:47:50 Iters: 023100/[01], loss: 11.4387, train_accuracy: 0.0781, time: 1.24 s/iter, learning rate: 0.05 -20220701-00:49:53 Iters: 023200/[01], loss: 11.5925, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-00:51:57 Iters: 023300/[01], loss: 10.9169, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220701-00:54:02 Iters: 023400/[01], loss: 11.0074, train_accuracy: 0.0391, time: 1.25 s/iter, learning rate: 0.05 -20220701-00:56:06 Iters: 023500/[01], loss: 11.6325, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220701-00:58:10 Iters: 023600/[01], loss: 11.3721, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220701-01:00:14 Iters: 023700/[01], loss: 11.4161, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-01:02:17 Iters: 023800/[01], loss: 11.4110, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-01:04:20 Iters: 023900/[01], loss: 11.1508, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-01:06:24 Iters: 024000/[01], loss: 11.5563, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220701-01:08:28 Iters: 024100/[01], loss: 11.6082, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220701-01:10:33 Iters: 024200/[01], loss: 11.6187, train_accuracy: 0.0391, time: 1.25 s/iter, learning rate: 0.05 -20220701-01:12:38 Iters: 024300/[01], loss: 11.3500, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220701-01:14:42 Iters: 024400/[01], loss: 11.6109, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220701-01:16:46 Iters: 
024500/[01], loss: 11.7305, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220701-01:18:49 Iters: 024600/[01], loss: 11.6310, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220701-01:20:53 Iters: 024700/[01], loss: 11.2771, train_accuracy: 0.0547, time: 1.24 s/iter, learning rate: 0.05 -20220701-01:22:58 Iters: 024800/[01], loss: 11.5567, train_accuracy: 0.0469, time: 1.25 s/iter, learning rate: 0.05 -20220701-01:25:03 Iters: 024900/[01], loss: 11.3558, train_accuracy: 0.0312, time: 1.25 s/iter, learning rate: 0.05 -20220701-01:27:07 Iters: 025000/[01], loss: 11.2803, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220701-01:29:12 Iters: 025100/[01], loss: 10.7225, train_accuracy: 0.0547, time: 1.25 s/iter, learning rate: 0.05 -20220701-01:31:17 Iters: 025200/[01], loss: 12.2035, train_accuracy: 0.0234, time: 1.25 s/iter, learning rate: 0.05 -20220701-01:33:21 Iters: 025300/[01], loss: 11.3065, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220701-01:35:25 Iters: 025400/[01], loss: 12.0750, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220701-01:37:29 Iters: 025500/[01], loss: 11.8207, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220701-01:39:33 Iters: 025600/[01], loss: 11.7863, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220701-01:41:37 Iters: 025700/[01], loss: 11.8052, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220701-01:43:42 Iters: 025800/[01], loss: 11.6967, train_accuracy: 0.0234, time: 1.25 s/iter, learning rate: 0.05 -20220701-01:45:46 Iters: 025900/[01], loss: 11.0112, train_accuracy: 0.0625, time: 1.24 s/iter, learning rate: 0.05 -20220701-01:47:50 Iters: 026000/[01], loss: 11.3416, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220701-01:49:53 Iters: 026100/[01], loss: 10.8171, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220701-01:51:57 Iters: 
026200/[01], loss: 11.8251, train_accuracy: 0.0078, time: 1.24 s/iter, learning rate: 0.05 -20220701-01:54:00 Iters: 026300/[01], loss: 11.8820, train_accuracy: 0.0078, time: 1.24 s/iter, learning rate: 0.05 -20220701-01:56:04 Iters: 026400/[01], loss: 10.9085, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220701-01:58:08 Iters: 026500/[01], loss: 11.5130, train_accuracy: 0.0000, time: 1.24 s/iter, learning rate: 0.05 -20220701-02:00:12 Iters: 026600/[01], loss: 11.3758, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220701-02:02:17 Iters: 026700/[01], loss: 12.2802, train_accuracy: 0.0156, time: 1.25 s/iter, learning rate: 0.05 -20220701-02:04:21 Iters: 026800/[01], loss: 12.3203, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220701-02:06:25 Iters: 026900/[01], loss: 10.6449, train_accuracy: 0.0859, time: 1.24 s/iter, learning rate: 0.05 -20220701-02:08:29 Iters: 027000/[01], loss: 12.0619, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220701-02:10:33 Iters: 027100/[01], loss: 11.4207, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220701-02:12:38 Iters: 027200/[01], loss: 13.1299, train_accuracy: 0.0234, time: 1.25 s/iter, learning rate: 0.05 -20220701-02:14:43 Iters: 027300/[01], loss: 10.8236, train_accuracy: 0.0156, time: 1.25 s/iter, learning rate: 0.05 -20220701-02:16:46 Iters: 027400/[01], loss: 11.8329, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220701-02:18:51 Iters: 027500/[01], loss: 10.4999, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220701-02:20:55 Iters: 027600/[01], loss: 11.5405, train_accuracy: 0.0156, time: 1.25 s/iter, learning rate: 0.05 -20220701-02:22:59 Iters: 027700/[01], loss: 10.9165, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220701-02:25:03 Iters: 027800/[01], loss: 11.8060, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220701-02:27:06 Iters: 
027900/[01], loss: 12.3301, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220701-02:29:11 Iters: 028000/[01], loss: 11.5623, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220701-02:31:15 Iters: 028100/[01], loss: 11.5636, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220701-02:33:18 Iters: 028200/[01], loss: 11.4913, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220701-02:35:23 Iters: 028300/[01], loss: 11.1452, train_accuracy: 0.0391, time: 1.25 s/iter, learning rate: 0.05 -20220701-02:37:28 Iters: 028400/[01], loss: 10.8106, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220701-02:39:32 Iters: 028500/[01], loss: 11.5544, train_accuracy: 0.0625, time: 1.25 s/iter, learning rate: 0.05 -20220701-02:41:36 Iters: 028600/[01], loss: 10.8337, train_accuracy: 0.0625, time: 1.24 s/iter, learning rate: 0.05 -20220701-02:43:40 Iters: 028700/[01], loss: 11.3281, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220701-02:45:44 Iters: 028800/[01], loss: 11.6489, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220701-02:47:48 Iters: 028900/[01], loss: 10.9198, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220701-02:49:52 Iters: 029000/[01], loss: 11.2658, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220701-02:51:57 Iters: 029100/[01], loss: 12.0812, train_accuracy: 0.0312, time: 1.25 s/iter, learning rate: 0.05 -20220701-02:54:02 Iters: 029200/[01], loss: 11.4445, train_accuracy: 0.0547, time: 1.25 s/iter, learning rate: 0.05 -20220701-02:56:07 Iters: 029300/[01], loss: 11.4030, train_accuracy: 0.0312, time: 1.25 s/iter, learning rate: 0.05 -20220701-02:58:11 Iters: 029400/[01], loss: 11.1770, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220701-03:00:16 Iters: 029500/[01], loss: 12.2362, train_accuracy: 0.0312, time: 1.25 s/iter, learning rate: 0.05 -20220701-03:02:19 Iters: 
029600/[01], loss: 11.6468, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220701-03:04:24 Iters: 029700/[01], loss: 11.4783, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220701-03:06:28 Iters: 029800/[01], loss: 10.6760, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220701-03:08:32 Iters: 029900/[01], loss: 11.2065, train_accuracy: 0.0391, time: 1.25 s/iter, learning rate: 0.05 -20220701-03:10:37 Iters: 030000/[01], loss: 11.3240, train_accuracy: 0.0156, time: 1.25 s/iter, learning rate: 0.05 -20220701-03:10:37 Saving checkpoint: 30000 -20220701-03:11:57 LFW Ave Accuracy: 99.0164 -20220701-03:13:12 AgeDB-30 Ave Accuracy: 92.6500 -20220701-03:14:38 CFP-FP Ave Accuracy: 88.3000 -20220701-03:14:38 Current Best Accuracy: LFW: 99.0999 in iters: 20000, AgeDB-30: 93.4667 in iters: 20000 and CFP-FP: 88.3000 in iters: 30000 -20220701-03:16:42 Iters: 030100/[01], loss: 11.6138, train_accuracy: 0.0469, time: 3.65 s/iter, learning rate: 0.05 -20220701-03:18:46 Iters: 030200/[01], loss: 11.6062, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220701-03:20:50 Iters: 030300/[01], loss: 11.6169, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220701-03:22:54 Iters: 030400/[01], loss: 11.6023, train_accuracy: 0.0156, time: 1.25 s/iter, learning rate: 0.05 -20220701-03:25:00 Iters: 030500/[01], loss: 11.2769, train_accuracy: 0.0234, time: 1.25 s/iter, learning rate: 0.05 -20220701-03:27:03 Iters: 030600/[01], loss: 11.5895, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-03:29:07 Iters: 030700/[01], loss: 12.0915, train_accuracy: 0.0234, time: 1.25 s/iter, learning rate: 0.05 -20220701-03:31:11 Iters: 030800/[01], loss: 11.0746, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220701-03:33:15 Iters: 030900/[01], loss: 11.8210, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220701-03:35:18 Iters: 031000/[01], loss: 11.7391, 
train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-03:37:22 Iters: 031100/[01], loss: 11.4027, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220701-03:39:26 Iters: 031200/[01], loss: 11.0473, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220701-03:41:30 Iters: 031300/[01], loss: 11.8716, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220701-03:43:34 Iters: 031400/[01], loss: 11.0186, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220701-03:45:38 Iters: 031500/[01], loss: 11.5562, train_accuracy: 0.0391, time: 1.25 s/iter, learning rate: 0.05 -20220701-03:47:43 Iters: 031600/[01], loss: 12.2051, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220701-03:49:46 Iters: 031700/[01], loss: 11.8673, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220701-03:51:50 Iters: 031800/[01], loss: 11.8609, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-03:53:54 Iters: 031900/[01], loss: 10.9192, train_accuracy: 0.0625, time: 1.24 s/iter, learning rate: 0.05 -20220701-03:55:58 Iters: 032000/[01], loss: 11.1613, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220701-03:58:02 Iters: 032100/[01], loss: 11.9953, train_accuracy: 0.0234, time: 1.25 s/iter, learning rate: 0.05 -20220701-04:00:06 Iters: 032200/[01], loss: 10.9818, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-04:02:11 Iters: 032300/[01], loss: 11.6221, train_accuracy: 0.0312, time: 1.25 s/iter, learning rate: 0.05 -20220701-04:04:15 Iters: 032400/[01], loss: 11.7292, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220701-04:06:19 Iters: 032500/[01], loss: 11.6653, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220701-04:08:23 Iters: 032600/[01], loss: 11.3497, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220701-04:10:27 Iters: 032700/[01], loss: 10.8980, 
train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220701-04:12:31 Iters: 032800/[01], loss: 11.6778, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220701-04:14:34 Iters: 032900/[01], loss: 11.0522, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-04:16:38 Iters: 033000/[01], loss: 12.1940, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220701-04:18:42 Iters: 033100/[01], loss: 12.1898, train_accuracy: 0.0625, time: 1.24 s/iter, learning rate: 0.05 -20220701-04:20:47 Iters: 033200/[01], loss: 11.9182, train_accuracy: 0.0312, time: 1.25 s/iter, learning rate: 0.05 -20220701-04:22:50 Iters: 033300/[01], loss: 11.4907, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220701-04:24:54 Iters: 033400/[01], loss: 10.7558, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220701-04:26:57 Iters: 033500/[01], loss: 11.2069, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-04:29:02 Iters: 033600/[01], loss: 11.1885, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220701-04:31:05 Iters: 033700/[01], loss: 10.9452, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-04:33:08 Iters: 033800/[01], loss: 11.0576, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-04:35:12 Iters: 033900/[01], loss: 12.2320, train_accuracy: 0.0078, time: 1.24 s/iter, learning rate: 0.05 -20220701-04:37:16 Iters: 034000/[01], loss: 11.3396, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220701-04:39:20 Iters: 034100/[01], loss: 11.4776, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220701-04:41:24 Iters: 034200/[01], loss: 12.0859, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220701-04:43:27 Iters: 034300/[01], loss: 11.3382, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220701-04:45:31 Iters: 034400/[01], loss: 12.0628, 
train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220701-04:47:35 Iters: 034500/[01], loss: 11.5695, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220701-04:49:40 Iters: 034600/[01], loss: 11.8723, train_accuracy: 0.0000, time: 1.24 s/iter, learning rate: 0.05 -20220701-04:51:44 Iters: 034700/[01], loss: 11.1827, train_accuracy: 0.0078, time: 1.25 s/iter, learning rate: 0.05 -20220701-04:53:49 Iters: 034800/[01], loss: 11.1093, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220701-04:55:52 Iters: 034900/[01], loss: 10.9200, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220701-04:57:56 Iters: 035000/[01], loss: 10.4321, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-05:00:00 Iters: 035100/[01], loss: 11.8893, train_accuracy: 0.0078, time: 1.24 s/iter, learning rate: 0.05 -20220701-05:02:04 Iters: 035200/[01], loss: 11.2568, train_accuracy: 0.0469, time: 1.25 s/iter, learning rate: 0.05 -20220701-05:04:09 Iters: 035300/[01], loss: 10.4308, train_accuracy: 0.0625, time: 1.24 s/iter, learning rate: 0.05 -20220701-05:06:13 Iters: 035400/[01], loss: 10.9350, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220701-05:08:17 Iters: 035500/[01], loss: 11.4569, train_accuracy: 0.0547, time: 1.24 s/iter, learning rate: 0.05 -20220701-05:10:21 Iters: 035600/[01], loss: 11.2916, train_accuracy: 0.0078, time: 1.24 s/iter, learning rate: 0.05 -20220701-05:12:26 Iters: 035700/[01], loss: 11.2180, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220701-05:14:29 Iters: 035800/[01], loss: 11.9831, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220701-05:16:34 Iters: 035900/[01], loss: 11.0056, train_accuracy: 0.0234, time: 1.25 s/iter, learning rate: 0.05 -20220701-05:18:38 Iters: 036000/[01], loss: 10.7409, train_accuracy: 0.0625, time: 1.24 s/iter, learning rate: 0.05 -20220701-05:20:42 Iters: 036100/[01], loss: 11.7702, 
train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220701-05:22:45 Iters: 036200/[01], loss: 12.1758, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220701-05:24:49 Iters: 036300/[01], loss: 10.9207, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-05:26:53 Iters: 036400/[01], loss: 11.2893, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220701-05:28:56 Iters: 036500/[01], loss: 11.6541, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220701-05:31:01 Iters: 036600/[01], loss: 10.3341, train_accuracy: 0.0312, time: 1.25 s/iter, learning rate: 0.05 -20220701-05:33:06 Iters: 036700/[01], loss: 11.8912, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220701-05:35:10 Iters: 036800/[01], loss: 11.6473, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220701-05:37:14 Iters: 036900/[01], loss: 11.3900, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220701-05:39:18 Iters: 037000/[01], loss: 12.0032, train_accuracy: 0.0078, time: 1.24 s/iter, learning rate: 0.05 -20220701-05:41:22 Iters: 037100/[01], loss: 11.5540, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220701-05:43:25 Iters: 037200/[01], loss: 11.7384, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220701-05:45:30 Iters: 037300/[01], loss: 11.3316, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220701-05:47:33 Iters: 037400/[01], loss: 11.1057, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220701-05:49:38 Iters: 037500/[01], loss: 11.9859, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220701-05:51:42 Iters: 037600/[01], loss: 10.7516, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220701-05:53:45 Iters: 037700/[01], loss: 10.9528, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220701-05:55:50 Iters: 037800/[01], loss: 11.8421, 
train_accuracy: 0.0234, time: 1.25 s/iter, learning rate: 0.05 -20220701-05:57:54 Iters: 037900/[01], loss: 11.8987, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220701-05:59:58 Iters: 038000/[01], loss: 10.8755, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220701-06:02:02 Iters: 038100/[01], loss: 10.9241, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220701-06:04:06 Iters: 038200/[01], loss: 11.6272, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220701-06:06:10 Iters: 038300/[01], loss: 11.2140, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220701-06:08:13 Iters: 038400/[01], loss: 11.5150, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-06:10:17 Iters: 038500/[01], loss: 11.7515, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220701-06:12:22 Iters: 038600/[01], loss: 11.9528, train_accuracy: 0.0312, time: 1.25 s/iter, learning rate: 0.05 -20220701-06:14:26 Iters: 038700/[01], loss: 12.2517, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220701-06:16:30 Iters: 038800/[01], loss: 11.5458, train_accuracy: 0.0547, time: 1.24 s/iter, learning rate: 0.05 -20220701-06:18:34 Iters: 038900/[01], loss: 10.9502, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220701-06:20:38 Iters: 039000/[01], loss: 11.1686, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-06:22:41 Iters: 039100/[01], loss: 11.1516, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-06:24:44 Iters: 039200/[01], loss: 11.4633, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220701-06:26:49 Iters: 039300/[01], loss: 11.4060, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220701-06:28:53 Iters: 039400/[01], loss: 10.8091, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220701-06:30:57 Iters: 039500/[01], loss: 11.4708, 
train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220701-06:33:02 Iters: 039600/[01], loss: 11.1045, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220701-06:35:05 Iters: 039700/[01], loss: 11.4680, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220701-06:37:08 Iters: 039800/[01], loss: 10.8580, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-06:39:13 Iters: 039900/[01], loss: 11.8548, train_accuracy: 0.0078, time: 1.24 s/iter, learning rate: 0.05 -20220701-06:41:17 Iters: 040000/[01], loss: 11.0208, train_accuracy: 0.0156, time: 1.25 s/iter, learning rate: 0.05 -20220701-06:41:17 Saving checkpoint: 40000 -20220701-06:42:34 LFW Ave Accuracy: 98.8499 -20220701-06:43:50 AgeDB-30 Ave Accuracy: 93.3333 -20220701-06:45:17 CFP-FP Ave Accuracy: 88.2143 -20220701-06:45:17 Current Best Accuracy: LFW: 99.0999 in iters: 20000, AgeDB-30: 93.4667 in iters: 20000 and CFP-FP: 88.3000 in iters: 30000 -20220701-06:47:21 Iters: 040100/[01], loss: 10.6595, train_accuracy: 0.0312, time: 3.63 s/iter, learning rate: 0.05 -20220701-06:49:24 Iters: 040200/[01], loss: 11.7789, train_accuracy: 0.0078, time: 1.24 s/iter, learning rate: 0.05 -20220701-06:51:29 Iters: 040300/[01], loss: 12.0041, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220701-06:53:32 Iters: 040400/[01], loss: 11.2319, train_accuracy: 0.0859, time: 1.23 s/iter, learning rate: 0.05 -20220701-06:55:36 Iters: 040500/[01], loss: 11.2636, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220701-06:57:40 Iters: 040600/[01], loss: 11.5869, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220701-06:59:44 Iters: 040700/[01], loss: 11.7784, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220701-07:01:48 Iters: 040800/[01], loss: 11.2890, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220701-07:03:51 Iters: 040900/[01], loss: 12.1085, train_accuracy: 0.0312, time: 
1.24 s/iter, learning rate: 0.05 -20220701-07:05:55 Iters: 041000/[01], loss: 11.3644, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220701-07:07:59 Iters: 041100/[01], loss: 10.6851, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220701-07:10:04 Iters: 041200/[01], loss: 11.7198, train_accuracy: 0.0234, time: 1.25 s/iter, learning rate: 0.05 -20220701-07:12:07 Iters: 041300/[01], loss: 11.2463, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220701-07:14:12 Iters: 041400/[01], loss: 12.1332, train_accuracy: 0.0312, time: 1.25 s/iter, learning rate: 0.05 -20220701-07:16:16 Iters: 041500/[01], loss: 10.6909, train_accuracy: 0.0781, time: 1.24 s/iter, learning rate: 0.05 -20220701-07:18:21 Iters: 041600/[01], loss: 11.4365, train_accuracy: 0.0312, time: 1.25 s/iter, learning rate: 0.05 -20220701-07:20:25 Iters: 041700/[01], loss: 11.0170, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220701-07:22:29 Iters: 041800/[01], loss: 11.9191, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220701-07:24:34 Iters: 041900/[01], loss: 11.9884, train_accuracy: 0.0078, time: 1.25 s/iter, learning rate: 0.05 -20220701-07:26:39 Iters: 042000/[01], loss: 11.2622, train_accuracy: 0.0469, time: 1.25 s/iter, learning rate: 0.05 -20220701-07:28:44 Iters: 042100/[01], loss: 11.0805, train_accuracy: 0.0156, time: 1.26 s/iter, learning rate: 0.05 -20220701-07:30:48 Iters: 042200/[01], loss: 12.2300, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220701-07:32:53 Iters: 042300/[01], loss: 11.2316, train_accuracy: 0.0547, time: 1.25 s/iter, learning rate: 0.05 -20220701-07:34:57 Iters: 042400/[01], loss: 10.8216, train_accuracy: 0.0625, time: 1.24 s/iter, learning rate: 0.05 -20220701-07:37:01 Iters: 042500/[01], loss: 11.2542, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220701-07:39:05 Iters: 042600/[01], loss: 10.7266, train_accuracy: 0.0781, time: 1.24 
s/iter, learning rate: 0.05 -20220701-07:41:09 Iters: 042700/[01], loss: 11.1635, train_accuracy: 0.0625, time: 1.24 s/iter, learning rate: 0.05 -20220701-07:43:14 Iters: 042800/[01], loss: 11.2245, train_accuracy: 0.0469, time: 1.25 s/iter, learning rate: 0.05 -20220701-07:45:18 Iters: 042900/[01], loss: 11.1957, train_accuracy: 0.0547, time: 1.25 s/iter, learning rate: 0.05 -20220701-07:47:23 Iters: 043000/[01], loss: 11.9149, train_accuracy: 0.0078, time: 1.24 s/iter, learning rate: 0.05 -20220701-07:49:27 Iters: 043100/[01], loss: 11.4861, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220701-07:51:30 Iters: 043200/[01], loss: 10.4382, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-07:53:34 Iters: 043300/[01], loss: 11.6306, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220701-07:55:37 Iters: 043400/[01], loss: 11.3022, train_accuracy: 0.0078, time: 1.24 s/iter, learning rate: 0.05 -20220701-07:57:41 Iters: 043500/[01], loss: 11.1832, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220701-07:59:46 Iters: 043600/[01], loss: 11.3687, train_accuracy: 0.0156, time: 1.25 s/iter, learning rate: 0.05 -20220701-08:01:50 Iters: 043700/[01], loss: 11.2268, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220701-08:03:54 Iters: 043800/[01], loss: 12.1766, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220701-08:05:59 Iters: 043900/[01], loss: 11.4827, train_accuracy: 0.0234, time: 1.25 s/iter, learning rate: 0.05 -20220701-08:08:03 Iters: 044000/[01], loss: 11.3685, train_accuracy: 0.0547, time: 1.24 s/iter, learning rate: 0.05 -20220701-08:10:07 Iters: 044100/[01], loss: 11.6365, train_accuracy: 0.0234, time: 1.25 s/iter, learning rate: 0.05 -20220701-08:12:11 Iters: 044200/[01], loss: 10.5997, train_accuracy: 0.0625, time: 1.24 s/iter, learning rate: 0.05 -20220701-08:14:15 Iters: 044300/[01], loss: 10.4330, train_accuracy: 0.0469, time: 1.24 s/iter, 
learning rate: 0.05 -20220701-08:16:18 Iters: 044400/[01], loss: 11.3721, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-08:18:22 Iters: 044500/[01], loss: 11.8784, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220701-08:20:27 Iters: 044600/[01], loss: 10.9594, train_accuracy: 0.0156, time: 1.25 s/iter, learning rate: 0.05 -20220701-08:22:32 Iters: 044700/[01], loss: 10.9820, train_accuracy: 0.0391, time: 1.25 s/iter, learning rate: 0.05 -20220701-08:24:36 Iters: 044800/[01], loss: 11.6877, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220701-08:26:40 Iters: 044900/[01], loss: 11.7713, train_accuracy: 0.0078, time: 1.25 s/iter, learning rate: 0.05 -20220701-08:28:45 Iters: 045000/[01], loss: 11.3359, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220701-08:30:48 Iters: 045100/[01], loss: 11.4180, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-08:32:53 Iters: 045200/[01], loss: 11.3603, train_accuracy: 0.0156, time: 1.25 s/iter, learning rate: 0.05 -20220701-08:34:57 Iters: 045300/[01], loss: 11.3593, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220701-08:37:02 Iters: 045400/[01], loss: 11.0523, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220701-08:38:53 Train Epoch: 2/18 ... 
-20220701-08:39:06 Iters: 045500/[02], loss: 11.3777, train_accuracy: 0.0547, time: 0.13 s/iter, learning rate: 0.05 -20220701-08:41:09 Iters: 045600/[02], loss: 10.8130, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220701-08:43:12 Iters: 045700/[02], loss: 11.3874, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220701-08:45:15 Iters: 045800/[02], loss: 10.9940, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-08:47:18 Iters: 045900/[02], loss: 11.7009, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220701-08:49:22 Iters: 046000/[02], loss: 10.9632, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-08:51:25 Iters: 046100/[02], loss: 11.3342, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-08:53:28 Iters: 046200/[02], loss: 11.6632, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-08:55:31 Iters: 046300/[02], loss: 11.2327, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220701-08:57:34 Iters: 046400/[02], loss: 10.7878, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220701-08:59:38 Iters: 046500/[02], loss: 11.2125, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220701-09:01:41 Iters: 046600/[02], loss: 10.5475, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-09:03:44 Iters: 046700/[02], loss: 11.3504, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-09:05:47 Iters: 046800/[02], loss: 11.1472, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-09:07:50 Iters: 046900/[02], loss: 11.0970, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-09:09:53 Iters: 047000/[02], loss: 10.6553, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220701-09:11:56 Iters: 047100/[02], loss: 11.0271, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 
-20220701-09:13:59 Iters: 047200/[02], loss: 11.8924, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220701-09:16:02 Iters: 047300/[02], loss: 10.3753, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-09:18:06 Iters: 047400/[02], loss: 12.0026, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-09:20:09 Iters: 047500/[02], loss: 11.3175, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220701-09:22:12 Iters: 047600/[02], loss: 11.8772, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220701-09:24:15 Iters: 047700/[02], loss: 11.5293, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-09:26:18 Iters: 047800/[02], loss: 10.8375, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220701-09:28:21 Iters: 047900/[02], loss: 11.0596, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220701-09:30:25 Iters: 048000/[02], loss: 11.3227, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-09:32:28 Iters: 048100/[02], loss: 11.9806, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-09:34:31 Iters: 048200/[02], loss: 12.4077, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-09:36:34 Iters: 048300/[02], loss: 10.6686, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-09:38:37 Iters: 048400/[02], loss: 11.9728, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220701-09:40:41 Iters: 048500/[02], loss: 11.2181, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220701-09:42:44 Iters: 048600/[02], loss: 11.1506, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-09:44:47 Iters: 048700/[02], loss: 10.7332, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220701-09:46:50 Iters: 048800/[02], loss: 11.1280, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 
-20220701-09:48:54 Iters: 048900/[02], loss: 11.2126, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-09:50:57 Iters: 049000/[02], loss: 10.9195, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-09:53:00 Iters: 049100/[02], loss: 11.5358, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220701-09:55:03 Iters: 049200/[02], loss: 10.9355, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-09:57:07 Iters: 049300/[02], loss: 11.3713, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-09:59:10 Iters: 049400/[02], loss: 11.2712, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-10:01:13 Iters: 049500/[02], loss: 11.8483, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-10:03:17 Iters: 049600/[02], loss: 10.6599, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-10:05:20 Iters: 049700/[02], loss: 11.0727, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-10:07:23 Iters: 049800/[02], loss: 10.5531, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-10:09:26 Iters: 049900/[02], loss: 11.3240, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-10:11:30 Iters: 050000/[02], loss: 11.5065, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-10:11:30 Saving checkpoint: 50000 -20220701-10:12:46 LFW Ave Accuracy: 99.0832 -20220701-10:14:01 AgeDB-30 Ave Accuracy: 93.8500 -20220701-10:15:28 CFP-FP Ave Accuracy: 87.3571 -20220701-10:15:28 Current Best Accuracy: LFW: 99.0999 in iters: 20000, AgeDB-30: 93.8500 in iters: 50000 and CFP-FP: 88.3000 in iters: 30000 -20220701-10:17:30 Iters: 050100/[02], loss: 11.4706, train_accuracy: 0.0469, time: 3.61 s/iter, learning rate: 0.05 -20220701-10:19:34 Iters: 050200/[02], loss: 11.0847, train_accuracy: 0.0781, time: 1.23 s/iter, learning rate: 0.05 -20220701-10:21:37 Iters: 
050300/[02], loss: 10.3842, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-10:23:40 Iters: 050400/[02], loss: 10.2440, train_accuracy: 0.0703, time: 1.23 s/iter, learning rate: 0.05 -20220701-10:25:43 Iters: 050500/[02], loss: 11.3645, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-10:27:47 Iters: 050600/[02], loss: 11.0963, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220701-10:29:50 Iters: 050700/[02], loss: 12.0142, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-10:31:53 Iters: 050800/[02], loss: 11.2891, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-10:33:56 Iters: 050900/[02], loss: 11.2048, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-10:35:59 Iters: 051000/[02], loss: 11.1233, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220701-10:38:02 Iters: 051100/[02], loss: 10.9940, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220701-10:40:05 Iters: 051200/[02], loss: 10.9394, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-10:42:09 Iters: 051300/[02], loss: 11.7138, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220701-10:44:12 Iters: 051400/[02], loss: 11.8562, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220701-10:46:15 Iters: 051500/[02], loss: 10.9915, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-10:48:18 Iters: 051600/[02], loss: 12.0126, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-10:50:21 Iters: 051700/[02], loss: 12.1562, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220701-10:52:25 Iters: 051800/[02], loss: 11.7749, train_accuracy: 0.0781, time: 1.23 s/iter, learning rate: 0.05 -20220701-10:54:28 Iters: 051900/[02], loss: 11.6039, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-10:56:31 Iters: 
052000/[02], loss: 11.3388, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-10:58:34 Iters: 052100/[02], loss: 11.7277, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-11:00:37 Iters: 052200/[02], loss: 11.0198, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-11:02:41 Iters: 052300/[02], loss: 11.5014, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220701-11:04:44 Iters: 052400/[02], loss: 11.5661, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-11:06:47 Iters: 052500/[02], loss: 11.7062, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-11:08:50 Iters: 052600/[02], loss: 10.8239, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-11:10:53 Iters: 052700/[02], loss: 10.4416, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220701-11:12:56 Iters: 052800/[02], loss: 11.1741, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-11:15:00 Iters: 052900/[02], loss: 11.1235, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-11:17:03 Iters: 053000/[02], loss: 11.2268, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-11:19:06 Iters: 053100/[02], loss: 10.9320, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220701-11:21:09 Iters: 053200/[02], loss: 11.2962, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-11:23:13 Iters: 053300/[02], loss: 11.4998, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-11:25:16 Iters: 053400/[02], loss: 11.3197, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220701-11:27:19 Iters: 053500/[02], loss: 10.9350, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220701-11:29:22 Iters: 053600/[02], loss: 10.6309, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-11:31:25 Iters: 
053700/[02], loss: 10.4785, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220701-11:33:29 Iters: 053800/[02], loss: 12.0230, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220701-11:35:32 Iters: 053900/[02], loss: 11.9488, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-11:37:35 Iters: 054000/[02], loss: 11.8138, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-11:39:38 Iters: 054100/[02], loss: 10.2534, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220701-11:41:42 Iters: 054200/[02], loss: 11.5225, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-11:43:45 Iters: 054300/[02], loss: 10.9699, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-11:45:48 Iters: 054400/[02], loss: 11.1050, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-11:47:51 Iters: 054500/[02], loss: 11.1209, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-11:49:54 Iters: 054600/[02], loss: 10.9397, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-11:51:58 Iters: 054700/[02], loss: 11.1241, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220701-11:54:01 Iters: 054800/[02], loss: 11.2471, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-11:56:04 Iters: 054900/[02], loss: 11.5014, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-11:58:07 Iters: 055000/[02], loss: 11.2419, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-12:00:10 Iters: 055100/[02], loss: 11.2045, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220701-12:02:13 Iters: 055200/[02], loss: 11.6060, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-12:04:16 Iters: 055300/[02], loss: 12.0218, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220701-12:06:19 Iters: 
055400/[02], loss: 12.6624, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220701-12:08:23 Iters: 055500/[02], loss: 12.4267, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-12:10:26 Iters: 055600/[02], loss: 11.1433, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-12:12:29 Iters: 055700/[02], loss: 11.4008, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220701-12:14:32 Iters: 055800/[02], loss: 10.7816, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220701-12:16:35 Iters: 055900/[02], loss: 11.2613, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220701-12:18:39 Iters: 056000/[02], loss: 11.7647, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-12:20:42 Iters: 056100/[02], loss: 11.7802, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220701-12:22:45 Iters: 056200/[02], loss: 11.7118, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220701-12:24:48 Iters: 056300/[02], loss: 11.9924, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-12:26:52 Iters: 056400/[02], loss: 10.9051, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-12:28:55 Iters: 056500/[02], loss: 11.2141, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-12:30:58 Iters: 056600/[02], loss: 11.8505, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220701-12:33:01 Iters: 056700/[02], loss: 10.5919, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220701-12:35:05 Iters: 056800/[02], loss: 11.4143, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220701-12:37:08 Iters: 056900/[02], loss: 10.8397, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220701-12:39:11 Iters: 057000/[02], loss: 11.4805, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-12:41:14 Iters: 
057100/[02], loss: 11.2353, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-12:43:18 Iters: 057200/[02], loss: 11.6418, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-12:45:21 Iters: 057300/[02], loss: 11.9256, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-12:47:24 Iters: 057400/[02], loss: 11.5614, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-12:49:27 Iters: 057500/[02], loss: 11.4809, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-12:51:30 Iters: 057600/[02], loss: 10.6213, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220701-12:53:34 Iters: 057700/[02], loss: 10.7778, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-12:55:37 Iters: 057800/[02], loss: 10.9082, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220701-12:57:40 Iters: 057900/[02], loss: 11.8766, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-12:59:43 Iters: 058000/[02], loss: 11.4525, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-13:01:46 Iters: 058100/[02], loss: 11.1749, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-13:03:50 Iters: 058200/[02], loss: 11.1178, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-13:05:53 Iters: 058300/[02], loss: 10.9077, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-13:07:56 Iters: 058400/[02], loss: 10.8782, train_accuracy: 0.0781, time: 1.23 s/iter, learning rate: 0.05 -20220701-13:09:59 Iters: 058500/[02], loss: 11.5324, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220701-13:12:02 Iters: 058600/[02], loss: 11.3961, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-13:14:06 Iters: 058700/[02], loss: 11.1905, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220701-13:16:09 Iters: 
058800/[02], loss: 10.6920, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-13:18:12 Iters: 058900/[02], loss: 12.5790, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-13:20:15 Iters: 059000/[02], loss: 11.3147, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-13:22:19 Iters: 059100/[02], loss: 11.3903, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-13:24:22 Iters: 059200/[02], loss: 10.8218, train_accuracy: 0.0703, time: 1.23 s/iter, learning rate: 0.05 -20220701-13:26:25 Iters: 059300/[02], loss: 11.7005, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220701-13:28:28 Iters: 059400/[02], loss: 11.1932, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-13:30:31 Iters: 059500/[02], loss: 11.0754, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-13:32:35 Iters: 059600/[02], loss: 11.9513, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-13:34:38 Iters: 059700/[02], loss: 11.3771, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-13:36:41 Iters: 059800/[02], loss: 10.9005, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-13:38:44 Iters: 059900/[02], loss: 11.1600, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220701-13:40:47 Iters: 060000/[02], loss: 11.2613, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-13:40:47 Saving checkpoint: 60000 -20220701-13:42:06 LFW Ave Accuracy: 99.0665 -20220701-13:43:23 AgeDB-30 Ave Accuracy: 93.4167 -20220701-13:44:52 CFP-FP Ave Accuracy: 87.0714 -20220701-13:44:52 Current Best Accuracy: LFW: 99.0999 in iters: 20000, AgeDB-30: 93.8500 in iters: 50000 and CFP-FP: 88.3000 in iters: 30000 -20220701-13:46:55 Iters: 060100/[02], loss: 11.6218, train_accuracy: 0.0469, time: 3.67 s/iter, learning rate: 0.05 -20220701-13:48:58 Iters: 060200/[02], loss: 10.8459, 
train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-13:51:01 Iters: 060300/[02], loss: 12.0334, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220701-13:53:04 Iters: 060400/[02], loss: 11.8395, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220701-13:55:07 Iters: 060500/[02], loss: 10.4572, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220701-13:57:11 Iters: 060600/[02], loss: 11.9049, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220701-13:59:14 Iters: 060700/[02], loss: 10.5665, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220701-14:01:17 Iters: 060800/[02], loss: 12.0553, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-14:03:20 Iters: 060900/[02], loss: 11.9882, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-14:05:23 Iters: 061000/[02], loss: 11.4557, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-14:07:26 Iters: 061100/[02], loss: 11.1476, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220701-14:09:30 Iters: 061200/[02], loss: 10.9017, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-14:11:33 Iters: 061300/[02], loss: 11.2218, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220701-14:13:37 Iters: 061400/[02], loss: 11.5096, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-14:15:40 Iters: 061500/[02], loss: 10.8786, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220701-14:17:43 Iters: 061600/[02], loss: 10.5790, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-14:19:46 Iters: 061700/[02], loss: 11.1113, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-14:21:49 Iters: 061800/[02], loss: 10.3030, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220701-14:23:52 Iters: 061900/[02], loss: 10.7226, 
train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-14:25:56 Iters: 062000/[02], loss: 11.3425, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-14:27:59 Iters: 062100/[02], loss: 11.4352, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-14:30:02 Iters: 062200/[02], loss: 11.3326, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-14:32:05 Iters: 062300/[02], loss: 12.1565, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-14:34:08 Iters: 062400/[02], loss: 11.0912, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-14:36:11 Iters: 062500/[02], loss: 10.8905, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-14:38:14 Iters: 062600/[02], loss: 11.4491, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220701-14:40:17 Iters: 062700/[02], loss: 11.4861, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-14:42:21 Iters: 062800/[02], loss: 11.8881, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-14:44:24 Iters: 062900/[02], loss: 11.5941, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-14:46:27 Iters: 063000/[02], loss: 10.9631, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220701-14:48:30 Iters: 063100/[02], loss: 11.7712, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-14:50:33 Iters: 063200/[02], loss: 11.3094, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220701-14:52:36 Iters: 063300/[02], loss: 11.2082, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220701-14:54:40 Iters: 063400/[02], loss: 11.6057, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220701-14:56:43 Iters: 063500/[02], loss: 11.3491, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220701-14:58:46 Iters: 063600/[02], loss: 10.8315, 
train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-15:00:49 Iters: 063700/[02], loss: 12.4770, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-15:02:53 Iters: 063800/[02], loss: 11.0696, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-15:04:56 Iters: 063900/[02], loss: 11.3321, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-15:06:59 Iters: 064000/[02], loss: 11.6057, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220701-15:09:02 Iters: 064100/[02], loss: 10.7432, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-15:11:05 Iters: 064200/[02], loss: 11.4880, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-15:13:09 Iters: 064300/[02], loss: 11.2105, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-15:15:12 Iters: 064400/[02], loss: 12.0593, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220701-15:17:15 Iters: 064500/[02], loss: 12.2498, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-15:19:18 Iters: 064600/[02], loss: 10.7997, train_accuracy: 0.0703, time: 1.23 s/iter, learning rate: 0.05 -20220701-15:21:22 Iters: 064700/[02], loss: 12.2014, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-15:23:25 Iters: 064800/[02], loss: 11.2451, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-15:25:28 Iters: 064900/[02], loss: 10.6202, train_accuracy: 0.0781, time: 1.23 s/iter, learning rate: 0.05 -20220701-15:27:31 Iters: 065000/[02], loss: 10.5707, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220701-15:29:35 Iters: 065100/[02], loss: 11.1211, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-15:31:38 Iters: 065200/[02], loss: 11.2001, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-15:33:41 Iters: 065300/[02], loss: 10.5994, 
train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-15:35:44 Iters: 065400/[02], loss: 11.7002, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-15:37:47 Iters: 065500/[02], loss: 11.3764, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-15:39:51 Iters: 065600/[02], loss: 10.3652, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-15:41:54 Iters: 065700/[02], loss: 11.1825, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220701-15:43:57 Iters: 065800/[02], loss: 11.1303, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-15:46:00 Iters: 065900/[02], loss: 12.0685, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-15:48:04 Iters: 066000/[02], loss: 10.7738, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-15:50:07 Iters: 066100/[02], loss: 11.2373, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-15:52:10 Iters: 066200/[02], loss: 11.6033, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-15:54:13 Iters: 066300/[02], loss: 12.1365, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-15:56:16 Iters: 066400/[02], loss: 11.0229, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-15:58:19 Iters: 066500/[02], loss: 11.0899, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-16:00:23 Iters: 066600/[02], loss: 11.7759, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-16:02:26 Iters: 066700/[02], loss: 11.1959, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-16:04:29 Iters: 066800/[02], loss: 10.8188, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-16:06:32 Iters: 066900/[02], loss: 11.8492, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220701-16:08:36 Iters: 067000/[02], loss: 11.3015, 
train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-16:10:39 Iters: 067100/[02], loss: 11.7709, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220701-16:12:42 Iters: 067200/[02], loss: 10.5634, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-16:14:45 Iters: 067300/[02], loss: 10.8215, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-16:16:48 Iters: 067400/[02], loss: 10.2530, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-16:18:52 Iters: 067500/[02], loss: 11.0018, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220701-16:20:55 Iters: 067600/[02], loss: 10.6240, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220701-16:22:58 Iters: 067700/[02], loss: 11.2951, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-16:25:01 Iters: 067800/[02], loss: 10.4171, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-16:27:04 Iters: 067900/[02], loss: 11.4265, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-16:29:07 Iters: 068000/[02], loss: 11.4521, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220701-16:31:10 Iters: 068100/[02], loss: 11.8178, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220701-16:33:13 Iters: 068200/[02], loss: 10.4364, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220701-16:35:16 Iters: 068300/[02], loss: 10.0415, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-16:37:20 Iters: 068400/[02], loss: 10.8054, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-16:39:23 Iters: 068500/[02], loss: 11.1383, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-16:41:26 Iters: 068600/[02], loss: 11.8221, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-16:43:29 Iters: 068700/[02], loss: 11.6900, 
train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-16:45:32 Iters: 068800/[02], loss: 11.1314, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-16:47:35 Iters: 068900/[02], loss: 11.5156, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220701-16:49:38 Iters: 069000/[02], loss: 10.7400, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-16:51:41 Iters: 069100/[02], loss: 11.2625, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-16:53:45 Iters: 069200/[02], loss: 10.8511, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-16:55:48 Iters: 069300/[02], loss: 12.1566, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-16:57:51 Iters: 069400/[02], loss: 10.7608, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220701-16:59:54 Iters: 069500/[02], loss: 11.6498, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-17:01:57 Iters: 069600/[02], loss: 10.9578, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-17:04:00 Iters: 069700/[02], loss: 12.3631, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-17:06:03 Iters: 069800/[02], loss: 11.3112, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-17:08:07 Iters: 069900/[02], loss: 11.6831, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-17:10:10 Iters: 070000/[02], loss: 11.3448, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-17:10:10 Saving checkpoint: 70000 -20220701-17:11:26 LFW Ave Accuracy: 98.7999 -20220701-17:12:41 AgeDB-30 Ave Accuracy: 93.5500 -20220701-17:14:08 CFP-FP Ave Accuracy: 87.0143 -20220701-17:14:08 Current Best Accuracy: LFW: 99.0999 in iters: 20000, AgeDB-30: 93.8500 in iters: 50000 and CFP-FP: 88.3000 in iters: 30000 -20220701-17:16:11 Iters: 070100/[02], loss: 12.3252, train_accuracy: 0.0234, time: 
3.61 s/iter, learning rate: 0.05 -20220701-17:18:14 Iters: 070200/[02], loss: 11.3671, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220701-17:20:17 Iters: 070300/[02], loss: 10.9270, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-17:22:21 Iters: 070400/[02], loss: 10.6114, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-17:24:24 Iters: 070500/[02], loss: 11.5936, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-17:26:27 Iters: 070600/[02], loss: 10.8611, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-17:28:30 Iters: 070700/[02], loss: 12.1698, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-17:30:33 Iters: 070800/[02], loss: 12.2463, train_accuracy: 0.0000, time: 1.23 s/iter, learning rate: 0.05 -20220701-17:32:36 Iters: 070900/[02], loss: 10.6855, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-17:34:40 Iters: 071000/[02], loss: 11.3705, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220701-17:36:43 Iters: 071100/[02], loss: 11.6324, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220701-17:38:46 Iters: 071200/[02], loss: 10.9262, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-17:40:49 Iters: 071300/[02], loss: 11.4971, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-17:42:52 Iters: 071400/[02], loss: 11.7900, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220701-17:44:55 Iters: 071500/[02], loss: 11.2069, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-17:46:59 Iters: 071600/[02], loss: 11.0826, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-17:49:02 Iters: 071700/[02], loss: 12.0381, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-17:51:05 Iters: 071800/[02], loss: 10.9181, train_accuracy: 0.0469, time: 1.23 
s/iter, learning rate: 0.05 -20220701-17:53:08 Iters: 071900/[02], loss: 11.0194, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-17:55:11 Iters: 072000/[02], loss: 11.4099, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220701-17:57:14 Iters: 072100/[02], loss: 11.5255, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220701-17:59:17 Iters: 072200/[02], loss: 12.1669, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-18:01:20 Iters: 072300/[02], loss: 11.9424, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-18:03:24 Iters: 072400/[02], loss: 11.5015, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-18:05:27 Iters: 072500/[02], loss: 11.6687, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-18:07:30 Iters: 072600/[02], loss: 10.6107, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220701-18:09:33 Iters: 072700/[02], loss: 11.7978, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-18:11:36 Iters: 072800/[02], loss: 11.4909, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-18:13:39 Iters: 072900/[02], loss: 10.7908, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-18:15:43 Iters: 073000/[02], loss: 10.6959, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220701-18:17:46 Iters: 073100/[02], loss: 10.7505, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-18:19:49 Iters: 073200/[02], loss: 11.8440, train_accuracy: 0.0000, time: 1.23 s/iter, learning rate: 0.05 -20220701-18:21:52 Iters: 073300/[02], loss: 11.4555, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-18:23:55 Iters: 073400/[02], loss: 10.1807, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220701-18:25:58 Iters: 073500/[02], loss: 10.4330, train_accuracy: 0.0703, time: 1.23 s/iter, 
learning rate: 0.05 -20220701-18:28:01 Iters: 073600/[02], loss: 11.1879, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-18:30:05 Iters: 073700/[02], loss: 11.3853, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-18:32:08 Iters: 073800/[02], loss: 11.3291, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-18:34:11 Iters: 073900/[02], loss: 11.5705, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-18:36:14 Iters: 074000/[02], loss: 11.0735, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220701-18:38:17 Iters: 074100/[02], loss: 11.0281, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-18:40:20 Iters: 074200/[02], loss: 10.6831, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-18:42:23 Iters: 074300/[02], loss: 11.8844, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-18:44:26 Iters: 074400/[02], loss: 10.8144, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220701-18:46:29 Iters: 074500/[02], loss: 11.4114, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-18:48:32 Iters: 074600/[02], loss: 11.2183, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-18:50:35 Iters: 074700/[02], loss: 11.9152, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-18:52:39 Iters: 074800/[02], loss: 11.5130, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-18:54:42 Iters: 074900/[02], loss: 11.4228, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220701-18:56:45 Iters: 075000/[02], loss: 10.9852, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-18:58:48 Iters: 075100/[02], loss: 11.3479, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220701-19:00:51 Iters: 075200/[02], loss: 11.0608, train_accuracy: 0.0781, time: 1.23 s/iter, learning 
rate: 0.05 -20220701-19:02:54 Iters: 075300/[02], loss: 11.7775, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-19:04:57 Iters: 075400/[02], loss: 11.3870, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-19:07:00 Iters: 075500/[02], loss: 11.5651, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-19:09:03 Iters: 075600/[02], loss: 10.7751, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-19:11:06 Iters: 075700/[02], loss: 11.7845, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-19:13:10 Iters: 075800/[02], loss: 11.1999, train_accuracy: 0.0781, time: 1.23 s/iter, learning rate: 0.05 -20220701-19:15:13 Iters: 075900/[02], loss: 10.8030, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220701-19:17:16 Iters: 076000/[02], loss: 11.2561, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-19:19:19 Iters: 076100/[02], loss: 10.6658, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220701-19:21:22 Iters: 076200/[02], loss: 10.9004, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-19:23:25 Iters: 076300/[02], loss: 12.3554, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220701-19:25:28 Iters: 076400/[02], loss: 11.2170, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-19:27:31 Iters: 076500/[02], loss: 11.1363, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-19:29:34 Iters: 076600/[02], loss: 11.2896, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-19:31:38 Iters: 076700/[02], loss: 11.4222, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-19:33:41 Iters: 076800/[02], loss: 10.9845, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-19:35:44 Iters: 076900/[02], loss: 11.2339, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 
-20220701-19:37:47 Iters: 077000/[02], loss: 11.7973, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-19:39:50 Iters: 077100/[02], loss: 11.6574, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-19:41:53 Iters: 077200/[02], loss: 11.7823, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-19:43:57 Iters: 077300/[02], loss: 10.8007, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220701-19:46:00 Iters: 077400/[02], loss: 11.3099, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220701-19:48:03 Iters: 077500/[02], loss: 10.8044, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220701-19:50:06 Iters: 077600/[02], loss: 11.4902, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-19:52:09 Iters: 077700/[02], loss: 11.6147, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-19:54:12 Iters: 077800/[02], loss: 10.7196, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-19:56:15 Iters: 077900/[02], loss: 11.4317, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-19:58:18 Iters: 078000/[02], loss: 11.2198, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-20:00:22 Iters: 078100/[02], loss: 11.0653, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220701-20:02:25 Iters: 078200/[02], loss: 11.3422, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220701-20:04:28 Iters: 078300/[02], loss: 11.3783, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-20:06:31 Iters: 078400/[02], loss: 11.3628, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-20:08:34 Iters: 078500/[02], loss: 11.4813, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-20:10:37 Iters: 078600/[02], loss: 11.3951, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 
-20220701-20:12:41 Iters: 078700/[02], loss: 12.2157, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-20:14:44 Iters: 078800/[02], loss: 10.7734, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-20:16:47 Iters: 078900/[02], loss: 11.0579, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-20:18:50 Iters: 079000/[02], loss: 11.6215, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-20:20:53 Iters: 079100/[02], loss: 11.5993, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-20:22:56 Iters: 079200/[02], loss: 11.6234, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-20:25:00 Iters: 079300/[02], loss: 11.5999, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-20:27:03 Iters: 079400/[02], loss: 11.3447, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220701-20:29:06 Iters: 079500/[02], loss: 11.7329, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-20:31:09 Iters: 079600/[02], loss: 11.3550, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-20:33:12 Iters: 079700/[02], loss: 10.6570, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-20:35:16 Iters: 079800/[02], loss: 11.0054, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-20:37:19 Iters: 079900/[02], loss: 11.1905, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-20:39:22 Iters: 080000/[02], loss: 11.6163, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-20:39:22 Saving checkpoint: 80000 -20220701-20:40:38 LFW Ave Accuracy: 99.0832 -20220701-20:41:53 AgeDB-30 Ave Accuracy: 92.9000 -20220701-20:43:19 CFP-FP Ave Accuracy: 87.1714 -20220701-20:43:19 Current Best Accuracy: LFW: 99.0999 in iters: 20000, AgeDB-30: 93.8500 in iters: 50000 and CFP-FP: 88.3000 in iters: 30000 -20220701-20:45:22 Iters: 
080100/[02], loss: 11.1080, train_accuracy: 0.0312, time: 3.60 s/iter, learning rate: 0.05 -20220701-20:47:25 Iters: 080200/[02], loss: 11.4404, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-20:49:28 Iters: 080300/[02], loss: 11.2513, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-20:51:31 Iters: 080400/[02], loss: 11.8009, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220701-20:53:34 Iters: 080500/[02], loss: 11.4461, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-20:55:37 Iters: 080600/[02], loss: 11.2796, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-20:57:41 Iters: 080700/[02], loss: 11.4793, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-20:59:44 Iters: 080800/[02], loss: 10.6255, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220701-21:01:47 Iters: 080900/[02], loss: 10.7846, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220701-21:03:50 Iters: 081000/[02], loss: 11.2862, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-21:05:53 Iters: 081100/[02], loss: 10.9128, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-21:07:56 Iters: 081200/[02], loss: 11.1183, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-21:09:59 Iters: 081300/[02], loss: 11.8509, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-21:12:03 Iters: 081400/[02], loss: 11.7142, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-21:14:06 Iters: 081500/[02], loss: 11.6409, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-21:16:09 Iters: 081600/[02], loss: 10.6759, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-21:18:12 Iters: 081700/[02], loss: 10.7438, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220701-21:20:15 Iters: 
081800/[02], loss: 11.7473, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220701-21:22:18 Iters: 081900/[02], loss: 11.4023, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-21:24:21 Iters: 082000/[02], loss: 12.1101, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220701-21:26:24 Iters: 082100/[02], loss: 11.6078, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-21:28:27 Iters: 082200/[02], loss: 11.2177, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-21:30:30 Iters: 082300/[02], loss: 12.1487, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-21:32:33 Iters: 082400/[02], loss: 11.2891, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-21:34:36 Iters: 082500/[02], loss: 12.3982, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220701-21:36:39 Iters: 082600/[02], loss: 11.1080, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220701-21:38:42 Iters: 082700/[02], loss: 10.8696, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-21:40:45 Iters: 082800/[02], loss: 12.3388, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220701-21:42:48 Iters: 082900/[02], loss: 11.1150, train_accuracy: 0.0703, time: 1.23 s/iter, learning rate: 0.05 -20220701-21:44:52 Iters: 083000/[02], loss: 10.5534, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-21:46:55 Iters: 083100/[02], loss: 12.6892, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-21:48:58 Iters: 083200/[02], loss: 11.4105, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220701-21:51:01 Iters: 083300/[02], loss: 11.3229, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220701-21:53:04 Iters: 083400/[02], loss: 11.5713, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-21:55:07 Iters: 
083500/[02], loss: 10.9549, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-21:57:10 Iters: 083600/[02], loss: 10.6126, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-21:59:13 Iters: 083700/[02], loss: 10.7677, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220701-22:01:16 Iters: 083800/[02], loss: 10.7455, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-22:03:19 Iters: 083900/[02], loss: 10.9432, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220701-22:05:22 Iters: 084000/[02], loss: 10.8579, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-22:07:25 Iters: 084100/[02], loss: 12.0410, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220701-22:09:28 Iters: 084200/[02], loss: 10.8398, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-22:11:31 Iters: 084300/[02], loss: 10.6232, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220701-22:13:34 Iters: 084400/[02], loss: 11.3088, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-22:15:37 Iters: 084500/[02], loss: 10.6504, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-22:17:40 Iters: 084600/[02], loss: 10.6812, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220701-22:19:43 Iters: 084700/[02], loss: 10.2860, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220701-22:21:46 Iters: 084800/[02], loss: 11.3267, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220701-22:23:49 Iters: 084900/[02], loss: 10.2832, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220701-22:25:52 Iters: 085000/[02], loss: 11.0796, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220701-22:27:55 Iters: 085100/[02], loss: 11.0919, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220701-22:29:58 Iters: 
085200/[02], loss: 10.5042, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-22:32:01 Iters: 085300/[02], loss: 10.9777, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-22:34:04 Iters: 085400/[02], loss: 10.9723, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220701-22:36:07 Iters: 085500/[02], loss: 11.7295, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-22:38:10 Iters: 085600/[02], loss: 11.9288, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-22:40:13 Iters: 085700/[02], loss: 11.5515, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220701-22:42:16 Iters: 085800/[02], loss: 11.0371, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-22:44:19 Iters: 085900/[02], loss: 11.2287, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220701-22:46:22 Iters: 086000/[02], loss: 11.5523, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220701-22:48:25 Iters: 086100/[02], loss: 10.9386, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-22:50:28 Iters: 086200/[02], loss: 11.5405, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-22:52:31 Iters: 086300/[02], loss: 11.7447, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220701-22:54:34 Iters: 086400/[02], loss: 12.0745, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-22:56:37 Iters: 086500/[02], loss: 11.5202, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-22:58:41 Iters: 086600/[02], loss: 10.6674, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220701-23:00:44 Iters: 086700/[02], loss: 11.3150, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-23:02:47 Iters: 086800/[02], loss: 10.9531, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-23:04:50 Iters: 
086900/[02], loss: 11.7971, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-23:06:53 Iters: 087000/[02], loss: 11.5118, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-23:08:56 Iters: 087100/[02], loss: 11.8129, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-23:10:59 Iters: 087200/[02], loss: 10.7608, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220701-23:13:02 Iters: 087300/[02], loss: 11.4557, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-23:15:05 Iters: 087400/[02], loss: 11.2769, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-23:17:08 Iters: 087500/[02], loss: 10.9840, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-23:19:10 Iters: 087600/[02], loss: 11.8196, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-23:21:13 Iters: 087700/[02], loss: 11.2232, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-23:23:16 Iters: 087800/[02], loss: 11.3344, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-23:25:19 Iters: 087900/[02], loss: 12.1373, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-23:27:22 Iters: 088000/[02], loss: 11.9316, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-23:29:25 Iters: 088100/[02], loss: 11.2079, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-23:31:28 Iters: 088200/[02], loss: 11.4781, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-23:33:31 Iters: 088300/[02], loss: 11.6840, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-23:35:34 Iters: 088400/[02], loss: 11.2718, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220701-23:37:37 Iters: 088500/[02], loss: 11.7125, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220701-23:39:40 Iters: 
088600/[02], loss: 11.7535, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220701-23:41:43 Iters: 088700/[02], loss: 11.7334, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-23:43:46 Iters: 088800/[02], loss: 11.3865, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220701-23:45:49 Iters: 088900/[02], loss: 10.3675, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-23:47:52 Iters: 089000/[02], loss: 11.3599, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220701-23:49:55 Iters: 089100/[02], loss: 10.9871, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220701-23:51:58 Iters: 089200/[02], loss: 11.1143, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-23:54:01 Iters: 089300/[02], loss: 11.1085, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220701-23:56:04 Iters: 089400/[02], loss: 11.1963, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220701-23:58:07 Iters: 089500/[02], loss: 11.1799, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-00:00:10 Iters: 089600/[02], loss: 11.4256, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-00:02:13 Iters: 089700/[02], loss: 12.2037, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-00:04:16 Iters: 089800/[02], loss: 11.1605, train_accuracy: 0.0000, time: 1.23 s/iter, learning rate: 0.05 -20220702-00:06:19 Iters: 089900/[02], loss: 11.3663, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-00:08:22 Iters: 090000/[02], loss: 11.3065, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-00:08:22 Saving checkpoint: 90000 -20220702-00:09:41 LFW Ave Accuracy: 99.0832 -20220702-00:11:05 AgeDB-30 Ave Accuracy: 93.7000 -20220702-00:12:39 CFP-FP Ave Accuracy: 87.1286 -20220702-00:12:39 Current Best Accuracy: LFW: 99.0999 in iters: 20000, AgeDB-30: 
93.8500 in iters: 50000 and CFP-FP: 88.3000 in iters: 30000 -20220702-00:14:41 Iters: 090100/[02], loss: 10.9673, train_accuracy: 0.0312, time: 3.79 s/iter, learning rate: 0.05 -20220702-00:16:44 Iters: 090200/[02], loss: 11.4925, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220702-00:18:47 Iters: 090300/[02], loss: 10.8348, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220702-00:20:50 Iters: 090400/[02], loss: 12.0060, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-00:22:53 Iters: 090500/[02], loss: 10.8498, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-00:24:56 Iters: 090600/[02], loss: 11.0933, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-00:26:59 Iters: 090700/[02], loss: 11.7785, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-00:29:02 Iters: 090800/[02], loss: 12.3455, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-00:31:05 Iters: 090900/[02], loss: 10.4424, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-00:32:43 Train Epoch: 3/18 ... 
-20220702-00:33:12 Iters: 091000/[03], loss: 10.0875, train_accuracy: 0.0469, time: 0.29 s/iter, learning rate: 0.05 -20220702-00:35:15 Iters: 091100/[03], loss: 11.0766, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-00:37:18 Iters: 091200/[03], loss: 11.1149, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-00:39:21 Iters: 091300/[03], loss: 11.2012, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-00:41:23 Iters: 091400/[03], loss: 11.2715, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-00:43:26 Iters: 091500/[03], loss: 11.0696, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-00:45:29 Iters: 091600/[03], loss: 12.2892, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-00:47:32 Iters: 091700/[03], loss: 11.8391, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-00:49:35 Iters: 091800/[03], loss: 10.8769, train_accuracy: 0.0703, time: 1.23 s/iter, learning rate: 0.05 -20220702-00:51:38 Iters: 091900/[03], loss: 11.1967, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-00:53:41 Iters: 092000/[03], loss: 10.5004, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-00:55:44 Iters: 092100/[03], loss: 11.7290, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220702-00:57:47 Iters: 092200/[03], loss: 12.1559, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-00:59:50 Iters: 092300/[03], loss: 11.2420, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-01:01:53 Iters: 092400/[03], loss: 11.0708, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-01:03:56 Iters: 092500/[03], loss: 11.9411, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-01:05:59 Iters: 092600/[03], loss: 11.0583, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 
-20220702-01:08:02 Iters: 092700/[03], loss: 10.5011, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220702-01:10:05 Iters: 092800/[03], loss: 11.8803, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-01:12:08 Iters: 092900/[03], loss: 11.2903, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-01:14:11 Iters: 093000/[03], loss: 11.2765, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-01:16:14 Iters: 093100/[03], loss: 11.2228, train_accuracy: 0.0781, time: 1.23 s/iter, learning rate: 0.05 -20220702-01:18:17 Iters: 093200/[03], loss: 12.2428, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-01:20:20 Iters: 093300/[03], loss: 12.1895, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-01:22:23 Iters: 093400/[03], loss: 10.7447, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-01:24:26 Iters: 093500/[03], loss: 11.1851, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-01:26:28 Iters: 093600/[03], loss: 11.6275, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-01:28:31 Iters: 093700/[03], loss: 11.6245, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-01:30:34 Iters: 093800/[03], loss: 11.2330, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-01:32:37 Iters: 093900/[03], loss: 11.8331, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-01:34:40 Iters: 094000/[03], loss: 10.5911, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-01:36:43 Iters: 094100/[03], loss: 12.5796, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-01:38:46 Iters: 094200/[03], loss: 11.6530, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-01:40:48 Iters: 094300/[03], loss: 11.0569, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 
-20220702-01:42:51 Iters: 094400/[03], loss: 10.7548, train_accuracy: 0.0703, time: 1.23 s/iter, learning rate: 0.05 -20220702-01:44:54 Iters: 094500/[03], loss: 11.1141, train_accuracy: 0.0859, time: 1.23 s/iter, learning rate: 0.05 -20220702-01:46:57 Iters: 094600/[03], loss: 11.4804, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220702-01:49:00 Iters: 094700/[03], loss: 11.6698, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-01:51:03 Iters: 094800/[03], loss: 11.4871, train_accuracy: 0.0703, time: 1.23 s/iter, learning rate: 0.05 -20220702-01:53:06 Iters: 094900/[03], loss: 11.6567, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-01:55:08 Iters: 095000/[03], loss: 11.2741, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-01:57:11 Iters: 095100/[03], loss: 11.7700, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-01:59:14 Iters: 095200/[03], loss: 10.4033, train_accuracy: 0.0781, time: 1.23 s/iter, learning rate: 0.05 -20220702-02:01:17 Iters: 095300/[03], loss: 11.2736, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-02:03:20 Iters: 095400/[03], loss: 11.6109, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-02:05:23 Iters: 095500/[03], loss: 11.0302, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-02:07:26 Iters: 095600/[03], loss: 11.0903, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220702-02:09:29 Iters: 095700/[03], loss: 10.3622, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-02:11:32 Iters: 095800/[03], loss: 10.6696, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-02:13:35 Iters: 095900/[03], loss: 10.5715, train_accuracy: 0.0703, time: 1.23 s/iter, learning rate: 0.05 -20220702-02:15:38 Iters: 096000/[03], loss: 11.5804, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 
-20220702-02:17:40 Iters: 096100/[03], loss: 11.4335, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-02:19:43 Iters: 096200/[03], loss: 10.4772, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-02:21:46 Iters: 096300/[03], loss: 11.4153, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-02:23:49 Iters: 096400/[03], loss: 10.7786, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-02:25:52 Iters: 096500/[03], loss: 11.2538, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-02:27:55 Iters: 096600/[03], loss: 10.4855, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-02:29:58 Iters: 096700/[03], loss: 11.2645, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-02:32:00 Iters: 096800/[03], loss: 11.6664, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-02:34:03 Iters: 096900/[03], loss: 12.5294, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-02:36:06 Iters: 097000/[03], loss: 11.1766, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-02:38:09 Iters: 097100/[03], loss: 11.1031, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220702-02:40:12 Iters: 097200/[03], loss: 11.2285, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-02:42:14 Iters: 097300/[03], loss: 11.3800, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-02:44:17 Iters: 097400/[03], loss: 10.8819, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-02:46:20 Iters: 097500/[03], loss: 11.3744, train_accuracy: 0.0000, time: 1.23 s/iter, learning rate: 0.05 -20220702-02:48:23 Iters: 097600/[03], loss: 10.5838, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-02:50:26 Iters: 097700/[03], loss: 11.4536, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 
-20220702-02:52:29 Iters: 097800/[03], loss: 11.5720, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-02:54:31 Iters: 097900/[03], loss: 11.3411, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-02:56:34 Iters: 098000/[03], loss: 10.2354, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-02:58:37 Iters: 098100/[03], loss: 10.7118, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-03:00:40 Iters: 098200/[03], loss: 11.4450, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-03:02:43 Iters: 098300/[03], loss: 12.1546, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-03:04:46 Iters: 098400/[03], loss: 11.6284, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-03:06:48 Iters: 098500/[03], loss: 11.6014, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-03:08:51 Iters: 098600/[03], loss: 10.6553, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-03:10:54 Iters: 098700/[03], loss: 11.3168, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-03:12:57 Iters: 098800/[03], loss: 12.4457, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-03:15:00 Iters: 098900/[03], loss: 10.5490, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220702-03:17:03 Iters: 099000/[03], loss: 12.0830, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-03:19:06 Iters: 099100/[03], loss: 11.3578, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-03:21:09 Iters: 099200/[03], loss: 11.5124, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-03:23:11 Iters: 099300/[03], loss: 10.8782, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-03:25:14 Iters: 099400/[03], loss: 11.0237, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 
-20220702-03:27:17 Iters: 099500/[03], loss: 11.5509, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-03:29:20 Iters: 099600/[03], loss: 11.0774, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-03:31:23 Iters: 099700/[03], loss: 11.9533, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-03:33:26 Iters: 099800/[03], loss: 11.1980, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-03:35:28 Iters: 099900/[03], loss: 11.4264, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-03:37:31 Iters: 100000/[03], loss: 10.8192, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-03:37:31 Saving checkpoint: 100000 -20220702-03:38:49 LFW Ave Accuracy: 99.1333 -20220702-03:40:06 AgeDB-30 Ave Accuracy: 92.9000 -20220702-03:41:35 CFP-FP Ave Accuracy: 87.9000 -20220702-03:41:35 Current Best Accuracy: LFW: 99.1333 in iters: 100000, AgeDB-30: 93.8500 in iters: 50000 and CFP-FP: 88.3000 in iters: 30000 -20220702-03:43:38 Iters: 100100/[03], loss: 11.2570, train_accuracy: 0.0391, time: 3.67 s/iter, learning rate: 0.05 -20220702-03:45:41 Iters: 100200/[03], loss: 11.4610, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-03:47:44 Iters: 100300/[03], loss: 11.7988, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-03:49:46 Iters: 100400/[03], loss: 11.4968, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-03:51:49 Iters: 100500/[03], loss: 11.1872, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-03:53:52 Iters: 100600/[03], loss: 10.6812, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-03:55:55 Iters: 100700/[03], loss: 11.4256, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-03:57:58 Iters: 100800/[03], loss: 10.8933, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-04:00:01 Iters: 
100900/[03], loss: 10.8627, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-04:02:04 Iters: 101000/[03], loss: 11.0615, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-04:04:07 Iters: 101100/[03], loss: 11.1744, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-04:06:09 Iters: 101200/[03], loss: 11.4632, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-04:08:12 Iters: 101300/[03], loss: 11.4029, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-04:10:15 Iters: 101400/[03], loss: 11.3231, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-04:12:18 Iters: 101500/[03], loss: 11.9176, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-04:14:21 Iters: 101600/[03], loss: 10.6512, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-04:16:24 Iters: 101700/[03], loss: 10.4673, train_accuracy: 0.0781, time: 1.23 s/iter, learning rate: 0.05 -20220702-04:18:27 Iters: 101800/[03], loss: 11.2115, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-04:20:30 Iters: 101900/[03], loss: 11.5661, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-04:22:33 Iters: 102000/[03], loss: 11.2327, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-04:24:35 Iters: 102100/[03], loss: 12.0389, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-04:26:38 Iters: 102200/[03], loss: 10.9901, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-04:28:41 Iters: 102300/[03], loss: 12.0733, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220702-04:30:44 Iters: 102400/[03], loss: 11.0046, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-04:32:47 Iters: 102500/[03], loss: 11.7213, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-04:34:51 Iters: 
102600/[03], loss: 11.4381, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-04:36:54 Iters: 102700/[03], loss: 11.3340, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-04:38:57 Iters: 102800/[03], loss: 10.5126, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-04:41:00 Iters: 102900/[03], loss: 11.2076, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-04:43:03 Iters: 103000/[03], loss: 10.8428, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220702-04:45:06 Iters: 103100/[03], loss: 11.4656, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-04:47:09 Iters: 103200/[03], loss: 10.3774, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-04:49:12 Iters: 103300/[03], loss: 11.1950, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220702-04:51:15 Iters: 103400/[03], loss: 11.3565, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-04:53:18 Iters: 103500/[03], loss: 12.2540, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220702-04:55:22 Iters: 103600/[03], loss: 11.0237, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-04:57:25 Iters: 103700/[03], loss: 11.5775, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-04:59:28 Iters: 103800/[03], loss: 10.6322, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-05:01:31 Iters: 103900/[03], loss: 11.2496, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-05:03:34 Iters: 104000/[03], loss: 11.0190, train_accuracy: 0.0781, time: 1.23 s/iter, learning rate: 0.05 -20220702-05:05:37 Iters: 104100/[03], loss: 12.2623, train_accuracy: 0.0000, time: 1.23 s/iter, learning rate: 0.05 -20220702-05:07:40 Iters: 104200/[03], loss: 11.2154, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-05:09:43 Iters: 
104300/[03], loss: 11.0183, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-05:11:46 Iters: 104400/[03], loss: 11.4217, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-05:13:49 Iters: 104500/[03], loss: 11.4030, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-05:15:52 Iters: 104600/[03], loss: 10.6223, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-05:17:55 Iters: 104700/[03], loss: 11.2028, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-05:19:58 Iters: 104800/[03], loss: 10.5397, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-05:22:01 Iters: 104900/[03], loss: 11.9618, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220702-05:24:04 Iters: 105000/[03], loss: 11.7027, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-05:26:07 Iters: 105100/[03], loss: 11.0864, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-05:28:10 Iters: 105200/[03], loss: 10.3178, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-05:30:13 Iters: 105300/[03], loss: 12.0078, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-05:32:16 Iters: 105400/[03], loss: 10.6631, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-05:34:19 Iters: 105500/[03], loss: 11.0193, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-05:36:22 Iters: 105600/[03], loss: 10.4901, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-05:38:25 Iters: 105700/[03], loss: 10.5201, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-05:40:28 Iters: 105800/[03], loss: 11.2843, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-05:42:31 Iters: 105900/[03], loss: 11.0705, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-05:44:34 Iters: 
106000/[03], loss: 11.4069, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-05:46:37 Iters: 106100/[03], loss: 11.2373, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220702-05:48:40 Iters: 106200/[03], loss: 11.5745, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-05:50:43 Iters: 106300/[03], loss: 11.3055, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220702-05:52:46 Iters: 106400/[03], loss: 10.8524, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-05:54:49 Iters: 106500/[03], loss: 11.7339, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-05:56:52 Iters: 106600/[03], loss: 11.1861, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-05:58:55 Iters: 106700/[03], loss: 11.8001, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-06:00:58 Iters: 106800/[03], loss: 11.8919, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-06:03:01 Iters: 106900/[03], loss: 11.1096, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220702-06:05:05 Iters: 107000/[03], loss: 12.0678, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-06:07:08 Iters: 107100/[03], loss: 10.6569, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-06:09:11 Iters: 107200/[03], loss: 11.8299, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-06:11:14 Iters: 107300/[03], loss: 12.0252, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-06:13:17 Iters: 107400/[03], loss: 11.5277, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-06:15:20 Iters: 107500/[03], loss: 11.2905, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-06:17:23 Iters: 107600/[03], loss: 11.3033, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-06:19:26 Iters: 
107700/[03], loss: 11.8931, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-06:21:29 Iters: 107800/[03], loss: 10.9256, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-06:23:32 Iters: 107900/[03], loss: 10.6364, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-06:25:35 Iters: 108000/[03], loss: 11.4360, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-06:27:38 Iters: 108100/[03], loss: 11.0911, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-06:29:41 Iters: 108200/[03], loss: 11.8286, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-06:31:44 Iters: 108300/[03], loss: 10.0340, train_accuracy: 0.0703, time: 1.23 s/iter, learning rate: 0.05 -20220702-06:33:47 Iters: 108400/[03], loss: 11.2730, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-06:35:50 Iters: 108500/[03], loss: 11.6090, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-06:37:53 Iters: 108600/[03], loss: 11.3039, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220702-06:39:56 Iters: 108700/[03], loss: 11.3380, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-06:41:59 Iters: 108800/[03], loss: 11.0802, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-06:44:02 Iters: 108900/[03], loss: 11.9104, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-06:46:05 Iters: 109000/[03], loss: 10.9655, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-06:48:08 Iters: 109100/[03], loss: 10.9756, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-06:50:11 Iters: 109200/[03], loss: 11.5812, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-06:52:14 Iters: 109300/[03], loss: 11.8247, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-06:54:17 Iters: 
109400/[03], loss: 11.1706, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-06:56:20 Iters: 109500/[03], loss: 11.6034, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-06:58:23 Iters: 109600/[03], loss: 11.2093, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-07:00:26 Iters: 109700/[03], loss: 11.2574, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-07:02:29 Iters: 109800/[03], loss: 11.9114, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-07:04:33 Iters: 109900/[03], loss: 11.1106, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-07:06:36 Iters: 110000/[03], loss: 11.6430, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-07:06:36 Saving checkpoint: 110000 -20220702-07:07:53 LFW Ave Accuracy: 99.0832 -20220702-07:09:10 AgeDB-30 Ave Accuracy: 93.6500 -20220702-07:10:40 CFP-FP Ave Accuracy: 86.9429 -20220702-07:10:40 Current Best Accuracy: LFW: 99.1333 in iters: 100000, AgeDB-30: 93.8500 in iters: 50000 and CFP-FP: 88.3000 in iters: 30000 -20220702-07:12:43 Iters: 110100/[03], loss: 11.7653, train_accuracy: 0.0312, time: 3.67 s/iter, learning rate: 0.05 -20220702-07:14:46 Iters: 110200/[03], loss: 11.3802, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-07:16:49 Iters: 110300/[03], loss: 10.8620, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-07:18:52 Iters: 110400/[03], loss: 11.8255, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-07:20:55 Iters: 110500/[03], loss: 10.9426, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-07:22:58 Iters: 110600/[03], loss: 11.1083, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220702-07:25:01 Iters: 110700/[03], loss: 11.9451, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-07:27:04 Iters: 110800/[03], loss: 11.5891, 
train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-07:29:07 Iters: 110900/[03], loss: 10.8365, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-07:31:11 Iters: 111000/[03], loss: 11.4347, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-07:33:14 Iters: 111100/[03], loss: 11.3385, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-07:35:17 Iters: 111200/[03], loss: 11.1491, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-07:37:20 Iters: 111300/[03], loss: 10.5571, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-07:39:23 Iters: 111400/[03], loss: 11.6663, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-07:41:26 Iters: 111500/[03], loss: 10.8956, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-07:43:29 Iters: 111600/[03], loss: 11.5593, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-07:45:32 Iters: 111700/[03], loss: 10.7948, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-07:47:35 Iters: 111800/[03], loss: 11.7606, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-07:49:38 Iters: 111900/[03], loss: 11.6230, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-07:51:41 Iters: 112000/[03], loss: 11.7014, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-07:53:44 Iters: 112100/[03], loss: 11.3224, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-07:55:47 Iters: 112200/[03], loss: 11.2686, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-07:57:50 Iters: 112300/[03], loss: 12.1378, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-07:59:53 Iters: 112400/[03], loss: 11.9168, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220702-08:01:56 Iters: 112500/[03], loss: 12.0494, 
train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-08:03:59 Iters: 112600/[03], loss: 10.7953, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-08:06:02 Iters: 112700/[03], loss: 11.6886, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-08:08:05 Iters: 112800/[03], loss: 11.2497, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-08:10:08 Iters: 112900/[03], loss: 12.0952, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-08:12:11 Iters: 113000/[03], loss: 11.7579, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-08:14:14 Iters: 113100/[03], loss: 10.7802, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-08:16:17 Iters: 113200/[03], loss: 10.7219, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220702-08:18:20 Iters: 113300/[03], loss: 11.0936, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220702-08:20:23 Iters: 113400/[03], loss: 11.3744, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-08:22:26 Iters: 113500/[03], loss: 11.2801, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-08:24:30 Iters: 113600/[03], loss: 11.8733, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-08:26:33 Iters: 113700/[03], loss: 11.3076, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-08:28:36 Iters: 113800/[03], loss: 11.8112, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-08:30:39 Iters: 113900/[03], loss: 10.9219, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-08:32:42 Iters: 114000/[03], loss: 11.9759, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-08:34:45 Iters: 114100/[03], loss: 10.6551, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-08:36:48 Iters: 114200/[03], loss: 11.6623, 
train_accuracy: 0.0703, time: 1.23 s/iter, learning rate: 0.05 -20220702-08:38:51 Iters: 114300/[03], loss: 10.9778, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-08:40:54 Iters: 114400/[03], loss: 11.6703, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-08:42:57 Iters: 114500/[03], loss: 11.6790, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-08:45:00 Iters: 114600/[03], loss: 11.2677, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-08:47:03 Iters: 114700/[03], loss: 11.0286, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-08:49:06 Iters: 114800/[03], loss: 11.9417, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-08:51:09 Iters: 114900/[03], loss: 11.6132, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-08:53:13 Iters: 115000/[03], loss: 10.9356, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-08:55:16 Iters: 115100/[03], loss: 10.7405, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-08:57:19 Iters: 115200/[03], loss: 11.7804, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-08:59:23 Iters: 115300/[03], loss: 12.0996, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220702-09:01:27 Iters: 115400/[03], loss: 11.1838, train_accuracy: 0.0625, time: 1.24 s/iter, learning rate: 0.05 -20220702-09:03:30 Iters: 115500/[03], loss: 11.7289, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220702-09:05:34 Iters: 115600/[03], loss: 11.2962, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220702-09:07:38 Iters: 115700/[03], loss: 11.1436, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220702-09:09:41 Iters: 115800/[03], loss: 11.4762, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220702-09:11:45 Iters: 115900/[03], loss: 11.2111, 
train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220702-09:13:49 Iters: 116000/[03], loss: 10.6940, train_accuracy: 0.0625, time: 1.24 s/iter, learning rate: 0.05 -20220702-09:15:52 Iters: 116100/[03], loss: 11.2543, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220702-09:17:56 Iters: 116200/[03], loss: 11.1598, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-09:19:59 Iters: 116300/[03], loss: 11.8859, train_accuracy: 0.0547, time: 1.24 s/iter, learning rate: 0.05 -20220702-09:22:03 Iters: 116400/[03], loss: 11.5739, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220702-09:24:06 Iters: 116500/[03], loss: 11.3491, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220702-09:26:10 Iters: 116600/[03], loss: 11.4106, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220702-09:28:14 Iters: 116700/[03], loss: 11.9487, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220702-09:30:18 Iters: 116800/[03], loss: 10.5873, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220702-09:32:22 Iters: 116900/[03], loss: 11.9578, train_accuracy: 0.0078, time: 1.24 s/iter, learning rate: 0.05 -20220702-09:34:25 Iters: 117000/[03], loss: 11.4530, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220702-09:36:29 Iters: 117100/[03], loss: 11.4672, train_accuracy: 0.0078, time: 1.24 s/iter, learning rate: 0.05 -20220702-09:38:33 Iters: 117200/[03], loss: 11.1519, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220702-09:40:37 Iters: 117300/[03], loss: 11.3725, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220702-09:42:40 Iters: 117400/[03], loss: 11.3249, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220702-09:44:44 Iters: 117500/[03], loss: 11.2039, train_accuracy: 0.0547, time: 1.24 s/iter, learning rate: 0.05 -20220702-09:46:48 Iters: 117600/[03], loss: 10.5234, 
train_accuracy: 0.0547, time: 1.24 s/iter, learning rate: 0.05 -20220702-09:48:52 Iters: 117700/[03], loss: 11.8611, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220702-09:50:55 Iters: 117800/[03], loss: 12.2735, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220702-09:52:59 Iters: 117900/[03], loss: 10.9307, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220702-09:55:03 Iters: 118000/[03], loss: 10.7395, train_accuracy: 0.0625, time: 1.24 s/iter, learning rate: 0.05 -20220702-09:57:06 Iters: 118100/[03], loss: 10.9440, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220702-09:59:10 Iters: 118200/[03], loss: 10.4507, train_accuracy: 0.0703, time: 1.24 s/iter, learning rate: 0.05 -20220702-10:01:14 Iters: 118300/[03], loss: 10.9573, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220702-10:03:18 Iters: 118400/[03], loss: 11.8458, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220702-10:05:22 Iters: 118500/[03], loss: 11.6248, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220702-10:07:25 Iters: 118600/[03], loss: 11.5280, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220702-10:09:29 Iters: 118700/[03], loss: 11.0756, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220702-10:11:33 Iters: 118800/[03], loss: 10.9334, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220702-10:13:37 Iters: 118900/[03], loss: 11.1462, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220702-10:15:40 Iters: 119000/[03], loss: 11.8225, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220702-10:17:44 Iters: 119100/[03], loss: 12.0099, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220702-10:19:48 Iters: 119200/[03], loss: 11.5011, train_accuracy: 0.0703, time: 1.24 s/iter, learning rate: 0.05 -20220702-10:21:52 Iters: 119300/[03], loss: 12.1691, 
train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220702-10:23:55 Iters: 119400/[03], loss: 10.4668, train_accuracy: 0.0703, time: 1.24 s/iter, learning rate: 0.05 -20220702-10:25:59 Iters: 119500/[03], loss: 11.0295, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220702-10:28:03 Iters: 119600/[03], loss: 11.4233, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220702-10:30:07 Iters: 119700/[03], loss: 11.6121, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220702-10:32:10 Iters: 119800/[03], loss: 11.1133, train_accuracy: 0.0547, time: 1.24 s/iter, learning rate: 0.05 -20220702-10:34:14 Iters: 119900/[03], loss: 10.9089, train_accuracy: 0.0625, time: 1.24 s/iter, learning rate: 0.05 -20220702-10:36:18 Iters: 120000/[03], loss: 12.3622, train_accuracy: 0.0547, time: 1.24 s/iter, learning rate: 0.05 -20220702-10:36:18 Saving checkpoint: 120000 -20220702-10:37:35 LFW Ave Accuracy: 99.2333 -20220702-10:38:50 AgeDB-30 Ave Accuracy: 93.1000 -20220702-10:40:17 CFP-FP Ave Accuracy: 88.3000 -20220702-10:40:17 Current Best Accuracy: LFW: 99.2333 in iters: 120000, AgeDB-30: 93.8500 in iters: 50000 and CFP-FP: 88.3000 in iters: 120000 -20220702-10:42:20 Iters: 120100/[03], loss: 11.5480, train_accuracy: 0.0391, time: 3.62 s/iter, learning rate: 0.05 -20220702-10:44:24 Iters: 120200/[03], loss: 11.2926, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220702-10:46:27 Iters: 120300/[03], loss: 11.3658, train_accuracy: 0.0547, time: 1.24 s/iter, learning rate: 0.05 -20220702-10:48:31 Iters: 120400/[03], loss: 10.5148, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220702-10:50:35 Iters: 120500/[03], loss: 11.4977, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220702-10:52:38 Iters: 120600/[03], loss: 11.3240, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220702-10:54:42 Iters: 120700/[03], loss: 12.0107, train_accuracy: 0.0391, time: 
1.24 s/iter, learning rate: 0.05 -20220702-10:56:46 Iters: 120800/[03], loss: 11.0145, train_accuracy: 0.0547, time: 1.24 s/iter, learning rate: 0.05 -20220702-10:58:50 Iters: 120900/[03], loss: 11.7255, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220702-11:00:53 Iters: 121000/[03], loss: 11.2509, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220702-11:02:57 Iters: 121100/[03], loss: 12.0811, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220702-11:05:01 Iters: 121200/[03], loss: 11.9388, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220702-11:07:04 Iters: 121300/[03], loss: 11.5740, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220702-11:09:08 Iters: 121400/[03], loss: 11.1105, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220702-11:11:11 Iters: 121500/[03], loss: 11.7581, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-11:13:15 Iters: 121600/[03], loss: 11.9974, train_accuracy: 0.0625, time: 1.24 s/iter, learning rate: 0.05 -20220702-11:15:18 Iters: 121700/[03], loss: 12.3966, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220702-11:17:22 Iters: 121800/[03], loss: 10.5557, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-11:19:25 Iters: 121900/[03], loss: 11.4338, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-11:21:29 Iters: 122000/[03], loss: 11.5735, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-11:23:32 Iters: 122100/[03], loss: 11.1255, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-11:25:35 Iters: 122200/[03], loss: 11.3760, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-11:27:39 Iters: 122300/[03], loss: 11.3381, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-11:29:42 Iters: 122400/[03], loss: 11.5830, train_accuracy: 0.0391, time: 1.23 
s/iter, learning rate: 0.05 -20220702-11:31:46 Iters: 122500/[03], loss: 11.4235, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220702-11:33:49 Iters: 122600/[03], loss: 10.7045, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220702-11:35:53 Iters: 122700/[03], loss: 11.3251, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220702-11:37:57 Iters: 122800/[03], loss: 11.4380, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220702-11:40:01 Iters: 122900/[03], loss: 11.5199, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220702-11:42:04 Iters: 123000/[03], loss: 10.9198, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220702-11:44:08 Iters: 123100/[03], loss: 11.2054, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220702-11:46:12 Iters: 123200/[03], loss: 11.3857, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220702-11:48:16 Iters: 123300/[03], loss: 11.2791, train_accuracy: 0.0625, time: 1.24 s/iter, learning rate: 0.05 -20220702-11:50:19 Iters: 123400/[03], loss: 10.3308, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220702-11:52:23 Iters: 123500/[03], loss: 11.5220, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220702-11:54:27 Iters: 123600/[03], loss: 12.3050, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220702-11:56:31 Iters: 123700/[03], loss: 11.2485, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220702-11:58:34 Iters: 123800/[03], loss: 10.4566, train_accuracy: 0.0547, time: 1.24 s/iter, learning rate: 0.05 -20220702-12:00:38 Iters: 123900/[03], loss: 10.7641, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220702-12:02:42 Iters: 124000/[03], loss: 11.8546, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220702-12:04:46 Iters: 124100/[03], loss: 11.7214, train_accuracy: 0.0312, time: 1.24 s/iter, 
learning rate: 0.05 -20220702-12:06:49 Iters: 124200/[03], loss: 11.1326, train_accuracy: 0.0703, time: 1.24 s/iter, learning rate: 0.05 -20220702-12:08:53 Iters: 124300/[03], loss: 10.8921, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220702-12:10:57 Iters: 124400/[03], loss: 11.4065, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220702-12:13:01 Iters: 124500/[03], loss: 11.1831, train_accuracy: 0.0781, time: 1.24 s/iter, learning rate: 0.05 -20220702-12:15:04 Iters: 124600/[03], loss: 10.3985, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220702-12:17:08 Iters: 124700/[03], loss: 10.5808, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220702-12:19:12 Iters: 124800/[03], loss: 11.4490, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220702-12:21:16 Iters: 124900/[03], loss: 11.2390, train_accuracy: 0.0625, time: 1.24 s/iter, learning rate: 0.05 -20220702-12:23:19 Iters: 125000/[03], loss: 10.6411, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220702-12:25:23 Iters: 125100/[03], loss: 11.9072, train_accuracy: 0.0078, time: 1.24 s/iter, learning rate: 0.05 -20220702-12:27:27 Iters: 125200/[03], loss: 11.5257, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220702-12:29:31 Iters: 125300/[03], loss: 11.6423, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220702-12:31:34 Iters: 125400/[03], loss: 11.6895, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220702-12:33:38 Iters: 125500/[03], loss: 12.1533, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220702-12:35:42 Iters: 125600/[03], loss: 10.8494, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220702-12:37:46 Iters: 125700/[03], loss: 10.7577, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220702-12:39:49 Iters: 125800/[03], loss: 11.6522, train_accuracy: 0.0469, time: 1.24 s/iter, learning 
rate: 0.05 -20220702-12:41:53 Iters: 125900/[03], loss: 10.8224, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220702-12:43:57 Iters: 126000/[03], loss: 11.9173, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220702-12:46:01 Iters: 126100/[03], loss: 11.9537, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220702-12:48:04 Iters: 126200/[03], loss: 11.5271, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220702-12:50:08 Iters: 126300/[03], loss: 10.5772, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220702-12:52:12 Iters: 126400/[03], loss: 10.8306, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220702-12:54:16 Iters: 126500/[03], loss: 11.3193, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220702-12:56:19 Iters: 126600/[03], loss: 11.2117, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220702-12:58:23 Iters: 126700/[03], loss: 11.1504, train_accuracy: 0.0703, time: 1.24 s/iter, learning rate: 0.05 -20220702-13:00:27 Iters: 126800/[03], loss: 11.1023, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220702-13:02:31 Iters: 126900/[03], loss: 11.2834, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220702-13:04:34 Iters: 127000/[03], loss: 10.9226, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220702-13:06:38 Iters: 127100/[03], loss: 11.4992, train_accuracy: 0.0078, time: 1.24 s/iter, learning rate: 0.05 -20220702-13:08:42 Iters: 127200/[03], loss: 12.1559, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220702-13:10:45 Iters: 127300/[03], loss: 11.0380, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220702-13:12:49 Iters: 127400/[03], loss: 11.9161, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-13:14:52 Iters: 127500/[03], loss: 10.7660, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 
-20220702-13:16:55 Iters: 127600/[03], loss: 10.7787, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-13:18:59 Iters: 127700/[03], loss: 11.2025, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-13:21:02 Iters: 127800/[03], loss: 11.6998, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-13:23:05 Iters: 127900/[03], loss: 10.8655, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-13:25:08 Iters: 128000/[03], loss: 11.4906, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-13:27:12 Iters: 128100/[03], loss: 11.1444, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-13:29:15 Iters: 128200/[03], loss: 11.0672, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-13:31:18 Iters: 128300/[03], loss: 11.8201, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-13:33:21 Iters: 128400/[03], loss: 11.1551, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-13:35:24 Iters: 128500/[03], loss: 11.5695, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-13:37:28 Iters: 128600/[03], loss: 11.0980, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-13:39:31 Iters: 128700/[03], loss: 10.9764, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-13:41:34 Iters: 128800/[03], loss: 11.0576, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-13:43:37 Iters: 128900/[03], loss: 11.7871, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220702-13:45:40 Iters: 129000/[03], loss: 12.2465, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-13:47:43 Iters: 129100/[03], loss: 11.0129, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-13:49:47 Iters: 129200/[03], loss: 11.2250, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 
-20220702-13:51:49 Iters: 129300/[03], loss: 11.2002, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-13:53:53 Iters: 129400/[03], loss: 9.9889, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-13:55:56 Iters: 129500/[03], loss: 11.3680, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-13:57:59 Iters: 129600/[03], loss: 10.8778, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-14:00:02 Iters: 129700/[03], loss: 11.4850, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-14:02:05 Iters: 129800/[03], loss: 11.4726, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-14:04:08 Iters: 129900/[03], loss: 11.9710, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-14:06:11 Iters: 130000/[03], loss: 11.6744, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-14:06:11 Saving checkpoint: 130000 -20220702-14:07:28 LFW Ave Accuracy: 99.0499 -20220702-14:08:44 AgeDB-30 Ave Accuracy: 93.5000 -20220702-14:10:13 CFP-FP Ave Accuracy: 87.8000 -20220702-14:10:13 Current Best Accuracy: LFW: 99.2333 in iters: 120000, AgeDB-30: 93.8500 in iters: 50000 and CFP-FP: 88.3000 in iters: 120000 -20220702-14:12:15 Iters: 130100/[03], loss: 11.4323, train_accuracy: 0.0391, time: 3.65 s/iter, learning rate: 0.05 -20220702-14:14:18 Iters: 130200/[03], loss: 11.0011, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-14:16:21 Iters: 130300/[03], loss: 11.1061, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-14:18:24 Iters: 130400/[03], loss: 10.6098, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-14:20:27 Iters: 130500/[03], loss: 11.3008, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-14:22:30 Iters: 130600/[03], loss: 11.3009, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-14:24:33 Iters: 
130700/[03], loss: 10.9683, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-14:26:36 Iters: 130800/[03], loss: 11.9657, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220702-14:28:39 Iters: 130900/[03], loss: 10.6162, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-14:30:42 Iters: 131000/[03], loss: 10.8843, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-14:32:45 Iters: 131100/[03], loss: 11.5667, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-14:34:48 Iters: 131200/[03], loss: 11.5151, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-14:36:51 Iters: 131300/[03], loss: 10.7394, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-14:38:54 Iters: 131400/[03], loss: 11.1171, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-14:40:57 Iters: 131500/[03], loss: 11.1461, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-14:43:00 Iters: 131600/[03], loss: 10.5117, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-14:45:03 Iters: 131700/[03], loss: 11.7957, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-14:47:06 Iters: 131800/[03], loss: 11.8293, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-14:49:09 Iters: 131900/[03], loss: 11.1031, train_accuracy: 0.0703, time: 1.23 s/iter, learning rate: 0.05 -20220702-14:51:12 Iters: 132000/[03], loss: 11.0055, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220702-14:53:15 Iters: 132100/[03], loss: 12.5758, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-14:55:18 Iters: 132200/[03], loss: 10.7974, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-14:57:21 Iters: 132300/[03], loss: 12.0782, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-14:59:24 Iters: 
132400/[03], loss: 11.3105, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-15:01:27 Iters: 132500/[03], loss: 11.1878, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-15:03:30 Iters: 132600/[03], loss: 12.6733, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-15:05:33 Iters: 132700/[03], loss: 11.5948, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-15:07:36 Iters: 132800/[03], loss: 10.8142, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220702-15:09:39 Iters: 132900/[03], loss: 11.7787, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-15:11:42 Iters: 133000/[03], loss: 11.8632, train_accuracy: 0.0000, time: 1.23 s/iter, learning rate: 0.05 -20220702-15:13:45 Iters: 133100/[03], loss: 11.7967, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-15:15:48 Iters: 133200/[03], loss: 11.5428, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-15:17:51 Iters: 133300/[03], loss: 10.8747, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-15:19:54 Iters: 133400/[03], loss: 11.6664, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-15:21:57 Iters: 133500/[03], loss: 11.6713, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-15:24:00 Iters: 133600/[03], loss: 11.2246, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-15:26:03 Iters: 133700/[03], loss: 10.0348, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-15:28:06 Iters: 133800/[03], loss: 10.8734, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-15:30:09 Iters: 133900/[03], loss: 12.6557, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-15:32:12 Iters: 134000/[03], loss: 11.5711, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-15:34:15 Iters: 
134100/[03], loss: 11.2273, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-15:36:18 Iters: 134200/[03], loss: 11.0096, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-15:38:21 Iters: 134300/[03], loss: 11.0847, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-15:40:24 Iters: 134400/[03], loss: 11.7204, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-15:42:28 Iters: 134500/[03], loss: 11.2273, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-15:44:30 Iters: 134600/[03], loss: 11.7264, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-15:46:33 Iters: 134700/[03], loss: 11.2356, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-15:48:37 Iters: 134800/[03], loss: 11.8889, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-15:50:40 Iters: 134900/[03], loss: 11.2243, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220702-15:52:43 Iters: 135000/[03], loss: 11.2854, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-15:54:46 Iters: 135100/[03], loss: 10.1520, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-15:56:49 Iters: 135200/[03], loss: 11.5735, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220702-15:58:52 Iters: 135300/[03], loss: 11.0943, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220702-16:00:55 Iters: 135400/[03], loss: 11.6488, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-16:02:58 Iters: 135500/[03], loss: 11.3264, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-16:05:01 Iters: 135600/[03], loss: 12.1123, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-16:07:04 Iters: 135700/[03], loss: 11.3525, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-16:09:07 Iters: 
135800/[03], loss: 11.3625, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-16:11:10 Iters: 135900/[03], loss: 11.7157, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-16:13:13 Iters: 136000/[03], loss: 11.5131, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-16:15:17 Iters: 136100/[03], loss: 10.7012, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220702-16:17:20 Iters: 136200/[03], loss: 12.0135, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-16:19:23 Iters: 136300/[03], loss: 10.7920, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-16:21:26 Iters: 136400/[03], loss: 11.6538, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-16:22:51 Train Epoch: 4/18 ... -20220702-16:23:29 Iters: 136500/[04], loss: 10.5065, train_accuracy: 0.0625, time: 0.37 s/iter, learning rate: 0.05 -20220702-16:25:32 Iters: 136600/[04], loss: 11.2743, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-16:27:35 Iters: 136700/[04], loss: 11.0206, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-16:29:38 Iters: 136800/[04], loss: 10.4722, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-16:31:41 Iters: 136900/[04], loss: 10.8302, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-16:33:44 Iters: 137000/[04], loss: 10.7250, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-16:35:47 Iters: 137100/[04], loss: 11.5221, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-16:37:51 Iters: 137200/[04], loss: 12.0592, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-16:39:54 Iters: 137300/[04], loss: 11.3607, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-16:41:57 Iters: 137400/[04], loss: 10.7214, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 
0.05 -20220702-16:44:00 Iters: 137500/[04], loss: 11.5342, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-16:46:03 Iters: 137600/[04], loss: 12.0527, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-16:48:06 Iters: 137700/[04], loss: 10.4175, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-16:50:09 Iters: 137800/[04], loss: 12.2903, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-16:52:12 Iters: 137900/[04], loss: 11.0694, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-16:54:15 Iters: 138000/[04], loss: 10.9786, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-16:56:18 Iters: 138100/[04], loss: 11.8667, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220702-16:58:21 Iters: 138200/[04], loss: 11.6059, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-17:00:25 Iters: 138300/[04], loss: 10.6973, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220702-17:02:28 Iters: 138400/[04], loss: 10.6759, train_accuracy: 0.0703, time: 1.23 s/iter, learning rate: 0.05 -20220702-17:04:32 Iters: 138500/[04], loss: 11.5327, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220702-17:06:35 Iters: 138600/[04], loss: 11.0650, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220702-17:08:39 Iters: 138700/[04], loss: 11.4567, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220702-17:10:43 Iters: 138800/[04], loss: 10.4901, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220702-17:12:47 Iters: 138900/[04], loss: 11.2707, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220702-17:14:50 Iters: 139000/[04], loss: 11.5773, train_accuracy: 0.0547, time: 1.24 s/iter, learning rate: 0.05 -20220702-17:16:54 Iters: 139100/[04], loss: 12.1220, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 
-20220702-17:18:58 Iters: 139200/[04], loss: 10.7923, train_accuracy: 0.0625, time: 1.24 s/iter, learning rate: 0.05 -20220702-17:21:01 Iters: 139300/[04], loss: 10.8787, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-17:23:04 Iters: 139400/[04], loss: 11.5820, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-17:25:08 Iters: 139500/[04], loss: 11.5125, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-17:27:11 Iters: 139600/[04], loss: 11.1429, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-17:29:14 Iters: 139700/[04], loss: 11.4964, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-17:31:17 Iters: 139800/[04], loss: 10.8577, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-17:33:20 Iters: 139900/[04], loss: 11.5401, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-17:35:23 Iters: 140000/[04], loss: 10.7699, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-17:35:23 Saving checkpoint: 140000 -20220702-17:36:39 LFW Ave Accuracy: 98.9999 -20220702-17:37:54 AgeDB-30 Ave Accuracy: 93.9500 -20220702-17:39:21 CFP-FP Ave Accuracy: 87.1286 -20220702-17:39:21 Current Best Accuracy: LFW: 99.2333 in iters: 120000, AgeDB-30: 93.9500 in iters: 140000 and CFP-FP: 88.3000 in iters: 120000 -20220702-17:41:24 Iters: 140100/[04], loss: 11.6589, train_accuracy: 0.0078, time: 3.61 s/iter, learning rate: 0.05 -20220702-17:43:28 Iters: 140200/[04], loss: 11.9645, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220702-17:45:31 Iters: 140300/[04], loss: 10.7242, train_accuracy: 0.0625, time: 1.24 s/iter, learning rate: 0.05 -20220702-17:47:34 Iters: 140400/[04], loss: 10.4897, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-17:49:37 Iters: 140500/[04], loss: 10.5114, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-17:51:41 Iters: 
140600/[04], loss: 10.7605, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-17:53:44 Iters: 140700/[04], loss: 11.5973, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-17:55:47 Iters: 140800/[04], loss: 12.5674, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-17:57:50 Iters: 140900/[04], loss: 11.9579, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-17:59:53 Iters: 141000/[04], loss: 11.2625, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-18:01:56 Iters: 141100/[04], loss: 10.8190, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-18:03:59 Iters: 141200/[04], loss: 11.5222, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220702-18:06:02 Iters: 141300/[04], loss: 10.7851, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220702-18:08:05 Iters: 141400/[04], loss: 11.6944, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-18:10:08 Iters: 141500/[04], loss: 10.5718, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-18:12:11 Iters: 141600/[04], loss: 10.6040, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-18:14:14 Iters: 141700/[04], loss: 11.9435, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-18:16:17 Iters: 141800/[04], loss: 11.2316, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-18:18:20 Iters: 141900/[04], loss: 11.3810, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-18:20:23 Iters: 142000/[04], loss: 11.4291, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-18:22:26 Iters: 142100/[04], loss: 10.9345, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-18:24:29 Iters: 142200/[04], loss: 10.8025, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-18:26:32 Iters: 
142300/[04], loss: 11.2173, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-18:28:35 Iters: 142400/[04], loss: 10.6233, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-18:30:38 Iters: 142500/[04], loss: 11.5390, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-18:32:41 Iters: 142600/[04], loss: 11.9552, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-18:34:45 Iters: 142700/[04], loss: 11.8594, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-18:36:48 Iters: 142800/[04], loss: 11.8120, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220702-18:38:51 Iters: 142900/[04], loss: 11.2392, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220702-18:40:54 Iters: 143000/[04], loss: 10.9087, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-18:42:57 Iters: 143100/[04], loss: 11.6193, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-18:45:00 Iters: 143200/[04], loss: 10.8855, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-18:47:03 Iters: 143300/[04], loss: 11.6196, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-18:49:06 Iters: 143400/[04], loss: 10.5414, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-18:51:09 Iters: 143500/[04], loss: 10.8455, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-18:53:12 Iters: 143600/[04], loss: 12.1704, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220702-18:55:15 Iters: 143700/[04], loss: 10.8320, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-18:57:18 Iters: 143800/[04], loss: 11.3351, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-18:59:21 Iters: 143900/[04], loss: 11.8932, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-19:01:24 Iters: 
144000/[04], loss: 11.3004, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-19:03:27 Iters: 144100/[04], loss: 11.3632, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-19:05:30 Iters: 144200/[04], loss: 11.1385, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-19:07:33 Iters: 144300/[04], loss: 11.6640, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-19:09:36 Iters: 144400/[04], loss: 11.4521, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-19:11:39 Iters: 144500/[04], loss: 11.0499, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220702-19:13:42 Iters: 144600/[04], loss: 11.4937, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-19:15:45 Iters: 144700/[04], loss: 11.0760, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-19:17:48 Iters: 144800/[04], loss: 11.5750, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-19:19:51 Iters: 144900/[04], loss: 11.7381, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-19:21:54 Iters: 145000/[04], loss: 11.6016, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-19:23:57 Iters: 145100/[04], loss: 10.0352, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-19:26:00 Iters: 145200/[04], loss: 11.3545, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-19:28:03 Iters: 145300/[04], loss: 11.0235, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-19:30:06 Iters: 145400/[04], loss: 11.8599, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-19:32:09 Iters: 145500/[04], loss: 11.6544, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-19:34:12 Iters: 145600/[04], loss: 10.9427, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-19:36:15 Iters: 
145700/[04], loss: 11.4017, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-19:38:18 Iters: 145800/[04], loss: 11.0677, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-19:40:21 Iters: 145900/[04], loss: 11.5001, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-19:42:24 Iters: 146000/[04], loss: 11.8079, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-19:44:27 Iters: 146100/[04], loss: 11.1241, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-19:46:30 Iters: 146200/[04], loss: 11.0587, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-19:48:33 Iters: 146300/[04], loss: 12.0473, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-19:50:36 Iters: 146400/[04], loss: 11.5345, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-19:52:39 Iters: 146500/[04], loss: 10.9246, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-19:54:42 Iters: 146600/[04], loss: 11.9991, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-19:56:45 Iters: 146700/[04], loss: 11.4034, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-19:58:48 Iters: 146800/[04], loss: 10.3768, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220702-20:00:51 Iters: 146900/[04], loss: 10.8434, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-20:02:54 Iters: 147000/[04], loss: 11.2195, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-20:04:56 Iters: 147100/[04], loss: 11.3777, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-20:06:59 Iters: 147200/[04], loss: 10.7921, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-20:09:02 Iters: 147300/[04], loss: 11.4148, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-20:11:05 Iters: 
147400/[04], loss: 11.3398, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-20:13:08 Iters: 147500/[04], loss: 10.2848, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-20:15:11 Iters: 147600/[04], loss: 10.8306, train_accuracy: 0.0703, time: 1.23 s/iter, learning rate: 0.05 -20220702-20:17:14 Iters: 147700/[04], loss: 10.9907, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-20:19:17 Iters: 147800/[04], loss: 11.1985, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-20:21:20 Iters: 147900/[04], loss: 11.4080, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-20:23:23 Iters: 148000/[04], loss: 13.0027, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220702-20:25:26 Iters: 148100/[04], loss: 10.9940, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-20:27:29 Iters: 148200/[04], loss: 11.1180, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-20:29:33 Iters: 148300/[04], loss: 11.6524, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-20:31:36 Iters: 148400/[04], loss: 10.8103, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-20:33:39 Iters: 148500/[04], loss: 10.6521, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220702-20:35:42 Iters: 148600/[04], loss: 11.6980, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-20:37:45 Iters: 148700/[04], loss: 10.8612, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-20:39:48 Iters: 148800/[04], loss: 11.4524, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-20:41:51 Iters: 148900/[04], loss: 11.0737, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-20:43:54 Iters: 149000/[04], loss: 11.5063, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-20:45:57 Iters: 
149100/[04], loss: 11.2804, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-20:48:00 Iters: 149200/[04], loss: 11.1492, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-20:50:02 Iters: 149300/[04], loss: 11.3080, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-20:52:05 Iters: 149400/[04], loss: 11.5800, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-20:54:08 Iters: 149500/[04], loss: 11.4100, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-20:56:11 Iters: 149600/[04], loss: 10.8515, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-20:58:14 Iters: 149700/[04], loss: 11.5474, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-21:00:17 Iters: 149800/[04], loss: 11.7310, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-21:02:20 Iters: 149900/[04], loss: 11.9377, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220702-21:04:23 Iters: 150000/[04], loss: 11.7504, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-21:04:23 Saving checkpoint: 150000 -20220702-21:05:40 LFW Ave Accuracy: 99.0832 -20220702-21:06:55 AgeDB-30 Ave Accuracy: 93.6167 -20220702-21:08:21 CFP-FP Ave Accuracy: 87.8571 -20220702-21:08:21 Current Best Accuracy: LFW: 99.2333 in iters: 120000, AgeDB-30: 93.9500 in iters: 140000 and CFP-FP: 88.3000 in iters: 120000 -20220702-21:10:24 Iters: 150100/[04], loss: 11.2980, train_accuracy: 0.0391, time: 3.61 s/iter, learning rate: 0.05 -20220702-21:12:27 Iters: 150200/[04], loss: 10.7900, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-21:14:30 Iters: 150300/[04], loss: 10.9237, train_accuracy: 0.0000, time: 1.23 s/iter, learning rate: 0.05 -20220702-21:16:33 Iters: 150400/[04], loss: 10.9831, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-21:18:36 Iters: 150500/[04], loss: 11.0676, 
train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-21:20:39 Iters: 150600/[04], loss: 11.2197, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-21:22:42 Iters: 150700/[04], loss: 11.2748, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-21:24:45 Iters: 150800/[04], loss: 11.7859, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-21:26:48 Iters: 150900/[04], loss: 10.8515, train_accuracy: 0.0781, time: 1.23 s/iter, learning rate: 0.05 -20220702-21:28:51 Iters: 151000/[04], loss: 11.2819, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-21:30:54 Iters: 151100/[04], loss: 11.6747, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-21:32:57 Iters: 151200/[04], loss: 11.4359, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220702-21:35:01 Iters: 151300/[04], loss: 10.4320, train_accuracy: 0.1016, time: 1.23 s/iter, learning rate: 0.05 -20220702-21:37:04 Iters: 151400/[04], loss: 11.3195, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-21:39:07 Iters: 151500/[04], loss: 12.4645, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-21:41:10 Iters: 151600/[04], loss: 10.8549, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-21:43:13 Iters: 151700/[04], loss: 10.5132, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-21:45:16 Iters: 151800/[04], loss: 11.1872, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-21:47:19 Iters: 151900/[04], loss: 11.3409, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-21:49:22 Iters: 152000/[04], loss: 12.2447, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-21:51:25 Iters: 152100/[04], loss: 10.7299, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-21:53:28 Iters: 152200/[04], loss: 11.2778, 
train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-21:55:31 Iters: 152300/[04], loss: 12.2457, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-21:57:34 Iters: 152400/[04], loss: 11.9112, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-21:59:38 Iters: 152500/[04], loss: 11.0486, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-22:01:41 Iters: 152600/[04], loss: 10.8415, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-22:03:43 Iters: 152700/[04], loss: 12.0082, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220702-22:05:46 Iters: 152800/[04], loss: 11.9595, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220702-22:07:49 Iters: 152900/[04], loss: 11.7819, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-22:09:52 Iters: 153000/[04], loss: 11.5101, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-22:11:55 Iters: 153100/[04], loss: 11.5210, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-22:13:58 Iters: 153200/[04], loss: 11.2321, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-22:16:01 Iters: 153300/[04], loss: 11.5106, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-22:18:05 Iters: 153400/[04], loss: 12.0349, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-22:20:08 Iters: 153500/[04], loss: 11.1622, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-22:22:11 Iters: 153600/[04], loss: 11.6572, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-22:24:14 Iters: 153700/[04], loss: 10.2737, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220702-22:26:16 Iters: 153800/[04], loss: 10.4736, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-22:28:19 Iters: 153900/[04], loss: 10.9480, 
train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220702-22:30:22 Iters: 154000/[04], loss: 10.8610, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-22:32:25 Iters: 154100/[04], loss: 10.6684, train_accuracy: 0.0703, time: 1.23 s/iter, learning rate: 0.05 -20220702-22:34:28 Iters: 154200/[04], loss: 11.4221, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-22:36:31 Iters: 154300/[04], loss: 10.6678, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220702-22:38:34 Iters: 154400/[04], loss: 11.5805, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-22:40:37 Iters: 154500/[04], loss: 10.7436, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-22:42:39 Iters: 154600/[04], loss: 10.4327, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220702-22:44:42 Iters: 154700/[04], loss: 11.4051, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-22:46:45 Iters: 154800/[04], loss: 11.0675, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-22:48:48 Iters: 154900/[04], loss: 11.1456, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-22:50:51 Iters: 155000/[04], loss: 11.2481, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-22:52:54 Iters: 155100/[04], loss: 11.3906, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-22:54:56 Iters: 155200/[04], loss: 10.5666, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-22:56:59 Iters: 155300/[04], loss: 11.3618, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-22:59:02 Iters: 155400/[04], loss: 11.7348, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-23:01:05 Iters: 155500/[04], loss: 11.2996, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-23:03:08 Iters: 155600/[04], loss: 11.0121, 
train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-23:05:11 Iters: 155700/[04], loss: 11.4915, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-23:07:14 Iters: 155800/[04], loss: 10.7824, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-23:09:18 Iters: 155900/[04], loss: 11.1956, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-23:11:21 Iters: 156000/[04], loss: 11.0144, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-23:13:24 Iters: 156100/[04], loss: 10.4280, train_accuracy: 0.0625, time: 1.24 s/iter, learning rate: 0.05 -20220702-23:15:28 Iters: 156200/[04], loss: 11.5528, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-23:17:31 Iters: 156300/[04], loss: 11.9970, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220702-23:19:34 Iters: 156400/[04], loss: 11.3050, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-23:21:37 Iters: 156500/[04], loss: 11.8814, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220702-23:23:41 Iters: 156600/[04], loss: 11.1375, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-23:25:44 Iters: 156700/[04], loss: 11.4557, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-23:27:47 Iters: 156800/[04], loss: 11.8844, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220702-23:29:50 Iters: 156900/[04], loss: 10.9881, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220702-23:31:54 Iters: 157000/[04], loss: 11.3536, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220702-23:33:57 Iters: 157100/[04], loss: 11.3484, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-23:36:00 Iters: 157200/[04], loss: 11.1874, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220702-23:38:04 Iters: 157300/[04], loss: 10.8777, 
train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220702-23:40:07 Iters: 157400/[04], loss: 11.9263, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220702-23:42:10 Iters: 157500/[04], loss: 12.0226, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-23:44:14 Iters: 157600/[04], loss: 11.6676, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220702-23:46:17 Iters: 157700/[04], loss: 11.1248, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220702-23:48:20 Iters: 157800/[04], loss: 11.7507, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220702-23:50:23 Iters: 157900/[04], loss: 11.2548, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-23:52:26 Iters: 158000/[04], loss: 10.4368, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220702-23:54:30 Iters: 158100/[04], loss: 10.8228, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220702-23:56:33 Iters: 158200/[04], loss: 10.7929, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220702-23:58:37 Iters: 158300/[04], loss: 11.1219, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220703-00:00:40 Iters: 158400/[04], loss: 10.6885, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-00:02:43 Iters: 158500/[04], loss: 11.2960, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-00:04:46 Iters: 158600/[04], loss: 12.0813, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-00:06:49 Iters: 158700/[04], loss: 11.2466, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-00:08:53 Iters: 158800/[04], loss: 10.9056, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-00:10:56 Iters: 158900/[04], loss: 11.3172, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-00:12:59 Iters: 159000/[04], loss: 12.8257, 
train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-00:15:03 Iters: 159100/[04], loss: 10.8481, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-00:17:06 Iters: 159200/[04], loss: 11.4906, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-00:19:09 Iters: 159300/[04], loss: 10.7940, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220703-00:21:13 Iters: 159400/[04], loss: 11.0202, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-00:23:16 Iters: 159500/[04], loss: 10.5230, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-00:25:19 Iters: 159600/[04], loss: 11.4270, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-00:27:23 Iters: 159700/[04], loss: 11.4403, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-00:29:26 Iters: 159800/[04], loss: 10.7266, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-00:31:29 Iters: 159900/[04], loss: 11.6495, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-00:33:32 Iters: 160000/[04], loss: 11.4739, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-00:33:32 Saving checkpoint: 160000 -20220703-00:34:50 LFW Ave Accuracy: 99.1332 -20220703-00:36:06 AgeDB-30 Ave Accuracy: 93.4167 -20220703-00:37:32 CFP-FP Ave Accuracy: 87.9571 -20220703-00:37:32 Current Best Accuracy: LFW: 99.2333 in iters: 120000, AgeDB-30: 93.9500 in iters: 140000 and CFP-FP: 88.3000 in iters: 120000 -20220703-00:39:35 Iters: 160100/[04], loss: 10.6388, train_accuracy: 0.0391, time: 3.63 s/iter, learning rate: 0.05 -20220703-00:41:39 Iters: 160200/[04], loss: 11.6504, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-00:43:42 Iters: 160300/[04], loss: 10.7937, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-00:45:45 Iters: 160400/[04], loss: 11.3872, train_accuracy: 0.0312, 
time: 1.23 s/iter, learning rate: 0.05 -20220703-00:47:48 Iters: 160500/[04], loss: 11.5527, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-00:49:51 Iters: 160600/[04], loss: 10.7489, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-00:51:54 Iters: 160700/[04], loss: 10.8998, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-00:53:57 Iters: 160800/[04], loss: 11.6191, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-00:56:00 Iters: 160900/[04], loss: 11.5338, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-00:58:04 Iters: 161000/[04], loss: 10.3660, train_accuracy: 0.0781, time: 1.23 s/iter, learning rate: 0.05 -20220703-01:00:07 Iters: 161100/[04], loss: 11.5742, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-01:02:10 Iters: 161200/[04], loss: 11.1968, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-01:04:13 Iters: 161300/[04], loss: 11.1042, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-01:06:16 Iters: 161400/[04], loss: 11.5879, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-01:08:20 Iters: 161500/[04], loss: 11.4507, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-01:10:23 Iters: 161600/[04], loss: 11.2687, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220703-01:12:26 Iters: 161700/[04], loss: 11.6079, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-01:14:30 Iters: 161800/[04], loss: 11.2584, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-01:16:33 Iters: 161900/[04], loss: 11.5496, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-01:18:36 Iters: 162000/[04], loss: 11.2919, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220703-01:20:39 Iters: 162100/[04], loss: 11.4932, train_accuracy: 0.0078, time: 1.23 
s/iter, learning rate: 0.05 -20220703-01:22:43 Iters: 162200/[04], loss: 11.2100, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-01:24:46 Iters: 162300/[04], loss: 12.1522, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-01:26:49 Iters: 162400/[04], loss: 10.7945, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-01:28:53 Iters: 162500/[04], loss: 10.8767, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-01:30:56 Iters: 162600/[04], loss: 10.7893, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-01:32:59 Iters: 162700/[04], loss: 10.7935, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-01:35:03 Iters: 162800/[04], loss: 11.5969, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-01:37:06 Iters: 162900/[04], loss: 11.9092, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-01:39:09 Iters: 163000/[04], loss: 12.4528, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220703-01:41:13 Iters: 163100/[04], loss: 11.5090, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-01:43:16 Iters: 163200/[04], loss: 10.5122, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-01:45:19 Iters: 163300/[04], loss: 11.8236, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-01:47:23 Iters: 163400/[04], loss: 12.0338, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220703-01:49:26 Iters: 163500/[04], loss: 12.0307, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-01:51:29 Iters: 163600/[04], loss: 11.4403, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-01:53:32 Iters: 163700/[04], loss: 11.0801, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-01:55:35 Iters: 163800/[04], loss: 10.7742, train_accuracy: 0.0391, time: 1.23 s/iter, 
learning rate: 0.05 -20220703-01:57:38 Iters: 163900/[04], loss: 11.8920, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-01:59:42 Iters: 164000/[04], loss: 11.2004, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-02:01:45 Iters: 164100/[04], loss: 11.5952, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-02:03:48 Iters: 164200/[04], loss: 11.8433, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-02:05:51 Iters: 164300/[04], loss: 11.4181, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-02:07:55 Iters: 164400/[04], loss: 10.5927, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-02:09:58 Iters: 164500/[04], loss: 11.4861, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-02:12:01 Iters: 164600/[04], loss: 10.6010, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-02:14:04 Iters: 164700/[04], loss: 11.7074, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220703-02:16:08 Iters: 164800/[04], loss: 11.1176, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-02:18:11 Iters: 164900/[04], loss: 11.4947, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-02:20:14 Iters: 165000/[04], loss: 11.2230, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-02:22:17 Iters: 165100/[04], loss: 10.6953, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-02:24:20 Iters: 165200/[04], loss: 11.2672, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-02:26:24 Iters: 165300/[04], loss: 10.6024, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-02:28:27 Iters: 165400/[04], loss: 12.1915, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-02:30:30 Iters: 165500/[04], loss: 11.6951, train_accuracy: 0.0469, time: 1.24 s/iter, learning 
rate: 0.05 -20220703-02:32:34 Iters: 165600/[04], loss: 11.7119, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-02:34:37 Iters: 165700/[04], loss: 10.3755, train_accuracy: 0.0703, time: 1.23 s/iter, learning rate: 0.05 -20220703-02:36:40 Iters: 165800/[04], loss: 11.4943, train_accuracy: 0.0703, time: 1.24 s/iter, learning rate: 0.05 -20220703-02:38:43 Iters: 165900/[04], loss: 10.4626, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-02:40:46 Iters: 166000/[04], loss: 11.6359, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-02:42:50 Iters: 166100/[04], loss: 11.5917, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-02:44:53 Iters: 166200/[04], loss: 11.4703, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-02:46:56 Iters: 166300/[04], loss: 10.5756, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-02:49:00 Iters: 166400/[04], loss: 11.8014, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-02:51:03 Iters: 166500/[04], loss: 12.1459, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-02:53:06 Iters: 166600/[04], loss: 11.2548, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-02:55:09 Iters: 166700/[04], loss: 11.3631, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-02:57:12 Iters: 166800/[04], loss: 11.0172, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-02:59:16 Iters: 166900/[04], loss: 12.1290, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-03:01:19 Iters: 167000/[04], loss: 11.8936, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-03:03:22 Iters: 167100/[04], loss: 11.6546, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-03:05:26 Iters: 167200/[04], loss: 11.4351, train_accuracy: 0.0547, time: 1.24 s/iter, learning rate: 0.05 
-20220703-03:07:29 Iters: 167300/[04], loss: 11.9533, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220703-03:09:32 Iters: 167400/[04], loss: 10.9228, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-03:11:35 Iters: 167500/[04], loss: 10.8946, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-03:13:38 Iters: 167600/[04], loss: 10.9732, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-03:15:42 Iters: 167700/[04], loss: 11.7659, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-03:17:45 Iters: 167800/[04], loss: 11.0789, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-03:19:48 Iters: 167900/[04], loss: 11.5885, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-03:21:51 Iters: 168000/[04], loss: 10.7375, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-03:23:54 Iters: 168100/[04], loss: 11.6857, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-03:25:58 Iters: 168200/[04], loss: 11.3234, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-03:28:01 Iters: 168300/[04], loss: 11.0982, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-03:30:05 Iters: 168400/[04], loss: 11.5805, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-03:32:08 Iters: 168500/[04], loss: 10.5014, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-03:34:11 Iters: 168600/[04], loss: 11.6099, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-03:36:15 Iters: 168700/[04], loss: 11.6198, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220703-03:38:18 Iters: 168800/[04], loss: 10.7295, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-03:40:21 Iters: 168900/[04], loss: 11.2277, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 
-20220703-03:42:24 Iters: 169000/[04], loss: 10.8717, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-03:44:28 Iters: 169100/[04], loss: 11.0310, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220703-03:46:31 Iters: 169200/[04], loss: 11.2781, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220703-03:48:35 Iters: 169300/[04], loss: 11.0967, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220703-03:50:38 Iters: 169400/[04], loss: 11.6984, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-03:52:41 Iters: 169500/[04], loss: 11.2152, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-03:54:44 Iters: 169600/[04], loss: 11.2090, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-03:56:47 Iters: 169700/[04], loss: 11.4477, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-03:58:50 Iters: 169800/[04], loss: 11.3522, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-04:00:53 Iters: 169900/[04], loss: 11.8567, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-04:02:57 Iters: 170000/[04], loss: 11.3803, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-04:02:57 Saving checkpoint: 170000 -20220703-04:04:14 LFW Ave Accuracy: 98.8498 -20220703-04:05:31 AgeDB-30 Ave Accuracy: 93.7000 -20220703-04:07:02 CFP-FP Ave Accuracy: 87.5429 -20220703-04:07:02 Current Best Accuracy: LFW: 99.2333 in iters: 120000, AgeDB-30: 93.9500 in iters: 140000 and CFP-FP: 88.3000 in iters: 120000 -20220703-04:09:05 Iters: 170100/[04], loss: 10.9676, train_accuracy: 0.0156, time: 3.68 s/iter, learning rate: 0.05 -20220703-04:11:08 Iters: 170200/[04], loss: 11.0585, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-04:13:11 Iters: 170300/[04], loss: 11.2576, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-04:15:14 Iters: 
170400/[04], loss: 11.3021, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-04:17:17 Iters: 170500/[04], loss: 11.3388, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-04:19:21 Iters: 170600/[04], loss: 10.7267, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-04:21:24 Iters: 170700/[04], loss: 11.1490, train_accuracy: 0.0625, time: 1.24 s/iter, learning rate: 0.05 -20220703-04:23:27 Iters: 170800/[04], loss: 10.5229, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-04:25:31 Iters: 170900/[04], loss: 10.6656, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-04:27:34 Iters: 171000/[04], loss: 12.0756, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-04:29:37 Iters: 171100/[04], loss: 11.7639, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-04:31:40 Iters: 171200/[04], loss: 10.9506, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-04:33:44 Iters: 171300/[04], loss: 11.2137, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220703-04:35:47 Iters: 171400/[04], loss: 11.5136, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-04:37:50 Iters: 171500/[04], loss: 11.2281, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-04:39:54 Iters: 171600/[04], loss: 10.6277, train_accuracy: 0.0781, time: 1.23 s/iter, learning rate: 0.05 -20220703-04:41:57 Iters: 171700/[04], loss: 11.6778, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-04:44:00 Iters: 171800/[04], loss: 10.9989, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-04:46:03 Iters: 171900/[04], loss: 12.2266, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-04:48:06 Iters: 172000/[04], loss: 10.9043, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-04:50:10 Iters: 
172100/[04], loss: 12.3333, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-04:52:13 Iters: 172200/[04], loss: 10.5526, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-04:54:16 Iters: 172300/[04], loss: 10.4559, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-04:56:19 Iters: 172400/[04], loss: 11.7750, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-04:58:22 Iters: 172500/[04], loss: 11.7363, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-05:00:25 Iters: 172600/[04], loss: 10.5294, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-05:02:28 Iters: 172700/[04], loss: 11.3421, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-05:04:32 Iters: 172800/[04], loss: 10.7646, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-05:06:35 Iters: 172900/[04], loss: 10.7779, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-05:08:38 Iters: 173000/[04], loss: 12.1677, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-05:10:41 Iters: 173100/[04], loss: 11.8744, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-05:12:45 Iters: 173200/[04], loss: 11.2200, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-05:14:48 Iters: 173300/[04], loss: 10.6752, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-05:16:51 Iters: 173400/[04], loss: 11.0221, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-05:18:54 Iters: 173500/[04], loss: 10.8674, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-05:20:58 Iters: 173600/[04], loss: 11.4896, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-05:23:01 Iters: 173700/[04], loss: 10.2997, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-05:25:04 Iters: 
173800/[04], loss: 10.9052, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-05:27:07 Iters: 173900/[04], loss: 11.7840, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-05:29:11 Iters: 174000/[04], loss: 11.8271, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-05:31:14 Iters: 174100/[04], loss: 10.8944, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-05:33:17 Iters: 174200/[04], loss: 11.3712, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-05:35:21 Iters: 174300/[04], loss: 10.6664, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220703-05:37:24 Iters: 174400/[04], loss: 12.0462, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-05:39:28 Iters: 174500/[04], loss: 12.0607, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220703-05:41:31 Iters: 174600/[04], loss: 11.6459, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-05:43:34 Iters: 174700/[04], loss: 11.6692, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220703-05:45:37 Iters: 174800/[04], loss: 11.8145, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-05:47:41 Iters: 174900/[04], loss: 11.9825, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220703-05:49:44 Iters: 175000/[04], loss: 11.4366, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-05:51:48 Iters: 175100/[04], loss: 11.4934, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220703-05:53:51 Iters: 175200/[04], loss: 11.5713, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-05:55:54 Iters: 175300/[04], loss: 10.7301, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220703-05:57:58 Iters: 175400/[04], loss: 11.3917, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220703-06:00:01 Iters: 
175500/[04], loss: 11.7297, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220703-06:02:04 Iters: 175600/[04], loss: 11.7364, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-06:04:08 Iters: 175700/[04], loss: 11.6172, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220703-06:06:11 Iters: 175800/[04], loss: 10.6644, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-06:08:14 Iters: 175900/[04], loss: 12.4654, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-06:10:17 Iters: 176000/[04], loss: 10.6014, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-06:12:20 Iters: 176100/[04], loss: 11.3025, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-06:14:24 Iters: 176200/[04], loss: 11.6132, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-06:16:27 Iters: 176300/[04], loss: 12.5052, train_accuracy: 0.0000, time: 1.23 s/iter, learning rate: 0.05 -20220703-06:18:30 Iters: 176400/[04], loss: 10.8751, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-06:20:33 Iters: 176500/[04], loss: 10.8867, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220703-06:22:36 Iters: 176600/[04], loss: 11.9145, train_accuracy: 0.0000, time: 1.23 s/iter, learning rate: 0.05 -20220703-06:24:40 Iters: 176700/[04], loss: 11.3786, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220703-06:26:43 Iters: 176800/[04], loss: 11.0239, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-06:28:46 Iters: 176900/[04], loss: 11.7247, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-06:30:49 Iters: 177000/[04], loss: 10.2924, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-06:32:53 Iters: 177100/[04], loss: 10.8103, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220703-06:34:56 Iters: 
177200/[04], loss: 10.6932, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220703-06:36:59 Iters: 177300/[04], loss: 11.5990, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-06:39:02 Iters: 177400/[04], loss: 10.9876, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-06:41:05 Iters: 177500/[04], loss: 10.4154, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-06:43:09 Iters: 177600/[04], loss: 11.0254, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-06:45:12 Iters: 177700/[04], loss: 11.9169, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-06:47:15 Iters: 177800/[04], loss: 11.3258, train_accuracy: 0.0000, time: 1.23 s/iter, learning rate: 0.05 -20220703-06:49:19 Iters: 177900/[04], loss: 11.9184, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220703-06:51:22 Iters: 178000/[04], loss: 10.8822, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-06:53:25 Iters: 178100/[04], loss: 12.5114, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220703-06:55:28 Iters: 178200/[04], loss: 11.7025, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-06:57:32 Iters: 178300/[04], loss: 11.2129, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220703-06:59:35 Iters: 178400/[04], loss: 10.7484, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-07:01:39 Iters: 178500/[04], loss: 10.7435, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220703-07:03:42 Iters: 178600/[04], loss: 11.1171, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-07:05:45 Iters: 178700/[04], loss: 11.2655, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-07:07:49 Iters: 178800/[04], loss: 11.0791, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-07:09:52 Iters: 
178900/[04], loss: 11.3001, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-07:11:55 Iters: 179000/[04], loss: 11.7666, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-07:13:58 Iters: 179100/[04], loss: 11.9073, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-07:16:02 Iters: 179200/[04], loss: 10.8817, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-07:18:05 Iters: 179300/[04], loss: 10.5308, train_accuracy: 0.0703, time: 1.23 s/iter, learning rate: 0.05 -20220703-07:20:08 Iters: 179400/[04], loss: 11.4320, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220703-07:22:11 Iters: 179500/[04], loss: 10.9888, train_accuracy: 0.0547, time: 1.24 s/iter, learning rate: 0.05 -20220703-07:24:15 Iters: 179600/[04], loss: 11.1719, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-07:26:18 Iters: 179700/[04], loss: 11.2721, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220703-07:28:21 Iters: 179800/[04], loss: 11.7787, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-07:30:25 Iters: 179900/[04], loss: 11.0932, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220703-07:32:28 Iters: 180000/[04], loss: 11.5095, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-07:32:28 Saving checkpoint: 180000 -20220703-07:33:45 LFW Ave Accuracy: 98.9999 -20220703-07:35:01 AgeDB-30 Ave Accuracy: 93.4833 -20220703-07:36:30 CFP-FP Ave Accuracy: 88.1000 -20220703-07:36:30 Current Best Accuracy: LFW: 99.2333 in iters: 120000, AgeDB-30: 93.9500 in iters: 140000 and CFP-FP: 88.3000 in iters: 120000 -20220703-07:38:33 Iters: 180100/[04], loss: 12.2851, train_accuracy: 0.0234, time: 3.65 s/iter, learning rate: 0.05 -20220703-07:40:36 Iters: 180200/[04], loss: 11.3466, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-07:42:40 Iters: 180300/[04], loss: 11.3431, 
train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220703-07:44:43 Iters: 180400/[04], loss: 11.4970, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-07:46:46 Iters: 180500/[04], loss: 11.2417, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-07:48:49 Iters: 180600/[04], loss: 11.5930, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220703-07:50:53 Iters: 180700/[04], loss: 13.0737, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220703-07:52:56 Iters: 180800/[04], loss: 11.2438, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-07:54:59 Iters: 180900/[04], loss: 11.4736, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-07:57:02 Iters: 181000/[04], loss: 11.7253, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-07:59:06 Iters: 181100/[04], loss: 11.4794, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220703-08:01:09 Iters: 181200/[04], loss: 11.8872, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-08:03:12 Iters: 181300/[04], loss: 10.6458, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-08:05:15 Iters: 181400/[04], loss: 12.2417, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220703-08:07:19 Iters: 181500/[04], loss: 11.1971, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-08:09:22 Iters: 181600/[04], loss: 11.5574, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-08:11:25 Iters: 181700/[04], loss: 11.9296, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-08:13:28 Iters: 181800/[04], loss: 10.6815, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-08:15:32 Iters: 181900/[04], loss: 11.2887, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220703-08:16:45 Train Epoch: 5/18 ... 
-20220703-08:17:35 Iters: 182000/[05], loss: 11.6471, train_accuracy: 0.0469, time: 0.50 s/iter, learning rate: 0.05 -20220703-08:19:38 Iters: 182100/[05], loss: 10.9358, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-08:21:41 Iters: 182200/[05], loss: 11.9578, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-08:23:44 Iters: 182300/[05], loss: 11.4238, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-08:25:48 Iters: 182400/[05], loss: 10.5044, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-08:27:51 Iters: 182500/[05], loss: 11.0048, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-08:29:54 Iters: 182600/[05], loss: 11.6702, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-08:31:58 Iters: 182700/[05], loss: 11.8312, train_accuracy: 0.0078, time: 1.24 s/iter, learning rate: 0.05 -20220703-08:34:01 Iters: 182800/[05], loss: 10.4389, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-08:36:04 Iters: 182900/[05], loss: 10.7917, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-08:38:07 Iters: 183000/[05], loss: 12.2765, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-08:40:11 Iters: 183100/[05], loss: 11.5779, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-08:42:14 Iters: 183200/[05], loss: 11.6827, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220703-08:44:17 Iters: 183300/[05], loss: 10.1770, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-08:46:20 Iters: 183400/[05], loss: 10.4916, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-08:48:23 Iters: 183500/[05], loss: 11.9918, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-08:50:27 Iters: 183600/[05], loss: 11.7028, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 
-20220703-08:52:30 Iters: 183700/[05], loss: 11.4340, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-08:54:33 Iters: 183800/[05], loss: 11.7060, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-08:56:36 Iters: 183900/[05], loss: 12.0949, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-08:58:40 Iters: 184000/[05], loss: 11.5485, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220703-09:00:43 Iters: 184100/[05], loss: 11.4963, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-09:02:46 Iters: 184200/[05], loss: 10.4879, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-09:04:49 Iters: 184300/[05], loss: 12.5609, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220703-09:06:52 Iters: 184400/[05], loss: 11.7639, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-09:08:56 Iters: 184500/[05], loss: 11.6657, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-09:10:59 Iters: 184600/[05], loss: 11.1299, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-09:13:02 Iters: 184700/[05], loss: 11.7997, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-09:15:05 Iters: 184800/[05], loss: 10.6037, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-09:17:09 Iters: 184900/[05], loss: 11.0114, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220703-09:19:12 Iters: 185000/[05], loss: 10.3114, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-09:21:15 Iters: 185100/[05], loss: 11.3640, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-09:23:19 Iters: 185200/[05], loss: 10.7891, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220703-09:25:22 Iters: 185300/[05], loss: 11.3621, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 
-20220703-09:27:25 Iters: 185400/[05], loss: 11.5273, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220703-09:29:28 Iters: 185500/[05], loss: 11.4832, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-09:31:32 Iters: 185600/[05], loss: 10.8278, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-09:33:35 Iters: 185700/[05], loss: 11.3160, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-09:35:38 Iters: 185800/[05], loss: 11.1070, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-09:37:41 Iters: 185900/[05], loss: 11.9839, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-09:39:45 Iters: 186000/[05], loss: 11.2432, train_accuracy: 0.0000, time: 1.23 s/iter, learning rate: 0.05 -20220703-09:41:48 Iters: 186100/[05], loss: 10.8420, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-09:43:51 Iters: 186200/[05], loss: 10.7966, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-09:45:54 Iters: 186300/[05], loss: 10.9841, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-09:47:58 Iters: 186400/[05], loss: 11.4375, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220703-09:50:01 Iters: 186500/[05], loss: 11.4753, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-09:52:05 Iters: 186600/[05], loss: 11.9785, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220703-09:54:08 Iters: 186700/[05], loss: 11.3912, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-09:56:11 Iters: 186800/[05], loss: 10.3592, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-09:58:15 Iters: 186900/[05], loss: 11.3795, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220703-10:00:18 Iters: 187000/[05], loss: 11.5268, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 
-20220703-10:02:21 Iters: 187100/[05], loss: 10.7765, train_accuracy: 0.1094, time: 1.23 s/iter, learning rate: 0.05 -20220703-10:04:24 Iters: 187200/[05], loss: 11.4983, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-10:06:27 Iters: 187300/[05], loss: 11.2624, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-10:08:31 Iters: 187400/[05], loss: 11.8816, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-10:10:34 Iters: 187500/[05], loss: 11.4119, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220703-10:12:37 Iters: 187600/[05], loss: 10.5647, train_accuracy: 0.0703, time: 1.24 s/iter, learning rate: 0.05 -20220703-10:14:40 Iters: 187700/[05], loss: 11.2162, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-10:16:43 Iters: 187800/[05], loss: 11.5504, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-10:18:47 Iters: 187900/[05], loss: 10.8981, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-10:20:50 Iters: 188000/[05], loss: 11.3028, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-10:22:53 Iters: 188100/[05], loss: 11.8354, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220703-10:24:57 Iters: 188200/[05], loss: 11.3952, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220703-10:27:00 Iters: 188300/[05], loss: 11.6380, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-10:29:03 Iters: 188400/[05], loss: 11.9120, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220703-10:31:07 Iters: 188500/[05], loss: 10.9063, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220703-10:33:10 Iters: 188600/[05], loss: 11.0454, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-10:35:13 Iters: 188700/[05], loss: 11.8108, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 
-20220703-10:37:17 Iters: 188800/[05], loss: 10.6296, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-10:39:20 Iters: 188900/[05], loss: 11.1822, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220703-10:41:23 Iters: 189000/[05], loss: 10.7059, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-10:43:27 Iters: 189100/[05], loss: 11.5485, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-10:45:30 Iters: 189200/[05], loss: 10.8738, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220703-10:47:33 Iters: 189300/[05], loss: 11.1099, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-10:49:36 Iters: 189400/[05], loss: 11.2840, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-10:51:40 Iters: 189500/[05], loss: 11.4167, train_accuracy: 0.0000, time: 1.24 s/iter, learning rate: 0.05 -20220703-10:53:43 Iters: 189600/[05], loss: 11.5198, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-10:55:46 Iters: 189700/[05], loss: 10.5887, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-10:57:50 Iters: 189800/[05], loss: 12.0237, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-10:59:53 Iters: 189900/[05], loss: 11.3372, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-11:01:56 Iters: 190000/[05], loss: 11.3124, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220703-11:01:56 Saving checkpoint: 190000 -20220703-11:03:12 LFW Ave Accuracy: 98.9832 -20220703-11:04:28 AgeDB-30 Ave Accuracy: 93.5333 -20220703-11:05:55 CFP-FP Ave Accuracy: 86.8286 -20220703-11:05:55 Current Best Accuracy: LFW: 99.2333 in iters: 120000, AgeDB-30: 93.9500 in iters: 140000 and CFP-FP: 88.3000 in iters: 120000 -20220703-11:07:58 Iters: 190100/[05], loss: 11.0373, train_accuracy: 0.0234, time: 3.62 s/iter, learning rate: 0.05 -20220703-11:10:01 Iters: 
190200/[05], loss: 11.1050, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-11:12:05 Iters: 190300/[05], loss: 11.5006, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-11:14:08 Iters: 190400/[05], loss: 10.0798, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-11:16:11 Iters: 190500/[05], loss: 11.8772, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220703-11:18:15 Iters: 190600/[05], loss: 11.0624, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220703-11:20:18 Iters: 190700/[05], loss: 11.3778, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-11:22:21 Iters: 190800/[05], loss: 10.7428, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220703-11:24:25 Iters: 190900/[05], loss: 10.3850, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-11:26:28 Iters: 191000/[05], loss: 10.9641, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-11:28:31 Iters: 191100/[05], loss: 11.4269, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-11:30:34 Iters: 191200/[05], loss: 11.3351, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-11:32:38 Iters: 191300/[05], loss: 11.6741, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-11:34:41 Iters: 191400/[05], loss: 11.3100, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220703-11:36:44 Iters: 191500/[05], loss: 11.5573, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-11:38:48 Iters: 191600/[05], loss: 10.5696, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220703-11:40:51 Iters: 191700/[05], loss: 11.7624, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-11:42:54 Iters: 191800/[05], loss: 11.0072, train_accuracy: 0.0781, time: 1.23 s/iter, learning rate: 0.05 -20220703-11:44:58 Iters: 
191900/[05], loss: 11.3195, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-11:47:01 Iters: 192000/[05], loss: 10.4423, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-11:49:04 Iters: 192100/[05], loss: 10.9224, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-11:51:07 Iters: 192200/[05], loss: 11.3698, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-11:53:11 Iters: 192300/[05], loss: 11.2588, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-11:55:14 Iters: 192400/[05], loss: 11.7998, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-11:57:17 Iters: 192500/[05], loss: 10.7844, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-11:59:21 Iters: 192600/[05], loss: 11.6600, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220703-12:01:24 Iters: 192700/[05], loss: 10.7030, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-12:03:28 Iters: 192800/[05], loss: 11.2667, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220703-12:05:31 Iters: 192900/[05], loss: 11.0460, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-12:07:34 Iters: 193000/[05], loss: 11.4091, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-12:09:37 Iters: 193100/[05], loss: 11.3211, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-12:11:40 Iters: 193200/[05], loss: 10.8389, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-12:13:44 Iters: 193300/[05], loss: 10.5055, train_accuracy: 0.0547, time: 1.24 s/iter, learning rate: 0.05 -20220703-12:15:47 Iters: 193400/[05], loss: 10.3923, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-12:17:50 Iters: 193500/[05], loss: 11.3711, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-12:19:54 Iters: 
193600/[05], loss: 11.1879, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220703-12:21:57 Iters: 193700/[05], loss: 10.8578, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-12:24:00 Iters: 193800/[05], loss: 11.6116, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-12:26:04 Iters: 193900/[05], loss: 11.5868, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-12:28:07 Iters: 194000/[05], loss: 11.5703, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-12:30:10 Iters: 194100/[05], loss: 10.6251, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-12:32:14 Iters: 194200/[05], loss: 11.4137, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-12:34:17 Iters: 194300/[05], loss: 11.0539, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220703-12:36:20 Iters: 194400/[05], loss: 11.9160, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-12:38:24 Iters: 194500/[05], loss: 10.7493, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220703-12:40:27 Iters: 194600/[05], loss: 10.7653, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-12:42:30 Iters: 194700/[05], loss: 11.1271, train_accuracy: 0.0078, time: 1.24 s/iter, learning rate: 0.05 -20220703-12:44:34 Iters: 194800/[05], loss: 11.1881, train_accuracy: 0.0859, time: 1.23 s/iter, learning rate: 0.05 -20220703-12:46:37 Iters: 194900/[05], loss: 11.3898, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-12:48:40 Iters: 195000/[05], loss: 11.5684, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220703-12:50:44 Iters: 195100/[05], loss: 10.7675, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220703-12:52:47 Iters: 195200/[05], loss: 10.9662, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-12:54:50 Iters: 
195300/[05], loss: 11.4028, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220703-12:56:53 Iters: 195400/[05], loss: 11.2886, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-12:58:57 Iters: 195500/[05], loss: 10.9861, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-13:01:00 Iters: 195600/[05], loss: 11.7433, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220703-13:03:03 Iters: 195700/[05], loss: 11.2926, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-13:05:07 Iters: 195800/[05], loss: 10.6673, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220703-13:07:10 Iters: 195900/[05], loss: 11.8867, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-13:09:14 Iters: 196000/[05], loss: 10.5690, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220703-13:11:17 Iters: 196100/[05], loss: 11.1074, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-13:13:20 Iters: 196200/[05], loss: 11.1482, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-13:15:23 Iters: 196300/[05], loss: 10.9884, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-13:17:26 Iters: 196400/[05], loss: 11.4613, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-13:19:30 Iters: 196500/[05], loss: 11.5699, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-13:21:33 Iters: 196600/[05], loss: 11.9922, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-13:23:36 Iters: 196700/[05], loss: 10.8172, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-13:25:40 Iters: 196800/[05], loss: 10.9627, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220703-13:27:43 Iters: 196900/[05], loss: 11.3094, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-13:29:46 Iters: 
197000/[05], loss: 12.1259, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220703-13:31:49 Iters: 197100/[05], loss: 11.4646, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-13:33:53 Iters: 197200/[05], loss: 12.0267, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-13:35:56 Iters: 197300/[05], loss: 11.6519, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220703-13:37:59 Iters: 197400/[05], loss: 11.2833, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-13:40:03 Iters: 197500/[05], loss: 10.6823, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-13:42:06 Iters: 197600/[05], loss: 11.2018, train_accuracy: 0.0703, time: 1.23 s/iter, learning rate: 0.05 -20220703-13:44:09 Iters: 197700/[05], loss: 11.1676, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-13:46:12 Iters: 197800/[05], loss: 11.3549, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-13:48:15 Iters: 197900/[05], loss: 11.4528, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-13:50:18 Iters: 198000/[05], loss: 11.1823, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-13:52:22 Iters: 198100/[05], loss: 11.1964, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-13:54:25 Iters: 198200/[05], loss: 11.7464, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220703-13:56:28 Iters: 198300/[05], loss: 10.9442, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-13:58:31 Iters: 198400/[05], loss: 11.2504, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-14:00:34 Iters: 198500/[05], loss: 11.0741, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-14:02:38 Iters: 198600/[05], loss: 10.6397, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220703-14:04:41 Iters: 
198700/[05], loss: 11.3491, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-14:06:44 Iters: 198800/[05], loss: 10.1516, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-14:08:48 Iters: 198900/[05], loss: 11.5586, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220703-14:10:51 Iters: 199000/[05], loss: 12.3363, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-14:12:54 Iters: 199100/[05], loss: 10.6557, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-14:14:57 Iters: 199200/[05], loss: 11.2891, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-14:17:00 Iters: 199300/[05], loss: 11.3552, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-14:19:04 Iters: 199400/[05], loss: 10.3661, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-14:21:07 Iters: 199500/[05], loss: 11.3326, train_accuracy: 0.0703, time: 1.23 s/iter, learning rate: 0.05 -20220703-14:23:10 Iters: 199600/[05], loss: 10.6499, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-14:25:19 Iters: 199700/[05], loss: 10.7816, train_accuracy: 0.0312, time: 1.29 s/iter, learning rate: 0.05 -20220703-14:27:22 Iters: 199800/[05], loss: 11.1771, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-14:29:26 Iters: 199900/[05], loss: 10.8905, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-14:31:29 Iters: 200000/[05], loss: 11.2633, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-14:31:29 Saving checkpoint: 200000 -20220703-14:32:46 LFW Ave Accuracy: 99.0999 -20220703-14:34:01 AgeDB-30 Ave Accuracy: 93.4833 -20220703-14:35:28 CFP-FP Ave Accuracy: 86.4571 -20220703-14:35:28 Current Best Accuracy: LFW: 99.2333 in iters: 120000, AgeDB-30: 93.9500 in iters: 140000 and CFP-FP: 88.3000 in iters: 120000 -20220703-14:37:31 Iters: 200100/[05], loss: 10.9195, 
train_accuracy: 0.0547, time: 3.62 s/iter, learning rate: 0.05 -20220703-14:39:35 Iters: 200200/[05], loss: 11.2041, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-14:41:38 Iters: 200300/[05], loss: 10.7501, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-14:43:41 Iters: 200400/[05], loss: 11.8577, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-14:45:44 Iters: 200500/[05], loss: 11.0940, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-14:47:47 Iters: 200600/[05], loss: 10.9965, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-14:49:51 Iters: 200700/[05], loss: 11.4564, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-14:51:54 Iters: 200800/[05], loss: 10.8532, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-14:53:57 Iters: 200900/[05], loss: 11.3249, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-14:56:00 Iters: 201000/[05], loss: 10.9481, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-14:58:03 Iters: 201100/[05], loss: 12.0128, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-15:00:07 Iters: 201200/[05], loss: 10.2752, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-15:02:10 Iters: 201300/[05], loss: 10.4327, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-15:04:13 Iters: 201400/[05], loss: 11.6176, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-15:06:17 Iters: 201500/[05], loss: 11.1361, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220703-15:08:20 Iters: 201600/[05], loss: 11.6389, train_accuracy: 0.0625, time: 1.24 s/iter, learning rate: 0.05 -20220703-15:10:23 Iters: 201700/[05], loss: 10.7934, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-15:12:26 Iters: 201800/[05], loss: 11.4217, 
train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-15:14:30 Iters: 201900/[05], loss: 10.3285, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-15:16:33 Iters: 202000/[05], loss: 10.9674, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-15:18:37 Iters: 202100/[05], loss: 11.4713, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220703-15:20:40 Iters: 202200/[05], loss: 11.3092, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-15:22:43 Iters: 202300/[05], loss: 10.7739, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220703-15:24:47 Iters: 202400/[05], loss: 10.7355, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-15:26:50 Iters: 202500/[05], loss: 11.7081, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220703-15:28:53 Iters: 202600/[05], loss: 12.1495, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-15:30:56 Iters: 202700/[05], loss: 11.9298, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-15:33:00 Iters: 202800/[05], loss: 11.3825, train_accuracy: 0.0547, time: 1.24 s/iter, learning rate: 0.05 -20220703-15:35:03 Iters: 202900/[05], loss: 10.5319, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-15:37:07 Iters: 203000/[05], loss: 11.3859, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220703-15:39:10 Iters: 203100/[05], loss: 11.6414, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-15:41:13 Iters: 203200/[05], loss: 10.9006, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-15:43:17 Iters: 203300/[05], loss: 11.2327, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-15:45:20 Iters: 203400/[05], loss: 10.7373, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-15:47:23 Iters: 203500/[05], loss: 10.8699, 
train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220703-15:49:27 Iters: 203600/[05], loss: 11.5639, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-15:51:30 Iters: 203700/[05], loss: 11.3358, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-15:53:33 Iters: 203800/[05], loss: 11.1344, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-15:55:36 Iters: 203900/[05], loss: 11.7875, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-15:57:40 Iters: 204000/[05], loss: 11.0250, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-15:59:43 Iters: 204100/[05], loss: 11.3567, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-16:01:46 Iters: 204200/[05], loss: 10.8095, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-16:03:49 Iters: 204300/[05], loss: 10.7877, train_accuracy: 0.0703, time: 1.23 s/iter, learning rate: 0.05 -20220703-16:05:53 Iters: 204400/[05], loss: 10.5453, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-16:07:56 Iters: 204500/[05], loss: 10.6281, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220703-16:09:59 Iters: 204600/[05], loss: 11.0919, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-16:12:02 Iters: 204700/[05], loss: 10.5448, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-16:14:06 Iters: 204800/[05], loss: 10.8844, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220703-16:16:09 Iters: 204900/[05], loss: 11.6363, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-16:18:12 Iters: 205000/[05], loss: 11.6821, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-16:20:16 Iters: 205100/[05], loss: 11.0985, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-16:22:19 Iters: 205200/[05], loss: 10.7535, 
train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220703-16:24:22 Iters: 205300/[05], loss: 10.9217, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220703-16:26:26 Iters: 205400/[05], loss: 11.3411, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220703-16:28:29 Iters: 205500/[05], loss: 11.7172, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220703-16:30:33 Iters: 205600/[05], loss: 11.4088, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220703-16:32:36 Iters: 205700/[05], loss: 11.0922, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-16:34:39 Iters: 205800/[05], loss: 10.3020, train_accuracy: 0.0703, time: 1.24 s/iter, learning rate: 0.05 -20220703-16:36:43 Iters: 205900/[05], loss: 10.0807, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-16:38:46 Iters: 206000/[05], loss: 11.7189, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-16:40:49 Iters: 206100/[05], loss: 11.1274, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-16:42:53 Iters: 206200/[05], loss: 11.5711, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220703-16:44:56 Iters: 206300/[05], loss: 11.5190, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-16:47:00 Iters: 206400/[05], loss: 11.3468, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-16:49:03 Iters: 206500/[05], loss: 10.3036, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220703-16:51:06 Iters: 206600/[05], loss: 11.3162, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-16:53:10 Iters: 206700/[05], loss: 11.4297, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220703-16:55:13 Iters: 206800/[05], loss: 11.0934, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-16:57:16 Iters: 206900/[05], loss: 11.9399, 
train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-16:59:20 Iters: 207000/[05], loss: 10.8647, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-17:01:23 Iters: 207100/[05], loss: 10.9696, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-17:03:26 Iters: 207200/[05], loss: 11.4037, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220703-17:05:30 Iters: 207300/[05], loss: 10.5171, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-17:07:33 Iters: 207400/[05], loss: 11.2826, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-17:09:36 Iters: 207500/[05], loss: 11.7069, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-17:11:39 Iters: 207600/[05], loss: 11.4155, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-17:13:42 Iters: 207700/[05], loss: 11.0668, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220703-17:15:46 Iters: 207800/[05], loss: 11.4518, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-17:17:49 Iters: 207900/[05], loss: 10.7824, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220703-17:19:53 Iters: 208000/[05], loss: 10.8773, train_accuracy: 0.0547, time: 1.24 s/iter, learning rate: 0.05 -20220703-17:21:56 Iters: 208100/[05], loss: 11.4046, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-17:23:59 Iters: 208200/[05], loss: 11.6115, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-17:26:03 Iters: 208300/[05], loss: 12.1360, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220703-17:28:06 Iters: 208400/[05], loss: 11.8038, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-17:30:09 Iters: 208500/[05], loss: 11.3278, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-17:32:13 Iters: 208600/[05], loss: 11.3988, 
train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220703-17:34:16 Iters: 208700/[05], loss: 11.5811, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-17:36:19 Iters: 208800/[05], loss: 11.0635, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220703-17:38:23 Iters: 208900/[05], loss: 11.2300, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-17:40:26 Iters: 209000/[05], loss: 11.3214, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-17:42:29 Iters: 209100/[05], loss: 11.1580, train_accuracy: 0.0547, time: 1.24 s/iter, learning rate: 0.05 -20220703-17:44:32 Iters: 209200/[05], loss: 12.1372, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-17:46:36 Iters: 209300/[05], loss: 11.8813, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220703-17:48:39 Iters: 209400/[05], loss: 11.1325, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-17:50:42 Iters: 209500/[05], loss: 11.6469, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-17:52:46 Iters: 209600/[05], loss: 10.4139, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-17:54:49 Iters: 209700/[05], loss: 11.4217, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-17:56:52 Iters: 209800/[05], loss: 10.1514, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220703-17:58:56 Iters: 209900/[05], loss: 11.8374, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-18:00:59 Iters: 210000/[05], loss: 11.1232, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-18:00:59 Saving checkpoint: 210000 -20220703-18:02:16 LFW Ave Accuracy: 98.8666 -20220703-18:03:32 AgeDB-30 Ave Accuracy: 93.3333 -20220703-18:04:58 CFP-FP Ave Accuracy: 88.2286 -20220703-18:04:58 Current Best Accuracy: LFW: 99.2333 in iters: 120000, AgeDB-30: 93.9500 in iters: 140000 
and CFP-FP: 88.3000 in iters: 120000 -20220703-18:07:02 Iters: 210100/[05], loss: 11.1004, train_accuracy: 0.0391, time: 3.62 s/iter, learning rate: 0.05 -20220703-18:09:05 Iters: 210200/[05], loss: 11.2893, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-18:11:08 Iters: 210300/[05], loss: 10.9137, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-18:13:11 Iters: 210400/[05], loss: 11.4559, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-18:15:14 Iters: 210500/[05], loss: 11.1615, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-18:17:18 Iters: 210600/[05], loss: 10.7788, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-18:19:21 Iters: 210700/[05], loss: 11.6384, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-18:21:25 Iters: 210800/[05], loss: 11.4360, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220703-18:23:28 Iters: 210900/[05], loss: 11.1905, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-18:25:31 Iters: 211000/[05], loss: 11.1733, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-18:27:34 Iters: 211100/[05], loss: 10.6671, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-18:29:37 Iters: 211200/[05], loss: 10.3942, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-18:31:40 Iters: 211300/[05], loss: 10.4261, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-18:33:44 Iters: 211400/[05], loss: 10.5151, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-18:35:47 Iters: 211500/[05], loss: 11.9543, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-18:37:50 Iters: 211600/[05], loss: 11.3796, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-18:39:53 Iters: 211700/[05], loss: 11.9998, train_accuracy: 0.0469, time: 1.23 
s/iter, learning rate: 0.05 -20220703-18:41:57 Iters: 211800/[05], loss: 11.5892, train_accuracy: 0.0703, time: 1.23 s/iter, learning rate: 0.05 -20220703-18:44:00 Iters: 211900/[05], loss: 11.4080, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-18:46:03 Iters: 212000/[05], loss: 11.2093, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-18:48:06 Iters: 212100/[05], loss: 10.7486, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-18:50:09 Iters: 212200/[05], loss: 11.4182, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-18:52:12 Iters: 212300/[05], loss: 11.7930, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-18:54:15 Iters: 212400/[05], loss: 11.6946, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-18:56:18 Iters: 212500/[05], loss: 12.1675, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-18:58:22 Iters: 212600/[05], loss: 11.8968, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-19:00:25 Iters: 212700/[05], loss: 11.9826, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-19:02:28 Iters: 212800/[05], loss: 11.2233, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-19:04:31 Iters: 212900/[05], loss: 10.7687, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220703-19:06:35 Iters: 213000/[05], loss: 11.8354, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-19:08:38 Iters: 213100/[05], loss: 11.0426, train_accuracy: 0.0625, time: 1.24 s/iter, learning rate: 0.05 -20220703-19:10:41 Iters: 213200/[05], loss: 11.8673, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-19:12:45 Iters: 213300/[05], loss: 11.1884, train_accuracy: 0.0703, time: 1.24 s/iter, learning rate: 0.05 -20220703-19:14:48 Iters: 213400/[05], loss: 10.9420, train_accuracy: 0.0547, time: 1.23 s/iter, 
learning rate: 0.05 -20220703-19:16:51 Iters: 213500/[05], loss: 10.8772, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-19:18:55 Iters: 213600/[05], loss: 11.3039, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-19:20:58 Iters: 213700/[05], loss: 11.5344, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-19:23:01 Iters: 213800/[05], loss: 11.2251, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-19:25:04 Iters: 213900/[05], loss: 11.1674, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-19:27:08 Iters: 214000/[05], loss: 11.2032, train_accuracy: 0.0547, time: 1.24 s/iter, learning rate: 0.05 -20220703-19:29:11 Iters: 214100/[05], loss: 11.9791, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-19:31:14 Iters: 214200/[05], loss: 10.8914, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-19:33:18 Iters: 214300/[05], loss: 11.0110, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-19:35:21 Iters: 214400/[05], loss: 11.8097, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-19:37:24 Iters: 214500/[05], loss: 12.0681, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-19:39:27 Iters: 214600/[05], loss: 12.1712, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-19:41:31 Iters: 214700/[05], loss: 11.1042, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220703-19:43:34 Iters: 214800/[05], loss: 12.1378, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-19:45:37 Iters: 214900/[05], loss: 10.9086, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-19:47:40 Iters: 215000/[05], loss: 11.3201, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-19:49:43 Iters: 215100/[05], loss: 10.5678, train_accuracy: 0.0391, time: 1.23 s/iter, learning 
rate: 0.05 -20220703-19:51:47 Iters: 215200/[05], loss: 11.3607, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-19:53:50 Iters: 215300/[05], loss: 11.6048, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-19:55:53 Iters: 215400/[05], loss: 10.9498, train_accuracy: 0.0703, time: 1.23 s/iter, learning rate: 0.05 -20220703-19:57:56 Iters: 215500/[05], loss: 11.5446, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-19:59:59 Iters: 215600/[05], loss: 11.2230, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-20:02:03 Iters: 215700/[05], loss: 10.4573, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-20:04:06 Iters: 215800/[05], loss: 11.9269, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-20:06:09 Iters: 215900/[05], loss: 11.3079, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-20:08:12 Iters: 216000/[05], loss: 10.8979, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220703-20:10:15 Iters: 216100/[05], loss: 11.8273, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-20:12:18 Iters: 216200/[05], loss: 10.8062, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-20:14:21 Iters: 216300/[05], loss: 11.1587, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-20:16:24 Iters: 216400/[05], loss: 11.2507, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-20:18:27 Iters: 216500/[05], loss: 11.3963, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-20:20:31 Iters: 216600/[05], loss: 11.1651, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-20:22:34 Iters: 216700/[05], loss: 10.8747, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-20:24:37 Iters: 216800/[05], loss: 11.1013, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 
-20220703-20:26:40 Iters: 216900/[05], loss: 11.3460, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-20:28:44 Iters: 217000/[05], loss: 10.9042, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-20:30:47 Iters: 217100/[05], loss: 10.5252, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220703-20:32:50 Iters: 217200/[05], loss: 10.9833, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-20:34:53 Iters: 217300/[05], loss: 11.2354, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220703-20:36:57 Iters: 217400/[05], loss: 11.5098, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220703-20:39:00 Iters: 217500/[05], loss: 11.9942, train_accuracy: 0.0703, time: 1.23 s/iter, learning rate: 0.05 -20220703-20:41:03 Iters: 217600/[05], loss: 11.0777, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-20:43:07 Iters: 217700/[05], loss: 11.5688, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-20:45:10 Iters: 217800/[05], loss: 11.7289, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-20:47:13 Iters: 217900/[05], loss: 11.0308, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-20:49:17 Iters: 218000/[05], loss: 10.4288, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220703-20:51:20 Iters: 218100/[05], loss: 11.5045, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-20:53:23 Iters: 218200/[05], loss: 11.3786, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-20:55:27 Iters: 218300/[05], loss: 12.0132, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220703-20:57:30 Iters: 218400/[05], loss: 10.6689, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-20:59:33 Iters: 218500/[05], loss: 11.1779, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 
-20220703-21:01:36 Iters: 218600/[05], loss: 10.7219, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-21:03:40 Iters: 218700/[05], loss: 11.6872, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-21:05:43 Iters: 218800/[05], loss: 10.9855, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220703-21:07:46 Iters: 218900/[05], loss: 10.8500, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-21:09:49 Iters: 219000/[05], loss: 11.1684, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-21:11:53 Iters: 219100/[05], loss: 10.8362, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-21:13:56 Iters: 219200/[05], loss: 10.7347, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-21:15:59 Iters: 219300/[05], loss: 11.4426, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-21:18:03 Iters: 219400/[05], loss: 11.1443, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220703-21:20:06 Iters: 219500/[05], loss: 10.5520, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-21:22:10 Iters: 219600/[05], loss: 11.1860, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220703-21:24:13 Iters: 219700/[05], loss: 11.1044, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-21:26:17 Iters: 219800/[05], loss: 10.7031, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220703-21:28:20 Iters: 219900/[05], loss: 11.6157, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-21:30:23 Iters: 220000/[05], loss: 11.5024, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-21:30:23 Saving checkpoint: 220000 -20220703-21:31:42 LFW Ave Accuracy: 99.0332 -20220703-21:33:00 AgeDB-30 Ave Accuracy: 93.1667 -20220703-21:34:32 CFP-FP Ave Accuracy: 87.4143 -20220703-21:34:32 Current Best Accuracy: LFW: 99.2333 
in iters: 120000, AgeDB-30: 93.9500 in iters: 140000 and CFP-FP: 88.3000 in iters: 120000 -20220703-21:36:35 Iters: 220100/[05], loss: 11.4734, train_accuracy: 0.0469, time: 3.72 s/iter, learning rate: 0.05 -20220703-21:38:38 Iters: 220200/[05], loss: 10.7484, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-21:40:41 Iters: 220300/[05], loss: 11.4898, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-21:42:44 Iters: 220400/[05], loss: 11.2891, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-21:44:48 Iters: 220500/[05], loss: 12.4688, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220703-21:46:51 Iters: 220600/[05], loss: 10.4077, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-21:48:55 Iters: 220700/[05], loss: 11.5153, train_accuracy: 0.0703, time: 1.24 s/iter, learning rate: 0.05 -20220703-21:50:58 Iters: 220800/[05], loss: 10.5868, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-21:53:01 Iters: 220900/[05], loss: 11.9727, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-21:55:04 Iters: 221000/[05], loss: 10.6374, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-21:57:07 Iters: 221100/[05], loss: 10.6097, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220703-21:59:11 Iters: 221200/[05], loss: 10.8195, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-22:01:14 Iters: 221300/[05], loss: 11.7153, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-22:03:17 Iters: 221400/[05], loss: 12.0541, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220703-22:05:20 Iters: 221500/[05], loss: 10.6753, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220703-22:07:24 Iters: 221600/[05], loss: 11.1432, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-22:09:27 Iters: 
221700/[05], loss: 11.1046, train_accuracy: 0.0703, time: 1.23 s/iter, learning rate: 0.05 -20220703-22:11:30 Iters: 221800/[05], loss: 12.2037, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-22:13:34 Iters: 221900/[05], loss: 11.2844, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-22:15:37 Iters: 222000/[05], loss: 11.1684, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220703-22:17:40 Iters: 222100/[05], loss: 10.6828, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-22:19:44 Iters: 222200/[05], loss: 11.5448, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-22:21:47 Iters: 222300/[05], loss: 10.9092, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-22:23:50 Iters: 222400/[05], loss: 11.1390, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-22:25:53 Iters: 222500/[05], loss: 11.6150, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-22:27:56 Iters: 222600/[05], loss: 11.1095, train_accuracy: 0.0781, time: 1.23 s/iter, learning rate: 0.05 -20220703-22:30:00 Iters: 222700/[05], loss: 11.6952, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220703-22:32:03 Iters: 222800/[05], loss: 11.5579, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-22:34:06 Iters: 222900/[05], loss: 11.1540, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-22:36:10 Iters: 223000/[05], loss: 10.3572, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-22:38:13 Iters: 223100/[05], loss: 12.0069, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-22:40:16 Iters: 223200/[05], loss: 11.4901, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-22:42:19 Iters: 223300/[05], loss: 10.8080, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-22:44:23 Iters: 
223400/[05], loss: 11.3405, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-22:46:26 Iters: 223500/[05], loss: 12.1125, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220703-22:48:29 Iters: 223600/[05], loss: 11.5957, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-22:50:32 Iters: 223700/[05], loss: 11.7003, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-22:52:36 Iters: 223800/[05], loss: 11.6777, train_accuracy: 0.0078, time: 1.24 s/iter, learning rate: 0.05 -20220703-22:54:39 Iters: 223900/[05], loss: 11.2978, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-22:56:43 Iters: 224000/[05], loss: 11.4253, train_accuracy: 0.0078, time: 1.24 s/iter, learning rate: 0.05 -20220703-22:58:46 Iters: 224100/[05], loss: 11.5458, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-23:00:49 Iters: 224200/[05], loss: 11.2670, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-23:02:53 Iters: 224300/[05], loss: 10.8691, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-23:04:56 Iters: 224400/[05], loss: 11.0730, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-23:06:59 Iters: 224500/[05], loss: 11.6275, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-23:09:03 Iters: 224600/[05], loss: 10.8543, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-23:11:06 Iters: 224700/[05], loss: 11.6404, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220703-23:13:09 Iters: 224800/[05], loss: 11.5752, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-23:15:13 Iters: 224900/[05], loss: 11.4144, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220703-23:17:16 Iters: 225000/[05], loss: 11.8599, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220703-23:19:19 Iters: 
225100/[05], loss: 11.2964, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-23:21:23 Iters: 225200/[05], loss: 11.6483, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220703-23:23:26 Iters: 225300/[05], loss: 10.7227, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-23:25:30 Iters: 225400/[05], loss: 10.7505, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220703-23:27:33 Iters: 225500/[05], loss: 10.8718, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-23:29:36 Iters: 225600/[05], loss: 11.2048, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220703-23:31:40 Iters: 225700/[05], loss: 11.9139, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-23:33:43 Iters: 225800/[05], loss: 11.6165, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220703-23:35:46 Iters: 225900/[05], loss: 10.9900, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-23:37:50 Iters: 226000/[05], loss: 12.2365, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-23:39:53 Iters: 226100/[05], loss: 10.7996, train_accuracy: 0.0703, time: 1.23 s/iter, learning rate: 0.05 -20220703-23:41:56 Iters: 226200/[05], loss: 11.4652, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220703-23:44:00 Iters: 226300/[05], loss: 10.9007, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220703-23:46:03 Iters: 226400/[05], loss: 11.5350, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220703-23:48:06 Iters: 226500/[05], loss: 12.9009, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220703-23:50:10 Iters: 226600/[05], loss: 11.5807, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220703-23:52:13 Iters: 226700/[05], loss: 10.8063, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220703-23:54:16 Iters: 
226800/[05], loss: 10.9600, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220703-23:56:20 Iters: 226900/[05], loss: 12.0289, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220703-23:58:23 Iters: 227000/[05], loss: 11.3574, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-00:00:26 Iters: 227100/[05], loss: 11.5347, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-00:02:30 Iters: 227200/[05], loss: 10.7665, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220704-00:04:33 Iters: 227300/[05], loss: 11.0466, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-00:06:36 Iters: 227400/[05], loss: 11.3992, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-00:07:37 Train Epoch: 6/18 ... -20220704-00:08:39 Iters: 227500/[06], loss: 10.6257, train_accuracy: 0.0391, time: 0.62 s/iter, learning rate: 0.05 -20220704-00:10:42 Iters: 227600/[06], loss: 10.0750, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-00:12:45 Iters: 227700/[06], loss: 11.5047, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-00:14:49 Iters: 227800/[06], loss: 11.8157, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220704-00:16:52 Iters: 227900/[06], loss: 11.2325, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220704-00:18:55 Iters: 228000/[06], loss: 10.8120, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220704-00:20:59 Iters: 228100/[06], loss: 11.8947, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220704-00:23:02 Iters: 228200/[06], loss: 11.3191, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220704-00:25:06 Iters: 228300/[06], loss: 12.2192, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220704-00:27:09 Iters: 228400/[06], loss: 10.6926, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 
0.05 -20220704-00:29:12 Iters: 228500/[06], loss: 11.2860, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220704-00:31:16 Iters: 228600/[06], loss: 10.7998, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-00:33:19 Iters: 228700/[06], loss: 11.8966, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220704-00:35:22 Iters: 228800/[06], loss: 11.1468, train_accuracy: 0.0781, time: 1.23 s/iter, learning rate: 0.05 -20220704-00:37:25 Iters: 228900/[06], loss: 10.5659, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220704-00:39:29 Iters: 229000/[06], loss: 11.2375, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-00:41:32 Iters: 229100/[06], loss: 11.3678, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220704-00:43:35 Iters: 229200/[06], loss: 10.9030, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220704-00:45:39 Iters: 229300/[06], loss: 11.5554, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220704-00:47:42 Iters: 229400/[06], loss: 11.2805, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-00:49:45 Iters: 229500/[06], loss: 10.9039, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220704-00:51:48 Iters: 229600/[06], loss: 11.2910, train_accuracy: 0.0000, time: 1.23 s/iter, learning rate: 0.05 -20220704-00:53:51 Iters: 229700/[06], loss: 11.1651, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-00:55:55 Iters: 229800/[06], loss: 11.5384, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-00:57:58 Iters: 229900/[06], loss: 11.4123, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-01:00:01 Iters: 230000/[06], loss: 11.6060, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-01:00:01 Saving checkpoint: 230000 -20220704-01:01:18 LFW Ave Accuracy: 98.9499 -20220704-01:02:33 AgeDB-30 Ave 
Accuracy: 93.5333 -20220704-01:04:00 CFP-FP Ave Accuracy: 87.9143 -20220704-01:04:00 Current Best Accuracy: LFW: 99.2333 in iters: 120000, AgeDB-30: 93.9500 in iters: 140000 and CFP-FP: 88.3000 in iters: 120000 -20220704-01:06:02 Iters: 230100/[06], loss: 11.6311, train_accuracy: 0.0625, time: 3.61 s/iter, learning rate: 0.05 -20220704-01:08:06 Iters: 230200/[06], loss: 10.6531, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220704-01:10:09 Iters: 230300/[06], loss: 11.7127, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-01:12:12 Iters: 230400/[06], loss: 11.5849, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220704-01:14:16 Iters: 230500/[06], loss: 10.7046, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-01:16:19 Iters: 230600/[06], loss: 10.5418, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220704-01:18:22 Iters: 230700/[06], loss: 11.7330, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220704-01:20:25 Iters: 230800/[06], loss: 11.4882, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-01:22:28 Iters: 230900/[06], loss: 11.1363, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-01:24:32 Iters: 231000/[06], loss: 11.0049, train_accuracy: 0.0703, time: 1.23 s/iter, learning rate: 0.05 -20220704-01:26:35 Iters: 231100/[06], loss: 11.4553, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-01:28:39 Iters: 231200/[06], loss: 11.0169, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220704-01:30:42 Iters: 231300/[06], loss: 11.3268, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-01:32:45 Iters: 231400/[06], loss: 11.4976, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-01:34:49 Iters: 231500/[06], loss: 11.5014, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-01:36:52 Iters: 
231600/[06], loss: 11.6185, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220704-01:38:56 Iters: 231700/[06], loss: 11.7695, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-01:40:59 Iters: 231800/[06], loss: 10.9989, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220704-01:43:02 Iters: 231900/[06], loss: 11.5605, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220704-01:45:06 Iters: 232000/[06], loss: 10.4062, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220704-01:47:09 Iters: 232100/[06], loss: 10.9394, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-01:49:13 Iters: 232200/[06], loss: 11.1894, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220704-01:51:16 Iters: 232300/[06], loss: 11.4479, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-01:53:19 Iters: 232400/[06], loss: 10.8519, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-01:55:22 Iters: 232500/[06], loss: 10.5728, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-01:57:26 Iters: 232600/[06], loss: 11.3176, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220704-01:59:29 Iters: 232700/[06], loss: 11.7472, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220704-02:01:33 Iters: 232800/[06], loss: 10.8819, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220704-02:03:36 Iters: 232900/[06], loss: 11.7307, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-02:05:39 Iters: 233000/[06], loss: 11.0079, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-02:07:42 Iters: 233100/[06], loss: 11.2401, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220704-02:09:46 Iters: 233200/[06], loss: 10.4513, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-02:11:49 Iters: 
233300/[06], loss: 10.2288, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-02:13:52 Iters: 233400/[06], loss: 11.2771, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220704-02:15:55 Iters: 233500/[06], loss: 11.6963, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-02:17:58 Iters: 233600/[06], loss: 11.7520, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-02:20:01 Iters: 233700/[06], loss: 11.9936, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-02:22:04 Iters: 233800/[06], loss: 11.3456, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220704-02:24:08 Iters: 233900/[06], loss: 11.1200, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220704-02:26:11 Iters: 234000/[06], loss: 10.9643, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220704-02:28:14 Iters: 234100/[06], loss: 11.0091, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-02:30:17 Iters: 234200/[06], loss: 11.1726, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-02:32:20 Iters: 234300/[06], loss: 12.5291, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220704-02:34:23 Iters: 234400/[06], loss: 11.2582, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-02:36:26 Iters: 234500/[06], loss: 10.9678, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-02:38:29 Iters: 234600/[06], loss: 11.3184, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220704-02:40:33 Iters: 234700/[06], loss: 10.8490, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-02:42:36 Iters: 234800/[06], loss: 11.0133, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-02:44:40 Iters: 234900/[06], loss: 10.4819, train_accuracy: 0.0547, time: 1.24 s/iter, learning rate: 0.05 -20220704-02:46:43 Iters: 
235000/[06], loss: 11.0599, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-02:48:46 Iters: 235100/[06], loss: 10.7205, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220704-02:50:50 Iters: 235200/[06], loss: 10.7447, train_accuracy: 0.0625, time: 1.24 s/iter, learning rate: 0.05 -20220704-02:52:53 Iters: 235300/[06], loss: 10.6799, train_accuracy: 0.0703, time: 1.23 s/iter, learning rate: 0.05 -20220704-02:54:56 Iters: 235400/[06], loss: 11.7260, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-02:56:59 Iters: 235500/[06], loss: 11.0937, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-02:59:02 Iters: 235600/[06], loss: 11.3487, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220704-03:01:06 Iters: 235700/[06], loss: 12.0011, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220704-03:03:09 Iters: 235800/[06], loss: 11.6306, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-03:05:12 Iters: 235900/[06], loss: 12.6693, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-03:07:15 Iters: 236000/[06], loss: 11.0246, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-03:09:19 Iters: 236100/[06], loss: 11.1743, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-03:11:22 Iters: 236200/[06], loss: 11.0256, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220704-03:13:26 Iters: 236300/[06], loss: 11.3858, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220704-03:15:29 Iters: 236400/[06], loss: 10.6500, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-03:17:33 Iters: 236500/[06], loss: 11.3414, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220704-03:19:36 Iters: 236600/[06], loss: 11.5769, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-03:21:39 Iters: 
236700/[06], loss: 10.6966, train_accuracy: 0.0703, time: 1.24 s/iter, learning rate: 0.05 -20220704-03:23:43 Iters: 236800/[06], loss: 10.7748, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220704-03:25:46 Iters: 236900/[06], loss: 11.4759, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220704-03:27:49 Iters: 237000/[06], loss: 11.8187, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-03:29:53 Iters: 237100/[06], loss: 11.1876, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-03:31:56 Iters: 237200/[06], loss: 12.6234, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220704-03:34:00 Iters: 237300/[06], loss: 10.0832, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-03:36:03 Iters: 237400/[06], loss: 11.3935, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-03:38:06 Iters: 237500/[06], loss: 11.8831, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-03:40:09 Iters: 237600/[06], loss: 11.5456, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-03:42:13 Iters: 237700/[06], loss: 10.9257, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220704-03:44:16 Iters: 237800/[06], loss: 11.5126, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-03:46:20 Iters: 237900/[06], loss: 11.2600, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-03:48:23 Iters: 238000/[06], loss: 11.6070, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220704-03:50:26 Iters: 238100/[06], loss: 11.1159, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220704-03:52:30 Iters: 238200/[06], loss: 9.9475, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-03:54:33 Iters: 238300/[06], loss: 12.3845, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220704-03:56:36 Iters: 
238400/[06], loss: 12.1861, train_accuracy: 0.0000, time: 1.24 s/iter, learning rate: 0.05 -20220704-03:58:40 Iters: 238500/[06], loss: 11.4128, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220704-04:00:43 Iters: 238600/[06], loss: 11.0617, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-04:02:46 Iters: 238700/[06], loss: 11.2862, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220704-04:04:49 Iters: 238800/[06], loss: 11.5913, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-04:06:53 Iters: 238900/[06], loss: 10.7311, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-04:08:56 Iters: 239000/[06], loss: 10.3746, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-04:10:59 Iters: 239100/[06], loss: 12.2681, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-04:13:03 Iters: 239200/[06], loss: 11.0141, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-04:15:06 Iters: 239300/[06], loss: 11.2937, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220704-04:17:09 Iters: 239400/[06], loss: 11.5493, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-04:19:13 Iters: 239500/[06], loss: 10.9925, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-04:21:16 Iters: 239600/[06], loss: 11.2964, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220704-04:23:19 Iters: 239700/[06], loss: 11.3940, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-04:25:23 Iters: 239800/[06], loss: 11.6451, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-04:27:26 Iters: 239900/[06], loss: 12.1016, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-04:29:29 Iters: 240000/[06], loss: 10.9809, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-04:29:29 Saving 
checkpoint: 240000 -20220704-04:30:46 LFW Ave Accuracy: 99.0998 -20220704-04:32:01 AgeDB-30 Ave Accuracy: 93.3833 -20220704-04:33:27 CFP-FP Ave Accuracy: 87.1571 -20220704-04:33:27 Current Best Accuracy: LFW: 99.2333 in iters: 120000, AgeDB-30: 93.9500 in iters: 140000 and CFP-FP: 88.3000 in iters: 120000 -20220704-04:35:30 Iters: 240100/[06], loss: 11.8553, train_accuracy: 0.0234, time: 3.61 s/iter, learning rate: 0.05 -20220704-04:37:34 Iters: 240200/[06], loss: 12.4322, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220704-04:39:37 Iters: 240300/[06], loss: 12.5225, train_accuracy: 0.0234, time: 1.24 s/iter, learning rate: 0.05 -20220704-04:41:40 Iters: 240400/[06], loss: 11.2777, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-04:43:44 Iters: 240500/[06], loss: 11.4245, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-04:45:47 Iters: 240600/[06], loss: 11.1405, train_accuracy: 0.0156, time: 1.24 s/iter, learning rate: 0.05 -20220704-04:47:50 Iters: 240700/[06], loss: 10.7161, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220704-04:49:53 Iters: 240800/[06], loss: 10.9458, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220704-04:51:57 Iters: 240900/[06], loss: 10.9750, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220704-04:54:00 Iters: 241000/[06], loss: 11.9087, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-04:56:03 Iters: 241100/[06], loss: 10.8882, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-04:58:07 Iters: 241200/[06], loss: 10.2983, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-05:00:10 Iters: 241300/[06], loss: 11.4021, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-05:02:14 Iters: 241400/[06], loss: 12.0877, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220704-05:04:17 Iters: 241500/[06], loss: 11.0337, 
train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-05:06:20 Iters: 241600/[06], loss: 11.1252, train_accuracy: 0.0078, time: 1.24 s/iter, learning rate: 0.05 -20220704-05:08:23 Iters: 241700/[06], loss: 11.8072, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220704-05:10:26 Iters: 241800/[06], loss: 11.1406, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-05:12:30 Iters: 241900/[06], loss: 11.2321, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-05:14:33 Iters: 242000/[06], loss: 11.4025, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-05:16:36 Iters: 242100/[06], loss: 10.7240, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-05:18:39 Iters: 242200/[06], loss: 11.7133, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-05:20:42 Iters: 242300/[06], loss: 11.6550, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-05:22:45 Iters: 242400/[06], loss: 11.2729, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-05:24:49 Iters: 242500/[06], loss: 10.6847, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-05:26:52 Iters: 242600/[06], loss: 11.1944, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-05:28:55 Iters: 242700/[06], loss: 11.5539, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-05:30:58 Iters: 242800/[06], loss: 10.5228, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-05:33:02 Iters: 242900/[06], loss: 11.1785, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-05:35:05 Iters: 243000/[06], loss: 11.0622, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220704-05:37:08 Iters: 243100/[06], loss: 11.0752, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220704-05:39:11 Iters: 243200/[06], loss: 11.1202, 
train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-05:41:15 Iters: 243300/[06], loss: 11.8669, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220704-05:43:18 Iters: 243400/[06], loss: 11.5312, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-05:45:21 Iters: 243500/[06], loss: 12.0242, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-05:47:24 Iters: 243600/[06], loss: 11.4022, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220704-05:49:27 Iters: 243700/[06], loss: 10.9716, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-05:51:31 Iters: 243800/[06], loss: 12.1771, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-05:53:34 Iters: 243900/[06], loss: 10.9938, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-05:55:37 Iters: 244000/[06], loss: 11.6640, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-05:57:40 Iters: 244100/[06], loss: 10.2742, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-05:59:43 Iters: 244200/[06], loss: 10.6527, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-06:01:47 Iters: 244300/[06], loss: 10.9342, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220704-06:03:50 Iters: 244400/[06], loss: 10.8720, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-06:05:53 Iters: 244500/[06], loss: 11.1906, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-06:07:56 Iters: 244600/[06], loss: 10.9124, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-06:10:00 Iters: 244700/[06], loss: 11.4884, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220704-06:12:03 Iters: 244800/[06], loss: 10.7333, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-06:14:06 Iters: 244900/[06], loss: 10.7912, 
train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-06:16:09 Iters: 245000/[06], loss: 11.2835, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-06:18:12 Iters: 245100/[06], loss: 10.5105, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-06:20:16 Iters: 245200/[06], loss: 10.6988, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-06:22:19 Iters: 245300/[06], loss: 11.7308, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220704-06:24:22 Iters: 245400/[06], loss: 11.4900, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220704-06:26:25 Iters: 245500/[06], loss: 11.9426, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-06:28:28 Iters: 245600/[06], loss: 11.2861, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-06:30:32 Iters: 245700/[06], loss: 11.3018, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220704-06:32:35 Iters: 245800/[06], loss: 11.5874, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-06:34:38 Iters: 245900/[06], loss: 10.9984, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-06:36:41 Iters: 246000/[06], loss: 11.5283, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-06:38:44 Iters: 246100/[06], loss: 10.3913, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-06:40:48 Iters: 246200/[06], loss: 11.1277, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220704-06:42:51 Iters: 246300/[06], loss: 11.3241, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-06:44:54 Iters: 246400/[06], loss: 11.1294, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-06:46:57 Iters: 246500/[06], loss: 11.3981, train_accuracy: 0.0703, time: 1.23 s/iter, learning rate: 0.05 -20220704-06:49:00 Iters: 246600/[06], loss: 10.7724, 
train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-06:51:04 Iters: 246700/[06], loss: 11.5749, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220704-06:53:07 Iters: 246800/[06], loss: 11.4225, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-06:55:10 Iters: 246900/[06], loss: 11.1744, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-06:57:13 Iters: 247000/[06], loss: 10.8365, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220704-06:59:17 Iters: 247100/[06], loss: 11.0258, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-07:01:20 Iters: 247200/[06], loss: 11.2315, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220704-07:03:23 Iters: 247300/[06], loss: 11.7611, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-07:05:26 Iters: 247400/[06], loss: 11.2772, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220704-07:07:30 Iters: 247500/[06], loss: 11.2943, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220704-07:09:33 Iters: 247600/[06], loss: 11.1212, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-07:11:36 Iters: 247700/[06], loss: 11.0259, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-07:13:39 Iters: 247800/[06], loss: 11.0712, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-07:15:43 Iters: 247900/[06], loss: 11.9985, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220704-07:17:46 Iters: 248000/[06], loss: 10.9488, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-07:19:49 Iters: 248100/[06], loss: 11.3473, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-07:21:52 Iters: 248200/[06], loss: 11.1243, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-07:23:55 Iters: 248300/[06], loss: 11.3197, 
train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-07:25:59 Iters: 248400/[06], loss: 11.7095, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-07:28:02 Iters: 248500/[06], loss: 11.1746, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-07:30:05 Iters: 248600/[06], loss: 11.1667, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-07:32:08 Iters: 248700/[06], loss: 11.2125, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-07:34:12 Iters: 248800/[06], loss: 11.5322, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-07:36:15 Iters: 248900/[06], loss: 10.9965, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220704-07:38:18 Iters: 249000/[06], loss: 11.3152, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220704-07:40:21 Iters: 249100/[06], loss: 10.9324, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-07:42:25 Iters: 249200/[06], loss: 10.6801, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-07:44:28 Iters: 249300/[06], loss: 10.7475, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-07:46:31 Iters: 249400/[06], loss: 9.9853, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220704-07:48:34 Iters: 249500/[06], loss: 11.5325, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220704-07:50:38 Iters: 249600/[06], loss: 11.7364, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-07:52:41 Iters: 249700/[06], loss: 11.0821, train_accuracy: 0.0703, time: 1.23 s/iter, learning rate: 0.05 -20220704-07:54:44 Iters: 249800/[06], loss: 11.4169, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220704-07:56:47 Iters: 249900/[06], loss: 10.7325, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-07:58:51 Iters: 250000/[06], loss: 11.3018, 
train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-07:58:51 Saving checkpoint: 250000 -20220704-08:00:08 LFW Ave Accuracy: 99.1332 -20220704-08:01:24 AgeDB-30 Ave Accuracy: 93.7500 -20220704-08:02:52 CFP-FP Ave Accuracy: 88.5143 -20220704-08:02:52 Current Best Accuracy: LFW: 99.2333 in iters: 120000, AgeDB-30: 93.9500 in iters: 140000 and CFP-FP: 88.5143 in iters: 250000 -20220704-08:04:55 Iters: 250100/[06], loss: 11.3329, train_accuracy: 0.0078, time: 3.64 s/iter, learning rate: 0.05 -20220704-08:06:58 Iters: 250200/[06], loss: 10.8384, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-08:09:01 Iters: 250300/[06], loss: 11.0649, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220704-08:11:04 Iters: 250400/[06], loss: 11.7687, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-08:13:08 Iters: 250500/[06], loss: 10.8111, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-08:15:11 Iters: 250600/[06], loss: 10.6718, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220704-08:17:14 Iters: 250700/[06], loss: 10.8576, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-08:19:17 Iters: 250800/[06], loss: 11.4171, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220704-08:21:20 Iters: 250900/[06], loss: 10.4826, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220704-08:23:24 Iters: 251000/[06], loss: 12.1597, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220704-08:25:27 Iters: 251100/[06], loss: 10.9017, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-08:27:30 Iters: 251200/[06], loss: 11.3048, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-08:29:33 Iters: 251300/[06], loss: 12.1271, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-08:31:36 Iters: 251400/[06], loss: 12.0292, train_accuracy: 0.0156, 
time: 1.23 s/iter, learning rate: 0.05 -20220704-08:33:39 Iters: 251500/[06], loss: 10.8617, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-08:35:43 Iters: 251600/[06], loss: 11.3063, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-08:37:46 Iters: 251700/[06], loss: 10.3418, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-08:39:49 Iters: 251800/[06], loss: 11.5613, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220704-08:41:52 Iters: 251900/[06], loss: 11.0885, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-08:43:55 Iters: 252000/[06], loss: 10.7136, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-08:45:59 Iters: 252100/[06], loss: 10.4458, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-08:48:02 Iters: 252200/[06], loss: 11.8750, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-08:50:05 Iters: 252300/[06], loss: 11.2692, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-08:52:08 Iters: 252400/[06], loss: 11.1923, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-08:54:11 Iters: 252500/[06], loss: 12.2843, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-08:56:14 Iters: 252600/[06], loss: 11.0469, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-08:58:18 Iters: 252700/[06], loss: 11.5835, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-09:00:21 Iters: 252800/[06], loss: 10.1471, train_accuracy: 0.1016, time: 1.23 s/iter, learning rate: 0.05 -20220704-09:02:24 Iters: 252900/[06], loss: 10.6568, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220704-09:04:27 Iters: 253000/[06], loss: 11.3428, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-09:06:31 Iters: 253100/[06], loss: 10.2687, train_accuracy: 0.0469, time: 1.23 
s/iter, learning rate: 0.05 -20220704-09:08:34 Iters: 253200/[06], loss: 11.8712, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220704-09:10:37 Iters: 253300/[06], loss: 11.1890, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-09:12:40 Iters: 253400/[06], loss: 10.7062, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-09:14:44 Iters: 253500/[06], loss: 10.7761, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220704-09:16:47 Iters: 253600/[06], loss: 10.7524, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220704-09:18:50 Iters: 253700/[06], loss: 11.5351, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-09:20:53 Iters: 253800/[06], loss: 10.7987, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-09:22:57 Iters: 253900/[06], loss: 11.5344, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-09:25:00 Iters: 254000/[06], loss: 11.0199, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-09:27:03 Iters: 254100/[06], loss: 12.3401, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-09:29:06 Iters: 254200/[06], loss: 11.0951, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-09:31:10 Iters: 254300/[06], loss: 11.1516, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220704-09:33:13 Iters: 254400/[06], loss: 10.6650, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-09:35:16 Iters: 254500/[06], loss: 10.2173, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-09:37:19 Iters: 254600/[06], loss: 11.7329, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-09:39:22 Iters: 254700/[06], loss: 11.4493, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220704-09:41:26 Iters: 254800/[06], loss: 11.4924, train_accuracy: 0.0625, time: 1.23 s/iter, 
learning rate: 0.05 -20220704-09:43:29 Iters: 254900/[06], loss: 10.8373, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-09:45:32 Iters: 255000/[06], loss: 11.2099, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-09:47:35 Iters: 255100/[06], loss: 12.0800, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220704-09:49:38 Iters: 255200/[06], loss: 10.5339, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-09:51:41 Iters: 255300/[06], loss: 11.5815, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220704-09:53:45 Iters: 255400/[06], loss: 10.5906, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220704-09:55:48 Iters: 255500/[06], loss: 11.1893, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-09:57:51 Iters: 255600/[06], loss: 11.0726, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-09:59:54 Iters: 255700/[06], loss: 10.8622, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-10:01:58 Iters: 255800/[06], loss: 11.3154, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-10:04:01 Iters: 255900/[06], loss: 11.0156, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-10:06:04 Iters: 256000/[06], loss: 11.4775, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-10:08:07 Iters: 256100/[06], loss: 11.4436, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-10:10:10 Iters: 256200/[06], loss: 10.9711, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220704-10:12:14 Iters: 256300/[06], loss: 11.7816, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220704-10:14:17 Iters: 256400/[06], loss: 10.8802, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220704-10:16:20 Iters: 256500/[06], loss: 10.9457, train_accuracy: 0.0391, time: 1.23 s/iter, learning 
rate: 0.05 -20220704-10:18:23 Iters: 256600/[06], loss: 11.2132, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220704-10:20:26 Iters: 256700/[06], loss: 11.4232, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-10:22:30 Iters: 256800/[06], loss: 11.5609, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-10:24:33 Iters: 256900/[06], loss: 11.3210, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-10:26:36 Iters: 257000/[06], loss: 10.0294, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220704-10:28:39 Iters: 257100/[06], loss: 11.4733, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-10:30:42 Iters: 257200/[06], loss: 11.3332, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-10:32:46 Iters: 257300/[06], loss: 11.7534, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-10:34:49 Iters: 257400/[06], loss: 11.3240, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-10:36:52 Iters: 257500/[06], loss: 11.4507, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-10:38:55 Iters: 257600/[06], loss: 10.7050, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220704-10:40:59 Iters: 257700/[06], loss: 10.9997, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220704-10:43:02 Iters: 257800/[06], loss: 11.9377, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220704-10:45:05 Iters: 257900/[06], loss: 12.0387, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-10:47:08 Iters: 258000/[06], loss: 10.9002, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-10:49:12 Iters: 258100/[06], loss: 12.0330, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220704-10:51:15 Iters: 258200/[06], loss: 10.5710, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 
-20220704-10:53:18 Iters: 258300/[06], loss: 10.7763, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-10:55:21 Iters: 258400/[06], loss: 11.3815, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-10:57:25 Iters: 258500/[06], loss: 10.7935, train_accuracy: 0.0781, time: 1.23 s/iter, learning rate: 0.05 -20220704-10:59:28 Iters: 258600/[06], loss: 11.6685, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-11:01:31 Iters: 258700/[06], loss: 11.4018, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-11:03:34 Iters: 258800/[06], loss: 11.2774, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-11:05:38 Iters: 258900/[06], loss: 11.7579, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-11:07:41 Iters: 259000/[06], loss: 11.1640, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-11:09:44 Iters: 259100/[06], loss: 11.4661, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-11:11:47 Iters: 259200/[06], loss: 10.9543, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-11:13:50 Iters: 259300/[06], loss: 11.5567, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-11:15:54 Iters: 259400/[06], loss: 11.7702, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-11:17:57 Iters: 259500/[06], loss: 12.0188, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-11:20:00 Iters: 259600/[06], loss: 11.5457, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-11:22:03 Iters: 259700/[06], loss: 11.3029, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-11:24:06 Iters: 259800/[06], loss: 11.0827, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-11:26:10 Iters: 259900/[06], loss: 11.3697, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 
-20220704-11:28:13 Iters: 260000/[06], loss: 11.1637, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220704-11:28:13 Saving checkpoint: 260000 -20220704-11:29:30 LFW Ave Accuracy: 98.9332 -20220704-11:30:47 AgeDB-30 Ave Accuracy: 93.3167 -20220704-11:32:16 CFP-FP Ave Accuracy: 87.9429 -20220704-11:32:16 Current Best Accuracy: LFW: 99.2333 in iters: 120000, AgeDB-30: 93.9500 in iters: 140000 and CFP-FP: 88.5143 in iters: 250000 -20220704-11:34:18 Iters: 260100/[06], loss: 11.2327, train_accuracy: 0.0391, time: 3.66 s/iter, learning rate: 0.05 -20220704-11:36:21 Iters: 260200/[06], loss: 11.3054, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220704-11:38:25 Iters: 260300/[06], loss: 11.0411, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-11:40:28 Iters: 260400/[06], loss: 11.1530, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-11:42:31 Iters: 260500/[06], loss: 10.5581, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-11:44:34 Iters: 260600/[06], loss: 11.1173, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220704-11:46:37 Iters: 260700/[06], loss: 11.1801, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220704-11:48:40 Iters: 260800/[06], loss: 11.0849, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-11:50:43 Iters: 260900/[06], loss: 11.0174, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-11:52:47 Iters: 261000/[06], loss: 11.1730, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-11:54:50 Iters: 261100/[06], loss: 10.5402, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220704-11:56:53 Iters: 261200/[06], loss: 11.3199, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-11:58:56 Iters: 261300/[06], loss: 10.6253, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220704-12:00:59 Iters: 
261400/[06], loss: 11.6791, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-12:03:02 Iters: 261500/[06], loss: 11.6463, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-12:05:05 Iters: 261600/[06], loss: 11.0744, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220704-12:07:09 Iters: 261700/[06], loss: 10.9375, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-12:09:12 Iters: 261800/[06], loss: 11.3906, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-12:11:15 Iters: 261900/[06], loss: 10.6579, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-12:13:18 Iters: 262000/[06], loss: 10.4116, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220704-12:15:21 Iters: 262100/[06], loss: 10.4419, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220704-12:17:24 Iters: 262200/[06], loss: 11.3779, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-12:19:27 Iters: 262300/[06], loss: 10.7435, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220704-12:21:31 Iters: 262400/[06], loss: 11.2717, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-12:23:34 Iters: 262500/[06], loss: 11.7645, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220704-12:25:37 Iters: 262600/[06], loss: 11.1282, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220704-12:27:40 Iters: 262700/[06], loss: 10.3897, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-12:29:43 Iters: 262800/[06], loss: 11.2132, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220704-12:31:46 Iters: 262900/[06], loss: 11.2786, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220704-12:33:49 Iters: 263000/[06], loss: 11.0751, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-12:35:53 Iters: 
263100/[06], loss: 11.4222, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-12:37:56 Iters: 263200/[06], loss: 10.9203, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220704-12:39:59 Iters: 263300/[06], loss: 11.0148, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-12:42:02 Iters: 263400/[06], loss: 11.6050, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-12:44:05 Iters: 263500/[06], loss: 10.4799, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-12:46:09 Iters: 263600/[06], loss: 10.6094, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-12:48:12 Iters: 263700/[06], loss: 12.4073, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220704-12:50:15 Iters: 263800/[06], loss: 11.5543, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-12:52:18 Iters: 263900/[06], loss: 12.0351, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-12:54:22 Iters: 264000/[06], loss: 11.6765, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-12:56:25 Iters: 264100/[06], loss: 11.0170, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-12:58:28 Iters: 264200/[06], loss: 11.6523, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220704-13:00:31 Iters: 264300/[06], loss: 11.2294, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220704-13:02:34 Iters: 264400/[06], loss: 11.6202, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220704-13:04:38 Iters: 264500/[06], loss: 11.0255, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-13:06:41 Iters: 264600/[06], loss: 11.1744, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-13:08:44 Iters: 264700/[06], loss: 11.4946, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-13:10:47 Iters: 
264800/[06], loss: 11.5614, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-13:12:50 Iters: 264900/[06], loss: 10.8359, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220704-13:14:53 Iters: 265000/[06], loss: 12.1427, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-13:16:57 Iters: 265100/[06], loss: 11.0723, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-13:19:00 Iters: 265200/[06], loss: 11.5880, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-13:21:03 Iters: 265300/[06], loss: 10.5142, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-13:23:06 Iters: 265400/[06], loss: 11.6904, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220704-13:25:09 Iters: 265500/[06], loss: 10.8258, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220704-13:27:13 Iters: 265600/[06], loss: 11.5935, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-13:29:16 Iters: 265700/[06], loss: 11.1110, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220704-13:31:19 Iters: 265800/[06], loss: 10.2230, train_accuracy: 0.0938, time: 1.23 s/iter, learning rate: 0.05 -20220704-13:33:23 Iters: 265900/[06], loss: 11.3026, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220704-13:35:26 Iters: 266000/[06], loss: 12.2552, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-13:37:29 Iters: 266100/[06], loss: 11.5776, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220704-13:39:32 Iters: 266200/[06], loss: 10.7070, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-13:41:36 Iters: 266300/[06], loss: 10.8086, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220704-13:43:39 Iters: 266400/[06], loss: 10.5582, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220704-13:45:42 Iters: 
266500/[06], loss: 11.4324, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-13:47:45 Iters: 266600/[06], loss: 10.5606, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220704-13:49:48 Iters: 266700/[06], loss: 12.1010, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220704-13:51:51 Iters: 266800/[06], loss: 10.8777, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-13:53:54 Iters: 266900/[06], loss: 11.2954, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-13:55:58 Iters: 267000/[06], loss: 11.1313, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-13:58:01 Iters: 267100/[06], loss: 10.8937, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-14:00:04 Iters: 267200/[06], loss: 11.2246, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-14:02:07 Iters: 267300/[06], loss: 11.6090, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220704-14:04:10 Iters: 267400/[06], loss: 11.8248, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-14:06:13 Iters: 267500/[06], loss: 11.3916, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-14:08:17 Iters: 267600/[06], loss: 10.7955, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220704-14:10:20 Iters: 267700/[06], loss: 11.0611, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-14:12:23 Iters: 267800/[06], loss: 11.2001, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-14:14:26 Iters: 267900/[06], loss: 11.4045, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-14:16:29 Iters: 268000/[06], loss: 11.2396, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220704-14:18:33 Iters: 268100/[06], loss: 10.4137, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-14:20:36 Iters: 
268200/[06], loss: 12.4990, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220704-14:22:39 Iters: 268300/[06], loss: 11.1172, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-14:24:42 Iters: 268400/[06], loss: 12.1040, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220704-14:26:45 Iters: 268500/[06], loss: 10.9533, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-14:28:48 Iters: 268600/[06], loss: 11.4982, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-14:30:52 Iters: 268700/[06], loss: 11.5152, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-14:32:55 Iters: 268800/[06], loss: 12.1791, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-14:34:58 Iters: 268900/[06], loss: 11.6722, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-14:37:01 Iters: 269000/[06], loss: 10.7897, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-14:39:04 Iters: 269100/[06], loss: 11.1406, train_accuracy: 0.0703, time: 1.23 s/iter, learning rate: 0.05 -20220704-14:41:07 Iters: 269200/[06], loss: 11.6408, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-14:43:10 Iters: 269300/[06], loss: 10.3658, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-14:45:14 Iters: 269400/[06], loss: 11.8596, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220704-14:47:17 Iters: 269500/[06], loss: 11.4754, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-14:49:20 Iters: 269600/[06], loss: 10.8224, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220704-14:51:23 Iters: 269700/[06], loss: 11.0545, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-14:53:26 Iters: 269800/[06], loss: 11.0707, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220704-14:55:29 Iters: 
269900/[06], loss: 10.3405, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-14:57:33 Iters: 270000/[06], loss: 11.4128, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-14:57:33 Saving checkpoint: 270000 -20220704-14:58:51 LFW Ave Accuracy: 99.1666 -20220704-15:00:08 AgeDB-30 Ave Accuracy: 93.9833 -20220704-15:01:37 CFP-FP Ave Accuracy: 88.4000 -20220704-15:01:37 Current Best Accuracy: LFW: 99.2333 in iters: 120000, AgeDB-30: 93.9833 in iters: 270000 and CFP-FP: 88.5143 in iters: 250000 -20220704-15:03:40 Iters: 270100/[06], loss: 11.2566, train_accuracy: 0.0078, time: 3.67 s/iter, learning rate: 0.05 -20220704-15:05:43 Iters: 270200/[06], loss: 11.2872, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-15:07:46 Iters: 270300/[06], loss: 12.1902, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220704-15:09:49 Iters: 270400/[06], loss: 11.4535, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-15:11:52 Iters: 270500/[06], loss: 11.8669, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-15:13:55 Iters: 270600/[06], loss: 11.4371, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-15:15:58 Iters: 270700/[06], loss: 12.1720, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-15:18:01 Iters: 270800/[06], loss: 11.4423, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-15:20:04 Iters: 270900/[06], loss: 11.2150, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-15:22:08 Iters: 271000/[06], loss: 10.8281, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-15:24:11 Iters: 271100/[06], loss: 11.8874, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-15:26:14 Iters: 271200/[06], loss: 11.2062, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220704-15:28:17 Iters: 271300/[06], loss: 10.5458, 
train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220704-15:30:21 Iters: 271400/[06], loss: 11.2805, train_accuracy: 0.0391, time: 1.24 s/iter, learning rate: 0.05 -20220704-15:32:24 Iters: 271500/[06], loss: 10.6452, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220704-15:34:28 Iters: 271600/[06], loss: 12.5898, train_accuracy: 0.0078, time: 1.23 s/iter, learning rate: 0.05 -20220704-15:36:31 Iters: 271700/[06], loss: 11.0624, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-15:38:34 Iters: 271800/[06], loss: 11.6222, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-15:40:38 Iters: 271900/[06], loss: 10.1836, train_accuracy: 0.0781, time: 1.24 s/iter, learning rate: 0.05 -20220704-15:42:41 Iters: 272000/[06], loss: 10.7883, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.05 -20220704-15:44:44 Iters: 272100/[06], loss: 11.5306, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-15:46:47 Iters: 272200/[06], loss: 11.0936, train_accuracy: 0.0312, time: 1.23 s/iter, learning rate: 0.05 -20220704-15:48:51 Iters: 272300/[06], loss: 11.4153, train_accuracy: 0.0469, time: 1.24 s/iter, learning rate: 0.05 -20220704-15:50:54 Iters: 272400/[06], loss: 10.8651, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.05 -20220704-15:52:57 Iters: 272500/[06], loss: 11.4286, train_accuracy: 0.0156, time: 1.23 s/iter, learning rate: 0.05 -20220704-15:55:01 Iters: 272600/[06], loss: 11.2984, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.05 -20220704-15:57:04 Iters: 272700/[06], loss: 11.5191, train_accuracy: 0.0312, time: 1.24 s/iter, learning rate: 0.05 -20220704-15:59:07 Iters: 272800/[06], loss: 12.0750, train_accuracy: 0.0234, time: 1.23 s/iter, learning rate: 0.05 -20220704-16:01:10 Iters: 272900/[06], loss: 11.3029, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.05 -20220704-16:01:59 Train Epoch: 7/18 ... 
-20220704-16:03:13 Iters: 273000/[07], loss: 10.5349, train_accuracy: 0.0938, time: 0.74 s/iter, learning rate: 0.0005000000000000001 -20220704-16:05:16 Iters: 273100/[07], loss: 10.1506, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-16:07:19 Iters: 273200/[07], loss: 9.7129, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-16:09:22 Iters: 273300/[07], loss: 9.7664, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-16:11:25 Iters: 273400/[07], loss: 9.7451, train_accuracy: 0.0781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-16:13:28 Iters: 273500/[07], loss: 9.4928, train_accuracy: 0.0391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-16:15:31 Iters: 273600/[07], loss: 9.4053, train_accuracy: 0.0547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-16:17:34 Iters: 273700/[07], loss: 10.0651, train_accuracy: 0.0703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-16:19:37 Iters: 273800/[07], loss: 9.4433, train_accuracy: 0.1406, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-16:21:40 Iters: 273900/[07], loss: 9.9437, train_accuracy: 0.0703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-16:23:43 Iters: 274000/[07], loss: 9.1913, train_accuracy: 0.0625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-16:25:46 Iters: 274100/[07], loss: 8.9044, train_accuracy: 0.0859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-16:27:49 Iters: 274200/[07], loss: 8.5510, train_accuracy: 0.1406, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-16:29:52 Iters: 274300/[07], loss: 9.9815, train_accuracy: 0.0469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-16:31:55 Iters: 274400/[07], loss: 8.0824, train_accuracy: 0.1328, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220704-16:33:59 Iters: 274500/[07], loss: 8.4158, train_accuracy: 0.1094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-16:36:02 Iters: 274600/[07], loss: 8.3947, train_accuracy: 0.1328, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-16:38:05 Iters: 274700/[07], loss: 8.2179, train_accuracy: 0.1328, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-16:40:09 Iters: 274800/[07], loss: 8.6474, train_accuracy: 0.1172, time: 1.24 s/iter, learning rate: 0.0005000000000000001 -20220704-16:42:12 Iters: 274900/[07], loss: 8.1019, train_accuracy: 0.1719, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-16:44:15 Iters: 275000/[07], loss: 8.7569, train_accuracy: 0.1016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-16:46:18 Iters: 275100/[07], loss: 9.0291, train_accuracy: 0.1484, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-16:48:22 Iters: 275200/[07], loss: 7.9973, train_accuracy: 0.1172, time: 1.24 s/iter, learning rate: 0.0005000000000000001 -20220704-16:50:25 Iters: 275300/[07], loss: 7.5834, train_accuracy: 0.1953, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-16:52:28 Iters: 275400/[07], loss: 8.0861, train_accuracy: 0.1094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-16:54:31 Iters: 275500/[07], loss: 7.9512, train_accuracy: 0.1406, time: 1.24 s/iter, learning rate: 0.0005000000000000001 -20220704-16:56:34 Iters: 275600/[07], loss: 7.4492, train_accuracy: 0.1484, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-16:58:38 Iters: 275700/[07], loss: 7.7917, train_accuracy: 0.1250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-17:00:41 Iters: 275800/[07], loss: 8.2875, train_accuracy: 0.1641, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-17:02:44 Iters: 275900/[07], loss: 7.8552, train_accuracy: 0.1406, time: 1.24 s/iter, learning rate: 0.0005000000000000001 
-20220704-17:04:47 Iters: 276000/[07], loss: 7.6145, train_accuracy: 0.1328, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-17:06:51 Iters: 276100/[07], loss: 7.3199, train_accuracy: 0.1719, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-17:08:54 Iters: 276200/[07], loss: 8.6045, train_accuracy: 0.1250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-17:10:57 Iters: 276300/[07], loss: 7.9690, train_accuracy: 0.1406, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-17:13:00 Iters: 276400/[07], loss: 7.1673, train_accuracy: 0.1719, time: 1.24 s/iter, learning rate: 0.0005000000000000001 -20220704-17:15:04 Iters: 276500/[07], loss: 7.1216, train_accuracy: 0.1406, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-17:17:07 Iters: 276600/[07], loss: 7.2737, train_accuracy: 0.1875, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-17:19:10 Iters: 276700/[07], loss: 7.6641, train_accuracy: 0.1250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-17:21:13 Iters: 276800/[07], loss: 6.8040, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-17:23:17 Iters: 276900/[07], loss: 7.3441, train_accuracy: 0.2031, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-17:25:20 Iters: 277000/[07], loss: 8.2234, train_accuracy: 0.1406, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-17:27:23 Iters: 277100/[07], loss: 7.8567, train_accuracy: 0.1719, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-17:29:27 Iters: 277200/[07], loss: 6.8518, train_accuracy: 0.2109, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-17:31:30 Iters: 277300/[07], loss: 6.5514, train_accuracy: 0.2266, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-17:33:33 Iters: 277400/[07], loss: 6.8700, train_accuracy: 0.1875, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220704-17:35:37 Iters: 277500/[07], loss: 7.5759, train_accuracy: 0.1484, time: 1.24 s/iter, learning rate: 0.0005000000000000001 -20220704-17:37:40 Iters: 277600/[07], loss: 7.0618, train_accuracy: 0.1953, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-17:39:43 Iters: 277700/[07], loss: 6.8067, train_accuracy: 0.1562, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-17:41:46 Iters: 277800/[07], loss: 7.0910, train_accuracy: 0.1875, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-17:43:49 Iters: 277900/[07], loss: 7.2597, train_accuracy: 0.1875, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-17:45:52 Iters: 278000/[07], loss: 6.6515, train_accuracy: 0.2031, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-17:47:56 Iters: 278100/[07], loss: 6.7371, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-17:49:59 Iters: 278200/[07], loss: 7.0919, train_accuracy: 0.2266, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-17:52:02 Iters: 278300/[07], loss: 7.2923, train_accuracy: 0.1953, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-17:54:06 Iters: 278400/[07], loss: 6.7394, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-17:56:09 Iters: 278500/[07], loss: 7.0743, train_accuracy: 0.1953, time: 1.24 s/iter, learning rate: 0.0005000000000000001 -20220704-17:58:12 Iters: 278600/[07], loss: 6.5242, train_accuracy: 0.2266, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-18:00:16 Iters: 278700/[07], loss: 8.0552, train_accuracy: 0.1641, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-18:02:19 Iters: 278800/[07], loss: 6.5873, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-18:04:22 Iters: 278900/[07], loss: 6.7548, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220704-18:06:26 Iters: 279000/[07], loss: 6.5080, train_accuracy: 0.2188, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-18:08:29 Iters: 279100/[07], loss: 7.3077, train_accuracy: 0.1953, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-18:10:32 Iters: 279200/[07], loss: 6.8304, train_accuracy: 0.1953, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-18:12:36 Iters: 279300/[07], loss: 7.5819, train_accuracy: 0.1797, time: 1.24 s/iter, learning rate: 0.0005000000000000001 -20220704-18:14:39 Iters: 279400/[07], loss: 6.7911, train_accuracy: 0.1875, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-18:16:43 Iters: 279500/[07], loss: 6.3844, train_accuracy: 0.2500, time: 1.24 s/iter, learning rate: 0.0005000000000000001 -20220704-18:18:46 Iters: 279600/[07], loss: 6.3367, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-18:20:49 Iters: 279700/[07], loss: 7.7020, train_accuracy: 0.2109, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-18:22:52 Iters: 279800/[07], loss: 7.1129, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-18:24:55 Iters: 279900/[07], loss: 7.2561, train_accuracy: 0.2109, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-18:26:59 Iters: 280000/[07], loss: 6.9344, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-18:26:59 Saving checkpoint: 280000 -20220704-18:28:15 LFW Ave Accuracy: 99.4165 -20220704-18:29:30 AgeDB-30 Ave Accuracy: 95.6333 -20220704-18:30:58 CFP-FP Ave Accuracy: 91.9429 -20220704-18:30:58 Current Best Accuracy: LFW: 99.4165 in iters: 280000, AgeDB-30: 95.6333 in iters: 280000 and CFP-FP: 91.9429 in iters: 280000 -20220704-18:33:01 Iters: 280100/[07], loss: 6.7312, train_accuracy: 0.1875, time: 3.62 s/iter, learning rate: 0.0005000000000000001 -20220704-18:35:04 Iters: 280200/[07], loss: 6.9800, train_accuracy: 
0.2344, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-18:37:08 Iters: 280300/[07], loss: 6.2361, train_accuracy: 0.2109, time: 1.24 s/iter, learning rate: 0.0005000000000000001 -20220704-18:39:11 Iters: 280400/[07], loss: 6.1980, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-18:41:14 Iters: 280500/[07], loss: 6.1669, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-18:43:18 Iters: 280600/[07], loss: 7.2176, train_accuracy: 0.2031, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-18:45:21 Iters: 280700/[07], loss: 6.3589, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-18:47:24 Iters: 280800/[07], loss: 7.2025, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-18:49:27 Iters: 280900/[07], loss: 5.6165, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-18:51:31 Iters: 281000/[07], loss: 6.2845, train_accuracy: 0.2656, time: 1.24 s/iter, learning rate: 0.0005000000000000001 -20220704-18:53:34 Iters: 281100/[07], loss: 5.4726, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-18:55:38 Iters: 281200/[07], loss: 5.7618, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-18:57:41 Iters: 281300/[07], loss: 5.5972, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-18:59:44 Iters: 281400/[07], loss: 6.4657, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-19:01:47 Iters: 281500/[07], loss: 6.1775, train_accuracy: 0.2109, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-19:03:50 Iters: 281600/[07], loss: 6.3707, train_accuracy: 0.2109, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-19:05:53 Iters: 281700/[07], loss: 6.4730, train_accuracy: 
0.2344, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-19:07:57 Iters: 281800/[07], loss: 6.4723, train_accuracy: 0.2188, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-19:10:00 Iters: 281900/[07], loss: 6.2239, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-19:12:03 Iters: 282000/[07], loss: 6.6604, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-19:14:06 Iters: 282100/[07], loss: 6.0415, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-19:16:10 Iters: 282200/[07], loss: 6.7041, train_accuracy: 0.2188, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-19:18:13 Iters: 282300/[07], loss: 5.9640, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-19:20:16 Iters: 282400/[07], loss: 6.4076, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-19:22:19 Iters: 282500/[07], loss: 6.8115, train_accuracy: 0.2188, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-19:24:23 Iters: 282600/[07], loss: 5.0755, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-19:26:26 Iters: 282700/[07], loss: 5.6420, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-19:28:29 Iters: 282800/[07], loss: 5.6986, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-19:30:32 Iters: 282900/[07], loss: 6.0465, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-19:32:36 Iters: 283000/[07], loss: 5.6722, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-19:34:39 Iters: 283100/[07], loss: 5.9159, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-19:36:42 Iters: 283200/[07], loss: 5.8805, train_accuracy: 
0.2266, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-19:38:46 Iters: 283300/[07], loss: 5.3827, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-19:40:49 Iters: 283400/[07], loss: 6.9292, train_accuracy: 0.2031, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-19:42:52 Iters: 283500/[07], loss: 5.9433, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-19:44:55 Iters: 283600/[07], loss: 6.4808, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-19:46:59 Iters: 283700/[07], loss: 5.6572, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-19:49:02 Iters: 283800/[07], loss: 5.6217, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-19:51:05 Iters: 283900/[07], loss: 6.2648, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-19:53:08 Iters: 284000/[07], loss: 5.5069, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-19:55:12 Iters: 284100/[07], loss: 5.5006, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-19:57:15 Iters: 284200/[07], loss: 5.6336, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-19:59:18 Iters: 284300/[07], loss: 5.4188, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-20:01:21 Iters: 284400/[07], loss: 6.2855, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-20:03:24 Iters: 284500/[07], loss: 5.6107, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-20:05:28 Iters: 284600/[07], loss: 5.9283, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-20:07:31 Iters: 284700/[07], loss: 6.1865, train_accuracy: 
0.2578, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-20:09:34 Iters: 284800/[07], loss: 6.0519, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-20:11:37 Iters: 284900/[07], loss: 5.8359, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-20:13:40 Iters: 285000/[07], loss: 5.5213, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-20:15:44 Iters: 285100/[07], loss: 6.0864, train_accuracy: 0.2109, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-20:17:47 Iters: 285200/[07], loss: 5.4329, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-20:19:50 Iters: 285300/[07], loss: 6.3232, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-20:21:53 Iters: 285400/[07], loss: 5.7547, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-20:23:56 Iters: 285500/[07], loss: 5.4194, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-20:26:00 Iters: 285600/[07], loss: 5.4958, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-20:28:03 Iters: 285700/[07], loss: 6.0637, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-20:30:06 Iters: 285800/[07], loss: 5.7523, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-20:32:09 Iters: 285900/[07], loss: 6.3727, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-20:34:13 Iters: 286000/[07], loss: 5.8268, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-20:36:16 Iters: 286100/[07], loss: 5.9367, train_accuracy: 0.2188, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-20:38:19 Iters: 286200/[07], loss: 5.0472, train_accuracy: 
0.3125, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-20:40:22 Iters: 286300/[07], loss: 4.8049, train_accuracy: 0.3906, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-20:42:26 Iters: 286400/[07], loss: 6.8482, train_accuracy: 0.2188, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-20:44:29 Iters: 286500/[07], loss: 5.4679, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-20:46:32 Iters: 286600/[07], loss: 5.7856, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-20:48:35 Iters: 286700/[07], loss: 5.4913, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-20:50:38 Iters: 286800/[07], loss: 6.5894, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-20:52:41 Iters: 286900/[07], loss: 6.0459, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-20:54:45 Iters: 287000/[07], loss: 6.2403, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-20:56:48 Iters: 287100/[07], loss: 5.0177, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-20:58:51 Iters: 287200/[07], loss: 6.3451, train_accuracy: 0.2188, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-21:00:54 Iters: 287300/[07], loss: 6.3818, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-21:02:57 Iters: 287400/[07], loss: 5.8443, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-21:05:01 Iters: 287500/[07], loss: 5.7929, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-21:07:04 Iters: 287600/[07], loss: 5.4749, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-21:09:07 Iters: 287700/[07], loss: 6.2180, train_accuracy: 
0.2578, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-21:11:10 Iters: 287800/[07], loss: 5.9278, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-21:13:13 Iters: 287900/[07], loss: 6.1218, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-21:15:17 Iters: 288000/[07], loss: 5.3824, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-21:17:20 Iters: 288100/[07], loss: 5.2571, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-21:19:23 Iters: 288200/[07], loss: 5.0104, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-21:21:26 Iters: 288300/[07], loss: 4.3779, train_accuracy: 0.3828, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-21:23:30 Iters: 288400/[07], loss: 5.7850, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-21:25:33 Iters: 288500/[07], loss: 6.0429, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-21:27:36 Iters: 288600/[07], loss: 5.6084, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-21:29:40 Iters: 288700/[07], loss: 6.2063, train_accuracy: 0.2109, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-21:31:43 Iters: 288800/[07], loss: 5.1714, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-21:33:46 Iters: 288900/[07], loss: 5.7380, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-21:35:49 Iters: 289000/[07], loss: 5.3591, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-21:37:53 Iters: 289100/[07], loss: 5.5491, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-21:39:56 Iters: 289200/[07], loss: 5.6316, train_accuracy: 
0.2812, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-21:41:59 Iters: 289300/[07], loss: 5.6731, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-21:44:02 Iters: 289400/[07], loss: 5.2360, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-21:46:06 Iters: 289500/[07], loss: 5.9289, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-21:48:09 Iters: 289600/[07], loss: 5.8802, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-21:50:12 Iters: 289700/[07], loss: 5.2597, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-21:52:15 Iters: 289800/[07], loss: 4.9709, train_accuracy: 0.3750, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-21:54:18 Iters: 289900/[07], loss: 5.4980, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-21:56:22 Iters: 290000/[07], loss: 5.4401, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-21:56:22 Saving checkpoint: 290000 -20220704-21:57:38 LFW Ave Accuracy: 99.4833 -20220704-21:58:53 AgeDB-30 Ave Accuracy: 95.8000 -20220704-22:00:20 CFP-FP Ave Accuracy: 92.8143 -20220704-22:00:20 Current Best Accuracy: LFW: 99.4833 in iters: 290000, AgeDB-30: 95.8000 in iters: 290000 and CFP-FP: 92.8143 in iters: 290000 -20220704-22:02:23 Iters: 290100/[07], loss: 5.9006, train_accuracy: 0.2891, time: 3.61 s/iter, learning rate: 0.0005000000000000001 -20220704-22:04:26 Iters: 290200/[07], loss: 5.0880, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-22:06:29 Iters: 290300/[07], loss: 5.5520, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-22:08:33 Iters: 290400/[07], loss: 5.3634, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220704-22:10:36 Iters: 290500/[07], loss: 5.4190, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-22:12:39 Iters: 290600/[07], loss: 5.3288, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-22:14:42 Iters: 290700/[07], loss: 5.5081, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-22:16:46 Iters: 290800/[07], loss: 5.3034, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-22:18:49 Iters: 290900/[07], loss: 5.8261, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-22:20:52 Iters: 291000/[07], loss: 6.1737, train_accuracy: 0.1797, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-22:22:55 Iters: 291100/[07], loss: 5.6662, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-22:24:58 Iters: 291200/[07], loss: 6.1791, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-22:27:02 Iters: 291300/[07], loss: 5.5681, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-22:29:05 Iters: 291400/[07], loss: 5.9825, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-22:31:08 Iters: 291500/[07], loss: 5.6309, train_accuracy: 0.2266, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-22:33:11 Iters: 291600/[07], loss: 5.0448, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-22:35:14 Iters: 291700/[07], loss: 5.2502, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-22:37:18 Iters: 291800/[07], loss: 6.2180, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-22:39:21 Iters: 291900/[07], loss: 5.8852, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220704-22:41:24 Iters: 292000/[07], loss: 5.9619, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-22:43:27 Iters: 292100/[07], loss: 6.3892, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-22:45:31 Iters: 292200/[07], loss: 5.6102, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-22:47:34 Iters: 292300/[07], loss: 5.2456, train_accuracy: 0.3750, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-22:49:37 Iters: 292400/[07], loss: 5.8799, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-22:51:40 Iters: 292500/[07], loss: 4.8486, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-22:53:43 Iters: 292600/[07], loss: 5.4462, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-22:55:47 Iters: 292700/[07], loss: 4.9158, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-22:57:50 Iters: 292800/[07], loss: 6.2756, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-22:59:53 Iters: 292900/[07], loss: 5.1306, train_accuracy: 0.3828, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-23:01:56 Iters: 293000/[07], loss: 5.8809, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-23:03:59 Iters: 293100/[07], loss: 4.5307, train_accuracy: 0.3750, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-23:06:03 Iters: 293200/[07], loss: 4.9149, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-23:08:06 Iters: 293300/[07], loss: 5.0192, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-23:10:09 Iters: 293400/[07], loss: 5.2837, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220704-23:12:12 Iters: 293500/[07], loss: 5.8334, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-23:14:16 Iters: 293600/[07], loss: 5.0658, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-23:16:19 Iters: 293700/[07], loss: 5.2385, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-23:18:22 Iters: 293800/[07], loss: 5.6631, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-23:20:25 Iters: 293900/[07], loss: 5.3735, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-23:22:29 Iters: 294000/[07], loss: 5.1804, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-23:24:32 Iters: 294100/[07], loss: 5.6817, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-23:26:35 Iters: 294200/[07], loss: 5.7109, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-23:28:38 Iters: 294300/[07], loss: 5.5733, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-23:30:42 Iters: 294400/[07], loss: 4.8643, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-23:32:45 Iters: 294500/[07], loss: 5.9744, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-23:34:48 Iters: 294600/[07], loss: 5.3083, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-23:36:51 Iters: 294700/[07], loss: 5.2998, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-23:38:54 Iters: 294800/[07], loss: 5.9242, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-23:40:57 Iters: 294900/[07], loss: 5.3856, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220704-23:43:01 Iters: 295000/[07], loss: 5.1034, train_accuracy: 0.3828, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-23:45:04 Iters: 295100/[07], loss: 5.7383, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-23:47:07 Iters: 295200/[07], loss: 5.7420, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-23:49:10 Iters: 295300/[07], loss: 5.4763, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-23:51:13 Iters: 295400/[07], loss: 6.1533, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-23:53:17 Iters: 295500/[07], loss: 5.3643, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-23:55:20 Iters: 295600/[07], loss: 5.6871, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-23:57:23 Iters: 295700/[07], loss: 5.7769, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220704-23:59:26 Iters: 295800/[07], loss: 6.0454, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-00:01:29 Iters: 295900/[07], loss: 5.7672, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-00:03:33 Iters: 296000/[07], loss: 5.3767, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-00:05:36 Iters: 296100/[07], loss: 6.1940, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-00:07:39 Iters: 296200/[07], loss: 5.5422, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-00:09:42 Iters: 296300/[07], loss: 5.1451, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-00:11:45 Iters: 296400/[07], loss: 5.3577, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220705-00:13:48 Iters: 296500/[07], loss: 5.3620, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-00:15:52 Iters: 296600/[07], loss: 5.9447, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-00:17:55 Iters: 296700/[07], loss: 4.7793, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-00:19:58 Iters: 296800/[07], loss: 5.9823, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-00:22:01 Iters: 296900/[07], loss: 6.3507, train_accuracy: 0.2188, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-00:24:05 Iters: 297000/[07], loss: 5.8361, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-00:26:08 Iters: 297100/[07], loss: 6.0573, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-00:28:11 Iters: 297200/[07], loss: 6.1822, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-00:30:14 Iters: 297300/[07], loss: 5.7172, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-00:32:17 Iters: 297400/[07], loss: 5.2017, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-00:34:21 Iters: 297500/[07], loss: 6.4664, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-00:36:24 Iters: 297600/[07], loss: 6.1625, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-00:38:27 Iters: 297700/[07], loss: 5.2390, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-00:40:30 Iters: 297800/[07], loss: 5.8014, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-00:42:33 Iters: 297900/[07], loss: 5.6847, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220705-00:44:37 Iters: 298000/[07], loss: 6.2382, train_accuracy: 0.2266, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-00:46:40 Iters: 298100/[07], loss: 5.6479, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-00:48:43 Iters: 298200/[07], loss: 6.2950, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-00:50:46 Iters: 298300/[07], loss: 5.0603, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-00:52:50 Iters: 298400/[07], loss: 5.3100, train_accuracy: 0.3594, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-00:54:53 Iters: 298500/[07], loss: 5.3387, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-00:56:56 Iters: 298600/[07], loss: 5.1310, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-00:58:59 Iters: 298700/[07], loss: 5.5569, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-01:01:02 Iters: 298800/[07], loss: 5.8497, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-01:03:06 Iters: 298900/[07], loss: 5.4139, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-01:05:09 Iters: 299000/[07], loss: 5.1348, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-01:07:12 Iters: 299100/[07], loss: 5.3889, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-01:09:15 Iters: 299200/[07], loss: 5.0473, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-01:11:19 Iters: 299300/[07], loss: 6.1061, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-01:13:22 Iters: 299400/[07], loss: 5.0234, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220705-01:15:25 Iters: 299500/[07], loss: 5.1307, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-01:17:28 Iters: 299600/[07], loss: 5.4756, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-01:19:32 Iters: 299700/[07], loss: 5.4766, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-01:21:35 Iters: 299800/[07], loss: 5.2390, train_accuracy: 0.2266, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-01:23:38 Iters: 299900/[07], loss: 5.0194, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-01:25:41 Iters: 300000/[07], loss: 5.7247, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-01:25:41 Saving checkpoint: 300000 -20220705-01:26:58 LFW Ave Accuracy: 99.5333 -20220705-01:28:14 AgeDB-30 Ave Accuracy: 95.8833 -20220705-01:29:42 CFP-FP Ave Accuracy: 92.7429 -20220705-01:29:42 Current Best Accuracy: LFW: 99.5333 in iters: 300000, AgeDB-30: 95.8833 in iters: 300000 and CFP-FP: 92.8143 in iters: 290000 -20220705-01:31:45 Iters: 300100/[07], loss: 7.1182, train_accuracy: 0.2188, time: 3.63 s/iter, learning rate: 0.0005000000000000001 -20220705-01:33:48 Iters: 300200/[07], loss: 5.0560, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-01:35:51 Iters: 300300/[07], loss: 5.9284, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-01:37:54 Iters: 300400/[07], loss: 5.9469, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-01:39:57 Iters: 300500/[07], loss: 5.0810, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-01:42:01 Iters: 300600/[07], loss: 5.4829, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-01:44:04 Iters: 300700/[07], loss: 6.0172, train_accuracy: 
0.2422, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-01:46:07 Iters: 300800/[07], loss: 5.6112, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-01:48:10 Iters: 300900/[07], loss: 6.8803, train_accuracy: 0.2109, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-01:50:13 Iters: 301000/[07], loss: 5.6822, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-01:52:16 Iters: 301100/[07], loss: 5.4816, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-01:54:20 Iters: 301200/[07], loss: 5.0249, train_accuracy: 0.3750, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-01:56:23 Iters: 301300/[07], loss: 5.4253, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-01:58:26 Iters: 301400/[07], loss: 5.4765, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-02:00:29 Iters: 301500/[07], loss: 5.8296, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-02:02:32 Iters: 301600/[07], loss: 5.6025, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-02:04:35 Iters: 301700/[07], loss: 5.5229, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-02:06:39 Iters: 301800/[07], loss: 5.2985, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-02:08:42 Iters: 301900/[07], loss: 5.5061, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-02:10:45 Iters: 302000/[07], loss: 5.8405, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-02:12:48 Iters: 302100/[07], loss: 5.3088, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-02:14:51 Iters: 302200/[07], loss: 5.8930, train_accuracy: 
0.2500, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-02:16:55 Iters: 302300/[07], loss: 5.0589, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-02:18:58 Iters: 302400/[07], loss: 5.5293, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-02:21:01 Iters: 302500/[07], loss: 5.3306, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-02:23:04 Iters: 302600/[07], loss: 5.8046, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-02:25:08 Iters: 302700/[07], loss: 5.7874, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-02:27:11 Iters: 302800/[07], loss: 5.0882, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-02:29:14 Iters: 302900/[07], loss: 5.8273, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-02:31:17 Iters: 303000/[07], loss: 5.0883, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-02:33:20 Iters: 303100/[07], loss: 5.2088, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-02:35:24 Iters: 303200/[07], loss: 5.2997, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-02:37:27 Iters: 303300/[07], loss: 5.1955, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-02:39:30 Iters: 303400/[07], loss: 6.0569, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-02:41:34 Iters: 303500/[07], loss: 5.4920, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-02:43:37 Iters: 303600/[07], loss: 5.2898, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-02:45:40 Iters: 303700/[07], loss: 4.7115, train_accuracy: 
0.3438, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-02:47:43 Iters: 303800/[07], loss: 5.4758, train_accuracy: 0.3750, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-02:49:47 Iters: 303900/[07], loss: 5.2187, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-02:51:50 Iters: 304000/[07], loss: 5.8791, train_accuracy: 0.2031, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-02:53:53 Iters: 304100/[07], loss: 5.5558, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-02:55:56 Iters: 304200/[07], loss: 5.8910, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-02:58:00 Iters: 304300/[07], loss: 5.7212, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-03:00:03 Iters: 304400/[07], loss: 5.5289, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-03:02:06 Iters: 304500/[07], loss: 5.4983, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-03:04:09 Iters: 304600/[07], loss: 5.6978, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-03:06:13 Iters: 304700/[07], loss: 6.1016, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-03:08:16 Iters: 304800/[07], loss: 5.7348, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-03:10:19 Iters: 304900/[07], loss: 6.3761, train_accuracy: 0.1953, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-03:12:22 Iters: 305000/[07], loss: 4.6835, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-03:14:25 Iters: 305100/[07], loss: 5.2155, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-03:16:29 Iters: 305200/[07], loss: 5.1861, train_accuracy: 
0.3203, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-03:18:32 Iters: 305300/[07], loss: 5.9300, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-03:20:35 Iters: 305400/[07], loss: 5.4062, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-03:22:38 Iters: 305500/[07], loss: 6.4094, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-03:24:42 Iters: 305600/[07], loss: 5.4604, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-03:26:45 Iters: 305700/[07], loss: 5.1227, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-03:28:48 Iters: 305800/[07], loss: 5.3641, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-03:30:51 Iters: 305900/[07], loss: 5.6595, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-03:32:55 Iters: 306000/[07], loss: 5.1337, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-03:34:58 Iters: 306100/[07], loss: 5.5846, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-03:37:01 Iters: 306200/[07], loss: 5.9554, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-03:39:05 Iters: 306300/[07], loss: 5.1490, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-03:41:08 Iters: 306400/[07], loss: 5.5602, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-03:43:11 Iters: 306500/[07], loss: 6.0361, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-03:45:14 Iters: 306600/[07], loss: 5.4282, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-03:47:18 Iters: 306700/[07], loss: 5.3809, train_accuracy: 
0.2969, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-03:49:21 Iters: 306800/[07], loss: 5.3872, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-03:51:24 Iters: 306900/[07], loss: 5.3466, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-03:53:27 Iters: 307000/[07], loss: 5.6051, train_accuracy: 0.2266, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-03:55:30 Iters: 307100/[07], loss: 6.0762, train_accuracy: 0.2266, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-03:57:34 Iters: 307200/[07], loss: 5.5541, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-03:59:37 Iters: 307300/[07], loss: 5.7578, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-04:01:40 Iters: 307400/[07], loss: 5.2574, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-04:03:43 Iters: 307500/[07], loss: 5.3457, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-04:05:47 Iters: 307600/[07], loss: 5.3400, train_accuracy: 0.4062, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-04:07:50 Iters: 307700/[07], loss: 5.8904, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-04:09:53 Iters: 307800/[07], loss: 5.8215, train_accuracy: 0.2031, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-04:11:56 Iters: 307900/[07], loss: 7.1742, train_accuracy: 0.1797, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-04:14:00 Iters: 308000/[07], loss: 6.2160, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-04:16:03 Iters: 308100/[07], loss: 5.7122, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-04:18:06 Iters: 308200/[07], loss: 5.6638, train_accuracy: 
0.2656, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-04:20:09 Iters: 308300/[07], loss: 5.3087, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-04:22:12 Iters: 308400/[07], loss: 5.6563, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-04:24:15 Iters: 308500/[07], loss: 5.9798, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-04:26:19 Iters: 308600/[07], loss: 5.7484, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-04:28:22 Iters: 308700/[07], loss: 5.4732, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-04:30:25 Iters: 308800/[07], loss: 5.6772, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-04:32:28 Iters: 308900/[07], loss: 5.9471, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-04:34:31 Iters: 309000/[07], loss: 5.3966, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-04:36:35 Iters: 309100/[07], loss: 6.0276, train_accuracy: 0.2266, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-04:38:38 Iters: 309200/[07], loss: 5.9061, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-04:40:41 Iters: 309300/[07], loss: 5.5698, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-04:42:44 Iters: 309400/[07], loss: 5.7427, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-04:44:47 Iters: 309500/[07], loss: 6.5201, train_accuracy: 0.1875, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-04:46:50 Iters: 309600/[07], loss: 6.1076, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-04:48:53 Iters: 309700/[07], loss: 5.8960, train_accuracy: 
0.2578, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-04:50:57 Iters: 309800/[07], loss: 5.7113, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-04:53:00 Iters: 309900/[07], loss: 6.0760, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-04:55:03 Iters: 310000/[07], loss: 5.7462, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-04:55:03 Saving checkpoint: 310000 -20220705-04:56:22 LFW Ave Accuracy: 99.5166 -20220705-04:57:39 AgeDB-30 Ave Accuracy: 96.0000 -20220705-04:59:09 CFP-FP Ave Accuracy: 93.1143 -20220705-04:59:09 Current Best Accuracy: LFW: 99.5333 in iters: 300000, AgeDB-30: 96.0000 in iters: 310000 and CFP-FP: 93.1143 in iters: 310000 -20220705-05:01:12 Iters: 310100/[07], loss: 6.1017, train_accuracy: 0.2344, time: 3.69 s/iter, learning rate: 0.0005000000000000001 -20220705-05:03:15 Iters: 310200/[07], loss: 5.3410, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-05:05:18 Iters: 310300/[07], loss: 5.7530, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-05:07:21 Iters: 310400/[07], loss: 5.4611, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-05:09:25 Iters: 310500/[07], loss: 5.6100, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-05:11:28 Iters: 310600/[07], loss: 6.8097, train_accuracy: 0.1719, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-05:13:31 Iters: 310700/[07], loss: 5.6855, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-05:15:34 Iters: 310800/[07], loss: 6.3954, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-05:17:38 Iters: 310900/[07], loss: 5.9524, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220705-05:19:41 Iters: 311000/[07], loss: 5.8300, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-05:21:44 Iters: 311100/[07], loss: 4.8188, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-05:23:47 Iters: 311200/[07], loss: 5.7702, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-05:25:50 Iters: 311300/[07], loss: 5.6436, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-05:27:53 Iters: 311400/[07], loss: 5.3234, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-05:29:57 Iters: 311500/[07], loss: 5.5216, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-05:32:00 Iters: 311600/[07], loss: 4.9947, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-05:34:03 Iters: 311700/[07], loss: 6.2107, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-05:36:06 Iters: 311800/[07], loss: 5.9139, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-05:38:09 Iters: 311900/[07], loss: 5.2755, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-05:40:12 Iters: 312000/[07], loss: 5.7471, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-05:42:16 Iters: 312100/[07], loss: 5.5982, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-05:44:19 Iters: 312200/[07], loss: 5.9469, train_accuracy: 0.2266, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-05:46:22 Iters: 312300/[07], loss: 5.8586, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-05:48:25 Iters: 312400/[07], loss: 4.8015, train_accuracy: 0.3594, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220705-05:50:28 Iters: 312500/[07], loss: 5.0429, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-05:52:31 Iters: 312600/[07], loss: 6.3873, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-05:54:35 Iters: 312700/[07], loss: 5.6512, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-05:56:38 Iters: 312800/[07], loss: 5.7992, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-05:58:41 Iters: 312900/[07], loss: 5.9713, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-06:00:44 Iters: 313000/[07], loss: 5.7770, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-06:02:48 Iters: 313100/[07], loss: 5.4878, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-06:04:51 Iters: 313200/[07], loss: 5.9826, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-06:06:54 Iters: 313300/[07], loss: 5.3474, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-06:08:57 Iters: 313400/[07], loss: 4.9810, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-06:11:01 Iters: 313500/[07], loss: 6.1492, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-06:13:04 Iters: 313600/[07], loss: 6.0790, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-06:15:07 Iters: 313700/[07], loss: 6.0226, train_accuracy: 0.2109, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-06:17:10 Iters: 313800/[07], loss: 6.4248, train_accuracy: 0.2266, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-06:19:14 Iters: 313900/[07], loss: 6.3180, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220705-06:21:17 Iters: 314000/[07], loss: 5.8744, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-06:23:20 Iters: 314100/[07], loss: 6.6608, train_accuracy: 0.1406, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-06:25:23 Iters: 314200/[07], loss: 6.1421, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-06:27:27 Iters: 314300/[07], loss: 5.9868, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-06:29:30 Iters: 314400/[07], loss: 6.7756, train_accuracy: 0.2031, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-06:31:33 Iters: 314500/[07], loss: 5.5398, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-06:33:36 Iters: 314600/[07], loss: 5.1145, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-06:35:39 Iters: 314700/[07], loss: 5.6375, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-06:37:43 Iters: 314800/[07], loss: 5.1814, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-06:39:46 Iters: 314900/[07], loss: 5.4342, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-06:41:49 Iters: 315000/[07], loss: 5.1648, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-06:43:52 Iters: 315100/[07], loss: 5.9238, train_accuracy: 0.2188, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-06:45:55 Iters: 315200/[07], loss: 5.7384, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-06:47:59 Iters: 315300/[07], loss: 6.0444, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-06:50:02 Iters: 315400/[07], loss: 5.3613, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220705-06:52:05 Iters: 315500/[07], loss: 5.7858, train_accuracy: 0.2266, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-06:54:08 Iters: 315600/[07], loss: 5.6025, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-06:56:12 Iters: 315700/[07], loss: 5.6231, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-06:58:15 Iters: 315800/[07], loss: 5.5521, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-07:00:18 Iters: 315900/[07], loss: 5.2202, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-07:02:21 Iters: 316000/[07], loss: 5.1169, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-07:04:24 Iters: 316100/[07], loss: 6.4937, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-07:06:28 Iters: 316200/[07], loss: 4.7637, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-07:08:31 Iters: 316300/[07], loss: 5.2628, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-07:10:34 Iters: 316400/[07], loss: 6.3956, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-07:12:37 Iters: 316500/[07], loss: 5.8019, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-07:14:41 Iters: 316600/[07], loss: 5.7880, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-07:16:44 Iters: 316700/[07], loss: 5.1952, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-07:18:47 Iters: 316800/[07], loss: 5.7971, train_accuracy: 0.2578, time: 1.24 s/iter, learning rate: 0.0005000000000000001 -20220705-07:20:51 Iters: 316900/[07], loss: 5.2303, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220705-07:22:54 Iters: 317000/[07], loss: 5.9315, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-07:24:56 Iters: 317100/[07], loss: 5.5978, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-07:26:59 Iters: 317200/[07], loss: 6.6847, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-07:29:03 Iters: 317300/[07], loss: 5.2928, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-07:31:06 Iters: 317400/[07], loss: 5.6212, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-07:33:09 Iters: 317500/[07], loss: 5.8977, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-07:35:12 Iters: 317600/[07], loss: 5.7709, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-07:37:15 Iters: 317700/[07], loss: 5.7656, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-07:39:18 Iters: 317800/[07], loss: 5.3647, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-07:41:21 Iters: 317900/[07], loss: 5.5668, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-07:43:24 Iters: 318000/[07], loss: 5.4775, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-07:45:27 Iters: 318100/[07], loss: 6.5163, train_accuracy: 0.2109, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-07:47:30 Iters: 318200/[07], loss: 6.2435, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-07:49:34 Iters: 318300/[07], loss: 5.2163, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220705-07:51:37 Iters: 318400/[07], loss: 5.9920, train_accuracy: 0.2188, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220705-07:52:13 Train Epoch: 8/18 ... -20220705-07:53:46 Iters: 318500/[08], loss: 5.4484, train_accuracy: 0.3203, time: 0.93 s/iter, learning rate: 0.005000000000000001 -20220705-07:55:50 Iters: 318600/[08], loss: 5.6252, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-07:57:53 Iters: 318700/[08], loss: 5.0364, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-07:59:56 Iters: 318800/[08], loss: 5.4360, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-08:01:59 Iters: 318900/[08], loss: 5.7512, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-08:04:02 Iters: 319000/[08], loss: 4.7812, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-08:06:06 Iters: 319100/[08], loss: 5.9076, train_accuracy: 0.2188, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-08:08:09 Iters: 319200/[08], loss: 5.6344, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-08:10:12 Iters: 319300/[08], loss: 5.2008, train_accuracy: 0.3594, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-08:12:15 Iters: 319400/[08], loss: 4.9987, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-08:14:18 Iters: 319500/[08], loss: 5.7742, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-08:16:22 Iters: 319600/[08], loss: 5.2789, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-08:18:25 Iters: 319700/[08], loss: 5.5766, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-08:20:28 Iters: 319800/[08], loss: 6.2851, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-08:22:31 Iters: 319900/[08], loss: 5.2415, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220705-08:24:34 Iters: 320000/[08], loss: 4.7394, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-08:24:34 Saving checkpoint: 320000 -20220705-08:25:50 LFW Ave Accuracy: 99.5833 -20220705-08:27:05 AgeDB-30 Ave Accuracy: 95.9000 -20220705-08:28:31 CFP-FP Ave Accuracy: 93.3857 -20220705-08:28:31 Current Best Accuracy: LFW: 99.5833 in iters: 320000, AgeDB-30: 96.0000 in iters: 310000 and CFP-FP: 93.3857 in iters: 320000 -20220705-08:30:34 Iters: 320100/[08], loss: 5.3464, train_accuracy: 0.3125, time: 3.60 s/iter, learning rate: 0.005000000000000001 -20220705-08:32:37 Iters: 320200/[08], loss: 5.2213, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-08:34:40 Iters: 320300/[08], loss: 5.1047, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-08:36:43 Iters: 320400/[08], loss: 5.5429, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-08:38:47 Iters: 320500/[08], loss: 5.9825, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-08:40:50 Iters: 320600/[08], loss: 5.3378, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-08:42:53 Iters: 320700/[08], loss: 5.5687, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-08:44:56 Iters: 320800/[08], loss: 6.4439, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-08:46:59 Iters: 320900/[08], loss: 5.9520, train_accuracy: 0.2188, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-08:49:02 Iters: 321000/[08], loss: 5.2733, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-08:51:06 Iters: 321100/[08], loss: 5.6412, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-08:53:09 Iters: 321200/[08], loss: 5.3777, 
train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-08:55:12 Iters: 321300/[08], loss: 5.8311, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-08:57:15 Iters: 321400/[08], loss: 5.9187, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-08:59:19 Iters: 321500/[08], loss: 5.7158, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-09:01:22 Iters: 321600/[08], loss: 5.2666, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-09:03:25 Iters: 321700/[08], loss: 5.7184, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-09:05:28 Iters: 321800/[08], loss: 5.5794, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-09:07:32 Iters: 321900/[08], loss: 5.2613, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-09:09:35 Iters: 322000/[08], loss: 5.8015, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-09:11:38 Iters: 322100/[08], loss: 4.9840, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-09:13:41 Iters: 322200/[08], loss: 5.2062, train_accuracy: 0.3594, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-09:15:44 Iters: 322300/[08], loss: 5.8024, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-09:17:48 Iters: 322400/[08], loss: 6.8150, train_accuracy: 0.1719, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-09:19:51 Iters: 322500/[08], loss: 5.2692, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-09:21:54 Iters: 322600/[08], loss: 5.0826, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-09:23:57 Iters: 322700/[08], loss: 5.6661, train_accuracy: 
0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-09:26:00 Iters: 322800/[08], loss: 5.5977, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-09:28:04 Iters: 322900/[08], loss: 5.7341, train_accuracy: 0.2266, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-09:30:07 Iters: 323000/[08], loss: 5.0968, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-09:32:10 Iters: 323100/[08], loss: 5.2671, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-09:34:13 Iters: 323200/[08], loss: 5.7784, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-09:36:17 Iters: 323300/[08], loss: 5.0491, train_accuracy: 0.3828, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-09:38:20 Iters: 323400/[08], loss: 5.1876, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-09:40:23 Iters: 323500/[08], loss: 5.8798, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-09:42:26 Iters: 323600/[08], loss: 5.7277, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-09:44:29 Iters: 323700/[08], loss: 5.6967, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-09:46:33 Iters: 323800/[08], loss: 5.1031, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-09:48:36 Iters: 323900/[08], loss: 6.4460, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-09:50:39 Iters: 324000/[08], loss: 6.0268, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-09:52:42 Iters: 324100/[08], loss: 5.8253, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-09:54:46 Iters: 324200/[08], loss: 5.5511, train_accuracy: 0.2891, time: 1.23 
s/iter, learning rate: 0.005000000000000001 -20220705-09:56:49 Iters: 324300/[08], loss: 5.2030, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-09:58:52 Iters: 324400/[08], loss: 5.3031, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-10:00:55 Iters: 324500/[08], loss: 5.8333, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-10:02:58 Iters: 324600/[08], loss: 5.2001, train_accuracy: 0.3750, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-10:05:02 Iters: 324700/[08], loss: 5.6093, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-10:07:05 Iters: 324800/[08], loss: 6.1200, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-10:09:08 Iters: 324900/[08], loss: 5.9840, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-10:11:11 Iters: 325000/[08], loss: 6.0519, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-10:13:14 Iters: 325100/[08], loss: 5.3979, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-10:15:17 Iters: 325200/[08], loss: 5.4049, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-10:17:21 Iters: 325300/[08], loss: 5.3285, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-10:19:24 Iters: 325400/[08], loss: 6.1051, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-10:21:27 Iters: 325500/[08], loss: 5.3105, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-10:23:30 Iters: 325600/[08], loss: 5.8467, train_accuracy: 0.2109, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-10:25:33 Iters: 325700/[08], loss: 5.2133, train_accuracy: 0.3672, time: 1.23 s/iter, learning 
rate: 0.005000000000000001 -20220705-10:27:37 Iters: 325800/[08], loss: 5.2727, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-10:29:40 Iters: 325900/[08], loss: 5.8098, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-10:31:43 Iters: 326000/[08], loss: 5.7120, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-10:33:46 Iters: 326100/[08], loss: 6.2295, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-10:35:49 Iters: 326200/[08], loss: 6.0921, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-10:37:53 Iters: 326300/[08], loss: 5.4461, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-10:39:56 Iters: 326400/[08], loss: 5.8484, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-10:41:59 Iters: 326500/[08], loss: 6.1301, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-10:44:02 Iters: 326600/[08], loss: 5.4742, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-10:46:05 Iters: 326700/[08], loss: 5.4140, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-10:48:09 Iters: 326800/[08], loss: 5.1829, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-10:50:12 Iters: 326900/[08], loss: 5.6053, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-10:52:15 Iters: 327000/[08], loss: 6.0430, train_accuracy: 0.2266, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-10:54:18 Iters: 327100/[08], loss: 5.7673, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-10:56:21 Iters: 327200/[08], loss: 5.8179, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220705-10:58:24 Iters: 327300/[08], loss: 6.2303, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-11:00:27 Iters: 327400/[08], loss: 5.5887, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-11:02:30 Iters: 327500/[08], loss: 6.1290, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-11:04:33 Iters: 327600/[08], loss: 6.5664, train_accuracy: 0.1797, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-11:06:36 Iters: 327700/[08], loss: 4.9715, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-11:08:40 Iters: 327800/[08], loss: 5.9117, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-11:10:43 Iters: 327900/[08], loss: 5.9484, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-11:12:46 Iters: 328000/[08], loss: 6.0461, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-11:14:49 Iters: 328100/[08], loss: 6.2819, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-11:16:52 Iters: 328200/[08], loss: 6.3181, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-11:18:55 Iters: 328300/[08], loss: 5.5032, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-11:20:58 Iters: 328400/[08], loss: 5.6446, train_accuracy: 0.2266, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-11:23:02 Iters: 328500/[08], loss: 6.4006, train_accuracy: 0.2188, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-11:25:05 Iters: 328600/[08], loss: 5.6779, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-11:27:08 Iters: 328700/[08], loss: 5.9147, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220705-11:29:11 Iters: 328800/[08], loss: 5.2788, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-11:31:14 Iters: 328900/[08], loss: 5.5495, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-11:33:18 Iters: 329000/[08], loss: 6.1908, train_accuracy: 0.2188, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-11:35:21 Iters: 329100/[08], loss: 5.3364, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-11:37:24 Iters: 329200/[08], loss: 6.4534, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-11:39:27 Iters: 329300/[08], loss: 5.7542, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-11:41:30 Iters: 329400/[08], loss: 5.5077, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-11:43:34 Iters: 329500/[08], loss: 5.7414, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-11:45:37 Iters: 329600/[08], loss: 6.2639, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-11:47:40 Iters: 329700/[08], loss: 5.4578, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-11:49:43 Iters: 329800/[08], loss: 6.2189, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-11:51:46 Iters: 329900/[08], loss: 5.5581, train_accuracy: 0.2266, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-11:53:50 Iters: 330000/[08], loss: 5.3562, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-11:53:50 Saving checkpoint: 330000 -20220705-11:55:06 LFW Ave Accuracy: 99.4999 -20220705-11:56:22 AgeDB-30 Ave Accuracy: 96.2667 -20220705-11:57:49 CFP-FP Ave Accuracy: 93.1286 -20220705-11:57:49 Current Best Accuracy: LFW: 99.5833 in iters: 320000, 
AgeDB-30: 96.2667 in iters: 330000 and CFP-FP: 93.3857 in iters: 320000 -20220705-11:59:52 Iters: 330100/[08], loss: 5.4973, train_accuracy: 0.3281, time: 3.62 s/iter, learning rate: 0.005000000000000001 -20220705-12:01:55 Iters: 330200/[08], loss: 5.4531, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-12:03:58 Iters: 330300/[08], loss: 5.0804, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-12:06:01 Iters: 330400/[08], loss: 5.8615, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-12:08:04 Iters: 330500/[08], loss: 6.5983, train_accuracy: 0.2188, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-12:10:08 Iters: 330600/[08], loss: 5.6433, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-12:12:11 Iters: 330700/[08], loss: 6.6048, train_accuracy: 0.2109, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-12:14:14 Iters: 330800/[08], loss: 6.4952, train_accuracy: 0.1875, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-12:16:17 Iters: 330900/[08], loss: 5.6917, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-12:18:21 Iters: 331000/[08], loss: 6.1720, train_accuracy: 0.2031, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-12:20:24 Iters: 331100/[08], loss: 5.8599, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-12:22:27 Iters: 331200/[08], loss: 5.4565, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-12:24:30 Iters: 331300/[08], loss: 5.9031, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-12:26:33 Iters: 331400/[08], loss: 5.8793, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-12:28:37 Iters: 331500/[08], loss: 5.7464, train_accuracy: 0.2812, 
time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-12:30:40 Iters: 331600/[08], loss: 5.5150, train_accuracy: 0.3594, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-12:32:43 Iters: 331700/[08], loss: 5.9503, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-12:34:46 Iters: 331800/[08], loss: 5.5289, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-12:36:49 Iters: 331900/[08], loss: 5.7708, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-12:38:52 Iters: 332000/[08], loss: 5.1521, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-12:40:56 Iters: 332100/[08], loss: 5.6439, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-12:42:59 Iters: 332200/[08], loss: 4.9127, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-12:45:02 Iters: 332300/[08], loss: 5.7576, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-12:47:05 Iters: 332400/[08], loss: 5.9901, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-12:49:08 Iters: 332500/[08], loss: 5.4316, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-12:51:12 Iters: 332600/[08], loss: 5.6933, train_accuracy: 0.2109, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-12:53:15 Iters: 332700/[08], loss: 5.6260, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-12:55:18 Iters: 332800/[08], loss: 6.2200, train_accuracy: 0.1953, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-12:57:21 Iters: 332900/[08], loss: 6.1743, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-12:59:24 Iters: 333000/[08], loss: 6.4542, train_accuracy: 0.2031, time: 1.23 s/iter, 
learning rate: 0.005000000000000001 -20220705-13:01:27 Iters: 333100/[08], loss: 6.1439, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-13:03:31 Iters: 333200/[08], loss: 5.5915, train_accuracy: 0.2266, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-13:05:34 Iters: 333300/[08], loss: 6.0092, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-13:07:37 Iters: 333400/[08], loss: 5.1289, train_accuracy: 0.3828, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-13:09:40 Iters: 333500/[08], loss: 5.5101, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-13:11:43 Iters: 333600/[08], loss: 5.5969, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-13:13:47 Iters: 333700/[08], loss: 5.2199, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-13:15:50 Iters: 333800/[08], loss: 6.0485, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-13:17:53 Iters: 333900/[08], loss: 5.3883, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-13:19:56 Iters: 334000/[08], loss: 5.6167, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-13:21:59 Iters: 334100/[08], loss: 5.9121, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-13:24:03 Iters: 334200/[08], loss: 5.6763, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-13:26:06 Iters: 334300/[08], loss: 5.8487, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-13:28:09 Iters: 334400/[08], loss: 5.9090, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-13:30:12 Iters: 334500/[08], loss: 5.5973, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220705-13:32:16 Iters: 334600/[08], loss: 5.6933, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-13:34:19 Iters: 334700/[08], loss: 5.9313, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-13:36:22 Iters: 334800/[08], loss: 6.0300, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-13:38:25 Iters: 334900/[08], loss: 6.0206, train_accuracy: 0.1719, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-13:40:29 Iters: 335000/[08], loss: 6.0991, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-13:42:32 Iters: 335100/[08], loss: 6.2882, train_accuracy: 0.2188, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-13:44:35 Iters: 335200/[08], loss: 5.8262, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-13:46:38 Iters: 335300/[08], loss: 5.8368, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-13:48:41 Iters: 335400/[08], loss: 6.4885, train_accuracy: 0.2109, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-13:50:45 Iters: 335500/[08], loss: 5.2968, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-13:52:48 Iters: 335600/[08], loss: 5.4155, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-13:54:51 Iters: 335700/[08], loss: 5.8658, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-13:56:54 Iters: 335800/[08], loss: 5.3011, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-13:58:57 Iters: 335900/[08], loss: 5.4635, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-14:01:01 Iters: 336000/[08], loss: 6.2850, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220705-14:03:04 Iters: 336100/[08], loss: 5.8216, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-14:05:07 Iters: 336200/[08], loss: 5.8295, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-14:07:10 Iters: 336300/[08], loss: 5.8099, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-14:09:13 Iters: 336400/[08], loss: 5.1743, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-14:11:17 Iters: 336500/[08], loss: 5.2287, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-14:13:20 Iters: 336600/[08], loss: 6.0322, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-14:15:23 Iters: 336700/[08], loss: 4.7795, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-14:17:26 Iters: 336800/[08], loss: 5.7362, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-14:19:30 Iters: 336900/[08], loss: 5.2314, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-14:21:33 Iters: 337000/[08], loss: 5.3424, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-14:23:36 Iters: 337100/[08], loss: 6.0244, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-14:25:39 Iters: 337200/[08], loss: 5.4916, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-14:27:42 Iters: 337300/[08], loss: 5.8241, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-14:29:45 Iters: 337400/[08], loss: 5.9748, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-14:31:49 Iters: 337500/[08], loss: 6.3299, train_accuracy: 0.1875, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220705-14:33:52 Iters: 337600/[08], loss: 5.5896, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-14:35:55 Iters: 337700/[08], loss: 5.3090, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-14:37:58 Iters: 337800/[08], loss: 5.3847, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-14:40:01 Iters: 337900/[08], loss: 5.5519, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-14:42:05 Iters: 338000/[08], loss: 5.7121, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-14:44:08 Iters: 338100/[08], loss: 5.6488, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-14:46:11 Iters: 338200/[08], loss: 5.0960, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-14:48:14 Iters: 338300/[08], loss: 5.2617, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-14:50:17 Iters: 338400/[08], loss: 5.7081, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-14:52:20 Iters: 338500/[08], loss: 5.5287, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-14:54:24 Iters: 338600/[08], loss: 5.9960, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-14:56:27 Iters: 338700/[08], loss: 6.1680, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-14:58:30 Iters: 338800/[08], loss: 5.5879, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-15:00:33 Iters: 338900/[08], loss: 6.5621, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-15:02:36 Iters: 339000/[08], loss: 5.8214, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220705-15:04:40 Iters: 339100/[08], loss: 5.9742, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-15:06:43 Iters: 339200/[08], loss: 5.8708, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-15:08:46 Iters: 339300/[08], loss: 5.6730, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-15:10:49 Iters: 339400/[08], loss: 5.9025, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-15:12:53 Iters: 339500/[08], loss: 5.5814, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-15:14:56 Iters: 339600/[08], loss: 5.6483, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-15:16:59 Iters: 339700/[08], loss: 5.5739, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-15:19:02 Iters: 339800/[08], loss: 6.7208, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-15:21:06 Iters: 339900/[08], loss: 5.6405, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-15:23:09 Iters: 340000/[08], loss: 5.4617, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-15:23:09 Saving checkpoint: 340000 -20220705-15:24:26 LFW Ave Accuracy: 99.5500 -20220705-15:25:44 AgeDB-30 Ave Accuracy: 95.9167 -20220705-15:27:13 CFP-FP Ave Accuracy: 92.8714 -20220705-15:27:13 Current Best Accuracy: LFW: 99.5833 in iters: 320000, AgeDB-30: 96.2667 in iters: 330000 and CFP-FP: 93.3857 in iters: 320000 -20220705-15:29:15 Iters: 340100/[08], loss: 5.2485, train_accuracy: 0.2734, time: 3.66 s/iter, learning rate: 0.005000000000000001 -20220705-15:31:18 Iters: 340200/[08], loss: 6.1555, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-15:33:21 Iters: 340300/[08], loss: 6.0967, 
train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-15:35:24 Iters: 340400/[08], loss: 5.5594, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-15:37:28 Iters: 340500/[08], loss: 4.8834, train_accuracy: 0.3594, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-15:39:31 Iters: 340600/[08], loss: 5.3183, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-15:41:34 Iters: 340700/[08], loss: 5.4013, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-15:43:37 Iters: 340800/[08], loss: 5.3776, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-15:45:41 Iters: 340900/[08], loss: 5.9737, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-15:47:44 Iters: 341000/[08], loss: 4.9407, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-15:49:47 Iters: 341100/[08], loss: 5.2067, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-15:51:50 Iters: 341200/[08], loss: 4.8144, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-15:53:54 Iters: 341300/[08], loss: 5.5172, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-15:55:57 Iters: 341400/[08], loss: 6.5253, train_accuracy: 0.2109, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-15:58:00 Iters: 341500/[08], loss: 5.9201, train_accuracy: 0.2266, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-16:00:03 Iters: 341600/[08], loss: 5.5749, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-16:02:06 Iters: 341700/[08], loss: 5.4001, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-16:04:10 Iters: 341800/[08], loss: 6.2446, train_accuracy: 
0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-16:06:13 Iters: 341900/[08], loss: 5.8224, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-16:08:16 Iters: 342000/[08], loss: 5.6895, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-16:10:19 Iters: 342100/[08], loss: 5.3054, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-16:12:22 Iters: 342200/[08], loss: 4.3802, train_accuracy: 0.3594, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-16:14:26 Iters: 342300/[08], loss: 5.7602, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-16:16:29 Iters: 342400/[08], loss: 5.7058, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-16:18:32 Iters: 342500/[08], loss: 4.7485, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-16:20:35 Iters: 342600/[08], loss: 6.1698, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-16:22:38 Iters: 342700/[08], loss: 5.6875, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-16:24:41 Iters: 342800/[08], loss: 6.0148, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-16:26:44 Iters: 342900/[08], loss: 6.2714, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-16:28:48 Iters: 343000/[08], loss: 5.7517, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-16:30:51 Iters: 343100/[08], loss: 6.2036, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-16:32:54 Iters: 343200/[08], loss: 5.6248, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-16:34:57 Iters: 343300/[08], loss: 5.9410, train_accuracy: 0.2031, time: 1.23 
s/iter, learning rate: 0.005000000000000001 -20220705-16:37:00 Iters: 343400/[08], loss: 6.2799, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-16:39:03 Iters: 343500/[08], loss: 6.0669, train_accuracy: 0.2188, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-16:41:07 Iters: 343600/[08], loss: 5.7512, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-16:43:10 Iters: 343700/[08], loss: 6.1078, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-16:45:13 Iters: 343800/[08], loss: 5.6110, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-16:47:16 Iters: 343900/[08], loss: 5.8476, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-16:49:19 Iters: 344000/[08], loss: 5.6396, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-16:51:23 Iters: 344100/[08], loss: 4.8942, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-16:53:26 Iters: 344200/[08], loss: 6.5993, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-16:55:29 Iters: 344300/[08], loss: 5.2176, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-16:57:32 Iters: 344400/[08], loss: 5.9365, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-16:59:35 Iters: 344500/[08], loss: 5.6368, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-17:01:38 Iters: 344600/[08], loss: 5.4106, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-17:03:42 Iters: 344700/[08], loss: 6.4568, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-17:05:45 Iters: 344800/[08], loss: 5.5561, train_accuracy: 0.2812, time: 1.23 s/iter, learning 
rate: 0.005000000000000001 -20220705-17:07:48 Iters: 344900/[08], loss: 5.5070, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-17:09:51 Iters: 345000/[08], loss: 5.5330, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-17:11:54 Iters: 345100/[08], loss: 5.4934, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-17:13:57 Iters: 345200/[08], loss: 6.1354, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-17:16:01 Iters: 345300/[08], loss: 5.8719, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-17:18:04 Iters: 345400/[08], loss: 6.1663, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-17:20:07 Iters: 345500/[08], loss: 5.7809, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-17:22:10 Iters: 345600/[08], loss: 6.2549, train_accuracy: 0.2188, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-17:24:13 Iters: 345700/[08], loss: 5.0706, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-17:26:17 Iters: 345800/[08], loss: 5.1815, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-17:28:20 Iters: 345900/[08], loss: 5.3765, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-17:30:23 Iters: 346000/[08], loss: 5.9572, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-17:32:26 Iters: 346100/[08], loss: 6.9255, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-17:34:29 Iters: 346200/[08], loss: 6.0093, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-17:36:32 Iters: 346300/[08], loss: 5.3481, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220705-17:38:36 Iters: 346400/[08], loss: 5.4372, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-17:40:39 Iters: 346500/[08], loss: 6.0215, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-17:42:42 Iters: 346600/[08], loss: 5.6935, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-17:44:45 Iters: 346700/[08], loss: 5.7609, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-17:46:48 Iters: 346800/[08], loss: 5.7045, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-17:48:52 Iters: 346900/[08], loss: 6.0318, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-17:50:55 Iters: 347000/[08], loss: 5.6391, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-17:52:58 Iters: 347100/[08], loss: 5.1922, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-17:55:01 Iters: 347200/[08], loss: 4.9411, train_accuracy: 0.3594, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-17:57:05 Iters: 347300/[08], loss: 5.7393, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-17:59:08 Iters: 347400/[08], loss: 5.9031, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-18:01:11 Iters: 347500/[08], loss: 5.7235, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-18:03:14 Iters: 347600/[08], loss: 4.7333, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-18:05:17 Iters: 347700/[08], loss: 6.5719, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-18:07:20 Iters: 347800/[08], loss: 5.8202, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220705-18:09:24 Iters: 347900/[08], loss: 5.7699, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-18:11:27 Iters: 348000/[08], loss: 5.4839, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-18:13:30 Iters: 348100/[08], loss: 5.7827, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-18:15:33 Iters: 348200/[08], loss: 5.7703, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-18:17:37 Iters: 348300/[08], loss: 5.1382, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-18:19:40 Iters: 348400/[08], loss: 6.4002, train_accuracy: 0.2266, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-18:21:43 Iters: 348500/[08], loss: 5.3350, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-18:23:46 Iters: 348600/[08], loss: 5.3596, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-18:25:49 Iters: 348700/[08], loss: 4.6905, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-18:27:53 Iters: 348800/[08], loss: 5.8956, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-18:29:56 Iters: 348900/[08], loss: 5.8398, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-18:31:59 Iters: 349000/[08], loss: 5.3044, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-18:34:02 Iters: 349100/[08], loss: 5.6768, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-18:36:06 Iters: 349200/[08], loss: 5.9040, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-18:38:09 Iters: 349300/[08], loss: 5.4913, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220705-18:40:12 Iters: 349400/[08], loss: 5.0379, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-18:42:15 Iters: 349500/[08], loss: 6.1899, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-18:44:19 Iters: 349600/[08], loss: 5.2141, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-18:46:22 Iters: 349700/[08], loss: 5.9968, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-18:48:25 Iters: 349800/[08], loss: 5.4071, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-18:50:28 Iters: 349900/[08], loss: 6.0098, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-18:52:32 Iters: 350000/[08], loss: 5.2264, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-18:52:32 Saving checkpoint: 350000 -20220705-18:53:48 LFW Ave Accuracy: 99.5333 -20220705-18:55:05 AgeDB-30 Ave Accuracy: 96.1167 -20220705-18:56:33 CFP-FP Ave Accuracy: 93.0143 -20220705-18:56:33 Current Best Accuracy: LFW: 99.5833 in iters: 320000, AgeDB-30: 96.2667 in iters: 330000 and CFP-FP: 93.3857 in iters: 320000 -20220705-18:58:36 Iters: 350100/[08], loss: 5.4128, train_accuracy: 0.3125, time: 3.64 s/iter, learning rate: 0.005000000000000001 -20220705-19:00:38 Iters: 350200/[08], loss: 5.3944, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-19:02:42 Iters: 350300/[08], loss: 5.1411, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-19:04:45 Iters: 350400/[08], loss: 4.9022, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-19:06:48 Iters: 350500/[08], loss: 5.1051, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-19:08:51 Iters: 350600/[08], loss: 5.8490, 
train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-19:10:54 Iters: 350700/[08], loss: 5.0304, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-19:12:58 Iters: 350800/[08], loss: 5.6046, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-19:15:01 Iters: 350900/[08], loss: 5.3511, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-19:17:04 Iters: 351000/[08], loss: 5.4609, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-19:19:07 Iters: 351100/[08], loss: 5.2712, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-19:21:10 Iters: 351200/[08], loss: 5.9317, train_accuracy: 0.2188, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-19:23:13 Iters: 351300/[08], loss: 6.5403, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-19:25:16 Iters: 351400/[08], loss: 5.6576, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-19:27:19 Iters: 351500/[08], loss: 5.3584, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-19:29:22 Iters: 351600/[08], loss: 6.5057, train_accuracy: 0.2266, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-19:31:25 Iters: 351700/[08], loss: 6.2078, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-19:33:29 Iters: 351800/[08], loss: 5.9885, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-19:35:32 Iters: 351900/[08], loss: 5.4294, train_accuracy: 0.2266, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-19:37:35 Iters: 352000/[08], loss: 5.9834, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-19:39:38 Iters: 352100/[08], loss: 5.4854, train_accuracy: 
0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-19:41:41 Iters: 352200/[08], loss: 5.3563, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-19:43:44 Iters: 352300/[08], loss: 5.5045, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-19:45:47 Iters: 352400/[08], loss: 4.8032, train_accuracy: 0.3594, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-19:47:51 Iters: 352500/[08], loss: 5.1647, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-19:49:54 Iters: 352600/[08], loss: 5.7201, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-19:51:57 Iters: 352700/[08], loss: 6.3415, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-19:54:00 Iters: 352800/[08], loss: 5.9396, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-19:56:03 Iters: 352900/[08], loss: 5.2362, train_accuracy: 0.3750, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-19:58:06 Iters: 353000/[08], loss: 5.4851, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-20:00:09 Iters: 353100/[08], loss: 6.3634, train_accuracy: 0.2188, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-20:02:13 Iters: 353200/[08], loss: 5.7763, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-20:04:16 Iters: 353300/[08], loss: 5.2862, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-20:06:19 Iters: 353400/[08], loss: 5.4389, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-20:08:22 Iters: 353500/[08], loss: 5.6718, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-20:10:25 Iters: 353600/[08], loss: 5.5531, train_accuracy: 0.2656, time: 1.23 
s/iter, learning rate: 0.005000000000000001 -20220705-20:12:29 Iters: 353700/[08], loss: 6.4928, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-20:14:32 Iters: 353800/[08], loss: 5.5778, train_accuracy: 0.2109, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-20:16:35 Iters: 353900/[08], loss: 5.5280, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-20:18:38 Iters: 354000/[08], loss: 5.9992, train_accuracy: 0.1797, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-20:20:42 Iters: 354100/[08], loss: 5.5009, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-20:22:45 Iters: 354200/[08], loss: 5.6457, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-20:24:48 Iters: 354300/[08], loss: 5.1617, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-20:26:51 Iters: 354400/[08], loss: 5.6291, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-20:28:54 Iters: 354500/[08], loss: 5.8491, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-20:30:58 Iters: 354600/[08], loss: 5.1548, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-20:33:01 Iters: 354700/[08], loss: 6.0710, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-20:35:04 Iters: 354800/[08], loss: 5.6286, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-20:37:07 Iters: 354900/[08], loss: 5.6685, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-20:39:10 Iters: 355000/[08], loss: 5.5338, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-20:41:13 Iters: 355100/[08], loss: 5.8115, train_accuracy: 0.2422, time: 1.23 s/iter, learning 
rate: 0.005000000000000001 -20220705-20:43:16 Iters: 355200/[08], loss: 5.2790, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-20:45:20 Iters: 355300/[08], loss: 5.8634, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-20:47:23 Iters: 355400/[08], loss: 5.1946, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-20:49:26 Iters: 355500/[08], loss: 5.2435, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-20:51:29 Iters: 355600/[08], loss: 5.1769, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-20:53:32 Iters: 355700/[08], loss: 5.9697, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-20:55:36 Iters: 355800/[08], loss: 5.0524, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-20:57:39 Iters: 355900/[08], loss: 5.9471, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-20:59:42 Iters: 356000/[08], loss: 6.0761, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-21:01:45 Iters: 356100/[08], loss: 6.0476, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-21:03:48 Iters: 356200/[08], loss: 5.6010, train_accuracy: 0.3594, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-21:05:52 Iters: 356300/[08], loss: 5.1205, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-21:07:55 Iters: 356400/[08], loss: 6.1809, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-21:09:58 Iters: 356500/[08], loss: 6.1093, train_accuracy: 0.1953, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-21:12:01 Iters: 356600/[08], loss: 5.5835, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220705-21:14:05 Iters: 356700/[08], loss: 5.0695, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-21:16:08 Iters: 356800/[08], loss: 5.7272, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-21:18:11 Iters: 356900/[08], loss: 5.6380, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-21:20:14 Iters: 357000/[08], loss: 6.1594, train_accuracy: 0.2266, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-21:22:17 Iters: 357100/[08], loss: 5.5650, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-21:24:20 Iters: 357200/[08], loss: 6.0157, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-21:26:24 Iters: 357300/[08], loss: 5.0878, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-21:28:27 Iters: 357400/[08], loss: 6.5622, train_accuracy: 0.1875, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-21:30:30 Iters: 357500/[08], loss: 5.4687, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-21:32:33 Iters: 357600/[08], loss: 5.9121, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-21:34:36 Iters: 357700/[08], loss: 5.5991, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-21:36:39 Iters: 357800/[08], loss: 6.0818, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-21:38:43 Iters: 357900/[08], loss: 5.2792, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-21:40:46 Iters: 358000/[08], loss: 5.8927, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-21:42:49 Iters: 358100/[08], loss: 6.3831, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220705-21:44:52 Iters: 358200/[08], loss: 5.4663, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-21:46:55 Iters: 358300/[08], loss: 5.5644, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-21:48:58 Iters: 358400/[08], loss: 6.3464, train_accuracy: 0.2188, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-21:51:01 Iters: 358500/[08], loss: 5.8439, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-21:53:05 Iters: 358600/[08], loss: 5.5783, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-21:55:08 Iters: 358700/[08], loss: 6.0940, train_accuracy: 0.2266, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-21:57:11 Iters: 358800/[08], loss: 5.8302, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-21:59:14 Iters: 358900/[08], loss: 5.5808, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-22:01:17 Iters: 359000/[08], loss: 6.3718, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-22:03:21 Iters: 359100/[08], loss: 5.2378, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-22:05:24 Iters: 359200/[08], loss: 5.5281, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-22:07:27 Iters: 359300/[08], loss: 5.8943, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-22:09:30 Iters: 359400/[08], loss: 5.9385, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-22:11:33 Iters: 359500/[08], loss: 5.1174, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-22:13:36 Iters: 359600/[08], loss: 6.1668, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220705-22:15:40 Iters: 359700/[08], loss: 5.7863, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-22:17:43 Iters: 359800/[08], loss: 5.5558, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-22:19:46 Iters: 359900/[08], loss: 5.9442, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-22:21:49 Iters: 360000/[08], loss: 5.9789, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-22:21:49 Saving checkpoint: 360000 -20220705-22:23:07 LFW Ave Accuracy: 99.4999 -20220705-22:24:24 AgeDB-30 Ave Accuracy: 96.1500 -20220705-22:25:55 CFP-FP Ave Accuracy: 93.1714 -20220705-22:25:55 Current Best Accuracy: LFW: 99.5833 in iters: 320000, AgeDB-30: 96.2667 in iters: 330000 and CFP-FP: 93.3857 in iters: 320000 -20220705-22:27:57 Iters: 360100/[08], loss: 5.7161, train_accuracy: 0.2734, time: 3.68 s/iter, learning rate: 0.005000000000000001 -20220705-22:30:00 Iters: 360200/[08], loss: 5.9010, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-22:32:03 Iters: 360300/[08], loss: 5.2533, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-22:34:07 Iters: 360400/[08], loss: 5.4014, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-22:36:10 Iters: 360500/[08], loss: 6.6322, train_accuracy: 0.1953, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-22:38:13 Iters: 360600/[08], loss: 4.9004, train_accuracy: 0.3906, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-22:40:16 Iters: 360700/[08], loss: 5.3414, train_accuracy: 0.3906, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-22:42:19 Iters: 360800/[08], loss: 5.7578, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-22:44:23 Iters: 360900/[08], loss: 5.3305, 
train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-22:46:26 Iters: 361000/[08], loss: 5.5385, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-22:48:29 Iters: 361100/[08], loss: 4.9628, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-22:50:32 Iters: 361200/[08], loss: 5.3952, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-22:52:35 Iters: 361300/[08], loss: 6.0581, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-22:54:38 Iters: 361400/[08], loss: 5.3970, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-22:56:41 Iters: 361500/[08], loss: 5.8235, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-22:58:45 Iters: 361600/[08], loss: 5.8126, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-23:00:48 Iters: 361700/[08], loss: 5.1856, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-23:02:51 Iters: 361800/[08], loss: 6.0530, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-23:04:54 Iters: 361900/[08], loss: 6.8844, train_accuracy: 0.2031, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-23:06:57 Iters: 362000/[08], loss: 5.3755, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-23:09:01 Iters: 362100/[08], loss: 5.5220, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-23:11:04 Iters: 362200/[08], loss: 5.2999, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-23:13:07 Iters: 362300/[08], loss: 4.9026, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-23:15:10 Iters: 362400/[08], loss: 5.1850, train_accuracy: 
0.3594, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-23:17:13 Iters: 362500/[08], loss: 5.4589, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-23:19:17 Iters: 362600/[08], loss: 6.2434, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-23:21:20 Iters: 362700/[08], loss: 5.5240, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-23:23:23 Iters: 362800/[08], loss: 5.3399, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-23:25:26 Iters: 362900/[08], loss: 6.2796, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-23:27:29 Iters: 363000/[08], loss: 5.8222, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-23:29:32 Iters: 363100/[08], loss: 5.9896, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-23:31:35 Iters: 363200/[08], loss: 5.2122, train_accuracy: 0.3594, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-23:33:39 Iters: 363300/[08], loss: 6.2415, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-23:35:42 Iters: 363400/[08], loss: 6.1433, train_accuracy: 0.2109, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-23:37:45 Iters: 363500/[08], loss: 5.9765, train_accuracy: 0.2031, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-23:39:48 Iters: 363600/[08], loss: 5.7516, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-23:41:52 Iters: 363700/[08], loss: 5.6646, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-23:43:55 Iters: 363800/[08], loss: 5.1021, train_accuracy: 0.3594, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-23:45:58 Iters: 363900/[08], loss: 5.9853, train_accuracy: 0.2812, time: 1.23 
s/iter, learning rate: 0.005000000000000001 -20220705-23:46:22 Train Epoch: 9/18 ... -20220705-23:48:01 Iters: 364000/[09], loss: 5.3878, train_accuracy: 0.3438, time: 0.99 s/iter, learning rate: 0.005000000000000001 -20220705-23:50:04 Iters: 364100/[09], loss: 6.0858, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-23:52:07 Iters: 364200/[09], loss: 5.4394, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-23:54:11 Iters: 364300/[09], loss: 5.9887, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-23:56:14 Iters: 364400/[09], loss: 5.4018, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220705-23:58:17 Iters: 364500/[09], loss: 5.5206, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-00:00:20 Iters: 364600/[09], loss: 5.8112, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-00:02:24 Iters: 364700/[09], loss: 5.8508, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-00:04:27 Iters: 364800/[09], loss: 5.0663, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-00:06:30 Iters: 364900/[09], loss: 5.8400, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-00:08:33 Iters: 365000/[09], loss: 6.4495, train_accuracy: 0.2188, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-00:10:37 Iters: 365100/[09], loss: 5.0111, train_accuracy: 0.3750, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-00:12:40 Iters: 365200/[09], loss: 4.8578, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-00:14:43 Iters: 365300/[09], loss: 5.1111, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-00:16:46 Iters: 365400/[09], loss: 4.8892, 
train_accuracy: 0.3750, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-00:18:49 Iters: 365500/[09], loss: 5.3958, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-00:20:52 Iters: 365600/[09], loss: 5.8554, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-00:22:55 Iters: 365700/[09], loss: 5.7284, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-00:24:58 Iters: 365800/[09], loss: 5.4037, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-00:27:02 Iters: 365900/[09], loss: 4.7398, train_accuracy: 0.3906, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-00:29:05 Iters: 366000/[09], loss: 5.6530, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-00:31:08 Iters: 366100/[09], loss: 5.6798, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-00:33:11 Iters: 366200/[09], loss: 5.5187, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-00:35:14 Iters: 366300/[09], loss: 4.9246, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-00:37:17 Iters: 366400/[09], loss: 5.6243, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-00:39:20 Iters: 366500/[09], loss: 5.2840, train_accuracy: 0.3750, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-00:41:24 Iters: 366600/[09], loss: 6.2190, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-00:43:27 Iters: 366700/[09], loss: 5.6290, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-00:45:30 Iters: 366800/[09], loss: 6.0767, train_accuracy: 0.2266, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-00:47:33 Iters: 366900/[09], loss: 5.1727, train_accuracy: 
0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-00:49:36 Iters: 367000/[09], loss: 5.8072, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-00:51:40 Iters: 367100/[09], loss: 5.9060, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-00:53:43 Iters: 367200/[09], loss: 5.8992, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-00:55:46 Iters: 367300/[09], loss: 5.6870, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-00:57:49 Iters: 367400/[09], loss: 5.7402, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-00:59:53 Iters: 367500/[09], loss: 4.9574, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-01:01:56 Iters: 367600/[09], loss: 5.3231, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-01:03:59 Iters: 367700/[09], loss: 5.4855, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-01:06:02 Iters: 367800/[09], loss: 5.4419, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-01:08:06 Iters: 367900/[09], loss: 5.0824, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-01:10:09 Iters: 368000/[09], loss: 5.6559, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-01:12:12 Iters: 368100/[09], loss: 5.4995, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-01:14:15 Iters: 368200/[09], loss: 5.1258, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-01:16:19 Iters: 368300/[09], loss: 5.2415, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-01:18:22 Iters: 368400/[09], loss: 5.4460, train_accuracy: 0.3594, time: 1.23 
s/iter, learning rate: 0.005000000000000001 -20220706-01:20:25 Iters: 368500/[09], loss: 5.8529, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-01:22:28 Iters: 368600/[09], loss: 5.1528, train_accuracy: 0.3750, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-01:24:31 Iters: 368700/[09], loss: 6.3570, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-01:26:34 Iters: 368800/[09], loss: 5.2049, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-01:28:38 Iters: 368900/[09], loss: 5.4845, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-01:30:41 Iters: 369000/[09], loss: 5.5370, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-01:32:44 Iters: 369100/[09], loss: 5.9275, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-01:34:47 Iters: 369200/[09], loss: 5.4896, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-01:36:50 Iters: 369300/[09], loss: 5.3293, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-01:38:54 Iters: 369400/[09], loss: 5.2504, train_accuracy: 0.3594, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-01:40:57 Iters: 369500/[09], loss: 5.4324, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-01:43:00 Iters: 369600/[09], loss: 4.8639, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-01:45:04 Iters: 369700/[09], loss: 5.8234, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-01:47:07 Iters: 369800/[09], loss: 5.0756, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-01:49:10 Iters: 369900/[09], loss: 6.0250, train_accuracy: 0.2656, time: 1.23 s/iter, learning 
rate: 0.005000000000000001 -20220706-01:51:13 Iters: 370000/[09], loss: 4.9973, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-01:51:13 Saving checkpoint: 370000 -20220706-01:52:30 LFW Ave Accuracy: 99.4999 -20220706-01:53:45 AgeDB-30 Ave Accuracy: 96.2167 -20220706-01:55:11 CFP-FP Ave Accuracy: 93.6143 -20220706-01:55:11 Current Best Accuracy: LFW: 99.5833 in iters: 320000, AgeDB-30: 96.2667 in iters: 330000 and CFP-FP: 93.6143 in iters: 370000 -20220706-01:57:13 Iters: 370100/[09], loss: 5.1610, train_accuracy: 0.3047, time: 3.60 s/iter, learning rate: 0.005000000000000001 -20220706-01:59:17 Iters: 370200/[09], loss: 5.6226, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-02:01:20 Iters: 370300/[09], loss: 5.7924, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-02:03:23 Iters: 370400/[09], loss: 5.2430, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-02:05:26 Iters: 370500/[09], loss: 5.0514, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-02:07:29 Iters: 370600/[09], loss: 5.1062, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-02:09:33 Iters: 370700/[09], loss: 5.5461, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-02:11:36 Iters: 370800/[09], loss: 6.0509, train_accuracy: 0.2109, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-02:13:39 Iters: 370900/[09], loss: 5.2304, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-02:15:42 Iters: 371000/[09], loss: 4.9531, train_accuracy: 0.3594, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-02:17:45 Iters: 371100/[09], loss: 5.3987, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-02:19:48 Iters: 371200/[09], loss: 6.4060, 
train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-02:21:51 Iters: 371300/[09], loss: 5.7573, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-02:23:54 Iters: 371400/[09], loss: 5.5984, train_accuracy: 0.2188, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-02:25:58 Iters: 371500/[09], loss: 6.2392, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-02:28:01 Iters: 371600/[09], loss: 5.2794, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-02:30:04 Iters: 371700/[09], loss: 5.4631, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-02:32:07 Iters: 371800/[09], loss: 5.4451, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-02:34:10 Iters: 371900/[09], loss: 5.1861, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-02:36:13 Iters: 372000/[09], loss: 4.9104, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-02:38:17 Iters: 372100/[09], loss: 5.6305, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-02:40:20 Iters: 372200/[09], loss: 4.8279, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-02:42:23 Iters: 372300/[09], loss: 6.0898, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-02:44:26 Iters: 372400/[09], loss: 5.6919, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-02:46:29 Iters: 372500/[09], loss: 4.6022, train_accuracy: 0.3828, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-02:48:32 Iters: 372600/[09], loss: 5.0385, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-02:50:35 Iters: 372700/[09], loss: 5.4740, train_accuracy: 
0.2266, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-02:52:39 Iters: 372800/[09], loss: 5.0690, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-02:54:42 Iters: 372900/[09], loss: 5.3041, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-02:56:45 Iters: 373000/[09], loss: 5.7740, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-02:58:48 Iters: 373100/[09], loss: 5.5794, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-03:00:51 Iters: 373200/[09], loss: 5.4324, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-03:02:54 Iters: 373300/[09], loss: 6.0654, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-03:04:58 Iters: 373400/[09], loss: 5.9893, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-03:07:01 Iters: 373500/[09], loss: 6.5634, train_accuracy: 0.2188, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-03:09:04 Iters: 373600/[09], loss: 5.7654, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-03:11:07 Iters: 373700/[09], loss: 5.2221, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-03:13:10 Iters: 373800/[09], loss: 5.0700, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-03:15:14 Iters: 373900/[09], loss: 5.4259, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-03:17:17 Iters: 374000/[09], loss: 5.3865, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-03:19:20 Iters: 374100/[09], loss: 5.4198, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-03:21:23 Iters: 374200/[09], loss: 5.5920, train_accuracy: 0.1953, time: 1.23 
s/iter, learning rate: 0.005000000000000001 -20220706-03:23:26 Iters: 374300/[09], loss: 5.3982, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-03:25:30 Iters: 374400/[09], loss: 4.9142, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-03:27:33 Iters: 374500/[09], loss: 5.5310, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-03:29:36 Iters: 374600/[09], loss: 5.3781, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-03:31:39 Iters: 374700/[09], loss: 5.1936, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-03:33:42 Iters: 374800/[09], loss: 5.5113, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-03:35:46 Iters: 374900/[09], loss: 5.6649, train_accuracy: 0.2188, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-03:37:49 Iters: 375000/[09], loss: 5.3374, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-03:39:52 Iters: 375100/[09], loss: 5.1669, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-03:41:55 Iters: 375200/[09], loss: 5.6905, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-03:43:58 Iters: 375300/[09], loss: 5.4615, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-03:46:01 Iters: 375400/[09], loss: 5.1740, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-03:48:05 Iters: 375500/[09], loss: 5.0329, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-03:50:08 Iters: 375600/[09], loss: 5.3671, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-03:52:11 Iters: 375700/[09], loss: 5.2969, train_accuracy: 0.3203, time: 1.23 s/iter, learning 
rate: 0.005000000000000001 -20220706-03:54:14 Iters: 375800/[09], loss: 5.2838, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-03:56:18 Iters: 375900/[09], loss: 4.9007, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-03:58:21 Iters: 376000/[09], loss: 5.0295, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-04:00:24 Iters: 376100/[09], loss: 6.2503, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-04:02:27 Iters: 376200/[09], loss: 6.0378, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-04:04:30 Iters: 376300/[09], loss: 5.1824, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-04:06:34 Iters: 376400/[09], loss: 4.5748, train_accuracy: 0.3906, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-04:08:37 Iters: 376500/[09], loss: 5.3077, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-04:10:40 Iters: 376600/[09], loss: 4.4963, train_accuracy: 0.3750, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-04:12:43 Iters: 376700/[09], loss: 5.4865, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-04:14:46 Iters: 376800/[09], loss: 5.7666, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-04:16:49 Iters: 376900/[09], loss: 5.4595, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-04:18:53 Iters: 377000/[09], loss: 5.4804, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-04:20:56 Iters: 377100/[09], loss: 5.9357, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-04:22:59 Iters: 377200/[09], loss: 5.9569, train_accuracy: 0.2188, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220706-04:25:02 Iters: 377300/[09], loss: 5.3769, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-04:27:05 Iters: 377400/[09], loss: 5.9495, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-04:29:09 Iters: 377500/[09], loss: 5.7603, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-04:31:12 Iters: 377600/[09], loss: 5.6515, train_accuracy: 0.2031, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-04:33:15 Iters: 377700/[09], loss: 5.0209, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-04:35:18 Iters: 377800/[09], loss: 5.4417, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-04:37:21 Iters: 377900/[09], loss: 5.0504, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-04:39:25 Iters: 378000/[09], loss: 5.4888, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-04:41:28 Iters: 378100/[09], loss: 5.4610, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-04:43:31 Iters: 378200/[09], loss: 5.6702, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-04:45:34 Iters: 378300/[09], loss: 6.1643, train_accuracy: 0.2266, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-04:47:38 Iters: 378400/[09], loss: 5.5415, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-04:49:41 Iters: 378500/[09], loss: 5.2088, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-04:51:44 Iters: 378600/[09], loss: 6.1357, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-04:53:47 Iters: 378700/[09], loss: 5.6721, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220706-04:55:50 Iters: 378800/[09], loss: 5.5272, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-04:57:54 Iters: 378900/[09], loss: 5.1137, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-04:59:57 Iters: 379000/[09], loss: 5.8070, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-05:02:00 Iters: 379100/[09], loss: 5.9044, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-05:04:03 Iters: 379200/[09], loss: 5.9736, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-05:06:07 Iters: 379300/[09], loss: 5.3494, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-05:08:10 Iters: 379400/[09], loss: 5.2438, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-05:10:13 Iters: 379500/[09], loss: 5.3047, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-05:12:16 Iters: 379600/[09], loss: 5.8294, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-05:14:19 Iters: 379700/[09], loss: 4.7346, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-05:16:23 Iters: 379800/[09], loss: 6.0829, train_accuracy: 0.1953, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-05:18:26 Iters: 379900/[09], loss: 5.0514, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-05:20:29 Iters: 380000/[09], loss: 5.5363, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-05:20:29 Saving checkpoint: 380000 -20220706-05:21:46 LFW Ave Accuracy: 99.5666 -20220706-05:23:00 AgeDB-30 Ave Accuracy: 95.8000 -20220706-05:24:27 CFP-FP Ave Accuracy: 92.8429 -20220706-05:24:27 Current Best Accuracy: LFW: 99.5833 in iters: 320000, 
AgeDB-30: 96.2667 in iters: 330000 and CFP-FP: 93.6143 in iters: 370000 -20220706-05:26:29 Iters: 380100/[09], loss: 5.5461, train_accuracy: 0.2812, time: 3.60 s/iter, learning rate: 0.005000000000000001 -20220706-05:28:32 Iters: 380200/[09], loss: 5.3157, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-05:30:36 Iters: 380300/[09], loss: 6.1228, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-05:32:39 Iters: 380400/[09], loss: 4.7346, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-05:34:42 Iters: 380500/[09], loss: 5.1990, train_accuracy: 0.3594, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-05:36:45 Iters: 380600/[09], loss: 6.2369, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-05:38:48 Iters: 380700/[09], loss: 5.5240, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-05:40:51 Iters: 380800/[09], loss: 5.9173, train_accuracy: 0.2109, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-05:42:55 Iters: 380900/[09], loss: 5.7012, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-05:44:58 Iters: 381000/[09], loss: 4.6113, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-05:47:01 Iters: 381100/[09], loss: 5.0876, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-05:49:04 Iters: 381200/[09], loss: 5.8714, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-05:51:07 Iters: 381300/[09], loss: 5.0467, train_accuracy: 0.3750, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-05:53:10 Iters: 381400/[09], loss: 5.9395, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-05:55:14 Iters: 381500/[09], loss: 5.7660, train_accuracy: 0.2969, 
time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-05:57:17 Iters: 381600/[09], loss: 5.9441, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-05:59:20 Iters: 381700/[09], loss: 5.8843, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-06:01:23 Iters: 381800/[09], loss: 5.6359, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-06:03:26 Iters: 381900/[09], loss: 5.6155, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-06:05:29 Iters: 382000/[09], loss: 5.7245, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-06:07:33 Iters: 382100/[09], loss: 5.7366, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-06:09:36 Iters: 382200/[09], loss: 5.0672, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-06:11:39 Iters: 382300/[09], loss: 6.6831, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-06:13:42 Iters: 382400/[09], loss: 5.3907, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-06:15:45 Iters: 382500/[09], loss: 5.6297, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-06:17:49 Iters: 382600/[09], loss: 4.9307, train_accuracy: 0.3984, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-06:19:52 Iters: 382700/[09], loss: 6.3028, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-06:21:55 Iters: 382800/[09], loss: 5.5202, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-06:23:58 Iters: 382900/[09], loss: 5.4413, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-06:26:02 Iters: 383000/[09], loss: 5.2240, train_accuracy: 0.3047, time: 1.23 s/iter, 
learning rate: 0.005000000000000001 -20220706-06:28:05 Iters: 383100/[09], loss: 5.8372, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-06:30:08 Iters: 383200/[09], loss: 5.3441, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-06:32:11 Iters: 383300/[09], loss: 5.2597, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-06:34:14 Iters: 383400/[09], loss: 4.7769, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-06:36:17 Iters: 383500/[09], loss: 4.7604, train_accuracy: 0.3594, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-06:38:21 Iters: 383600/[09], loss: 5.0482, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-06:40:24 Iters: 383700/[09], loss: 5.7439, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-06:42:27 Iters: 383800/[09], loss: 5.6678, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-06:44:30 Iters: 383900/[09], loss: 4.9006, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-06:46:34 Iters: 384000/[09], loss: 5.6006, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-06:48:37 Iters: 384100/[09], loss: 6.0373, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-06:50:40 Iters: 384200/[09], loss: 5.7099, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-06:52:43 Iters: 384300/[09], loss: 6.0402, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-06:54:46 Iters: 384400/[09], loss: 5.6447, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-06:56:50 Iters: 384500/[09], loss: 5.8030, train_accuracy: 0.2266, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220706-06:58:53 Iters: 384600/[09], loss: 6.5465, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-07:00:56 Iters: 384700/[09], loss: 5.1845, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-07:02:59 Iters: 384800/[09], loss: 5.9507, train_accuracy: 0.2188, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-07:05:02 Iters: 384900/[09], loss: 6.4566, train_accuracy: 0.2109, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-07:07:06 Iters: 385000/[09], loss: 5.3872, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-07:09:09 Iters: 385100/[09], loss: 6.3577, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-07:11:12 Iters: 385200/[09], loss: 5.9235, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-07:13:15 Iters: 385300/[09], loss: 6.1113, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-07:15:18 Iters: 385400/[09], loss: 5.7976, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-07:17:22 Iters: 385500/[09], loss: 5.7039, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-07:19:25 Iters: 385600/[09], loss: 4.9807, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-07:21:28 Iters: 385700/[09], loss: 5.5663, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-07:23:31 Iters: 385800/[09], loss: 6.0396, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-07:25:34 Iters: 385900/[09], loss: 5.1492, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-07:27:37 Iters: 386000/[09], loss: 5.1488, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220706-07:29:41 Iters: 386100/[09], loss: 5.8462, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-07:31:44 Iters: 386200/[09], loss: 5.8285, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-07:33:47 Iters: 386300/[09], loss: 5.2224, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-07:35:50 Iters: 386400/[09], loss: 5.5364, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-07:37:53 Iters: 386500/[09], loss: 5.4030, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-07:39:57 Iters: 386600/[09], loss: 5.5617, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-07:42:00 Iters: 386700/[09], loss: 4.4395, train_accuracy: 0.3906, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-07:44:03 Iters: 386800/[09], loss: 4.8868, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-07:46:06 Iters: 386900/[09], loss: 5.2201, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-07:48:10 Iters: 387000/[09], loss: 5.4833, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-07:50:13 Iters: 387100/[09], loss: 5.3037, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-07:52:16 Iters: 387200/[09], loss: 5.2143, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-07:54:19 Iters: 387300/[09], loss: 4.9557, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-07:56:23 Iters: 387400/[09], loss: 6.0676, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-07:58:26 Iters: 387500/[09], loss: 5.4370, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220706-08:00:29 Iters: 387600/[09], loss: 5.2299, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-08:02:33 Iters: 387700/[09], loss: 6.3662, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-08:04:36 Iters: 387800/[09], loss: 4.9827, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-08:06:39 Iters: 387900/[09], loss: 5.2082, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-08:08:42 Iters: 388000/[09], loss: 6.0331, train_accuracy: 0.2266, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-08:10:46 Iters: 388100/[09], loss: 4.9085, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-08:12:49 Iters: 388200/[09], loss: 6.2396, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-08:14:52 Iters: 388300/[09], loss: 6.0634, train_accuracy: 0.2266, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-08:16:55 Iters: 388400/[09], loss: 4.4336, train_accuracy: 0.3906, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-08:18:59 Iters: 388500/[09], loss: 4.7939, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-08:21:02 Iters: 388600/[09], loss: 5.6146, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-08:23:05 Iters: 388700/[09], loss: 5.5134, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-08:25:08 Iters: 388800/[09], loss: 5.6059, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-08:27:12 Iters: 388900/[09], loss: 4.6878, train_accuracy: 0.3906, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-08:29:15 Iters: 389000/[09], loss: 5.0280, train_accuracy: 0.3750, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220706-08:31:18 Iters: 389100/[09], loss: 5.5525, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-08:33:21 Iters: 389200/[09], loss: 5.6883, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-08:35:25 Iters: 389300/[09], loss: 4.7830, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-08:37:28 Iters: 389400/[09], loss: 5.3716, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-08:39:31 Iters: 389500/[09], loss: 5.9292, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-08:41:34 Iters: 389600/[09], loss: 5.9472, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-08:43:38 Iters: 389700/[09], loss: 5.1529, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-08:45:41 Iters: 389800/[09], loss: 5.6929, train_accuracy: 0.2031, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-08:47:44 Iters: 389900/[09], loss: 5.6565, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-08:49:47 Iters: 390000/[09], loss: 5.8521, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-08:49:47 Saving checkpoint: 390000 -20220706-08:51:05 LFW Ave Accuracy: 99.5333 -20220706-08:52:21 AgeDB-30 Ave Accuracy: 96.2833 -20220706-08:53:49 CFP-FP Ave Accuracy: 93.3286 -20220706-08:53:49 Current Best Accuracy: LFW: 99.5833 in iters: 320000, AgeDB-30: 96.2833 in iters: 390000 and CFP-FP: 93.6143 in iters: 370000 -20220706-08:55:52 Iters: 390100/[09], loss: 5.3900, train_accuracy: 0.2656, time: 3.65 s/iter, learning rate: 0.005000000000000001 -20220706-08:57:55 Iters: 390200/[09], loss: 5.6028, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-08:59:58 Iters: 390300/[09], loss: 5.3489, 
train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-09:02:01 Iters: 390400/[09], loss: 5.3955, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-09:04:05 Iters: 390500/[09], loss: 5.7464, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-09:06:08 Iters: 390600/[09], loss: 5.4202, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-09:08:11 Iters: 390700/[09], loss: 5.9472, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-09:10:14 Iters: 390800/[09], loss: 6.1688, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-09:12:18 Iters: 390900/[09], loss: 4.6467, train_accuracy: 0.3594, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-09:14:21 Iters: 391000/[09], loss: 5.1382, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-09:16:24 Iters: 391100/[09], loss: 5.6043, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-09:18:27 Iters: 391200/[09], loss: 6.0890, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-09:20:30 Iters: 391300/[09], loss: 6.0159, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-09:22:34 Iters: 391400/[09], loss: 5.9748, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-09:24:37 Iters: 391500/[09], loss: 5.8086, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-09:26:40 Iters: 391600/[09], loss: 5.5393, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-09:28:43 Iters: 391700/[09], loss: 6.4114, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-09:30:46 Iters: 391800/[09], loss: 5.9544, train_accuracy: 
0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-09:32:49 Iters: 391900/[09], loss: 4.8079, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-09:34:53 Iters: 392000/[09], loss: 5.2847, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-09:36:56 Iters: 392100/[09], loss: 4.6861, train_accuracy: 0.3750, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-09:38:59 Iters: 392200/[09], loss: 6.3373, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-09:41:02 Iters: 392300/[09], loss: 5.2092, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-09:43:05 Iters: 392400/[09], loss: 5.7775, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-09:45:08 Iters: 392500/[09], loss: 5.3382, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-09:47:12 Iters: 392600/[09], loss: 6.7480, train_accuracy: 0.1719, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-09:49:15 Iters: 392700/[09], loss: 5.4466, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-09:51:18 Iters: 392800/[09], loss: 5.0313, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-09:53:22 Iters: 392900/[09], loss: 5.9723, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-09:55:25 Iters: 393000/[09], loss: 5.0451, train_accuracy: 0.3750, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-09:57:28 Iters: 393100/[09], loss: 4.9328, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-09:59:31 Iters: 393200/[09], loss: 5.8370, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-10:01:35 Iters: 393300/[09], loss: 5.8118, train_accuracy: 0.3047, time: 1.23 
s/iter, learning rate: 0.005000000000000001 -20220706-10:03:38 Iters: 393400/[09], loss: 5.3345, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-10:05:41 Iters: 393500/[09], loss: 5.6872, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-10:07:44 Iters: 393600/[09], loss: 5.6676, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-10:09:48 Iters: 393700/[09], loss: 4.8659, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-10:11:51 Iters: 393800/[09], loss: 5.4693, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-10:13:54 Iters: 393900/[09], loss: 5.6317, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-10:15:57 Iters: 394000/[09], loss: 5.5896, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-10:18:01 Iters: 394100/[09], loss: 5.7193, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-10:20:04 Iters: 394200/[09], loss: 5.2700, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-10:22:07 Iters: 394300/[09], loss: 5.6511, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-10:24:10 Iters: 394400/[09], loss: 5.3646, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-10:26:14 Iters: 394500/[09], loss: 5.4832, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-10:28:17 Iters: 394600/[09], loss: 5.4090, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-10:30:20 Iters: 394700/[09], loss: 5.5127, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-10:32:24 Iters: 394800/[09], loss: 5.4539, train_accuracy: 0.2344, time: 1.23 s/iter, learning 
rate: 0.005000000000000001 -20220706-10:34:27 Iters: 394900/[09], loss: 6.2431, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-10:36:30 Iters: 395000/[09], loss: 4.9108, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-10:38:33 Iters: 395100/[09], loss: 5.6183, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-10:40:36 Iters: 395200/[09], loss: 6.2848, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-10:42:39 Iters: 395300/[09], loss: 6.5731, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-10:44:43 Iters: 395400/[09], loss: 4.8898, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-10:46:46 Iters: 395500/[09], loss: 5.1890, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-10:48:49 Iters: 395600/[09], loss: 6.0481, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-10:50:52 Iters: 395700/[09], loss: 5.6650, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-10:52:55 Iters: 395800/[09], loss: 5.0964, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-10:54:59 Iters: 395900/[09], loss: 5.4130, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-10:57:02 Iters: 396000/[09], loss: 5.9513, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-10:59:05 Iters: 396100/[09], loss: 4.9723, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-11:01:08 Iters: 396200/[09], loss: 5.3931, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-11:03:12 Iters: 396300/[09], loss: 5.3864, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220706-11:05:15 Iters: 396400/[09], loss: 6.4189, train_accuracy: 0.1875, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-11:07:18 Iters: 396500/[09], loss: 6.3723, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-11:09:21 Iters: 396600/[09], loss: 5.9646, train_accuracy: 0.2266, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-11:11:24 Iters: 396700/[09], loss: 4.7178, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-11:13:28 Iters: 396800/[09], loss: 5.6849, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-11:15:31 Iters: 396900/[09], loss: 5.3160, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-11:17:34 Iters: 397000/[09], loss: 5.4296, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-11:19:37 Iters: 397100/[09], loss: 5.5044, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-11:21:41 Iters: 397200/[09], loss: 5.7890, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-11:23:44 Iters: 397300/[09], loss: 5.6795, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-11:25:47 Iters: 397400/[09], loss: 5.5738, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-11:27:50 Iters: 397500/[09], loss: 5.5643, train_accuracy: 0.2891, time: 1.24 s/iter, learning rate: 0.005000000000000001 -20220706-11:29:54 Iters: 397600/[09], loss: 5.5629, train_accuracy: 0.3047, time: 1.24 s/iter, learning rate: 0.005000000000000001 -20220706-11:31:58 Iters: 397700/[09], loss: 5.5784, train_accuracy: 0.2734, time: 1.24 s/iter, learning rate: 0.005000000000000001 -20220706-11:34:01 Iters: 397800/[09], loss: 5.2347, train_accuracy: 0.2812, time: 1.24 s/iter, learning rate: 
0.005000000000000001 -20220706-11:36:05 Iters: 397900/[09], loss: 5.4074, train_accuracy: 0.2500, time: 1.24 s/iter, learning rate: 0.005000000000000001 -20220706-11:38:08 Iters: 398000/[09], loss: 5.0177, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-11:40:12 Iters: 398100/[09], loss: 5.6123, train_accuracy: 0.2188, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-11:42:15 Iters: 398200/[09], loss: 6.0247, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-11:44:18 Iters: 398300/[09], loss: 5.2890, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-11:46:21 Iters: 398400/[09], loss: 5.2626, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-11:48:25 Iters: 398500/[09], loss: 6.1808, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-11:50:28 Iters: 398600/[09], loss: 5.6915, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-11:52:31 Iters: 398700/[09], loss: 6.1187, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-11:54:34 Iters: 398800/[09], loss: 4.8438, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-11:56:37 Iters: 398900/[09], loss: 5.1666, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-11:58:41 Iters: 399000/[09], loss: 5.9980, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-12:00:44 Iters: 399100/[09], loss: 5.5076, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-12:02:47 Iters: 399200/[09], loss: 6.5213, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-12:04:51 Iters: 399300/[09], loss: 5.4668, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220706-12:06:54 Iters: 399400/[09], loss: 5.6875, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-12:08:57 Iters: 399500/[09], loss: 5.7727, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-12:11:00 Iters: 399600/[09], loss: 5.6393, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-12:13:03 Iters: 399700/[09], loss: 5.8751, train_accuracy: 0.2266, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-12:15:07 Iters: 399800/[09], loss: 5.3753, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-12:17:10 Iters: 399900/[09], loss: 5.7983, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-12:19:13 Iters: 400000/[09], loss: 5.2553, train_accuracy: 0.3594, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-12:19:13 Saving checkpoint: 400000 -20220706-12:20:30 LFW Ave Accuracy: 99.6000 -20220706-12:21:46 AgeDB-30 Ave Accuracy: 96.5000 -20220706-12:23:15 CFP-FP Ave Accuracy: 93.3000 -20220706-12:23:15 Current Best Accuracy: LFW: 99.6000 in iters: 400000, AgeDB-30: 96.5000 in iters: 400000 and CFP-FP: 93.6143 in iters: 370000 -20220706-12:25:17 Iters: 400100/[09], loss: 5.1502, train_accuracy: 0.3125, time: 3.65 s/iter, learning rate: 0.005000000000000001 -20220706-12:27:21 Iters: 400200/[09], loss: 5.5620, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-12:29:24 Iters: 400300/[09], loss: 5.2319, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-12:31:27 Iters: 400400/[09], loss: 5.4844, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-12:33:30 Iters: 400500/[09], loss: 4.8917, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-12:35:34 Iters: 400600/[09], loss: 5.5218, 
train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-12:37:37 Iters: 400700/[09], loss: 5.9868, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-12:39:40 Iters: 400800/[09], loss: 5.8604, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-12:41:43 Iters: 400900/[09], loss: 6.0762, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-12:43:47 Iters: 401000/[09], loss: 5.7878, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-12:45:50 Iters: 401100/[09], loss: 5.5151, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-12:47:53 Iters: 401200/[09], loss: 5.8962, train_accuracy: 0.1953, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-12:49:57 Iters: 401300/[09], loss: 5.5792, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-12:52:00 Iters: 401400/[09], loss: 5.2775, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-12:54:03 Iters: 401500/[09], loss: 5.3536, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-12:56:06 Iters: 401600/[09], loss: 6.1007, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-12:58:10 Iters: 401700/[09], loss: 5.7473, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-13:00:13 Iters: 401800/[09], loss: 5.5658, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-13:02:16 Iters: 401900/[09], loss: 5.9901, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-13:04:19 Iters: 402000/[09], loss: 6.1024, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-13:06:23 Iters: 402100/[09], loss: 5.6988, train_accuracy: 
0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-13:08:26 Iters: 402200/[09], loss: 5.0130, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-13:10:29 Iters: 402300/[09], loss: 5.3796, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-13:12:32 Iters: 402400/[09], loss: 6.2265, train_accuracy: 0.2109, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-13:14:36 Iters: 402500/[09], loss: 4.8971, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-13:16:39 Iters: 402600/[09], loss: 5.6254, train_accuracy: 0.2266, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-13:18:42 Iters: 402700/[09], loss: 5.3604, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-13:20:46 Iters: 402800/[09], loss: 4.7901, train_accuracy: 0.3594, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-13:22:49 Iters: 402900/[09], loss: 5.1113, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-13:24:52 Iters: 403000/[09], loss: 5.1681, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-13:26:56 Iters: 403100/[09], loss: 5.6791, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-13:28:59 Iters: 403200/[09], loss: 5.9430, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-13:31:02 Iters: 403300/[09], loss: 4.9324, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-13:33:06 Iters: 403400/[09], loss: 5.2111, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-13:35:09 Iters: 403500/[09], loss: 6.6720, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-13:37:12 Iters: 403600/[09], loss: 5.4041, train_accuracy: 0.3984, time: 1.23 
s/iter, learning rate: 0.005000000000000001 -20220706-13:39:15 Iters: 403700/[09], loss: 5.5886, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-13:41:19 Iters: 403800/[09], loss: 4.8532, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-13:43:22 Iters: 403900/[09], loss: 4.6402, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-13:45:25 Iters: 404000/[09], loss: 5.4390, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-13:47:28 Iters: 404100/[09], loss: 5.5043, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-13:49:31 Iters: 404200/[09], loss: 5.7077, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-13:51:35 Iters: 404300/[09], loss: 5.3110, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-13:53:38 Iters: 404400/[09], loss: 5.3880, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-13:55:41 Iters: 404500/[09], loss: 5.3982, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-13:57:44 Iters: 404600/[09], loss: 5.7917, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-13:59:48 Iters: 404700/[09], loss: 5.7315, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-14:01:51 Iters: 404800/[09], loss: 5.5307, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-14:03:54 Iters: 404900/[09], loss: 4.9972, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-14:05:57 Iters: 405000/[09], loss: 4.7727, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-14:08:01 Iters: 405100/[09], loss: 5.1617, train_accuracy: 0.3203, time: 1.23 s/iter, learning 
rate: 0.005000000000000001 -20220706-14:10:04 Iters: 405200/[09], loss: 5.0877, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-14:12:07 Iters: 405300/[09], loss: 6.0646, train_accuracy: 0.2266, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-14:14:10 Iters: 405400/[09], loss: 5.7159, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-14:16:14 Iters: 405500/[09], loss: 5.6758, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-14:18:17 Iters: 405600/[09], loss: 4.8455, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-14:20:20 Iters: 405700/[09], loss: 5.0198, train_accuracy: 0.3750, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-14:22:23 Iters: 405800/[09], loss: 5.3100, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-14:24:27 Iters: 405900/[09], loss: 5.0689, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-14:26:30 Iters: 406000/[09], loss: 4.8936, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-14:28:33 Iters: 406100/[09], loss: 4.9192, train_accuracy: 0.3750, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-14:30:37 Iters: 406200/[09], loss: 5.1534, train_accuracy: 0.3906, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-14:32:40 Iters: 406300/[09], loss: 5.1381, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-14:34:43 Iters: 406400/[09], loss: 4.9971, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-14:36:46 Iters: 406500/[09], loss: 5.2562, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-14:38:50 Iters: 406600/[09], loss: 5.8395, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220706-14:40:53 Iters: 406700/[09], loss: 6.0199, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-14:42:56 Iters: 406800/[09], loss: 5.4056, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-14:44:59 Iters: 406900/[09], loss: 5.4879, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-14:47:03 Iters: 407000/[09], loss: 4.9335, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-14:49:06 Iters: 407100/[09], loss: 5.5787, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-14:51:09 Iters: 407200/[09], loss: 5.1057, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-14:53:12 Iters: 407300/[09], loss: 6.0883, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-14:55:16 Iters: 407400/[09], loss: 5.8218, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-14:57:19 Iters: 407500/[09], loss: 5.5388, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-14:59:22 Iters: 407600/[09], loss: 5.0292, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-15:01:26 Iters: 407700/[09], loss: 5.6816, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-15:03:29 Iters: 407800/[09], loss: 4.9271, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-15:05:32 Iters: 407900/[09], loss: 4.8813, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-15:07:35 Iters: 408000/[09], loss: 5.2784, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-15:09:39 Iters: 408100/[09], loss: 5.9947, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220706-15:11:42 Iters: 408200/[09], loss: 5.4926, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-15:13:45 Iters: 408300/[09], loss: 5.7092, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-15:15:48 Iters: 408400/[09], loss: 5.7083, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-15:17:52 Iters: 408500/[09], loss: 5.7913, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-15:19:55 Iters: 408600/[09], loss: 5.0983, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-15:21:58 Iters: 408700/[09], loss: 5.3800, train_accuracy: 0.2109, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-15:24:01 Iters: 408800/[09], loss: 6.0694, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-15:26:05 Iters: 408900/[09], loss: 5.5017, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-15:28:08 Iters: 409000/[09], loss: 5.5765, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-15:30:11 Iters: 409100/[09], loss: 5.6460, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-15:32:14 Iters: 409200/[09], loss: 5.0200, train_accuracy: 0.3594, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-15:34:18 Iters: 409300/[09], loss: 5.7850, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-15:36:21 Iters: 409400/[09], loss: 5.1829, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-15:36:32 Train Epoch: 10/18 ... 
-20220706-15:38:24 Iters: 409500/[10], loss: 5.0457, train_accuracy: 0.3594, time: 1.11 s/iter, learning rate: 0.005000000000000001 -20220706-15:40:27 Iters: 409600/[10], loss: 5.5892, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-15:42:30 Iters: 409700/[10], loss: 6.0127, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-15:44:34 Iters: 409800/[10], loss: 5.2521, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-15:46:37 Iters: 409900/[10], loss: 5.2989, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-15:48:40 Iters: 410000/[10], loss: 5.1748, train_accuracy: 0.3750, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-15:48:40 Saving checkpoint: 410000 -20220706-15:49:56 LFW Ave Accuracy: 99.4499 -20220706-15:51:11 AgeDB-30 Ave Accuracy: 96.3833 -20220706-15:52:37 CFP-FP Ave Accuracy: 93.4714 -20220706-15:52:37 Current Best Accuracy: LFW: 99.6000 in iters: 400000, AgeDB-30: 96.5000 in iters: 400000 and CFP-FP: 93.6143 in iters: 370000 -20220706-15:54:39 Iters: 410100/[10], loss: 4.8597, train_accuracy: 0.4062, time: 3.59 s/iter, learning rate: 0.005000000000000001 -20220706-15:56:43 Iters: 410200/[10], loss: 5.3681, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-15:58:46 Iters: 410300/[10], loss: 4.8542, train_accuracy: 0.3750, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-16:00:49 Iters: 410400/[10], loss: 5.9834, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-16:02:52 Iters: 410500/[10], loss: 5.6225, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-16:04:55 Iters: 410600/[10], loss: 5.4916, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-16:06:59 Iters: 410700/[10], loss: 4.3947, train_accuracy: 0.3438, time: 
1.23 s/iter, learning rate: 0.005000000000000001 -20220706-16:09:02 Iters: 410800/[10], loss: 5.3716, train_accuracy: 0.3594, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-16:11:05 Iters: 410900/[10], loss: 5.7001, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-16:13:08 Iters: 411000/[10], loss: 5.7705, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-16:15:11 Iters: 411100/[10], loss: 5.3877, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-16:17:15 Iters: 411200/[10], loss: 5.6250, train_accuracy: 0.2188, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-16:19:18 Iters: 411300/[10], loss: 5.7684, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-16:21:21 Iters: 411400/[10], loss: 4.6922, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-16:23:24 Iters: 411500/[10], loss: 5.5796, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-16:25:28 Iters: 411600/[10], loss: 5.5304, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-16:27:31 Iters: 411700/[10], loss: 5.9617, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-16:29:34 Iters: 411800/[10], loss: 5.6162, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-16:31:38 Iters: 411900/[10], loss: 6.2973, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-16:33:41 Iters: 412000/[10], loss: 5.4273, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-16:35:44 Iters: 412100/[10], loss: 4.6885, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-16:37:48 Iters: 412200/[10], loss: 5.8006, train_accuracy: 0.3047, time: 1.23 s/iter, 
learning rate: 0.005000000000000001 -20220706-16:39:51 Iters: 412300/[10], loss: 5.2889, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-16:41:54 Iters: 412400/[10], loss: 4.9443, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-16:43:58 Iters: 412500/[10], loss: 5.2830, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-16:46:01 Iters: 412600/[10], loss: 5.8878, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-16:48:04 Iters: 412700/[10], loss: 5.6555, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-16:50:08 Iters: 412800/[10], loss: 5.4405, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-16:52:11 Iters: 412900/[10], loss: 5.0768, train_accuracy: 0.3906, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-16:54:14 Iters: 413000/[10], loss: 5.6756, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-16:56:18 Iters: 413100/[10], loss: 5.4767, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-16:58:21 Iters: 413200/[10], loss: 4.9451, train_accuracy: 0.3750, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-17:00:24 Iters: 413300/[10], loss: 5.5415, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-17:02:28 Iters: 413400/[10], loss: 4.8126, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-17:04:31 Iters: 413500/[10], loss: 4.8262, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-17:06:34 Iters: 413600/[10], loss: 5.2721, train_accuracy: 0.3828, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-17:08:38 Iters: 413700/[10], loss: 5.3051, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220706-17:10:41 Iters: 413800/[10], loss: 5.5185, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-17:12:44 Iters: 413900/[10], loss: 4.9846, train_accuracy: 0.3594, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-17:14:48 Iters: 414000/[10], loss: 5.3912, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-17:16:51 Iters: 414100/[10], loss: 4.9149, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-17:18:54 Iters: 414200/[10], loss: 5.8842, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-17:20:58 Iters: 414300/[10], loss: 4.9835, train_accuracy: 0.3594, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-17:23:01 Iters: 414400/[10], loss: 5.5868, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-17:25:05 Iters: 414500/[10], loss: 5.7910, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-17:27:08 Iters: 414600/[10], loss: 6.1907, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-17:29:11 Iters: 414700/[10], loss: 5.8821, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-17:31:15 Iters: 414800/[10], loss: 5.0830, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-17:33:18 Iters: 414900/[10], loss: 5.8238, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-17:35:21 Iters: 415000/[10], loss: 5.4917, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-17:37:25 Iters: 415100/[10], loss: 5.5507, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-17:39:28 Iters: 415200/[10], loss: 5.7481, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220706-17:41:31 Iters: 415300/[10], loss: 5.7304, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-17:43:34 Iters: 415400/[10], loss: 5.4939, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-17:45:38 Iters: 415500/[10], loss: 6.2315, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-17:47:41 Iters: 415600/[10], loss: 5.5945, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-17:49:44 Iters: 415700/[10], loss: 5.1529, train_accuracy: 0.3594, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-17:51:48 Iters: 415800/[10], loss: 5.6166, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-17:53:51 Iters: 415900/[10], loss: 4.8280, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-17:55:54 Iters: 416000/[10], loss: 6.3417, train_accuracy: 0.2031, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-17:57:58 Iters: 416100/[10], loss: 5.4308, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-18:00:01 Iters: 416200/[10], loss: 5.5883, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-18:02:04 Iters: 416300/[10], loss: 5.1247, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-18:04:07 Iters: 416400/[10], loss: 5.4634, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-18:06:11 Iters: 416500/[10], loss: 5.7429, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-18:08:14 Iters: 416600/[10], loss: 5.3009, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-18:10:17 Iters: 416700/[10], loss: 5.3162, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220706-18:12:20 Iters: 416800/[10], loss: 5.3562, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-18:14:24 Iters: 416900/[10], loss: 5.5669, train_accuracy: 0.2188, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-18:16:27 Iters: 417000/[10], loss: 5.6305, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-18:18:30 Iters: 417100/[10], loss: 5.5696, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-18:20:34 Iters: 417200/[10], loss: 5.3616, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-18:22:37 Iters: 417300/[10], loss: 5.1353, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-18:24:40 Iters: 417400/[10], loss: 5.2733, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-18:26:43 Iters: 417500/[10], loss: 5.9051, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-18:28:47 Iters: 417600/[10], loss: 5.3955, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-18:30:50 Iters: 417700/[10], loss: 4.8073, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-18:32:53 Iters: 417800/[10], loss: 4.8592, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-18:34:57 Iters: 417900/[10], loss: 5.7471, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-18:37:00 Iters: 418000/[10], loss: 5.3051, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-18:39:03 Iters: 418100/[10], loss: 6.0722, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-18:41:07 Iters: 418200/[10], loss: 5.1401, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220706-18:43:10 Iters: 418300/[10], loss: 4.5208, train_accuracy: 0.3594, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-18:45:13 Iters: 418400/[10], loss: 5.3545, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-18:47:17 Iters: 418500/[10], loss: 5.2110, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-18:49:20 Iters: 418600/[10], loss: 4.8760, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-18:51:23 Iters: 418700/[10], loss: 5.7682, train_accuracy: 0.2266, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-18:53:27 Iters: 418800/[10], loss: 4.4523, train_accuracy: 0.4062, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-18:55:30 Iters: 418900/[10], loss: 5.4311, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-18:57:33 Iters: 419000/[10], loss: 5.0484, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-18:59:37 Iters: 419100/[10], loss: 6.1850, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-19:01:40 Iters: 419200/[10], loss: 5.2808, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-19:03:43 Iters: 419300/[10], loss: 5.2522, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-19:05:46 Iters: 419400/[10], loss: 4.9348, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-19:07:50 Iters: 419500/[10], loss: 5.6573, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-19:09:53 Iters: 419600/[10], loss: 4.9686, train_accuracy: 0.3828, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-19:11:56 Iters: 419700/[10], loss: 5.8205, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220706-19:14:00 Iters: 419800/[10], loss: 6.6195, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-19:16:03 Iters: 419900/[10], loss: 5.0905, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-19:18:06 Iters: 420000/[10], loss: 4.8182, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-19:18:06 Saving checkpoint: 420000 -20220706-19:19:23 LFW Ave Accuracy: 99.5666 -20220706-19:20:38 AgeDB-30 Ave Accuracy: 96.4833 -20220706-19:22:04 CFP-FP Ave Accuracy: 93.4714 -20220706-19:22:04 Current Best Accuracy: LFW: 99.6000 in iters: 400000, AgeDB-30: 96.5000 in iters: 400000 and CFP-FP: 93.6143 in iters: 370000 -20220706-19:24:06 Iters: 420100/[10], loss: 5.6224, train_accuracy: 0.2734, time: 3.60 s/iter, learning rate: 0.005000000000000001 -20220706-19:26:09 Iters: 420200/[10], loss: 5.9759, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-19:28:13 Iters: 420300/[10], loss: 6.1261, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-19:30:16 Iters: 420400/[10], loss: 5.0073, train_accuracy: 0.3828, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-19:32:19 Iters: 420500/[10], loss: 5.7784, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-19:34:22 Iters: 420600/[10], loss: 5.7498, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-19:36:25 Iters: 420700/[10], loss: 5.9473, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-19:38:29 Iters: 420800/[10], loss: 5.2711, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-19:40:32 Iters: 420900/[10], loss: 5.9991, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-19:42:35 Iters: 421000/[10], loss: 5.4583, 
train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-19:44:38 Iters: 421100/[10], loss: 6.1255, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-19:46:41 Iters: 421200/[10], loss: 5.0115, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-19:48:45 Iters: 421300/[10], loss: 5.4572, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-19:50:48 Iters: 421400/[10], loss: 5.3579, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-19:52:51 Iters: 421500/[10], loss: 5.3984, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-19:54:54 Iters: 421600/[10], loss: 4.7527, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-19:56:57 Iters: 421700/[10], loss: 5.4051, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-19:59:01 Iters: 421800/[10], loss: 5.4448, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-20:01:04 Iters: 421900/[10], loss: 5.3415, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-20:03:07 Iters: 422000/[10], loss: 5.2869, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-20:05:11 Iters: 422100/[10], loss: 5.6934, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-20:07:14 Iters: 422200/[10], loss: 5.3704, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-20:09:17 Iters: 422300/[10], loss: 4.9547, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-20:11:20 Iters: 422400/[10], loss: 4.8532, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-20:13:24 Iters: 422500/[10], loss: 5.4365, train_accuracy: 
0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-20:15:27 Iters: 422600/[10], loss: 5.6926, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-20:17:30 Iters: 422700/[10], loss: 6.2134, train_accuracy: 0.2188, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-20:19:33 Iters: 422800/[10], loss: 5.6684, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-20:21:37 Iters: 422900/[10], loss: 5.4027, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-20:23:40 Iters: 423000/[10], loss: 6.7495, train_accuracy: 0.2188, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-20:25:43 Iters: 423100/[10], loss: 5.9770, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-20:27:47 Iters: 423200/[10], loss: 5.0763, train_accuracy: 0.3828, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-20:29:50 Iters: 423300/[10], loss: 6.1921, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-20:31:53 Iters: 423400/[10], loss: 4.4677, train_accuracy: 0.4219, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-20:33:56 Iters: 423500/[10], loss: 5.2827, train_accuracy: 0.2266, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-20:36:00 Iters: 423600/[10], loss: 5.5688, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-20:38:03 Iters: 423700/[10], loss: 5.0379, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-20:40:06 Iters: 423800/[10], loss: 5.2398, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-20:42:10 Iters: 423900/[10], loss: 5.9981, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-20:44:13 Iters: 424000/[10], loss: 5.3379, train_accuracy: 0.2656, time: 1.23 
s/iter, learning rate: 0.005000000000000001 -20220706-20:46:16 Iters: 424100/[10], loss: 5.6029, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-20:48:20 Iters: 424200/[10], loss: 6.0070, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-20:50:23 Iters: 424300/[10], loss: 6.0110, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-20:52:26 Iters: 424400/[10], loss: 5.5128, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-20:54:30 Iters: 424500/[10], loss: 6.1923, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-20:56:33 Iters: 424600/[10], loss: 5.2471, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-20:58:36 Iters: 424700/[10], loss: 5.5032, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-21:00:39 Iters: 424800/[10], loss: 5.5304, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-21:02:43 Iters: 424900/[10], loss: 5.9304, train_accuracy: 0.2188, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-21:04:46 Iters: 425000/[10], loss: 5.1989, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-21:06:49 Iters: 425100/[10], loss: 5.7595, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-21:08:52 Iters: 425200/[10], loss: 6.1795, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-21:10:56 Iters: 425300/[10], loss: 5.0385, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-21:12:59 Iters: 425400/[10], loss: 6.1391, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-21:15:02 Iters: 425500/[10], loss: 4.5581, train_accuracy: 0.3984, time: 1.23 s/iter, learning 
rate: 0.005000000000000001 -20220706-21:17:05 Iters: 425600/[10], loss: 5.5897, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-21:19:08 Iters: 425700/[10], loss: 4.6628, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-21:21:12 Iters: 425800/[10], loss: 5.0154, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-21:23:15 Iters: 425900/[10], loss: 4.7563, train_accuracy: 0.3984, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-21:25:18 Iters: 426000/[10], loss: 4.6036, train_accuracy: 0.4062, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-21:27:21 Iters: 426100/[10], loss: 6.5572, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-21:29:25 Iters: 426200/[10], loss: 6.0245, train_accuracy: 0.2109, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-21:31:28 Iters: 426300/[10], loss: 5.6614, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-21:33:31 Iters: 426400/[10], loss: 5.6308, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-21:35:34 Iters: 426500/[10], loss: 5.8432, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-21:37:37 Iters: 426600/[10], loss: 5.0584, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-21:39:41 Iters: 426700/[10], loss: 6.0196, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-21:41:44 Iters: 426800/[10], loss: 5.3123, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-21:43:47 Iters: 426900/[10], loss: 5.5056, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-21:45:50 Iters: 427000/[10], loss: 5.7236, train_accuracy: 0.3594, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220706-21:47:54 Iters: 427100/[10], loss: 4.4309, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-21:49:57 Iters: 427200/[10], loss: 5.4276, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-21:52:00 Iters: 427300/[10], loss: 5.4925, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-21:54:03 Iters: 427400/[10], loss: 5.5479, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-21:56:06 Iters: 427500/[10], loss: 5.3216, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-21:58:10 Iters: 427600/[10], loss: 5.4297, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-22:00:13 Iters: 427700/[10], loss: 5.5272, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-22:02:16 Iters: 427800/[10], loss: 5.6229, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-22:04:19 Iters: 427900/[10], loss: 4.8555, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-22:06:23 Iters: 428000/[10], loss: 4.8998, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-22:08:26 Iters: 428100/[10], loss: 6.0304, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-22:10:29 Iters: 428200/[10], loss: 5.5000, train_accuracy: 0.4062, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-22:12:32 Iters: 428300/[10], loss: 4.4052, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-22:14:35 Iters: 428400/[10], loss: 4.8911, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-22:16:39 Iters: 428500/[10], loss: 5.1885, train_accuracy: 0.3594, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220706-22:18:42 Iters: 428600/[10], loss: 6.9416, train_accuracy: 0.2109, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-22:20:45 Iters: 428700/[10], loss: 6.3854, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-22:22:48 Iters: 428800/[10], loss: 5.3711, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-22:24:52 Iters: 428900/[10], loss: 5.1874, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-22:26:55 Iters: 429000/[10], loss: 5.6402, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-22:28:58 Iters: 429100/[10], loss: 6.0797, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-22:31:01 Iters: 429200/[10], loss: 5.1128, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-22:33:05 Iters: 429300/[10], loss: 6.0303, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-22:35:08 Iters: 429400/[10], loss: 5.0005, train_accuracy: 0.3750, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-22:37:11 Iters: 429500/[10], loss: 5.1500, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-22:39:14 Iters: 429600/[10], loss: 5.8151, train_accuracy: 0.2109, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-22:41:18 Iters: 429700/[10], loss: 5.3351, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-22:43:21 Iters: 429800/[10], loss: 5.9651, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-22:45:24 Iters: 429900/[10], loss: 5.5370, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-22:47:28 Iters: 430000/[10], loss: 5.7774, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220706-22:47:28 Saving checkpoint: 430000 -20220706-22:48:44 LFW Ave Accuracy: 99.5666 -20220706-22:49:59 AgeDB-30 Ave Accuracy: 96.2667 -20220706-22:51:26 CFP-FP Ave Accuracy: 93.3714 -20220706-22:51:26 Current Best Accuracy: LFW: 99.6000 in iters: 400000, AgeDB-30: 96.5000 in iters: 400000 and CFP-FP: 93.6143 in iters: 370000 -20220706-22:53:28 Iters: 430100/[10], loss: 5.9526, train_accuracy: 0.2500, time: 3.61 s/iter, learning rate: 0.005000000000000001 -20220706-22:55:32 Iters: 430200/[10], loss: 5.0146, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-22:57:35 Iters: 430300/[10], loss: 5.2617, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-22:59:38 Iters: 430400/[10], loss: 5.2432, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-23:01:41 Iters: 430500/[10], loss: 5.2612, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-23:03:45 Iters: 430600/[10], loss: 5.3561, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-23:05:48 Iters: 430700/[10], loss: 5.5446, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-23:07:51 Iters: 430800/[10], loss: 5.7679, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-23:09:54 Iters: 430900/[10], loss: 5.4063, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-23:11:58 Iters: 431000/[10], loss: 5.3731, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-23:14:01 Iters: 431100/[10], loss: 5.4812, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-23:16:04 Iters: 431200/[10], loss: 6.4585, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-23:18:07 Iters: 431300/[10], loss: 5.4412, 
train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-23:20:10 Iters: 431400/[10], loss: 5.6861, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-23:22:14 Iters: 431500/[10], loss: 5.7636, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-23:24:17 Iters: 431600/[10], loss: 4.8788, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-23:26:20 Iters: 431700/[10], loss: 4.8694, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-23:28:23 Iters: 431800/[10], loss: 5.0738, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-23:30:27 Iters: 431900/[10], loss: 5.3883, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-23:32:30 Iters: 432000/[10], loss: 4.5440, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-23:34:33 Iters: 432100/[10], loss: 4.7267, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-23:36:37 Iters: 432200/[10], loss: 4.9080, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-23:38:40 Iters: 432300/[10], loss: 5.2654, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-23:40:43 Iters: 432400/[10], loss: 5.9735, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-23:42:46 Iters: 432500/[10], loss: 5.2543, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-23:44:50 Iters: 432600/[10], loss: 4.9036, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-23:46:53 Iters: 432700/[10], loss: 5.5385, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-23:48:56 Iters: 432800/[10], loss: 5.5562, train_accuracy: 
0.2344, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-23:50:59 Iters: 432900/[10], loss: 5.4575, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-23:53:03 Iters: 433000/[10], loss: 5.9738, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-23:55:06 Iters: 433100/[10], loss: 5.7692, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-23:57:09 Iters: 433200/[10], loss: 5.4205, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220706-23:59:12 Iters: 433300/[10], loss: 5.2734, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-00:01:16 Iters: 433400/[10], loss: 4.5723, train_accuracy: 0.3828, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-00:03:19 Iters: 433500/[10], loss: 5.7110, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-00:05:22 Iters: 433600/[10], loss: 5.2973, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-00:07:25 Iters: 433700/[10], loss: 5.9982, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-00:09:29 Iters: 433800/[10], loss: 5.6202, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-00:11:32 Iters: 433900/[10], loss: 5.7328, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-00:13:35 Iters: 434000/[10], loss: 5.7594, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-00:15:38 Iters: 434100/[10], loss: 5.2584, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-00:17:41 Iters: 434200/[10], loss: 5.4536, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-00:19:45 Iters: 434300/[10], loss: 5.7136, train_accuracy: 0.3750, time: 1.23 
s/iter, learning rate: 0.005000000000000001 -20220707-00:21:48 Iters: 434400/[10], loss: 5.3277, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-00:23:51 Iters: 434500/[10], loss: 4.1458, train_accuracy: 0.3984, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-00:25:54 Iters: 434600/[10], loss: 5.1500, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-00:27:58 Iters: 434700/[10], loss: 4.6567, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-00:30:01 Iters: 434800/[10], loss: 4.6666, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-00:32:04 Iters: 434900/[10], loss: 6.0473, train_accuracy: 0.2188, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-00:34:07 Iters: 435000/[10], loss: 5.6752, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-00:36:11 Iters: 435100/[10], loss: 5.9353, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-00:38:14 Iters: 435200/[10], loss: 5.5168, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-00:40:17 Iters: 435300/[10], loss: 5.3897, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-00:42:20 Iters: 435400/[10], loss: 4.8080, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-00:44:24 Iters: 435500/[10], loss: 5.2355, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-00:46:27 Iters: 435600/[10], loss: 4.8099, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-00:48:30 Iters: 435700/[10], loss: 5.8653, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-00:50:33 Iters: 435800/[10], loss: 5.6802, train_accuracy: 0.3359, time: 1.23 s/iter, learning 
rate: 0.005000000000000001 -20220707-00:52:37 Iters: 435900/[10], loss: 4.5188, train_accuracy: 0.3828, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-00:54:40 Iters: 436000/[10], loss: 5.8713, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-00:56:43 Iters: 436100/[10], loss: 5.1176, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-00:58:46 Iters: 436200/[10], loss: 5.2141, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-01:00:50 Iters: 436300/[10], loss: 5.8085, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-01:02:53 Iters: 436400/[10], loss: 5.1207, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-01:04:56 Iters: 436500/[10], loss: 4.6079, train_accuracy: 0.3750, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-01:06:59 Iters: 436600/[10], loss: 5.7074, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-01:09:02 Iters: 436700/[10], loss: 5.7936, train_accuracy: 0.2031, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-01:11:06 Iters: 436800/[10], loss: 5.1693, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-01:13:09 Iters: 436900/[10], loss: 5.8711, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-01:15:12 Iters: 437000/[10], loss: 4.7768, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-01:17:15 Iters: 437100/[10], loss: 6.2540, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-01:19:19 Iters: 437200/[10], loss: 5.8083, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-01:21:22 Iters: 437300/[10], loss: 5.6117, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220707-01:23:25 Iters: 437400/[10], loss: 5.1115, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-01:25:28 Iters: 437500/[10], loss: 5.3578, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-01:27:31 Iters: 437600/[10], loss: 4.8013, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-01:29:35 Iters: 437700/[10], loss: 4.6142, train_accuracy: 0.3828, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-01:31:38 Iters: 437800/[10], loss: 5.5412, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-01:33:41 Iters: 437900/[10], loss: 4.5387, train_accuracy: 0.3594, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-01:35:44 Iters: 438000/[10], loss: 5.1610, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-01:37:47 Iters: 438100/[10], loss: 5.7838, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-01:39:51 Iters: 438200/[10], loss: 5.3226, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-01:41:54 Iters: 438300/[10], loss: 5.3137, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-01:43:57 Iters: 438400/[10], loss: 5.3009, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-01:46:00 Iters: 438500/[10], loss: 5.4739, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-01:48:04 Iters: 438600/[10], loss: 5.2809, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-01:50:07 Iters: 438700/[10], loss: 5.5804, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-01:52:10 Iters: 438800/[10], loss: 5.4815, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220707-01:54:13 Iters: 438900/[10], loss: 5.4328, train_accuracy: 0.3828, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-01:56:17 Iters: 439000/[10], loss: 5.6760, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-01:58:20 Iters: 439100/[10], loss: 5.9833, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-02:00:23 Iters: 439200/[10], loss: 6.2226, train_accuracy: 0.2031, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-02:02:27 Iters: 439300/[10], loss: 5.2247, train_accuracy: 0.3594, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-02:04:30 Iters: 439400/[10], loss: 6.1390, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-02:06:33 Iters: 439500/[10], loss: 5.9796, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-02:08:36 Iters: 439600/[10], loss: 5.2972, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-02:10:40 Iters: 439700/[10], loss: 4.9527, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-02:12:43 Iters: 439800/[10], loss: 5.5583, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-02:14:46 Iters: 439900/[10], loss: 5.2502, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-02:16:49 Iters: 440000/[10], loss: 5.8688, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-02:16:49 Saving checkpoint: 440000 -20220707-02:18:08 LFW Ave Accuracy: 99.5666 -20220707-02:19:26 AgeDB-30 Ave Accuracy: 96.4833 -20220707-02:20:56 CFP-FP Ave Accuracy: 93.1286 -20220707-02:20:56 Current Best Accuracy: LFW: 99.6000 in iters: 400000, AgeDB-30: 96.5000 in iters: 400000 and CFP-FP: 93.6143 in iters: 370000 -20220707-02:22:58 Iters: 440100/[10], loss: 5.4992, 
train_accuracy: 0.3203, time: 3.69 s/iter, learning rate: 0.005000000000000001 -20220707-02:25:01 Iters: 440200/[10], loss: 5.4902, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-02:27:04 Iters: 440300/[10], loss: 5.5896, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-02:29:08 Iters: 440400/[10], loss: 5.0429, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-02:31:11 Iters: 440500/[10], loss: 5.6236, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-02:33:14 Iters: 440600/[10], loss: 5.7949, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-02:35:17 Iters: 440700/[10], loss: 4.9597, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-02:37:21 Iters: 440800/[10], loss: 5.5153, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-02:39:24 Iters: 440900/[10], loss: 5.0931, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-02:41:27 Iters: 441000/[10], loss: 5.6699, train_accuracy: 0.2266, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-02:43:30 Iters: 441100/[10], loss: 5.1997, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-02:45:33 Iters: 441200/[10], loss: 5.6288, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-02:47:36 Iters: 441300/[10], loss: 5.8093, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-02:49:40 Iters: 441400/[10], loss: 5.3489, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-02:51:43 Iters: 441500/[10], loss: 6.1258, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-02:53:46 Iters: 441600/[10], loss: 5.4471, train_accuracy: 
0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-02:55:49 Iters: 441700/[10], loss: 5.4418, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-02:57:53 Iters: 441800/[10], loss: 5.0724, train_accuracy: 0.4062, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-02:59:56 Iters: 441900/[10], loss: 5.2068, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-03:01:59 Iters: 442000/[10], loss: 5.5070, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-03:04:02 Iters: 442100/[10], loss: 5.3850, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-03:06:06 Iters: 442200/[10], loss: 5.5049, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-03:08:09 Iters: 442300/[10], loss: 5.8753, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-03:10:12 Iters: 442400/[10], loss: 5.6120, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-03:12:15 Iters: 442500/[10], loss: 5.7885, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-03:14:18 Iters: 442600/[10], loss: 5.0939, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-03:16:22 Iters: 442700/[10], loss: 5.4886, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-03:18:25 Iters: 442800/[10], loss: 5.4018, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-03:20:28 Iters: 442900/[10], loss: 5.1621, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-03:22:31 Iters: 443000/[10], loss: 5.9087, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-03:24:34 Iters: 443100/[10], loss: 5.8460, train_accuracy: 0.2500, time: 1.23 
s/iter, learning rate: 0.005000000000000001 -20220707-03:26:38 Iters: 443200/[10], loss: 5.8675, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-03:28:41 Iters: 443300/[10], loss: 5.6686, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-03:30:44 Iters: 443400/[10], loss: 5.4034, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-03:32:47 Iters: 443500/[10], loss: 5.7127, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-03:34:50 Iters: 443600/[10], loss: 5.5534, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-03:36:53 Iters: 443700/[10], loss: 5.5810, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-03:38:57 Iters: 443800/[10], loss: 5.8231, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-03:41:00 Iters: 443900/[10], loss: 5.7726, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-03:43:03 Iters: 444000/[10], loss: 5.1132, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-03:45:06 Iters: 444100/[10], loss: 4.8909, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-03:47:10 Iters: 444200/[10], loss: 5.4852, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-03:49:13 Iters: 444300/[10], loss: 5.7494, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-03:51:16 Iters: 444400/[10], loss: 5.8351, train_accuracy: 0.2188, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-03:53:20 Iters: 444500/[10], loss: 5.0593, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-03:55:23 Iters: 444600/[10], loss: 5.6208, train_accuracy: 0.2891, time: 1.23 s/iter, learning 
rate: 0.005000000000000001 -20220707-03:57:26 Iters: 444700/[10], loss: 5.7229, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-03:59:29 Iters: 444800/[10], loss: 5.5814, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-04:01:32 Iters: 444900/[10], loss: 5.2486, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-04:03:36 Iters: 445000/[10], loss: 5.7065, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-04:05:39 Iters: 445100/[10], loss: 5.9253, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-04:07:42 Iters: 445200/[10], loss: 6.2401, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-04:09:45 Iters: 445300/[10], loss: 4.8413, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-04:11:49 Iters: 445400/[10], loss: 5.6093, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-04:13:52 Iters: 445500/[10], loss: 5.5055, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-04:15:55 Iters: 445600/[10], loss: 5.8562, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-04:17:58 Iters: 445700/[10], loss: 5.4982, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-04:20:02 Iters: 445800/[10], loss: 5.9663, train_accuracy: 0.2266, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-04:22:05 Iters: 445900/[10], loss: 5.7719, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-04:24:08 Iters: 446000/[10], loss: 5.9701, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-04:26:11 Iters: 446100/[10], loss: 5.2207, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220707-04:28:15 Iters: 446200/[10], loss: 5.6443, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-04:30:18 Iters: 446300/[10], loss: 4.8850, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-04:32:22 Iters: 446400/[10], loss: 5.3745, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-04:34:25 Iters: 446500/[10], loss: 6.3054, train_accuracy: 0.2188, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-04:36:28 Iters: 446600/[10], loss: 4.9380, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-04:38:31 Iters: 446700/[10], loss: 5.4593, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-04:40:35 Iters: 446800/[10], loss: 6.1816, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-04:42:38 Iters: 446900/[10], loss: 5.1790, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-04:44:42 Iters: 447000/[10], loss: 5.9051, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-04:46:45 Iters: 447100/[10], loss: 5.8669, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-04:48:48 Iters: 447200/[10], loss: 5.5288, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-04:50:51 Iters: 447300/[10], loss: 4.8230, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-04:52:55 Iters: 447400/[10], loss: 4.9854, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-04:54:58 Iters: 447500/[10], loss: 5.3959, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-04:57:01 Iters: 447600/[10], loss: 5.5825, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220707-04:59:04 Iters: 447700/[10], loss: 4.9348, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-05:01:08 Iters: 447800/[10], loss: 5.4034, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-05:03:11 Iters: 447900/[10], loss: 5.6726, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-05:05:14 Iters: 448000/[10], loss: 5.7430, train_accuracy: 0.2266, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-05:07:17 Iters: 448100/[10], loss: 5.8581, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-05:09:21 Iters: 448200/[10], loss: 5.8431, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-05:11:24 Iters: 448300/[10], loss: 5.1941, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-05:13:27 Iters: 448400/[10], loss: 5.7917, train_accuracy: 0.2109, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-05:15:30 Iters: 448500/[10], loss: 5.4011, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-05:17:33 Iters: 448600/[10], loss: 5.8368, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-05:19:37 Iters: 448700/[10], loss: 4.5112, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-05:21:40 Iters: 448800/[10], loss: 5.5718, train_accuracy: 0.2031, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-05:23:43 Iters: 448900/[10], loss: 5.2615, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-05:25:46 Iters: 449000/[10], loss: 5.4319, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-05:27:49 Iters: 449100/[10], loss: 5.5263, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220707-05:29:53 Iters: 449200/[10], loss: 5.3030, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-05:31:56 Iters: 449300/[10], loss: 5.1550, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-05:33:59 Iters: 449400/[10], loss: 6.2430, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-05:36:02 Iters: 449500/[10], loss: 5.7197, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-05:38:06 Iters: 449600/[10], loss: 5.0727, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-05:40:09 Iters: 449700/[10], loss: 6.0503, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-05:42:12 Iters: 449800/[10], loss: 4.8430, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-05:44:15 Iters: 449900/[10], loss: 5.6025, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-05:46:19 Iters: 450000/[10], loss: 5.7591, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-05:46:19 Saving checkpoint: 450000 -20220707-05:47:35 LFW Ave Accuracy: 99.5666 -20220707-05:48:50 AgeDB-30 Ave Accuracy: 96.3000 -20220707-05:50:18 CFP-FP Ave Accuracy: 93.4143 -20220707-05:50:18 Current Best Accuracy: LFW: 99.6000 in iters: 400000, AgeDB-30: 96.5000 in iters: 400000 and CFP-FP: 93.6143 in iters: 370000 -20220707-05:52:20 Iters: 450100/[10], loss: 4.9451, train_accuracy: 0.2812, time: 3.61 s/iter, learning rate: 0.005000000000000001 -20220707-05:54:23 Iters: 450200/[10], loss: 5.0982, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-05:56:26 Iters: 450300/[10], loss: 5.3657, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-05:58:29 Iters: 450400/[10], loss: 5.1253, 
train_accuracy: 0.3594, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-06:00:33 Iters: 450500/[10], loss: 5.6830, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-06:02:36 Iters: 450600/[10], loss: 5.0742, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-06:04:39 Iters: 450700/[10], loss: 5.6950, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-06:06:42 Iters: 450800/[10], loss: 5.7861, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-06:08:46 Iters: 450900/[10], loss: 5.1682, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-06:10:49 Iters: 451000/[10], loss: 5.3210, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-06:12:52 Iters: 451100/[10], loss: 4.6802, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-06:14:55 Iters: 451200/[10], loss: 5.2049, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-06:16:59 Iters: 451300/[10], loss: 5.5625, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-06:19:02 Iters: 451400/[10], loss: 4.9593, train_accuracy: 0.3828, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-06:21:05 Iters: 451500/[10], loss: 5.4732, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-06:23:08 Iters: 451600/[10], loss: 5.6993, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-06:25:11 Iters: 451700/[10], loss: 5.5274, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-06:27:15 Iters: 451800/[10], loss: 4.9427, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-06:29:18 Iters: 451900/[10], loss: 5.3715, train_accuracy: 
0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-06:31:21 Iters: 452000/[10], loss: 5.2376, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-06:33:24 Iters: 452100/[10], loss: 5.6387, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-06:35:28 Iters: 452200/[10], loss: 5.1017, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-06:37:31 Iters: 452300/[10], loss: 5.9462, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-06:39:34 Iters: 452400/[10], loss: 5.9375, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-06:41:38 Iters: 452500/[10], loss: 6.6783, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-06:43:41 Iters: 452600/[10], loss: 5.2298, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-06:45:44 Iters: 452700/[10], loss: 5.6890, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-06:47:47 Iters: 452800/[10], loss: 5.6504, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-06:49:51 Iters: 452900/[10], loss: 5.2245, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-06:51:54 Iters: 453000/[10], loss: 5.5724, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-06:53:57 Iters: 453100/[10], loss: 5.8387, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-06:56:00 Iters: 453200/[10], loss: 6.0830, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-06:58:04 Iters: 453300/[10], loss: 5.3293, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-07:00:07 Iters: 453400/[10], loss: 5.6070, train_accuracy: 0.3438, time: 1.23 
s/iter, learning rate: 0.005000000000000001 -20220707-07:02:10 Iters: 453500/[10], loss: 4.9226, train_accuracy: 0.4062, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-07:04:14 Iters: 453600/[10], loss: 5.5499, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-07:06:17 Iters: 453700/[10], loss: 5.5067, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-07:08:20 Iters: 453800/[10], loss: 5.8949, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-07:10:23 Iters: 453900/[10], loss: 5.3694, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-07:12:26 Iters: 454000/[10], loss: 5.6097, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-07:14:30 Iters: 454100/[10], loss: 5.6927, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-07:16:33 Iters: 454200/[10], loss: 5.3208, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-07:18:36 Iters: 454300/[10], loss: 5.7923, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-07:20:39 Iters: 454400/[10], loss: 4.8753, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-07:22:43 Iters: 454500/[10], loss: 4.9987, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-07:24:46 Iters: 454600/[10], loss: 4.8352, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-07:26:49 Iters: 454700/[10], loss: 5.2144, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-07:28:52 Iters: 454800/[10], loss: 4.8669, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-07:30:55 Iters: 454900/[10], loss: 5.1183, train_accuracy: 0.3934, time: 1.23 s/iter, learning 
rate: 0.005000000000000001 -20220707-07:30:55 Train Epoch: 11/18 ... -20220707-07:32:59 Iters: 455000/[11], loss: 4.8943, train_accuracy: 0.3906, time: 1.24 s/iter, learning rate: 0.005000000000000001 -20220707-07:35:02 Iters: 455100/[11], loss: 4.8602, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-07:37:06 Iters: 455200/[11], loss: 6.1110, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-07:39:09 Iters: 455300/[11], loss: 5.1687, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-07:41:12 Iters: 455400/[11], loss: 5.0898, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-07:43:15 Iters: 455500/[11], loss: 5.3607, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-07:45:19 Iters: 455600/[11], loss: 5.4724, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-07:47:22 Iters: 455700/[11], loss: 5.4680, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-07:49:25 Iters: 455800/[11], loss: 5.2900, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-07:51:28 Iters: 455900/[11], loss: 5.7596, train_accuracy: 0.2188, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-07:53:32 Iters: 456000/[11], loss: 5.6499, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-07:55:35 Iters: 456100/[11], loss: 4.9632, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-07:57:38 Iters: 456200/[11], loss: 4.9952, train_accuracy: 0.3594, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-07:59:41 Iters: 456300/[11], loss: 4.8447, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-08:01:45 Iters: 456400/[11], loss: 4.6903, train_accuracy: 0.3516, time: 
1.23 s/iter, learning rate: 0.005000000000000001 -20220707-08:03:48 Iters: 456500/[11], loss: 5.1954, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-08:05:51 Iters: 456600/[11], loss: 4.8599, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-08:07:54 Iters: 456700/[11], loss: 5.1432, train_accuracy: 0.3594, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-08:09:57 Iters: 456800/[11], loss: 5.2344, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-08:12:01 Iters: 456900/[11], loss: 5.5326, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-08:14:04 Iters: 457000/[11], loss: 5.7714, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-08:16:07 Iters: 457100/[11], loss: 5.5870, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-08:18:10 Iters: 457200/[11], loss: 5.1863, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-08:20:13 Iters: 457300/[11], loss: 5.2186, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-08:22:17 Iters: 457400/[11], loss: 5.8401, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-08:24:20 Iters: 457500/[11], loss: 5.3295, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-08:26:23 Iters: 457600/[11], loss: 5.5744, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-08:28:26 Iters: 457700/[11], loss: 5.1260, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-08:30:30 Iters: 457800/[11], loss: 5.1611, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-08:32:33 Iters: 457900/[11], loss: 5.1791, train_accuracy: 0.2734, time: 1.23 s/iter, 
learning rate: 0.005000000000000001 -20220707-08:34:36 Iters: 458000/[11], loss: 4.9432, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-08:36:39 Iters: 458100/[11], loss: 5.7908, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-08:38:42 Iters: 458200/[11], loss: 5.5581, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-08:40:46 Iters: 458300/[11], loss: 5.1261, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-08:42:49 Iters: 458400/[11], loss: 4.4755, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-08:44:52 Iters: 458500/[11], loss: 5.2684, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-08:46:55 Iters: 458600/[11], loss: 5.3747, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-08:48:58 Iters: 458700/[11], loss: 5.9042, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-08:51:02 Iters: 458800/[11], loss: 5.7349, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-08:53:05 Iters: 458900/[11], loss: 5.1520, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-08:55:08 Iters: 459000/[11], loss: 4.8220, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-08:57:11 Iters: 459100/[11], loss: 5.0201, train_accuracy: 0.3594, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-08:59:15 Iters: 459200/[11], loss: 5.2108, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-09:01:18 Iters: 459300/[11], loss: 5.3132, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-09:03:21 Iters: 459400/[11], loss: 5.3184, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220707-09:05:24 Iters: 459500/[11], loss: 5.1621, train_accuracy: 0.4062, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-09:07:28 Iters: 459600/[11], loss: 5.6613, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-09:09:31 Iters: 459700/[11], loss: 5.3161, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-09:11:34 Iters: 459800/[11], loss: 5.2968, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-09:13:38 Iters: 459900/[11], loss: 5.6364, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-09:15:41 Iters: 460000/[11], loss: 5.3161, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-09:15:41 Saving checkpoint: 460000 -20220707-09:16:57 LFW Ave Accuracy: 99.6333 -20220707-09:18:12 AgeDB-30 Ave Accuracy: 96.4667 -20220707-09:19:39 CFP-FP Ave Accuracy: 93.6000 -20220707-09:19:39 Current Best Accuracy: LFW: 99.6333 in iters: 460000, AgeDB-30: 96.5000 in iters: 400000 and CFP-FP: 93.6143 in iters: 370000 -20220707-09:21:42 Iters: 460100/[11], loss: 5.3516, train_accuracy: 0.3125, time: 3.61 s/iter, learning rate: 0.005000000000000001 -20220707-09:23:45 Iters: 460200/[11], loss: 4.6994, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-09:25:48 Iters: 460300/[11], loss: 5.5972, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-09:27:51 Iters: 460400/[11], loss: 5.5603, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-09:29:55 Iters: 460500/[11], loss: 6.2794, train_accuracy: 0.1875, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-09:31:58 Iters: 460600/[11], loss: 5.2358, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-09:34:01 Iters: 460700/[11], loss: 4.4456, 
train_accuracy: 0.3906, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-09:36:04 Iters: 460800/[11], loss: 6.2862, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-09:38:08 Iters: 460900/[11], loss: 5.3493, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-09:40:11 Iters: 461000/[11], loss: 5.2785, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-09:42:14 Iters: 461100/[11], loss: 5.2321, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-09:44:17 Iters: 461200/[11], loss: 4.9977, train_accuracy: 0.3828, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-09:46:21 Iters: 461300/[11], loss: 5.4082, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-09:48:24 Iters: 461400/[11], loss: 5.2672, train_accuracy: 0.3906, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-09:50:27 Iters: 461500/[11], loss: 4.4671, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-09:52:30 Iters: 461600/[11], loss: 5.2942, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-09:54:34 Iters: 461700/[11], loss: 6.0276, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-09:56:37 Iters: 461800/[11], loss: 5.4965, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-09:58:40 Iters: 461900/[11], loss: 5.0820, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-10:00:43 Iters: 462000/[11], loss: 5.7497, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-10:02:47 Iters: 462100/[11], loss: 5.3595, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-10:04:50 Iters: 462200/[11], loss: 5.3767, train_accuracy: 
0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-10:06:53 Iters: 462300/[11], loss: 4.9695, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-10:08:57 Iters: 462400/[11], loss: 5.0226, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-10:11:00 Iters: 462500/[11], loss: 5.8295, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-10:13:03 Iters: 462600/[11], loss: 4.5937, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-10:15:06 Iters: 462700/[11], loss: 5.5761, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-10:17:10 Iters: 462800/[11], loss: 5.2412, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-10:19:13 Iters: 462900/[11], loss: 5.8190, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-10:21:16 Iters: 463000/[11], loss: 4.9520, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-10:23:20 Iters: 463100/[11], loss: 5.6048, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-10:25:23 Iters: 463200/[11], loss: 5.3365, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-10:27:26 Iters: 463300/[11], loss: 4.9820, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-10:29:29 Iters: 463400/[11], loss: 5.3829, train_accuracy: 0.2266, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-10:31:33 Iters: 463500/[11], loss: 5.7681, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-10:33:36 Iters: 463600/[11], loss: 5.5300, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-10:35:39 Iters: 463700/[11], loss: 6.5945, train_accuracy: 0.2422, time: 1.23 
s/iter, learning rate: 0.005000000000000001 -20220707-10:37:42 Iters: 463800/[11], loss: 6.0673, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-10:39:45 Iters: 463900/[11], loss: 5.2466, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-10:41:49 Iters: 464000/[11], loss: 5.4698, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-10:43:52 Iters: 464100/[11], loss: 5.7191, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-10:45:55 Iters: 464200/[11], loss: 5.0310, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-10:47:58 Iters: 464300/[11], loss: 5.2304, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-10:50:02 Iters: 464400/[11], loss: 6.0047, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-10:52:05 Iters: 464500/[11], loss: 5.5150, train_accuracy: 0.2266, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-10:54:08 Iters: 464600/[11], loss: 5.2616, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-10:56:11 Iters: 464700/[11], loss: 5.4305, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-10:58:14 Iters: 464800/[11], loss: 5.1744, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-11:00:18 Iters: 464900/[11], loss: 4.7236, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-11:02:21 Iters: 465000/[11], loss: 5.7522, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-11:04:24 Iters: 465100/[11], loss: 5.4309, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-11:06:28 Iters: 465200/[11], loss: 5.4794, train_accuracy: 0.2500, time: 1.23 s/iter, learning 
rate: 0.005000000000000001 -20220707-11:08:31 Iters: 465300/[11], loss: 5.4277, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-11:10:34 Iters: 465400/[11], loss: 5.1723, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-11:12:37 Iters: 465500/[11], loss: 4.5351, train_accuracy: 0.3828, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-11:14:41 Iters: 465600/[11], loss: 6.2361, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-11:16:44 Iters: 465700/[11], loss: 5.3375, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-11:18:47 Iters: 465800/[11], loss: 6.0938, train_accuracy: 0.2109, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-11:20:50 Iters: 465900/[11], loss: 5.5825, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-11:22:54 Iters: 466000/[11], loss: 5.6493, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-11:24:57 Iters: 466100/[11], loss: 5.0642, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-11:27:00 Iters: 466200/[11], loss: 6.5985, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-11:29:04 Iters: 466300/[11], loss: 4.9649, train_accuracy: 0.3594, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-11:31:07 Iters: 466400/[11], loss: 4.5714, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-11:33:10 Iters: 466500/[11], loss: 4.9483, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-11:35:13 Iters: 466600/[11], loss: 4.9600, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-11:37:17 Iters: 466700/[11], loss: 4.6970, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220707-11:39:20 Iters: 466800/[11], loss: 4.8109, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-11:41:23 Iters: 466900/[11], loss: 5.6416, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-11:43:26 Iters: 467000/[11], loss: 5.1572, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-11:45:30 Iters: 467100/[11], loss: 5.8610, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-11:47:33 Iters: 467200/[11], loss: 5.1774, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-11:49:36 Iters: 467300/[11], loss: 5.4189, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-11:51:39 Iters: 467400/[11], loss: 6.2421, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-11:53:42 Iters: 467500/[11], loss: 5.3057, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-11:55:46 Iters: 467600/[11], loss: 5.3559, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-11:57:49 Iters: 467700/[11], loss: 5.8391, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-11:59:52 Iters: 467800/[11], loss: 5.5148, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-12:01:55 Iters: 467900/[11], loss: 5.6439, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-12:03:59 Iters: 468000/[11], loss: 5.2336, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-12:06:02 Iters: 468100/[11], loss: 5.2595, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-12:08:05 Iters: 468200/[11], loss: 5.6445, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220707-12:10:08 Iters: 468300/[11], loss: 5.3145, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-12:12:12 Iters: 468400/[11], loss: 5.5857, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-12:14:15 Iters: 468500/[11], loss: 5.5464, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-12:16:18 Iters: 468600/[11], loss: 5.2214, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-12:18:21 Iters: 468700/[11], loss: 5.3316, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-12:20:24 Iters: 468800/[11], loss: 5.3989, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-12:22:28 Iters: 468900/[11], loss: 5.1674, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-12:24:31 Iters: 469000/[11], loss: 5.0217, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-12:26:34 Iters: 469100/[11], loss: 5.8476, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-12:28:37 Iters: 469200/[11], loss: 4.8248, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-12:30:41 Iters: 469300/[11], loss: 5.8135, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-12:32:44 Iters: 469400/[11], loss: 5.6216, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-12:34:47 Iters: 469500/[11], loss: 5.5950, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-12:36:50 Iters: 469600/[11], loss: 5.9078, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-12:38:54 Iters: 469700/[11], loss: 6.3896, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220707-12:40:57 Iters: 469800/[11], loss: 5.3401, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-12:43:00 Iters: 469900/[11], loss: 5.4261, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-12:45:03 Iters: 470000/[11], loss: 4.9032, train_accuracy: 0.3594, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-12:45:03 Saving checkpoint: 470000 -20220707-12:46:21 LFW Ave Accuracy: 99.5833 -20220707-12:47:36 AgeDB-30 Ave Accuracy: 96.2500 -20220707-12:49:02 CFP-FP Ave Accuracy: 93.3571 -20220707-12:49:02 Current Best Accuracy: LFW: 99.6333 in iters: 460000, AgeDB-30: 96.5000 in iters: 400000 and CFP-FP: 93.6143 in iters: 370000 -20220707-12:51:05 Iters: 470100/[11], loss: 5.2432, train_accuracy: 0.3359, time: 3.62 s/iter, learning rate: 0.005000000000000001 -20220707-12:53:08 Iters: 470200/[11], loss: 6.0191, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-12:55:11 Iters: 470300/[11], loss: 5.3209, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-12:57:14 Iters: 470400/[11], loss: 5.2475, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-12:59:18 Iters: 470500/[11], loss: 5.0704, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-13:01:21 Iters: 470600/[11], loss: 5.9693, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-13:03:24 Iters: 470700/[11], loss: 4.9861, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-13:05:27 Iters: 470800/[11], loss: 4.5116, train_accuracy: 0.3906, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-13:07:30 Iters: 470900/[11], loss: 4.8953, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-13:09:34 Iters: 471000/[11], loss: 5.7794, 
train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-13:11:37 Iters: 471100/[11], loss: 4.8736, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-13:13:40 Iters: 471200/[11], loss: 6.3340, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-13:15:43 Iters: 471300/[11], loss: 5.4169, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-13:17:46 Iters: 471400/[11], loss: 5.6612, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-13:19:50 Iters: 471500/[11], loss: 6.2199, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-13:21:53 Iters: 471600/[11], loss: 5.6405, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-13:23:56 Iters: 471700/[11], loss: 5.7912, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-13:25:59 Iters: 471800/[11], loss: 5.3992, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-13:28:03 Iters: 471900/[11], loss: 5.6762, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-13:30:06 Iters: 472000/[11], loss: 5.8175, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-13:32:09 Iters: 472100/[11], loss: 6.4511, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-13:34:12 Iters: 472200/[11], loss: 5.4579, train_accuracy: 0.2266, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-13:36:16 Iters: 472300/[11], loss: 5.1851, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-13:38:19 Iters: 472400/[11], loss: 5.8080, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-13:40:22 Iters: 472500/[11], loss: 5.4829, train_accuracy: 
0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-13:42:25 Iters: 472600/[11], loss: 5.3643, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-13:44:29 Iters: 472700/[11], loss: 5.9461, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-13:46:32 Iters: 472800/[11], loss: 4.7108, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-13:48:35 Iters: 472900/[11], loss: 5.7660, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-13:50:39 Iters: 473000/[11], loss: 5.5376, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-13:52:42 Iters: 473100/[11], loss: 5.2145, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-13:54:45 Iters: 473200/[11], loss: 5.7302, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-13:56:48 Iters: 473300/[11], loss: 5.0993, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-13:58:52 Iters: 473400/[11], loss: 5.5379, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-14:00:55 Iters: 473500/[11], loss: 5.0441, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-14:02:58 Iters: 473600/[11], loss: 6.1266, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-14:05:01 Iters: 473700/[11], loss: 4.7934, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-14:07:05 Iters: 473800/[11], loss: 4.4065, train_accuracy: 0.4531, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-14:09:08 Iters: 473900/[11], loss: 5.6597, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-14:11:11 Iters: 474000/[11], loss: 5.1931, train_accuracy: 0.3438, time: 1.23 
s/iter, learning rate: 0.005000000000000001 -20220707-14:13:14 Iters: 474100/[11], loss: 5.9618, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-14:15:18 Iters: 474200/[11], loss: 4.9627, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-14:17:21 Iters: 474300/[11], loss: 6.3885, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-14:19:24 Iters: 474400/[11], loss: 4.9283, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-14:21:27 Iters: 474500/[11], loss: 6.0584, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-14:23:30 Iters: 474600/[11], loss: 5.0195, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-14:25:34 Iters: 474700/[11], loss: 5.2718, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-14:27:37 Iters: 474800/[11], loss: 5.7276, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-14:29:40 Iters: 474900/[11], loss: 5.3885, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-14:31:44 Iters: 475000/[11], loss: 4.9780, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-14:33:47 Iters: 475100/[11], loss: 4.9106, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-14:35:50 Iters: 475200/[11], loss: 4.6521, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-14:37:54 Iters: 475300/[11], loss: 5.5402, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-14:39:57 Iters: 475400/[11], loss: 4.8665, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-14:42:00 Iters: 475500/[11], loss: 5.0319, train_accuracy: 0.3359, time: 1.23 s/iter, learning 
rate: 0.005000000000000001 -20220707-14:44:03 Iters: 475600/[11], loss: 4.3321, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-14:46:06 Iters: 475700/[11], loss: 5.3769, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-14:48:10 Iters: 475800/[11], loss: 4.9730, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-14:50:13 Iters: 475900/[11], loss: 5.7272, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-14:52:16 Iters: 476000/[11], loss: 5.8109, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-14:54:20 Iters: 476100/[11], loss: 5.5683, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-14:56:23 Iters: 476200/[11], loss: 4.9717, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-14:58:26 Iters: 476300/[11], loss: 5.1953, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-15:00:29 Iters: 476400/[11], loss: 6.0561, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-15:02:33 Iters: 476500/[11], loss: 5.9453, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-15:04:36 Iters: 476600/[11], loss: 4.9705, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-15:06:39 Iters: 476700/[11], loss: 5.4648, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-15:08:43 Iters: 476800/[11], loss: 5.4359, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-15:10:46 Iters: 476900/[11], loss: 5.4936, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-15:12:49 Iters: 477000/[11], loss: 5.2615, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220707-15:14:52 Iters: 477100/[11], loss: 5.5160, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-15:16:55 Iters: 477200/[11], loss: 5.5415, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-15:18:59 Iters: 477300/[11], loss: 5.2350, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-15:21:02 Iters: 477400/[11], loss: 5.3306, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-15:23:05 Iters: 477500/[11], loss: 5.0943, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-15:25:09 Iters: 477600/[11], loss: 4.6953, train_accuracy: 0.4375, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-15:27:12 Iters: 477700/[11], loss: 5.8385, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-15:29:15 Iters: 477800/[11], loss: 5.8648, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-15:31:18 Iters: 477900/[11], loss: 5.5083, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-15:33:22 Iters: 478000/[11], loss: 5.6888, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-15:35:25 Iters: 478100/[11], loss: 4.5802, train_accuracy: 0.3828, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-15:37:28 Iters: 478200/[11], loss: 4.8672, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-15:39:32 Iters: 478300/[11], loss: 5.5394, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-15:41:35 Iters: 478400/[11], loss: 6.0360, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-15:43:38 Iters: 478500/[11], loss: 5.6109, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220707-15:45:41 Iters: 478600/[11], loss: 5.5452, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-15:47:45 Iters: 478700/[11], loss: 5.8498, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-15:49:48 Iters: 478800/[11], loss: 5.9171, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-15:51:51 Iters: 478900/[11], loss: 5.5267, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-15:53:54 Iters: 479000/[11], loss: 4.4589, train_accuracy: 0.3828, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-15:55:58 Iters: 479100/[11], loss: 5.1116, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-15:58:01 Iters: 479200/[11], loss: 5.3324, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-16:00:05 Iters: 479300/[11], loss: 5.7531, train_accuracy: 0.2969, time: 1.24 s/iter, learning rate: 0.005000000000000001 -20220707-16:02:08 Iters: 479400/[11], loss: 5.1110, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-16:04:11 Iters: 479500/[11], loss: 5.3546, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-16:06:14 Iters: 479600/[11], loss: 5.2720, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-16:08:18 Iters: 479700/[11], loss: 5.6845, train_accuracy: 0.2109, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-16:10:21 Iters: 479800/[11], loss: 5.1142, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-16:12:24 Iters: 479900/[11], loss: 5.1523, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-16:14:28 Iters: 480000/[11], loss: 6.0421, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220707-16:14:28 Saving checkpoint: 480000 -20220707-16:15:45 LFW Ave Accuracy: 99.5166 -20220707-16:17:03 AgeDB-30 Ave Accuracy: 96.5333 -20220707-16:18:33 CFP-FP Ave Accuracy: 93.2857 -20220707-16:18:33 Current Best Accuracy: LFW: 99.6333 in iters: 460000, AgeDB-30: 96.5333 in iters: 480000 and CFP-FP: 93.6143 in iters: 370000 -20220707-16:20:35 Iters: 480100/[11], loss: 5.5761, train_accuracy: 0.2344, time: 3.67 s/iter, learning rate: 0.005000000000000001 -20220707-16:22:38 Iters: 480200/[11], loss: 5.4200, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-16:24:41 Iters: 480300/[11], loss: 5.1173, train_accuracy: 0.3828, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-16:26:44 Iters: 480400/[11], loss: 5.3901, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-16:28:48 Iters: 480500/[11], loss: 5.3082, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-16:30:51 Iters: 480600/[11], loss: 5.9640, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-16:32:54 Iters: 480700/[11], loss: 5.3075, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-16:34:57 Iters: 480800/[11], loss: 5.1820, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-16:37:00 Iters: 480900/[11], loss: 5.0724, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-16:39:03 Iters: 481000/[11], loss: 5.5157, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-16:41:07 Iters: 481100/[11], loss: 5.5073, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-16:43:10 Iters: 481200/[11], loss: 5.8124, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-16:45:13 Iters: 481300/[11], loss: 4.8397, 
train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-16:47:16 Iters: 481400/[11], loss: 5.6182, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-16:49:19 Iters: 481500/[11], loss: 4.9952, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-16:51:22 Iters: 481600/[11], loss: 5.4337, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-16:53:26 Iters: 481700/[11], loss: 5.2577, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-16:55:29 Iters: 481800/[11], loss: 4.4975, train_accuracy: 0.3594, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-16:57:32 Iters: 481900/[11], loss: 5.6530, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-16:59:35 Iters: 482000/[11], loss: 5.2414, train_accuracy: 0.3594, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-17:01:38 Iters: 482100/[11], loss: 4.7965, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-17:03:42 Iters: 482200/[11], loss: 4.8506, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-17:05:45 Iters: 482300/[11], loss: 5.0948, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-17:07:48 Iters: 482400/[11], loss: 5.9171, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-17:09:51 Iters: 482500/[11], loss: 5.2924, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-17:11:55 Iters: 482600/[11], loss: 5.0347, train_accuracy: 0.3828, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-17:13:58 Iters: 482700/[11], loss: 5.1253, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-17:16:01 Iters: 482800/[11], loss: 5.8462, train_accuracy: 
0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-17:18:04 Iters: 482900/[11], loss: 5.6839, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-17:20:08 Iters: 483000/[11], loss: 4.4749, train_accuracy: 0.4062, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-17:22:11 Iters: 483100/[11], loss: 5.3242, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-17:24:14 Iters: 483200/[11], loss: 5.3742, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-17:26:17 Iters: 483300/[11], loss: 5.0976, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-17:28:21 Iters: 483400/[11], loss: 5.2949, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-17:30:24 Iters: 483500/[11], loss: 5.6919, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-17:32:27 Iters: 483600/[11], loss: 5.4947, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-17:34:30 Iters: 483700/[11], loss: 5.2241, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-17:36:33 Iters: 483800/[11], loss: 4.7543, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-17:38:37 Iters: 483900/[11], loss: 5.7311, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-17:40:40 Iters: 484000/[11], loss: 5.9675, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-17:42:43 Iters: 484100/[11], loss: 5.6391, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-17:44:46 Iters: 484200/[11], loss: 6.2707, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-17:46:49 Iters: 484300/[11], loss: 5.3145, train_accuracy: 0.2812, time: 1.23 
s/iter, learning rate: 0.005000000000000001 -20220707-17:48:52 Iters: 484400/[11], loss: 5.4259, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-17:50:55 Iters: 484500/[11], loss: 6.3953, train_accuracy: 0.1641, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-17:52:58 Iters: 484600/[11], loss: 5.1580, train_accuracy: 0.3594, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-17:55:01 Iters: 484700/[11], loss: 4.8291, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-17:57:05 Iters: 484800/[11], loss: 5.2974, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-17:59:08 Iters: 484900/[11], loss: 6.1730, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-18:01:11 Iters: 485000/[11], loss: 5.1193, train_accuracy: 0.3906, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-18:03:14 Iters: 485100/[11], loss: 5.0800, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-18:05:17 Iters: 485200/[11], loss: 4.9438, train_accuracy: 0.3594, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-18:07:20 Iters: 485300/[11], loss: 6.3331, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-18:09:23 Iters: 485400/[11], loss: 5.4074, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-18:11:26 Iters: 485500/[11], loss: 6.1077, train_accuracy: 0.2266, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-18:13:30 Iters: 485600/[11], loss: 5.5098, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-18:15:33 Iters: 485700/[11], loss: 5.5955, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-18:17:36 Iters: 485800/[11], loss: 5.5800, train_accuracy: 0.2344, time: 1.23 s/iter, learning 
rate: 0.005000000000000001 -20220707-18:19:39 Iters: 485900/[11], loss: 5.2412, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-18:21:42 Iters: 486000/[11], loss: 5.0293, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-18:23:45 Iters: 486100/[11], loss: 4.9545, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-18:25:48 Iters: 486200/[11], loss: 6.0748, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-18:27:51 Iters: 486300/[11], loss: 5.7684, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-18:29:54 Iters: 486400/[11], loss: 4.6853, train_accuracy: 0.4062, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-18:31:57 Iters: 486500/[11], loss: 5.1734, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-18:34:00 Iters: 486600/[11], loss: 5.9474, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-18:36:03 Iters: 486700/[11], loss: 5.1875, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-18:38:06 Iters: 486800/[11], loss: 5.0706, train_accuracy: 0.3984, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-18:40:09 Iters: 486900/[11], loss: 4.7856, train_accuracy: 0.3594, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-18:42:12 Iters: 487000/[11], loss: 5.8904, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-18:44:15 Iters: 487100/[11], loss: 5.2772, train_accuracy: 0.3594, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-18:46:18 Iters: 487200/[11], loss: 5.7156, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-18:48:21 Iters: 487300/[11], loss: 5.9883, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220707-18:50:24 Iters: 487400/[11], loss: 5.6225, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-18:52:27 Iters: 487500/[11], loss: 4.7747, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-18:54:30 Iters: 487600/[11], loss: 5.6486, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-18:56:33 Iters: 487700/[11], loss: 5.2492, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-18:58:36 Iters: 487800/[11], loss: 5.1079, train_accuracy: 0.3750, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-19:00:39 Iters: 487900/[11], loss: 6.4975, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-19:02:42 Iters: 488000/[11], loss: 5.2556, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-19:04:45 Iters: 488100/[11], loss: 5.8234, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-19:06:48 Iters: 488200/[11], loss: 5.2923, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-19:08:51 Iters: 488300/[11], loss: 5.0636, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-19:10:54 Iters: 488400/[11], loss: 5.5363, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-19:12:57 Iters: 488500/[11], loss: 5.8637, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-19:15:00 Iters: 488600/[11], loss: 4.9333, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-19:17:03 Iters: 488700/[11], loss: 5.3279, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-19:19:06 Iters: 488800/[11], loss: 4.9237, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220707-19:21:09 Iters: 488900/[11], loss: 5.0516, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-19:23:12 Iters: 489000/[11], loss: 5.1873, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-19:25:15 Iters: 489100/[11], loss: 5.5486, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-19:27:18 Iters: 489200/[11], loss: 5.1923, train_accuracy: 0.3594, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-19:29:21 Iters: 489300/[11], loss: 5.3006, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-19:31:24 Iters: 489400/[11], loss: 5.0884, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-19:33:27 Iters: 489500/[11], loss: 5.5073, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-19:35:30 Iters: 489600/[11], loss: 5.1648, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-19:37:34 Iters: 489700/[11], loss: 5.2416, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-19:39:37 Iters: 489800/[11], loss: 5.1651, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-19:41:40 Iters: 489900/[11], loss: 5.5772, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-19:43:43 Iters: 490000/[11], loss: 4.5694, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-19:43:43 Saving checkpoint: 490000 -20220707-19:45:00 LFW Ave Accuracy: 99.5165 -20220707-19:46:17 AgeDB-30 Ave Accuracy: 96.5167 -20220707-19:47:46 CFP-FP Ave Accuracy: 93.5571 -20220707-19:47:46 Current Best Accuracy: LFW: 99.6333 in iters: 460000, AgeDB-30: 96.5333 in iters: 480000 and CFP-FP: 93.6143 in iters: 370000 -20220707-19:49:48 Iters: 490100/[11], loss: 5.0954, 
train_accuracy: 0.2969, time: 3.65 s/iter, learning rate: 0.005000000000000001 -20220707-19:51:51 Iters: 490200/[11], loss: 5.4295, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-19:53:54 Iters: 490300/[11], loss: 5.9747, train_accuracy: 0.2422, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-19:55:57 Iters: 490400/[11], loss: 5.8303, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-19:58:00 Iters: 490500/[11], loss: 5.4452, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-20:00:03 Iters: 490600/[11], loss: 5.1889, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-20:02:06 Iters: 490700/[11], loss: 5.4822, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-20:04:09 Iters: 490800/[11], loss: 6.0764, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-20:06:12 Iters: 490900/[11], loss: 5.0735, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-20:08:15 Iters: 491000/[11], loss: 5.7044, train_accuracy: 0.2266, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-20:10:18 Iters: 491100/[11], loss: 5.0865, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-20:12:21 Iters: 491200/[11], loss: 5.9784, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-20:14:24 Iters: 491300/[11], loss: 5.1185, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-20:16:28 Iters: 491400/[11], loss: 5.9982, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-20:18:31 Iters: 491500/[11], loss: 5.4441, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-20:20:34 Iters: 491600/[11], loss: 5.4349, train_accuracy: 
0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-20:22:37 Iters: 491700/[11], loss: 6.2597, train_accuracy: 0.2266, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-20:24:40 Iters: 491800/[11], loss: 4.9083, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-20:26:44 Iters: 491900/[11], loss: 5.6708, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-20:28:47 Iters: 492000/[11], loss: 6.0602, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-20:30:50 Iters: 492100/[11], loss: 5.5329, train_accuracy: 0.3906, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-20:32:53 Iters: 492200/[11], loss: 4.7443, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-20:34:56 Iters: 492300/[11], loss: 5.5147, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-20:37:00 Iters: 492400/[11], loss: 5.3097, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-20:39:03 Iters: 492500/[11], loss: 5.0942, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-20:41:06 Iters: 492600/[11], loss: 5.2429, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-20:43:09 Iters: 492700/[11], loss: 6.3494, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-20:45:13 Iters: 492800/[11], loss: 6.1103, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-20:47:16 Iters: 492900/[11], loss: 5.2217, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-20:49:19 Iters: 493000/[11], loss: 5.1325, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-20:51:22 Iters: 493100/[11], loss: 5.1400, train_accuracy: 0.2734, time: 1.23 
s/iter, learning rate: 0.005000000000000001 -20220707-20:53:25 Iters: 493200/[11], loss: 5.0466, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-20:55:28 Iters: 493300/[11], loss: 5.8173, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-20:57:32 Iters: 493400/[11], loss: 5.2914, train_accuracy: 0.3594, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-20:59:35 Iters: 493500/[11], loss: 5.2247, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-21:01:38 Iters: 493600/[11], loss: 4.8864, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-21:03:41 Iters: 493700/[11], loss: 5.1505, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-21:05:45 Iters: 493800/[11], loss: 5.6448, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-21:07:48 Iters: 493900/[11], loss: 5.7010, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-21:09:51 Iters: 494000/[11], loss: 5.7588, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-21:11:55 Iters: 494100/[11], loss: 5.1911, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-21:13:58 Iters: 494200/[11], loss: 5.5131, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-21:16:01 Iters: 494300/[11], loss: 5.3401, train_accuracy: 0.2656, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-21:18:04 Iters: 494400/[11], loss: 5.4750, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-21:20:08 Iters: 494500/[11], loss: 5.3062, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-21:22:11 Iters: 494600/[11], loss: 5.6459, train_accuracy: 0.3672, time: 1.23 s/iter, learning 
rate: 0.005000000000000001 -20220707-21:24:14 Iters: 494700/[11], loss: 5.5970, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-21:26:17 Iters: 494800/[11], loss: 5.3602, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-21:28:21 Iters: 494900/[11], loss: 5.2405, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-21:30:24 Iters: 495000/[11], loss: 5.9222, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-21:32:27 Iters: 495100/[11], loss: 5.5178, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-21:34:30 Iters: 495200/[11], loss: 5.0618, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-21:36:34 Iters: 495300/[11], loss: 5.1847, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-21:38:37 Iters: 495400/[11], loss: 5.9694, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-21:40:40 Iters: 495500/[11], loss: 5.5064, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-21:42:43 Iters: 495600/[11], loss: 5.4586, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-21:44:47 Iters: 495700/[11], loss: 5.4207, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-21:46:50 Iters: 495800/[11], loss: 5.5042, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-21:48:53 Iters: 495900/[11], loss: 5.5473, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-21:50:56 Iters: 496000/[11], loss: 4.8887, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-21:53:00 Iters: 496100/[11], loss: 4.9818, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220707-21:55:03 Iters: 496200/[11], loss: 5.5717, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-21:57:06 Iters: 496300/[11], loss: 5.7386, train_accuracy: 0.2109, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-21:59:09 Iters: 496400/[11], loss: 5.2738, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-22:01:13 Iters: 496500/[11], loss: 5.5421, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-22:03:16 Iters: 496600/[11], loss: 6.0075, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-22:05:19 Iters: 496700/[11], loss: 4.9070, train_accuracy: 0.3906, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-22:07:22 Iters: 496800/[11], loss: 5.1895, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-22:09:25 Iters: 496900/[11], loss: 5.6243, train_accuracy: 0.3125, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-22:11:29 Iters: 497000/[11], loss: 6.6365, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-22:13:32 Iters: 497100/[11], loss: 4.6826, train_accuracy: 0.4062, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-22:15:35 Iters: 497200/[11], loss: 5.7838, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-22:17:38 Iters: 497300/[11], loss: 4.9419, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-22:19:41 Iters: 497400/[11], loss: 5.1454, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-22:21:45 Iters: 497500/[11], loss: 5.1266, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-22:23:48 Iters: 497600/[11], loss: 4.7404, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220707-22:25:51 Iters: 497700/[11], loss: 5.2193, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-22:27:54 Iters: 497800/[11], loss: 5.0557, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-22:29:58 Iters: 497900/[11], loss: 5.9342, train_accuracy: 0.2578, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-22:32:01 Iters: 498000/[11], loss: 5.3454, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-22:34:04 Iters: 498100/[11], loss: 5.2987, train_accuracy: 0.2891, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-22:36:07 Iters: 498200/[11], loss: 4.8685, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-22:38:10 Iters: 498300/[11], loss: 5.1317, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-22:40:14 Iters: 498400/[11], loss: 4.9875, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-22:42:17 Iters: 498500/[11], loss: 5.0087, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-22:44:20 Iters: 498600/[11], loss: 4.9226, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-22:46:23 Iters: 498700/[11], loss: 5.6348, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-22:48:27 Iters: 498800/[11], loss: 5.1181, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-22:50:30 Iters: 498900/[11], loss: 5.5975, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-22:52:33 Iters: 499000/[11], loss: 5.5130, train_accuracy: 0.2344, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-22:54:36 Iters: 499100/[11], loss: 4.2478, train_accuracy: 0.3906, time: 1.23 s/iter, learning rate: 
0.005000000000000001 -20220707-22:56:40 Iters: 499200/[11], loss: 5.6281, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-22:58:43 Iters: 499300/[11], loss: 5.6994, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-23:00:46 Iters: 499400/[11], loss: 5.8441, train_accuracy: 0.2500, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-23:02:49 Iters: 499500/[11], loss: 5.7657, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-23:04:53 Iters: 499600/[11], loss: 5.1938, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-23:06:56 Iters: 499700/[11], loss: 5.2907, train_accuracy: 0.2734, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-23:08:59 Iters: 499800/[11], loss: 5.6695, train_accuracy: 0.3047, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-23:11:03 Iters: 499900/[11], loss: 5.5174, train_accuracy: 0.2969, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-23:13:06 Iters: 500000/[11], loss: 5.4552, train_accuracy: 0.3359, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-23:13:06 Saving checkpoint: 500000 -20220707-23:14:24 LFW Ave Accuracy: 99.5666 -20220707-23:15:41 AgeDB-30 Ave Accuracy: 95.9000 -20220707-23:17:11 CFP-FP Ave Accuracy: 93.5857 -20220707-23:17:11 Current Best Accuracy: LFW: 99.6333 in iters: 460000, AgeDB-30: 96.5333 in iters: 480000 and CFP-FP: 93.6143 in iters: 370000 -20220707-23:19:13 Iters: 500100/[11], loss: 4.4125, train_accuracy: 0.3906, time: 3.67 s/iter, learning rate: 0.005000000000000001 -20220707-23:21:16 Iters: 500200/[11], loss: 5.3389, train_accuracy: 0.3203, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-23:23:19 Iters: 500300/[11], loss: 5.0035, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 0.005000000000000001 -20220707-23:25:09 Train Epoch: 12/18 ... 
-20220707-23:25:22 Iters: 500400/[12], loss: 4.9105, train_accuracy: 0.3438, time: 0.13 s/iter, learning rate: 5.0000000000000016e-05 -20220707-23:27:25 Iters: 500500/[12], loss: 4.6982, train_accuracy: 0.3984, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220707-23:29:29 Iters: 500600/[12], loss: 4.4895, train_accuracy: 0.3984, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220707-23:31:32 Iters: 500700/[12], loss: 4.8026, train_accuracy: 0.3750, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220707-23:33:35 Iters: 500800/[12], loss: 4.3025, train_accuracy: 0.3750, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220707-23:35:39 Iters: 500900/[12], loss: 4.7232, train_accuracy: 0.3750, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220707-23:37:42 Iters: 501000/[12], loss: 3.8666, train_accuracy: 0.4219, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220707-23:39:45 Iters: 501100/[12], loss: 4.2215, train_accuracy: 0.4375, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220707-23:41:48 Iters: 501200/[12], loss: 4.1172, train_accuracy: 0.4609, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220707-23:43:52 Iters: 501300/[12], loss: 4.2172, train_accuracy: 0.3906, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220707-23:45:55 Iters: 501400/[12], loss: 4.4437, train_accuracy: 0.4609, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220707-23:47:58 Iters: 501500/[12], loss: 4.2005, train_accuracy: 0.3906, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220707-23:50:01 Iters: 501600/[12], loss: 4.7081, train_accuracy: 0.3281, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220707-23:52:05 Iters: 501700/[12], loss: 5.3833, train_accuracy: 0.2812, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220707-23:54:08 Iters: 501800/[12], loss: 3.8497, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 
5.0000000000000016e-05 -20220707-23:56:11 Iters: 501900/[12], loss: 3.7752, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220707-23:58:14 Iters: 502000/[12], loss: 4.2698, train_accuracy: 0.4453, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-00:00:18 Iters: 502100/[12], loss: 4.1617, train_accuracy: 0.4297, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-00:02:21 Iters: 502200/[12], loss: 4.6605, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-00:04:24 Iters: 502300/[12], loss: 4.4697, train_accuracy: 0.3750, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-00:06:28 Iters: 502400/[12], loss: 5.1106, train_accuracy: 0.3984, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-00:08:31 Iters: 502500/[12], loss: 4.5623, train_accuracy: 0.4453, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-00:10:34 Iters: 502600/[12], loss: 4.3414, train_accuracy: 0.3750, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-00:12:37 Iters: 502700/[12], loss: 4.2058, train_accuracy: 0.4297, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-00:14:41 Iters: 502800/[12], loss: 4.2330, train_accuracy: 0.4219, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-00:16:44 Iters: 502900/[12], loss: 4.4657, train_accuracy: 0.4375, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-00:18:47 Iters: 503000/[12], loss: 4.7995, train_accuracy: 0.4531, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-00:20:50 Iters: 503100/[12], loss: 3.9044, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-00:22:53 Iters: 503200/[12], loss: 4.4666, train_accuracy: 0.3984, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-00:24:57 Iters: 503300/[12], loss: 3.7412, train_accuracy: 0.4531, time: 1.23 s/iter, 
learning rate: 5.0000000000000016e-05 -20220708-00:27:00 Iters: 503400/[12], loss: 3.7659, train_accuracy: 0.4688, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-00:29:03 Iters: 503500/[12], loss: 4.9893, train_accuracy: 0.3906, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-00:31:06 Iters: 503600/[12], loss: 4.3642, train_accuracy: 0.4531, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-00:33:09 Iters: 503700/[12], loss: 4.7351, train_accuracy: 0.4062, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-00:35:13 Iters: 503800/[12], loss: 4.1379, train_accuracy: 0.4062, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-00:37:16 Iters: 503900/[12], loss: 4.0958, train_accuracy: 0.4688, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-00:39:19 Iters: 504000/[12], loss: 3.7945, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-00:41:22 Iters: 504100/[12], loss: 4.2439, train_accuracy: 0.3828, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-00:43:26 Iters: 504200/[12], loss: 4.4575, train_accuracy: 0.3984, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-00:45:29 Iters: 504300/[12], loss: 4.2549, train_accuracy: 0.3906, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-00:47:32 Iters: 504400/[12], loss: 3.6290, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-00:49:35 Iters: 504500/[12], loss: 3.9043, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-00:51:38 Iters: 504600/[12], loss: 4.3846, train_accuracy: 0.4844, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-00:53:42 Iters: 504700/[12], loss: 3.9054, train_accuracy: 0.4531, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-00:55:45 Iters: 504800/[12], loss: 3.6869, train_accuracy: 0.4688, time: 
1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-00:57:48 Iters: 504900/[12], loss: 4.3043, train_accuracy: 0.4141, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-00:59:51 Iters: 505000/[12], loss: 4.4996, train_accuracy: 0.4297, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-01:01:55 Iters: 505100/[12], loss: 3.7250, train_accuracy: 0.4844, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-01:03:58 Iters: 505200/[12], loss: 4.2451, train_accuracy: 0.4297, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-01:06:01 Iters: 505300/[12], loss: 4.3670, train_accuracy: 0.3516, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-01:08:04 Iters: 505400/[12], loss: 4.1730, train_accuracy: 0.4453, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-01:10:08 Iters: 505500/[12], loss: 3.8483, train_accuracy: 0.4531, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-01:12:11 Iters: 505600/[12], loss: 4.6724, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-01:14:14 Iters: 505700/[12], loss: 3.8542, train_accuracy: 0.4609, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-01:16:17 Iters: 505800/[12], loss: 3.6159, train_accuracy: 0.4688, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-01:18:21 Iters: 505900/[12], loss: 3.9621, train_accuracy: 0.4688, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-01:20:24 Iters: 506000/[12], loss: 4.1625, train_accuracy: 0.4375, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-01:22:27 Iters: 506100/[12], loss: 4.6853, train_accuracy: 0.4297, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-01:24:31 Iters: 506200/[12], loss: 4.1515, train_accuracy: 0.4141, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-01:26:34 Iters: 506300/[12], loss: 3.9713, train_accuracy: 
0.4297, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-01:28:37 Iters: 506400/[12], loss: 3.9582, train_accuracy: 0.4688, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-01:30:40 Iters: 506500/[12], loss: 4.1404, train_accuracy: 0.4375, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-01:32:44 Iters: 506600/[12], loss: 4.3213, train_accuracy: 0.4375, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-01:34:47 Iters: 506700/[12], loss: 4.2091, train_accuracy: 0.4609, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-01:36:50 Iters: 506800/[12], loss: 4.1754, train_accuracy: 0.4297, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-01:38:53 Iters: 506900/[12], loss: 4.2558, train_accuracy: 0.4297, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-01:40:56 Iters: 507000/[12], loss: 3.4892, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-01:43:00 Iters: 507100/[12], loss: 3.9671, train_accuracy: 0.4688, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-01:45:03 Iters: 507200/[12], loss: 3.7660, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-01:47:06 Iters: 507300/[12], loss: 3.7882, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-01:49:09 Iters: 507400/[12], loss: 4.6336, train_accuracy: 0.4062, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-01:51:12 Iters: 507500/[12], loss: 4.1848, train_accuracy: 0.3438, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-01:53:16 Iters: 507600/[12], loss: 4.4492, train_accuracy: 0.3828, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-01:55:19 Iters: 507700/[12], loss: 4.5866, train_accuracy: 0.3672, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-01:57:22 Iters: 507800/[12], loss: 3.7418, 
train_accuracy: 0.4531, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-01:59:25 Iters: 507900/[12], loss: 3.4318, train_accuracy: 0.4219, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-02:01:29 Iters: 508000/[12], loss: 3.8035, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-02:03:32 Iters: 508100/[12], loss: 4.2869, train_accuracy: 0.4297, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-02:05:35 Iters: 508200/[12], loss: 3.4668, train_accuracy: 0.4844, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-02:07:38 Iters: 508300/[12], loss: 4.4921, train_accuracy: 0.4453, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-02:09:42 Iters: 508400/[12], loss: 4.1111, train_accuracy: 0.4609, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-02:11:45 Iters: 508500/[12], loss: 3.6666, train_accuracy: 0.4688, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-02:13:48 Iters: 508600/[12], loss: 4.2737, train_accuracy: 0.4375, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-02:15:52 Iters: 508700/[12], loss: 4.9771, train_accuracy: 0.3828, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-02:17:55 Iters: 508800/[12], loss: 3.9414, train_accuracy: 0.4141, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-02:19:58 Iters: 508900/[12], loss: 3.7661, train_accuracy: 0.4688, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-02:22:02 Iters: 509000/[12], loss: 4.3718, train_accuracy: 0.4531, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-02:24:05 Iters: 509100/[12], loss: 3.6488, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-02:26:09 Iters: 509200/[12], loss: 4.1959, train_accuracy: 0.4609, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-02:28:12 Iters: 509300/[12], 
loss: 3.9373, train_accuracy: 0.4844, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-02:30:15 Iters: 509400/[12], loss: 3.7154, train_accuracy: 0.3906, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-02:32:18 Iters: 509500/[12], loss: 3.3347, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-02:34:21 Iters: 509600/[12], loss: 4.2143, train_accuracy: 0.4297, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-02:36:24 Iters: 509700/[12], loss: 4.0542, train_accuracy: 0.4453, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-02:38:27 Iters: 509800/[12], loss: 3.2625, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-02:40:30 Iters: 509900/[12], loss: 4.6548, train_accuracy: 0.4297, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-02:42:33 Iters: 510000/[12], loss: 3.9993, train_accuracy: 0.4609, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-02:42:33 Saving checkpoint: 510000 -20220708-02:43:49 LFW Ave Accuracy: 99.6000 -20220708-02:45:06 AgeDB-30 Ave Accuracy: 97.0167 -20220708-02:46:34 CFP-FP Ave Accuracy: 94.7143 -20220708-02:46:34 Current Best Accuracy: LFW: 99.6333 in iters: 460000, AgeDB-30: 97.0167 in iters: 510000 and CFP-FP: 94.7143 in iters: 510000 -20220708-02:48:37 Iters: 510100/[12], loss: 4.2740, train_accuracy: 0.4844, time: 3.64 s/iter, learning rate: 5.0000000000000016e-05 -20220708-02:50:40 Iters: 510200/[12], loss: 3.3865, train_accuracy: 0.4766, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-02:52:43 Iters: 510300/[12], loss: 3.5658, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-02:54:45 Iters: 510400/[12], loss: 4.3553, train_accuracy: 0.4609, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-02:56:48 Iters: 510500/[12], loss: 4.5393, train_accuracy: 0.4453, time: 1.23 s/iter, 
learning rate: 5.0000000000000016e-05 -20220708-02:58:51 Iters: 510600/[12], loss: 4.2130, train_accuracy: 0.4844, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-03:00:54 Iters: 510700/[12], loss: 4.3845, train_accuracy: 0.4844, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-03:02:57 Iters: 510800/[12], loss: 4.0886, train_accuracy: 0.4844, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-03:05:00 Iters: 510900/[12], loss: 3.5711, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-03:07:03 Iters: 511000/[12], loss: 4.2083, train_accuracy: 0.4062, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-03:09:06 Iters: 511100/[12], loss: 3.3718, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-03:11:09 Iters: 511200/[12], loss: 3.4204, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-03:13:12 Iters: 511300/[12], loss: 3.7623, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-03:15:15 Iters: 511400/[12], loss: 3.6431, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-03:17:18 Iters: 511500/[12], loss: 4.1133, train_accuracy: 0.4375, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-03:19:21 Iters: 511600/[12], loss: 3.2772, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-03:21:24 Iters: 511700/[12], loss: 3.9678, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-03:23:27 Iters: 511800/[12], loss: 3.6323, train_accuracy: 0.4688, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-03:25:29 Iters: 511900/[12], loss: 3.3380, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-03:27:32 Iters: 512000/[12], loss: 3.7517, train_accuracy: 0.4531, time: 
1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-03:29:35 Iters: 512100/[12], loss: 3.8524, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-03:31:39 Iters: 512200/[12], loss: 3.7746, train_accuracy: 0.4453, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-03:33:42 Iters: 512300/[12], loss: 4.0064, train_accuracy: 0.4688, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-03:35:45 Iters: 512400/[12], loss: 3.9372, train_accuracy: 0.4062, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-03:37:48 Iters: 512500/[12], loss: 3.9973, train_accuracy: 0.4844, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-03:39:52 Iters: 512600/[12], loss: 3.7327, train_accuracy: 0.5000, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220708-03:41:55 Iters: 512700/[12], loss: 3.6711, train_accuracy: 0.4609, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-03:43:58 Iters: 512800/[12], loss: 3.3818, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-03:46:02 Iters: 512900/[12], loss: 3.5309, train_accuracy: 0.5156, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220708-03:48:05 Iters: 513000/[12], loss: 4.2535, train_accuracy: 0.4453, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-03:50:09 Iters: 513100/[12], loss: 3.0142, train_accuracy: 0.5000, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220708-03:52:12 Iters: 513200/[12], loss: 3.1880, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-03:54:15 Iters: 513300/[12], loss: 4.1675, train_accuracy: 0.4844, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-03:56:19 Iters: 513400/[12], loss: 3.8649, train_accuracy: 0.4844, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-03:58:22 Iters: 513500/[12], loss: 4.3519, train_accuracy: 
0.4609, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-04:00:26 Iters: 513600/[12], loss: 3.7852, train_accuracy: 0.4922, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220708-04:02:29 Iters: 513700/[12], loss: 3.6997, train_accuracy: 0.4609, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-04:04:32 Iters: 513800/[12], loss: 3.1791, train_accuracy: 0.4766, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-04:06:36 Iters: 513900/[12], loss: 3.2463, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-04:08:39 Iters: 514000/[12], loss: 3.6300, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-04:10:42 Iters: 514100/[12], loss: 3.6579, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-04:12:46 Iters: 514200/[12], loss: 3.3845, train_accuracy: 0.5391, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220708-04:14:50 Iters: 514300/[12], loss: 3.4865, train_accuracy: 0.4609, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220708-04:16:53 Iters: 514400/[12], loss: 3.4391, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-04:18:56 Iters: 514500/[12], loss: 3.5603, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-04:21:00 Iters: 514600/[12], loss: 3.9507, train_accuracy: 0.4766, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220708-04:23:04 Iters: 514700/[12], loss: 3.6655, train_accuracy: 0.4531, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220708-04:25:07 Iters: 514800/[12], loss: 3.2235, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-04:27:10 Iters: 514900/[12], loss: 3.2679, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-04:29:13 Iters: 515000/[12], loss: 3.0586, 
train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-04:31:16 Iters: 515100/[12], loss: 3.8078, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-04:33:19 Iters: 515200/[12], loss: 3.3923, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-04:35:22 Iters: 515300/[12], loss: 3.5409, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-04:37:25 Iters: 515400/[12], loss: 3.2441, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-04:39:28 Iters: 515500/[12], loss: 3.9492, train_accuracy: 0.4609, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-04:41:31 Iters: 515600/[12], loss: 4.0181, train_accuracy: 0.4141, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-04:43:34 Iters: 515700/[12], loss: 4.6566, train_accuracy: 0.3906, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-04:45:37 Iters: 515800/[12], loss: 3.3865, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-04:47:40 Iters: 515900/[12], loss: 3.8249, train_accuracy: 0.4844, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-04:49:43 Iters: 516000/[12], loss: 3.6235, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-04:51:45 Iters: 516100/[12], loss: 4.3341, train_accuracy: 0.4375, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-04:53:48 Iters: 516200/[12], loss: 3.7117, train_accuracy: 0.4844, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-04:55:51 Iters: 516300/[12], loss: 3.9127, train_accuracy: 0.4062, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-04:57:54 Iters: 516400/[12], loss: 3.8612, train_accuracy: 0.4219, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-04:59:57 Iters: 516500/[12], 
loss: 3.5685, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-05:02:00 Iters: 516600/[12], loss: 4.0672, train_accuracy: 0.4297, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-05:04:03 Iters: 516700/[12], loss: 4.1272, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-05:06:06 Iters: 516800/[12], loss: 3.2464, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-05:08:09 Iters: 516900/[12], loss: 4.2821, train_accuracy: 0.3984, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-05:10:12 Iters: 517000/[12], loss: 3.5115, train_accuracy: 0.4609, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-05:12:15 Iters: 517100/[12], loss: 4.3220, train_accuracy: 0.4062, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-05:14:18 Iters: 517200/[12], loss: 3.8911, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-05:16:21 Iters: 517300/[12], loss: 3.3484, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-05:18:24 Iters: 517400/[12], loss: 3.8849, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-05:20:27 Iters: 517500/[12], loss: 3.8331, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-05:22:30 Iters: 517600/[12], loss: 4.1165, train_accuracy: 0.4766, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-05:24:33 Iters: 517700/[12], loss: 3.7508, train_accuracy: 0.4844, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-05:26:36 Iters: 517800/[12], loss: 3.4877, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-05:28:39 Iters: 517900/[12], loss: 3.3053, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-05:30:42 Iters: 
518000/[12], loss: 3.6605, train_accuracy: 0.4766, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-05:32:45 Iters: 518100/[12], loss: 3.8208, train_accuracy: 0.4375, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-05:34:48 Iters: 518200/[12], loss: 3.7756, train_accuracy: 0.4844, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-05:36:51 Iters: 518300/[12], loss: 3.8348, train_accuracy: 0.4844, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-05:38:53 Iters: 518400/[12], loss: 3.3956, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-05:40:56 Iters: 518500/[12], loss: 2.8435, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-05:42:59 Iters: 518600/[12], loss: 2.9828, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-05:45:02 Iters: 518700/[12], loss: 3.1438, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-05:47:05 Iters: 518800/[12], loss: 4.0483, train_accuracy: 0.4453, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-05:49:08 Iters: 518900/[12], loss: 3.4859, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-05:51:11 Iters: 519000/[12], loss: 3.8176, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-05:53:14 Iters: 519100/[12], loss: 3.2157, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-05:55:17 Iters: 519200/[12], loss: 3.3027, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-05:57:20 Iters: 519300/[12], loss: 3.8531, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-05:59:23 Iters: 519400/[12], loss: 4.5191, train_accuracy: 0.4297, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 
-20220708-06:01:26 Iters: 519500/[12], loss: 3.0816, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-06:03:30 Iters: 519600/[12], loss: 3.8236, train_accuracy: 0.4531, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-06:05:33 Iters: 519700/[12], loss: 4.0633, train_accuracy: 0.4297, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-06:07:36 Iters: 519800/[12], loss: 3.5127, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-06:09:39 Iters: 519900/[12], loss: 3.9804, train_accuracy: 0.4688, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-06:11:42 Iters: 520000/[12], loss: 3.1086, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-06:11:42 Saving checkpoint: 520000 -20220708-06:13:01 LFW Ave Accuracy: 99.6000 -20220708-06:14:20 AgeDB-30 Ave Accuracy: 97.0333 -20220708-06:15:50 CFP-FP Ave Accuracy: 94.6571 -20220708-06:15:50 Current Best Accuracy: LFW: 99.6333 in iters: 460000, AgeDB-30: 97.0333 in iters: 520000 and CFP-FP: 94.7143 in iters: 510000 -20220708-06:17:52 Iters: 520100/[12], loss: 3.3695, train_accuracy: 0.5312, time: 3.71 s/iter, learning rate: 5.0000000000000016e-05 -20220708-06:19:55 Iters: 520200/[12], loss: 3.8556, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-06:21:59 Iters: 520300/[12], loss: 4.0299, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-06:24:02 Iters: 520400/[12], loss: 3.7772, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-06:26:05 Iters: 520500/[12], loss: 3.5381, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-06:28:08 Iters: 520600/[12], loss: 4.5420, train_accuracy: 0.4609, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-06:30:12 Iters: 520700/[12], loss: 3.7863, 
train_accuracy: 0.4844, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220708-06:32:15 Iters: 520800/[12], loss: 2.9080, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-06:34:19 Iters: 520900/[12], loss: 3.5370, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-06:36:22 Iters: 521000/[12], loss: 3.9331, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-06:38:25 Iters: 521100/[12], loss: 3.3592, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-06:40:28 Iters: 521200/[12], loss: 3.6804, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-06:42:31 Iters: 521300/[12], loss: 3.8937, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-06:44:34 Iters: 521400/[12], loss: 3.2752, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-06:46:37 Iters: 521500/[12], loss: 3.3242, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-06:48:40 Iters: 521600/[12], loss: 2.9937, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-06:50:43 Iters: 521700/[12], loss: 4.0667, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-06:52:46 Iters: 521800/[12], loss: 2.8120, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-06:54:49 Iters: 521900/[12], loss: 3.3237, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-06:56:51 Iters: 522000/[12], loss: 3.7177, train_accuracy: 0.4688, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-06:58:54 Iters: 522100/[12], loss: 3.1949, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-07:00:57 Iters: 522200/[12], 
loss: 3.1283, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-07:03:00 Iters: 522300/[12], loss: 3.2841, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-07:05:03 Iters: 522400/[12], loss: 3.4813, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-07:07:06 Iters: 522500/[12], loss: 3.4149, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-07:09:09 Iters: 522600/[12], loss: 4.4441, train_accuracy: 0.4453, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-07:11:12 Iters: 522700/[12], loss: 3.6693, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-07:13:15 Iters: 522800/[12], loss: 3.0067, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-07:15:18 Iters: 522900/[12], loss: 3.4320, train_accuracy: 0.4609, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-07:17:21 Iters: 523000/[12], loss: 4.1577, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-07:19:24 Iters: 523100/[12], loss: 3.1737, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-07:21:27 Iters: 523200/[12], loss: 3.4038, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-07:23:30 Iters: 523300/[12], loss: 3.5602, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-07:25:32 Iters: 523400/[12], loss: 4.0029, train_accuracy: 0.4531, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-07:27:36 Iters: 523500/[12], loss: 3.8885, train_accuracy: 0.4688, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-07:29:39 Iters: 523600/[12], loss: 3.3178, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-07:31:42 Iters: 
523700/[12], loss: 3.1362, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-07:33:45 Iters: 523800/[12], loss: 3.1815, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-07:35:48 Iters: 523900/[12], loss: 3.2132, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-07:37:51 Iters: 524000/[12], loss: 3.0657, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-07:39:54 Iters: 524100/[12], loss: 3.2931, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-07:41:57 Iters: 524200/[12], loss: 3.2155, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-07:44:00 Iters: 524300/[12], loss: 3.7368, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-07:46:03 Iters: 524400/[12], loss: 3.6495, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-07:48:06 Iters: 524500/[12], loss: 3.8442, train_accuracy: 0.4844, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-07:50:09 Iters: 524600/[12], loss: 3.8468, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-07:52:12 Iters: 524700/[12], loss: 4.1362, train_accuracy: 0.4453, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-07:54:15 Iters: 524800/[12], loss: 3.3755, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-07:56:18 Iters: 524900/[12], loss: 2.9236, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-07:58:21 Iters: 525000/[12], loss: 4.0502, train_accuracy: 0.4688, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-08:00:24 Iters: 525100/[12], loss: 3.4574, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 
-20220708-08:02:27 Iters: 525200/[12], loss: 3.2426, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-08:04:30 Iters: 525300/[12], loss: 3.7143, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-08:06:33 Iters: 525400/[12], loss: 3.7474, train_accuracy: 0.4609, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-08:08:36 Iters: 525500/[12], loss: 3.4617, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-08:10:39 Iters: 525600/[12], loss: 3.9148, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-08:12:42 Iters: 525700/[12], loss: 2.4111, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-08:14:45 Iters: 525800/[12], loss: 3.6610, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-08:16:48 Iters: 525900/[12], loss: 3.1508, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-08:18:51 Iters: 526000/[12], loss: 3.6059, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-08:20:54 Iters: 526100/[12], loss: 3.8213, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-08:22:58 Iters: 526200/[12], loss: 3.3295, train_accuracy: 0.4922, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220708-08:25:01 Iters: 526300/[12], loss: 3.5529, train_accuracy: 0.4609, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-08:27:05 Iters: 526400/[12], loss: 3.5204, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-08:29:08 Iters: 526500/[12], loss: 3.5576, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-08:31:11 Iters: 526600/[12], loss: 4.1535, train_accuracy: 0.4375, time: 1.23 s/iter, learning rate: 
5.0000000000000016e-05 -20220708-08:33:14 Iters: 526700/[12], loss: 3.7029, train_accuracy: 0.4531, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-08:35:17 Iters: 526800/[12], loss: 3.0354, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-08:37:21 Iters: 526900/[12], loss: 3.6490, train_accuracy: 0.4609, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-08:39:24 Iters: 527000/[12], loss: 2.7465, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-08:41:27 Iters: 527100/[12], loss: 3.2952, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-08:43:30 Iters: 527200/[12], loss: 3.4348, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-08:45:34 Iters: 527300/[12], loss: 4.4294, train_accuracy: 0.4453, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-08:47:37 Iters: 527400/[12], loss: 2.9798, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-08:49:40 Iters: 527500/[12], loss: 3.9167, train_accuracy: 0.4531, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-08:51:43 Iters: 527600/[12], loss: 3.5199, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-08:53:46 Iters: 527700/[12], loss: 3.7986, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-08:55:49 Iters: 527800/[12], loss: 2.9081, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-08:57:52 Iters: 527900/[12], loss: 3.1081, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-08:59:56 Iters: 528000/[12], loss: 3.0981, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-09:01:59 Iters: 528100/[12], loss: 3.5912, train_accuracy: 0.4688, time: 1.23 s/iter, 
learning rate: 5.0000000000000016e-05 -20220708-09:04:02 Iters: 528200/[12], loss: 3.9277, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-09:06:05 Iters: 528300/[12], loss: 3.8298, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-09:08:09 Iters: 528400/[12], loss: 3.6492, train_accuracy: 0.4766, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-09:10:12 Iters: 528500/[12], loss: 4.6560, train_accuracy: 0.4453, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-09:12:15 Iters: 528600/[12], loss: 3.5150, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-09:14:18 Iters: 528700/[12], loss: 2.7262, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-09:16:22 Iters: 528800/[12], loss: 4.1461, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-09:18:25 Iters: 528900/[12], loss: 3.9513, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-09:20:28 Iters: 529000/[12], loss: 3.8066, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-09:22:32 Iters: 529100/[12], loss: 3.4681, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-09:24:35 Iters: 529200/[12], loss: 4.0496, train_accuracy: 0.4453, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-09:26:38 Iters: 529300/[12], loss: 2.7971, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-09:28:41 Iters: 529400/[12], loss: 3.1981, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-09:30:45 Iters: 529500/[12], loss: 3.3993, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-09:32:48 Iters: 529600/[12], loss: 3.5305, train_accuracy: 0.5625, time: 
1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-09:34:51 Iters: 529700/[12], loss: 2.9218, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-09:36:54 Iters: 529800/[12], loss: 3.7414, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-09:38:58 Iters: 529900/[12], loss: 3.1911, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-09:41:01 Iters: 530000/[12], loss: 3.4987, train_accuracy: 0.4609, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-09:41:01 Saving checkpoint: 530000 -20220708-09:42:19 LFW Ave Accuracy: 99.6333 -20220708-09:43:35 AgeDB-30 Ave Accuracy: 97.0500 -20220708-09:45:05 CFP-FP Ave Accuracy: 94.5143 -20220708-09:45:05 Current Best Accuracy: LFW: 99.6333 in iters: 530000, AgeDB-30: 97.0500 in iters: 530000 and CFP-FP: 94.7143 in iters: 510000 -20220708-09:47:08 Iters: 530100/[12], loss: 3.7724, train_accuracy: 0.4922, time: 3.68 s/iter, learning rate: 5.0000000000000016e-05 -20220708-09:49:12 Iters: 530200/[12], loss: 3.0806, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-09:51:15 Iters: 530300/[12], loss: 3.0470, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-09:53:18 Iters: 530400/[12], loss: 3.2699, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-09:55:21 Iters: 530500/[12], loss: 3.9905, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-09:57:24 Iters: 530600/[12], loss: 3.3155, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-09:59:27 Iters: 530700/[12], loss: 3.0176, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-10:01:31 Iters: 530800/[12], loss: 3.9901, train_accuracy: 0.4766, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 
-20220708-10:03:34 Iters: 530900/[12], loss: 3.0669, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-10:05:37 Iters: 531000/[12], loss: 4.5584, train_accuracy: 0.4297, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-10:07:40 Iters: 531100/[12], loss: 3.4305, train_accuracy: 0.4844, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-10:09:43 Iters: 531200/[12], loss: 3.8173, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-10:11:47 Iters: 531300/[12], loss: 3.4217, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-10:13:50 Iters: 531400/[12], loss: 2.9055, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-10:15:53 Iters: 531500/[12], loss: 2.1942, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-10:17:56 Iters: 531600/[12], loss: 4.1232, train_accuracy: 0.4375, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-10:19:59 Iters: 531700/[12], loss: 3.3881, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-10:22:02 Iters: 531800/[12], loss: 3.2824, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-10:24:05 Iters: 531900/[12], loss: 3.5067, train_accuracy: 0.4531, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-10:26:09 Iters: 532000/[12], loss: 3.8612, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-10:28:12 Iters: 532100/[12], loss: 3.4392, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-10:30:15 Iters: 532200/[12], loss: 3.7561, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-10:32:18 Iters: 532300/[12], loss: 3.4361, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 
5.0000000000000016e-05 -20220708-10:34:21 Iters: 532400/[12], loss: 3.6435, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-10:36:24 Iters: 532500/[12], loss: 3.9320, train_accuracy: 0.4688, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-10:38:27 Iters: 532600/[12], loss: 2.8930, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-10:40:31 Iters: 532700/[12], loss: 3.4935, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-10:42:34 Iters: 532800/[12], loss: 3.4652, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-10:44:37 Iters: 532900/[12], loss: 3.6012, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-10:46:40 Iters: 533000/[12], loss: 3.4861, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-10:48:43 Iters: 533100/[12], loss: 3.6873, train_accuracy: 0.4453, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-10:50:46 Iters: 533200/[12], loss: 3.7098, train_accuracy: 0.4531, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-10:52:49 Iters: 533300/[12], loss: 3.3487, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-10:54:52 Iters: 533400/[12], loss: 3.3013, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-10:56:56 Iters: 533500/[12], loss: 4.0831, train_accuracy: 0.4375, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-10:58:59 Iters: 533600/[12], loss: 3.6850, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-11:01:02 Iters: 533700/[12], loss: 3.1826, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-11:03:05 Iters: 533800/[12], loss: 3.8824, train_accuracy: 0.4531, time: 1.23 s/iter, 
learning rate: 5.0000000000000016e-05 -20220708-11:05:08 Iters: 533900/[12], loss: 2.9400, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-11:07:12 Iters: 534000/[12], loss: 3.4864, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-11:09:15 Iters: 534100/[12], loss: 3.6171, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-11:11:18 Iters: 534200/[12], loss: 3.7534, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-11:13:21 Iters: 534300/[12], loss: 4.1229, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-11:15:24 Iters: 534400/[12], loss: 3.9858, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-11:17:27 Iters: 534500/[12], loss: 3.7987, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-11:19:30 Iters: 534600/[12], loss: 3.2659, train_accuracy: 0.4844, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-11:21:34 Iters: 534700/[12], loss: 3.4520, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-11:23:37 Iters: 534800/[12], loss: 2.6340, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-11:25:40 Iters: 534900/[12], loss: 3.4273, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-11:27:43 Iters: 535000/[12], loss: 3.1260, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-11:29:46 Iters: 535100/[12], loss: 2.9569, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-11:31:49 Iters: 535200/[12], loss: 3.1587, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-11:33:52 Iters: 535300/[12], loss: 2.9853, train_accuracy: 0.5781, time: 
1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-11:35:56 Iters: 535400/[12], loss: 3.2445, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-11:37:59 Iters: 535500/[12], loss: 3.6083, train_accuracy: 0.4688, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-11:40:02 Iters: 535600/[12], loss: 3.5127, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-11:42:05 Iters: 535700/[12], loss: 2.9207, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-11:44:08 Iters: 535800/[12], loss: 2.9108, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-11:46:11 Iters: 535900/[12], loss: 3.4444, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-11:48:15 Iters: 536000/[12], loss: 2.8802, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-11:50:18 Iters: 536100/[12], loss: 4.1135, train_accuracy: 0.4766, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-11:52:21 Iters: 536200/[12], loss: 3.0698, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-11:54:24 Iters: 536300/[12], loss: 3.3661, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-11:56:27 Iters: 536400/[12], loss: 3.5864, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-11:58:30 Iters: 536500/[12], loss: 3.3649, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-12:00:34 Iters: 536600/[12], loss: 3.4705, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-12:02:37 Iters: 536700/[12], loss: 2.9423, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-12:04:40 Iters: 536800/[12], loss: 2.8304, train_accuracy: 
0.5859, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-12:06:43 Iters: 536900/[12], loss: 3.4102, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-12:08:46 Iters: 537000/[12], loss: 3.1550, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-12:10:49 Iters: 537100/[12], loss: 3.2472, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-12:12:53 Iters: 537200/[12], loss: 3.4774, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-12:14:56 Iters: 537300/[12], loss: 3.3882, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-12:16:59 Iters: 537400/[12], loss: 3.8455, train_accuracy: 0.4844, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-12:19:02 Iters: 537500/[12], loss: 3.2747, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-12:21:05 Iters: 537600/[12], loss: 3.8852, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-12:23:08 Iters: 537700/[12], loss: 3.4209, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-12:25:11 Iters: 537800/[12], loss: 3.3440, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-12:27:15 Iters: 537900/[12], loss: 3.8564, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-12:29:18 Iters: 538000/[12], loss: 3.4972, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-12:31:21 Iters: 538100/[12], loss: 3.4300, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-12:33:25 Iters: 538200/[12], loss: 4.4237, train_accuracy: 0.4297, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-12:35:28 Iters: 538300/[12], loss: 3.5656, 
train_accuracy: 0.4766, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-12:37:31 Iters: 538400/[12], loss: 3.8241, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-12:39:34 Iters: 538500/[12], loss: 3.7981, train_accuracy: 0.4453, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-12:41:37 Iters: 538600/[12], loss: 2.8329, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-12:43:40 Iters: 538700/[12], loss: 3.0563, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-12:45:44 Iters: 538800/[12], loss: 3.3858, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-12:47:47 Iters: 538900/[12], loss: 3.1838, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-12:49:50 Iters: 539000/[12], loss: 3.0677, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-12:51:53 Iters: 539100/[12], loss: 3.4848, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-12:53:56 Iters: 539200/[12], loss: 3.3732, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-12:56:00 Iters: 539300/[12], loss: 3.3008, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-12:58:03 Iters: 539400/[12], loss: 3.9783, train_accuracy: 0.4531, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-13:00:06 Iters: 539500/[12], loss: 3.2235, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-13:02:09 Iters: 539600/[12], loss: 3.9870, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-13:04:13 Iters: 539700/[12], loss: 3.5537, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-13:06:16 Iters: 539800/[12], 
loss: 3.4234, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-13:08:19 Iters: 539900/[12], loss: 3.5838, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-13:10:22 Iters: 540000/[12], loss: 3.5072, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-13:10:22 Saving checkpoint: 540000 -20220708-13:11:41 LFW Ave Accuracy: 99.6333 -20220708-13:12:58 AgeDB-30 Ave Accuracy: 96.9667 -20220708-13:14:29 CFP-FP Ave Accuracy: 94.7857 -20220708-13:14:29 Current Best Accuracy: LFW: 99.6333 in iters: 540000, AgeDB-30: 97.0500 in iters: 530000 and CFP-FP: 94.7857 in iters: 540000 -20220708-13:16:32 Iters: 540100/[12], loss: 2.9476, train_accuracy: 0.5703, time: 3.70 s/iter, learning rate: 5.0000000000000016e-05 -20220708-13:18:35 Iters: 540200/[12], loss: 3.5375, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-13:20:38 Iters: 540300/[12], loss: 3.4141, train_accuracy: 0.4375, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-13:22:41 Iters: 540400/[12], loss: 2.8290, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-13:24:45 Iters: 540500/[12], loss: 3.8258, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-13:26:48 Iters: 540600/[12], loss: 3.0204, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-13:28:51 Iters: 540700/[12], loss: 3.1065, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-13:30:54 Iters: 540800/[12], loss: 3.5650, train_accuracy: 0.4688, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-13:32:57 Iters: 540900/[12], loss: 3.3094, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-13:35:00 Iters: 541000/[12], loss: 3.7390, train_accuracy: 0.4766, time: 1.23 s/iter, 
learning rate: 5.0000000000000016e-05 -20220708-13:37:03 Iters: 541100/[12], loss: 2.7926, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-13:39:07 Iters: 541200/[12], loss: 3.4441, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-13:41:10 Iters: 541300/[12], loss: 3.5014, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-13:43:13 Iters: 541400/[12], loss: 3.5559, train_accuracy: 0.4531, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-13:45:16 Iters: 541500/[12], loss: 2.9021, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-13:47:19 Iters: 541600/[12], loss: 3.4972, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-13:49:22 Iters: 541700/[12], loss: 3.1263, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-13:51:26 Iters: 541800/[12], loss: 2.9789, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-13:53:29 Iters: 541900/[12], loss: 3.4708, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-13:55:32 Iters: 542000/[12], loss: 3.6584, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-13:57:35 Iters: 542100/[12], loss: 3.3531, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-13:59:38 Iters: 542200/[12], loss: 3.2166, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-14:01:41 Iters: 542300/[12], loss: 2.8615, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-14:03:45 Iters: 542400/[12], loss: 3.0999, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-14:05:48 Iters: 542500/[12], loss: 2.9674, train_accuracy: 0.5938, time: 
1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-14:07:51 Iters: 542600/[12], loss: 3.8240, train_accuracy: 0.4453, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-14:09:54 Iters: 542700/[12], loss: 3.6342, train_accuracy: 0.4531, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-14:11:57 Iters: 542800/[12], loss: 3.1042, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-14:14:00 Iters: 542900/[12], loss: 4.1338, train_accuracy: 0.4141, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-14:16:03 Iters: 543000/[12], loss: 2.7791, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-14:18:06 Iters: 543100/[12], loss: 3.6449, train_accuracy: 0.4688, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-14:20:09 Iters: 543200/[12], loss: 3.4932, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-14:22:13 Iters: 543300/[12], loss: 3.4816, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-14:24:16 Iters: 543400/[12], loss: 2.9773, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-14:26:19 Iters: 543500/[12], loss: 3.1545, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-14:28:22 Iters: 543600/[12], loss: 3.7921, train_accuracy: 0.4844, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-14:30:25 Iters: 543700/[12], loss: 3.7831, train_accuracy: 0.4375, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-14:32:29 Iters: 543800/[12], loss: 3.1895, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-14:34:32 Iters: 543900/[12], loss: 4.0786, train_accuracy: 0.4609, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-14:36:35 Iters: 544000/[12], loss: 2.8560, train_accuracy: 
0.5703, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-14:38:38 Iters: 544100/[12], loss: 3.2577, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-14:40:41 Iters: 544200/[12], loss: 3.4217, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-14:42:45 Iters: 544300/[12], loss: 3.6040, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-14:44:48 Iters: 544400/[12], loss: 2.8560, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-14:46:51 Iters: 544500/[12], loss: 2.7916, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-14:48:55 Iters: 544600/[12], loss: 3.2711, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-14:50:58 Iters: 544700/[12], loss: 3.7305, train_accuracy: 0.4844, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-14:53:01 Iters: 544800/[12], loss: 3.0191, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-14:55:04 Iters: 544900/[12], loss: 3.6148, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-14:57:08 Iters: 545000/[12], loss: 2.8795, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-14:59:11 Iters: 545100/[12], loss: 3.4069, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-15:01:14 Iters: 545200/[12], loss: 3.3179, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-15:03:17 Iters: 545300/[12], loss: 3.1639, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-15:05:21 Iters: 545400/[12], loss: 3.4076, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-15:07:24 Iters: 545500/[12], loss: 3.2415, 
train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-15:09:27 Iters: 545600/[12], loss: 3.3982, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-15:11:30 Iters: 545700/[12], loss: 3.1028, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-15:13:34 Iters: 545800/[12], loss: 3.8440, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220708-15:15:11 Train Epoch: 13/18 ... -20220708-15:15:37 Iters: 545900/[13], loss: 2.7690, train_accuracy: 0.5703, time: 0.26 s/iter, learning rate: 0.0005000000000000001 -20220708-15:17:40 Iters: 546000/[13], loss: 3.2982, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-15:19:44 Iters: 546100/[13], loss: 3.0298, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-15:21:47 Iters: 546200/[13], loss: 3.1800, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-15:23:50 Iters: 546300/[13], loss: 3.3276, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-15:25:54 Iters: 546400/[13], loss: 3.2401, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-15:27:57 Iters: 546500/[13], loss: 3.0621, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-15:30:00 Iters: 546600/[13], loss: 3.0777, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-15:32:03 Iters: 546700/[13], loss: 2.4689, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-15:34:07 Iters: 546800/[13], loss: 4.4592, train_accuracy: 0.4531, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-15:36:10 Iters: 546900/[13], loss: 3.4790, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220708-15:38:13 Iters: 547000/[13], loss: 3.0617, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-15:40:16 Iters: 547100/[13], loss: 2.8435, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-15:42:19 Iters: 547200/[13], loss: 2.9959, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-15:44:23 Iters: 547300/[13], loss: 3.7850, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-15:46:26 Iters: 547400/[13], loss: 3.2607, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-15:48:29 Iters: 547500/[13], loss: 2.8956, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-15:50:32 Iters: 547600/[13], loss: 3.5172, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-15:52:35 Iters: 547700/[13], loss: 2.7656, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-15:54:38 Iters: 547800/[13], loss: 2.9057, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-15:56:42 Iters: 547900/[13], loss: 2.9383, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-15:58:45 Iters: 548000/[13], loss: 2.7564, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-16:00:48 Iters: 548100/[13], loss: 3.3900, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-16:02:51 Iters: 548200/[13], loss: 3.7974, train_accuracy: 0.4844, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-16:04:54 Iters: 548300/[13], loss: 2.5897, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-16:06:57 Iters: 548400/[13], loss: 3.1201, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220708-16:09:01 Iters: 548500/[13], loss: 3.0897, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-16:11:04 Iters: 548600/[13], loss: 2.9622, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-16:13:07 Iters: 548700/[13], loss: 3.3209, train_accuracy: 0.5547, time: 1.24 s/iter, learning rate: 0.0005000000000000001 -20220708-16:15:11 Iters: 548800/[13], loss: 3.3944, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-16:17:14 Iters: 548900/[13], loss: 2.6343, train_accuracy: 0.6719, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-16:19:17 Iters: 549000/[13], loss: 3.4641, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-16:21:21 Iters: 549100/[13], loss: 3.0399, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-16:23:24 Iters: 549200/[13], loss: 2.8585, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-16:25:27 Iters: 549300/[13], loss: 2.9576, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-16:27:30 Iters: 549400/[13], loss: 3.6218, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-16:29:33 Iters: 549500/[13], loss: 3.1640, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-16:31:36 Iters: 549600/[13], loss: 3.3073, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-16:33:40 Iters: 549700/[13], loss: 3.3561, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-16:35:43 Iters: 549800/[13], loss: 2.9024, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-16:37:46 Iters: 549900/[13], loss: 3.6948, train_accuracy: 0.4844, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220708-16:39:49 Iters: 550000/[13], loss: 2.9487, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-16:39:49 Saving checkpoint: 550000 -20220708-16:41:05 LFW Ave Accuracy: 99.6166 -20220708-16:42:20 AgeDB-30 Ave Accuracy: 96.9333 -20220708-16:43:46 CFP-FP Ave Accuracy: 94.9714 -20220708-16:43:46 Current Best Accuracy: LFW: 99.6333 in iters: 540000, AgeDB-30: 97.0500 in iters: 530000 and CFP-FP: 94.9714 in iters: 550000 -20220708-16:45:49 Iters: 550100/[13], loss: 3.3208, train_accuracy: 0.5391, time: 3.60 s/iter, learning rate: 0.0005000000000000001 -20220708-16:47:52 Iters: 550200/[13], loss: 3.3728, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-16:49:55 Iters: 550300/[13], loss: 3.2119, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-16:51:58 Iters: 550400/[13], loss: 3.2787, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-16:54:01 Iters: 550500/[13], loss: 3.1430, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-16:56:04 Iters: 550600/[13], loss: 3.1111, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-16:58:08 Iters: 550700/[13], loss: 3.3240, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-17:00:11 Iters: 550800/[13], loss: 3.1909, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-17:02:14 Iters: 550900/[13], loss: 2.6840, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-17:04:17 Iters: 551000/[13], loss: 3.3816, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-17:06:20 Iters: 551100/[13], loss: 3.3496, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-17:08:24 Iters: 551200/[13], loss: 3.8466, train_accuracy: 
0.5000, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-17:10:27 Iters: 551300/[13], loss: 2.7119, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-17:12:30 Iters: 551400/[13], loss: 2.5593, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-17:14:33 Iters: 551500/[13], loss: 3.4218, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-17:16:36 Iters: 551600/[13], loss: 3.4325, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-17:18:40 Iters: 551700/[13], loss: 3.4909, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-17:20:43 Iters: 551800/[13], loss: 3.6939, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-17:22:46 Iters: 551900/[13], loss: 3.3687, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-17:24:49 Iters: 552000/[13], loss: 3.1744, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-17:26:52 Iters: 552100/[13], loss: 3.3105, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-17:28:55 Iters: 552200/[13], loss: 2.7966, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-17:30:58 Iters: 552300/[13], loss: 3.1024, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-17:33:01 Iters: 552400/[13], loss: 3.2992, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-17:35:05 Iters: 552500/[13], loss: 3.2982, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-17:37:08 Iters: 552600/[13], loss: 3.5486, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-17:39:11 Iters: 552700/[13], loss: 3.3705, train_accuracy: 
0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-17:41:14 Iters: 552800/[13], loss: 2.8626, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-17:43:17 Iters: 552900/[13], loss: 3.2630, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-17:45:20 Iters: 553000/[13], loss: 3.0408, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-17:47:23 Iters: 553100/[13], loss: 2.8218, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-17:49:27 Iters: 553200/[13], loss: 3.1394, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-17:51:30 Iters: 553300/[13], loss: 3.5649, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-17:53:33 Iters: 553400/[13], loss: 3.6012, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-17:55:36 Iters: 553500/[13], loss: 3.1530, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-17:57:39 Iters: 553600/[13], loss: 3.6841, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-17:59:43 Iters: 553700/[13], loss: 3.6692, train_accuracy: 0.4453, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-18:01:46 Iters: 553800/[13], loss: 3.0526, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-18:03:49 Iters: 553900/[13], loss: 2.6457, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-18:05:52 Iters: 554000/[13], loss: 2.5896, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-18:07:55 Iters: 554100/[13], loss: 3.6426, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-18:09:58 Iters: 554200/[13], loss: 2.8517, train_accuracy: 
0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-18:12:01 Iters: 554300/[13], loss: 3.3215, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-18:14:04 Iters: 554400/[13], loss: 3.3559, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-18:16:07 Iters: 554500/[13], loss: 3.5413, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-18:18:10 Iters: 554600/[13], loss: 2.7406, train_accuracy: 0.7031, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-18:20:14 Iters: 554700/[13], loss: 3.6034, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-18:22:17 Iters: 554800/[13], loss: 3.0800, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-18:24:20 Iters: 554900/[13], loss: 2.4500, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-18:26:23 Iters: 555000/[13], loss: 2.5991, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-18:28:26 Iters: 555100/[13], loss: 3.0647, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-18:30:29 Iters: 555200/[13], loss: 2.6695, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-18:32:32 Iters: 555300/[13], loss: 3.4229, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-18:34:35 Iters: 555400/[13], loss: 3.1616, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-18:36:38 Iters: 555500/[13], loss: 3.0310, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-18:38:41 Iters: 555600/[13], loss: 2.4361, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-18:40:44 Iters: 555700/[13], loss: 3.0259, train_accuracy: 
0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-18:42:47 Iters: 555800/[13], loss: 2.6239, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-18:44:50 Iters: 555900/[13], loss: 3.1680, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-18:46:53 Iters: 556000/[13], loss: 3.0153, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-18:48:56 Iters: 556100/[13], loss: 3.1480, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-18:50:58 Iters: 556200/[13], loss: 3.9841, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-18:53:01 Iters: 556300/[13], loss: 3.7178, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-18:55:04 Iters: 556400/[13], loss: 3.2490, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-18:57:07 Iters: 556500/[13], loss: 3.7554, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-18:59:10 Iters: 556600/[13], loss: 2.4589, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-19:01:13 Iters: 556700/[13], loss: 3.2803, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-19:03:16 Iters: 556800/[13], loss: 3.0028, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-19:05:19 Iters: 556900/[13], loss: 3.1763, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-19:07:22 Iters: 557000/[13], loss: 2.9443, train_accuracy: 0.6797, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-19:09:25 Iters: 557100/[13], loss: 2.8866, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-19:11:28 Iters: 557200/[13], loss: 3.3582, train_accuracy: 
0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-19:13:31 Iters: 557300/[13], loss: 3.4129, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-19:15:33 Iters: 557400/[13], loss: 3.0224, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-19:17:36 Iters: 557500/[13], loss: 3.4068, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-19:19:39 Iters: 557600/[13], loss: 2.8342, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-19:21:42 Iters: 557700/[13], loss: 3.4718, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-19:23:45 Iters: 557800/[13], loss: 3.0665, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-19:25:48 Iters: 557900/[13], loss: 3.1682, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-19:27:51 Iters: 558000/[13], loss: 3.0202, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-19:29:54 Iters: 558100/[13], loss: 3.7339, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-19:31:57 Iters: 558200/[13], loss: 3.6646, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-19:34:00 Iters: 558300/[13], loss: 3.4664, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-19:36:03 Iters: 558400/[13], loss: 2.9276, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-19:38:06 Iters: 558500/[13], loss: 2.7800, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-19:40:09 Iters: 558600/[13], loss: 3.1776, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-19:42:11 Iters: 558700/[13], loss: 2.9136, train_accuracy: 
0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-19:44:14 Iters: 558800/[13], loss: 3.1490, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-19:46:17 Iters: 558900/[13], loss: 3.5306, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-19:48:20 Iters: 559000/[13], loss: 2.8379, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-19:50:24 Iters: 559100/[13], loss: 3.1952, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-19:52:27 Iters: 559200/[13], loss: 2.9256, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-19:54:30 Iters: 559300/[13], loss: 2.3865, train_accuracy: 0.7109, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-19:56:33 Iters: 559400/[13], loss: 2.9232, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-19:58:36 Iters: 559500/[13], loss: 3.3960, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-20:00:39 Iters: 559600/[13], loss: 3.0069, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-20:02:43 Iters: 559700/[13], loss: 2.9036, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-20:04:46 Iters: 559800/[13], loss: 3.6985, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-20:06:49 Iters: 559900/[13], loss: 2.6513, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-20:08:53 Iters: 560000/[13], loss: 2.8748, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-20:08:53 Saving checkpoint: 560000 -20220708-20:10:10 LFW Ave Accuracy: 99.6166 -20220708-20:11:26 AgeDB-30 Ave Accuracy: 97.0833 -20220708-20:12:52 CFP-FP Ave Accuracy: 95.0714 -20220708-20:12:52 
Current Best Accuracy: LFW: 99.6333 in iters: 540000, AgeDB-30: 97.0833 in iters: 560000 and CFP-FP: 95.0714 in iters: 560000 -20220708-20:14:55 Iters: 560100/[13], loss: 3.2549, train_accuracy: 0.5938, time: 3.62 s/iter, learning rate: 0.0005000000000000001 -20220708-20:16:58 Iters: 560200/[13], loss: 3.8192, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-20:19:01 Iters: 560300/[13], loss: 3.5325, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-20:21:05 Iters: 560400/[13], loss: 2.5392, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-20:23:08 Iters: 560500/[13], loss: 3.0924, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-20:25:11 Iters: 560600/[13], loss: 2.9350, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-20:27:14 Iters: 560700/[13], loss: 3.2834, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-20:29:18 Iters: 560800/[13], loss: 3.3322, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-20:31:21 Iters: 560900/[13], loss: 2.9139, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-20:33:24 Iters: 561000/[13], loss: 2.6975, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-20:35:27 Iters: 561100/[13], loss: 3.1987, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-20:37:31 Iters: 561200/[13], loss: 3.2886, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-20:39:34 Iters: 561300/[13], loss: 2.8331, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-20:41:37 Iters: 561400/[13], loss: 3.4247, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220708-20:43:40 Iters: 561500/[13], loss: 3.0167, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-20:45:43 Iters: 561600/[13], loss: 3.7289, train_accuracy: 0.4531, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-20:47:47 Iters: 561700/[13], loss: 3.1421, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-20:49:49 Iters: 561800/[13], loss: 3.2955, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-20:51:52 Iters: 561900/[13], loss: 3.0522, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-20:53:55 Iters: 562000/[13], loss: 3.2255, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-20:55:58 Iters: 562100/[13], loss: 3.1313, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-20:58:01 Iters: 562200/[13], loss: 3.1751, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-21:00:04 Iters: 562300/[13], loss: 3.1439, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-21:02:07 Iters: 562400/[13], loss: 3.3064, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-21:04:10 Iters: 562500/[13], loss: 2.6000, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-21:06:13 Iters: 562600/[13], loss: 3.4032, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-21:08:16 Iters: 562700/[13], loss: 3.4082, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-21:10:19 Iters: 562800/[13], loss: 3.5027, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-21:12:21 Iters: 562900/[13], loss: 3.2367, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220708-21:14:24 Iters: 563000/[13], loss: 2.8757, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-21:16:27 Iters: 563100/[13], loss: 2.8103, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-21:18:30 Iters: 563200/[13], loss: 2.9140, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-21:20:33 Iters: 563300/[13], loss: 3.4990, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-21:22:36 Iters: 563400/[13], loss: 3.4945, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-21:24:38 Iters: 563500/[13], loss: 3.6282, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-21:26:41 Iters: 563600/[13], loss: 3.6896, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-21:28:44 Iters: 563700/[13], loss: 2.6557, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-21:30:47 Iters: 563800/[13], loss: 3.6816, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-21:32:50 Iters: 563900/[13], loss: 2.9585, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-21:34:53 Iters: 564000/[13], loss: 3.5686, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-21:36:56 Iters: 564100/[13], loss: 3.0687, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-21:38:59 Iters: 564200/[13], loss: 3.6105, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-21:41:01 Iters: 564300/[13], loss: 3.6227, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-21:43:04 Iters: 564400/[13], loss: 2.7303, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220708-21:45:07 Iters: 564500/[13], loss: 2.7806, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-21:47:10 Iters: 564600/[13], loss: 3.4537, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-21:49:13 Iters: 564700/[13], loss: 3.2247, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-21:51:16 Iters: 564800/[13], loss: 2.9144, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-21:53:18 Iters: 564900/[13], loss: 3.7259, train_accuracy: 0.4844, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-21:55:21 Iters: 565000/[13], loss: 3.2804, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-21:57:24 Iters: 565100/[13], loss: 3.6476, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-21:59:27 Iters: 565200/[13], loss: 3.1920, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-22:01:30 Iters: 565300/[13], loss: 2.9821, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-22:03:32 Iters: 565400/[13], loss: 2.9352, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-22:05:35 Iters: 565500/[13], loss: 3.4072, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-22:07:38 Iters: 565600/[13], loss: 2.9444, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-22:09:41 Iters: 565700/[13], loss: 3.2127, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-22:11:44 Iters: 565800/[13], loss: 3.1635, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-22:13:47 Iters: 565900/[13], loss: 2.6370, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220708-22:15:50 Iters: 566000/[13], loss: 2.7864, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-22:17:53 Iters: 566100/[13], loss: 3.1616, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-22:19:55 Iters: 566200/[13], loss: 3.3234, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-22:21:58 Iters: 566300/[13], loss: 2.7501, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-22:24:01 Iters: 566400/[13], loss: 2.8169, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-22:26:04 Iters: 566500/[13], loss: 3.2601, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-22:28:07 Iters: 566600/[13], loss: 3.0439, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-22:30:10 Iters: 566700/[13], loss: 3.5201, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-22:32:13 Iters: 566800/[13], loss: 3.3527, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-22:34:16 Iters: 566900/[13], loss: 2.6771, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-22:36:19 Iters: 567000/[13], loss: 3.3724, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-22:38:22 Iters: 567100/[13], loss: 2.6643, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-22:40:25 Iters: 567200/[13], loss: 3.0966, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-22:42:28 Iters: 567300/[13], loss: 3.7286, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-22:44:31 Iters: 567400/[13], loss: 3.8216, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220708-22:46:34 Iters: 567500/[13], loss: 3.4829, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-22:48:37 Iters: 567600/[13], loss: 3.6835, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-22:50:39 Iters: 567700/[13], loss: 2.9696, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-22:52:42 Iters: 567800/[13], loss: 3.2053, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-22:54:45 Iters: 567900/[13], loss: 2.9939, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-22:56:48 Iters: 568000/[13], loss: 3.1027, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-22:58:51 Iters: 568100/[13], loss: 3.2892, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-23:00:54 Iters: 568200/[13], loss: 3.0518, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-23:02:57 Iters: 568300/[13], loss: 3.4673, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-23:05:00 Iters: 568400/[13], loss: 2.3880, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-23:07:03 Iters: 568500/[13], loss: 3.2725, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-23:09:06 Iters: 568600/[13], loss: 2.7605, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-23:11:09 Iters: 568700/[13], loss: 3.9596, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-23:13:12 Iters: 568800/[13], loss: 2.3851, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-23:15:15 Iters: 568900/[13], loss: 3.2666, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220708-23:17:18 Iters: 569000/[13], loss: 2.7029, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-23:19:21 Iters: 569100/[13], loss: 2.6695, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-23:21:24 Iters: 569200/[13], loss: 3.5665, train_accuracy: 0.4844, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-23:23:28 Iters: 569300/[13], loss: 3.5128, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-23:25:31 Iters: 569400/[13], loss: 3.3669, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-23:27:34 Iters: 569500/[13], loss: 3.1152, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-23:29:37 Iters: 569600/[13], loss: 3.0437, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-23:31:40 Iters: 569700/[13], loss: 3.5774, train_accuracy: 0.4766, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-23:33:43 Iters: 569800/[13], loss: 2.7550, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-23:35:45 Iters: 569900/[13], loss: 2.9212, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-23:37:48 Iters: 570000/[13], loss: 3.8033, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-23:37:48 Saving checkpoint: 570000 -20220708-23:39:07 LFW Ave Accuracy: 99.6333 -20220708-23:40:25 AgeDB-30 Ave Accuracy: 97.2167 -20220708-23:41:58 CFP-FP Ave Accuracy: 94.9571 -20220708-23:41:58 Current Best Accuracy: LFW: 99.6333 in iters: 570000, AgeDB-30: 97.2167 in iters: 570000 and CFP-FP: 95.0714 in iters: 560000 -20220708-23:44:00 Iters: 570100/[13], loss: 2.9519, train_accuracy: 0.6016, time: 3.72 s/iter, learning rate: 0.0005000000000000001 -20220708-23:46:03 Iters: 570200/[13], loss: 3.3638, train_accuracy: 
0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-23:48:07 Iters: 570300/[13], loss: 3.1501, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-23:50:10 Iters: 570400/[13], loss: 3.4138, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-23:52:13 Iters: 570500/[13], loss: 2.7183, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-23:54:16 Iters: 570600/[13], loss: 3.2666, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-23:56:19 Iters: 570700/[13], loss: 3.3274, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220708-23:58:22 Iters: 570800/[13], loss: 3.2458, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-00:00:25 Iters: 570900/[13], loss: 3.0952, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-00:02:28 Iters: 571000/[13], loss: 3.2384, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-00:04:31 Iters: 571100/[13], loss: 3.8934, train_accuracy: 0.4375, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-00:06:34 Iters: 571200/[13], loss: 3.3160, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-00:08:37 Iters: 571300/[13], loss: 3.7116, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-00:10:40 Iters: 571400/[13], loss: 3.4504, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-00:12:44 Iters: 571500/[13], loss: 3.1922, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-00:14:47 Iters: 571600/[13], loss: 3.1776, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-00:16:50 Iters: 571700/[13], loss: 3.3238, train_accuracy: 
0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-00:18:52 Iters: 571800/[13], loss: 3.0753, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-00:20:55 Iters: 571900/[13], loss: 3.2041, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-00:22:58 Iters: 572000/[13], loss: 2.6278, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-00:25:01 Iters: 572100/[13], loss: 3.4377, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-00:27:04 Iters: 572200/[13], loss: 2.8231, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-00:29:07 Iters: 572300/[13], loss: 3.4708, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-00:31:10 Iters: 572400/[13], loss: 2.7130, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-00:33:13 Iters: 572500/[13], loss: 3.2694, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-00:35:16 Iters: 572600/[13], loss: 3.8669, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-00:37:19 Iters: 572700/[13], loss: 3.6833, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-00:39:22 Iters: 572800/[13], loss: 2.9422, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-00:41:25 Iters: 572900/[13], loss: 2.8729, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-00:43:28 Iters: 573000/[13], loss: 3.4753, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-00:45:31 Iters: 573100/[13], loss: 3.5200, train_accuracy: 0.4766, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-00:47:34 Iters: 573200/[13], loss: 2.8721, train_accuracy: 
0.6328, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-00:49:37 Iters: 573300/[13], loss: 2.9355, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-00:51:40 Iters: 573400/[13], loss: 3.3783, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-00:53:43 Iters: 573500/[13], loss: 3.1785, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-00:55:46 Iters: 573600/[13], loss: 3.0761, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-00:57:49 Iters: 573700/[13], loss: 2.1440, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-00:59:52 Iters: 573800/[13], loss: 3.1350, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-01:01:56 Iters: 573900/[13], loss: 2.8469, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-01:03:59 Iters: 574000/[13], loss: 3.2407, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-01:06:02 Iters: 574100/[13], loss: 3.5923, train_accuracy: 0.4766, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-01:08:05 Iters: 574200/[13], loss: 3.6763, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-01:10:08 Iters: 574300/[13], loss: 3.2765, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-01:12:11 Iters: 574400/[13], loss: 2.8856, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-01:14:15 Iters: 574500/[13], loss: 2.9957, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-01:16:18 Iters: 574600/[13], loss: 3.6594, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-01:18:21 Iters: 574700/[13], loss: 2.9432, train_accuracy: 
0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-01:20:25 Iters: 574800/[13], loss: 3.4627, train_accuracy: 0.4844, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-01:22:28 Iters: 574900/[13], loss: 3.2074, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-01:24:31 Iters: 575000/[13], loss: 3.3283, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-01:26:34 Iters: 575100/[13], loss: 3.3600, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-01:28:38 Iters: 575200/[13], loss: 2.9741, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-01:30:41 Iters: 575300/[13], loss: 3.3044, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-01:32:44 Iters: 575400/[13], loss: 3.1200, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-01:34:47 Iters: 575500/[13], loss: 3.6809, train_accuracy: 0.4531, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-01:36:50 Iters: 575600/[13], loss: 4.0742, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-01:38:53 Iters: 575700/[13], loss: 2.6990, train_accuracy: 0.6719, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-01:40:56 Iters: 575800/[13], loss: 2.5884, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-01:43:00 Iters: 575900/[13], loss: 3.1097, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-01:45:03 Iters: 576000/[13], loss: 3.7320, train_accuracy: 0.4844, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-01:47:06 Iters: 576100/[13], loss: 3.0668, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-01:49:09 Iters: 576200/[13], loss: 3.6037, train_accuracy: 
0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-01:51:12 Iters: 576300/[13], loss: 3.7275, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-01:53:15 Iters: 576400/[13], loss: 3.1265, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-01:55:18 Iters: 576500/[13], loss: 3.4105, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-01:57:21 Iters: 576600/[13], loss: 2.9322, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-01:59:24 Iters: 576700/[13], loss: 3.4912, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-02:01:27 Iters: 576800/[13], loss: 2.8959, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-02:03:31 Iters: 576900/[13], loss: 3.8201, train_accuracy: 0.4766, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-02:05:34 Iters: 577000/[13], loss: 3.0344, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-02:07:37 Iters: 577100/[13], loss: 2.9543, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-02:09:40 Iters: 577200/[13], loss: 3.2921, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-02:11:43 Iters: 577300/[13], loss: 3.1844, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-02:13:46 Iters: 577400/[13], loss: 2.6670, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-02:15:49 Iters: 577500/[13], loss: 2.9340, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-02:17:52 Iters: 577600/[13], loss: 2.8591, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-02:19:55 Iters: 577700/[13], loss: 3.3134, train_accuracy: 
0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-02:21:58 Iters: 577800/[13], loss: 2.7842, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-02:24:01 Iters: 577900/[13], loss: 3.1002, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-02:26:04 Iters: 578000/[13], loss: 3.4108, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-02:28:07 Iters: 578100/[13], loss: 3.4555, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-02:30:10 Iters: 578200/[13], loss: 3.3020, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-02:32:13 Iters: 578300/[13], loss: 3.2162, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-02:34:16 Iters: 578400/[13], loss: 2.8263, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-02:36:19 Iters: 578500/[13], loss: 3.2963, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-02:38:22 Iters: 578600/[13], loss: 2.7881, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-02:40:26 Iters: 578700/[13], loss: 2.8065, train_accuracy: 0.5859, time: 1.24 s/iter, learning rate: 0.0005000000000000001 -20220709-02:42:29 Iters: 578800/[13], loss: 2.9750, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-02:44:32 Iters: 578900/[13], loss: 2.6062, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-02:46:35 Iters: 579000/[13], loss: 3.1690, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-02:48:38 Iters: 579100/[13], loss: 3.2425, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-02:50:41 Iters: 579200/[13], loss: 3.4346, train_accuracy: 
0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-02:52:44 Iters: 579300/[13], loss: 2.9069, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-02:54:47 Iters: 579400/[13], loss: 3.0003, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-02:56:50 Iters: 579500/[13], loss: 2.7132, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-02:58:52 Iters: 579600/[13], loss: 2.9121, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-03:00:55 Iters: 579700/[13], loss: 3.2476, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-03:02:58 Iters: 579800/[13], loss: 2.4134, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-03:05:01 Iters: 579900/[13], loss: 3.3700, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-03:07:04 Iters: 580000/[13], loss: 3.1507, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-03:07:04 Saving checkpoint: 580000 -20220709-03:08:21 LFW Ave Accuracy: 99.6166 -20220709-03:09:36 AgeDB-30 Ave Accuracy: 97.2000 -20220709-03:11:03 CFP-FP Ave Accuracy: 95.0571 -20220709-03:11:03 Current Best Accuracy: LFW: 99.6333 in iters: 570000, AgeDB-30: 97.2167 in iters: 570000 and CFP-FP: 95.0714 in iters: 560000 -20220709-03:13:06 Iters: 580100/[13], loss: 3.8088, train_accuracy: 0.5703, time: 3.61 s/iter, learning rate: 0.0005000000000000001 -20220709-03:15:09 Iters: 580200/[13], loss: 3.5336, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-03:17:11 Iters: 580300/[13], loss: 3.1797, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-03:19:14 Iters: 580400/[13], loss: 3.4489, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220709-03:21:18 Iters: 580500/[13], loss: 3.2848, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-03:23:21 Iters: 580600/[13], loss: 3.3819, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-03:25:24 Iters: 580700/[13], loss: 2.5523, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-03:27:27 Iters: 580800/[13], loss: 2.9747, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-03:29:30 Iters: 580900/[13], loss: 2.4942, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-03:31:33 Iters: 581000/[13], loss: 3.7447, train_accuracy: 0.4609, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-03:33:36 Iters: 581100/[13], loss: 3.3319, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-03:35:39 Iters: 581200/[13], loss: 4.0387, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-03:37:42 Iters: 581300/[13], loss: 3.0692, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-03:39:45 Iters: 581400/[13], loss: 3.3809, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-03:41:48 Iters: 581500/[13], loss: 3.1140, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-03:43:51 Iters: 581600/[13], loss: 2.8097, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-03:45:54 Iters: 581700/[13], loss: 3.2941, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-03:47:57 Iters: 581800/[13], loss: 3.1005, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-03:50:00 Iters: 581900/[13], loss: 3.6680, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220709-03:52:03 Iters: 582000/[13], loss: 2.9866, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-03:54:07 Iters: 582100/[13], loss: 3.1110, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-03:56:10 Iters: 582200/[13], loss: 2.9174, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-03:58:13 Iters: 582300/[13], loss: 3.5897, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-04:00:16 Iters: 582400/[13], loss: 3.5313, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-04:02:19 Iters: 582500/[13], loss: 3.3953, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-04:04:22 Iters: 582600/[13], loss: 3.3450, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-04:06:25 Iters: 582700/[13], loss: 2.8890, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-04:08:28 Iters: 582800/[13], loss: 3.0490, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-04:10:31 Iters: 582900/[13], loss: 3.1339, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-04:12:34 Iters: 583000/[13], loss: 3.0763, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-04:14:37 Iters: 583100/[13], loss: 3.2021, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-04:16:40 Iters: 583200/[13], loss: 3.0799, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-04:18:43 Iters: 583300/[13], loss: 3.4180, train_accuracy: 0.4844, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-04:20:46 Iters: 583400/[13], loss: 3.1979, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220709-04:22:49 Iters: 583500/[13], loss: 3.4750, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-04:24:52 Iters: 583600/[13], loss: 2.9536, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-04:26:55 Iters: 583700/[13], loss: 3.3007, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-04:28:58 Iters: 583800/[13], loss: 3.4305, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-04:31:01 Iters: 583900/[13], loss: 3.3457, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-04:33:05 Iters: 584000/[13], loss: 3.6745, train_accuracy: 0.4766, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-04:35:08 Iters: 584100/[13], loss: 3.0659, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-04:37:11 Iters: 584200/[13], loss: 3.3546, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-04:39:14 Iters: 584300/[13], loss: 3.5635, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-04:41:17 Iters: 584400/[13], loss: 3.2861, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-04:43:20 Iters: 584500/[13], loss: 3.3244, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-04:45:23 Iters: 584600/[13], loss: 2.4938, train_accuracy: 0.6719, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-04:47:26 Iters: 584700/[13], loss: 2.9484, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-04:49:29 Iters: 584800/[13], loss: 3.1801, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-04:51:32 Iters: 584900/[13], loss: 2.8890, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220709-04:53:35 Iters: 585000/[13], loss: 2.9988, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-04:55:38 Iters: 585100/[13], loss: 3.7518, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-04:57:42 Iters: 585200/[13], loss: 3.4473, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-04:59:45 Iters: 585300/[13], loss: 2.8996, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-05:01:48 Iters: 585400/[13], loss: 3.5145, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-05:03:51 Iters: 585500/[13], loss: 3.4692, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-05:05:54 Iters: 585600/[13], loss: 2.6847, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-05:07:57 Iters: 585700/[13], loss: 3.0420, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-05:09:59 Iters: 585800/[13], loss: 3.1688, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-05:12:02 Iters: 585900/[13], loss: 3.2274, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-05:14:05 Iters: 586000/[13], loss: 3.2901, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-05:16:08 Iters: 586100/[13], loss: 2.9996, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-05:18:11 Iters: 586200/[13], loss: 3.3275, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-05:20:14 Iters: 586300/[13], loss: 2.8019, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-05:22:17 Iters: 586400/[13], loss: 3.5593, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220709-05:24:20 Iters: 586500/[13], loss: 3.0662, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-05:26:23 Iters: 586600/[13], loss: 3.2284, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-05:28:26 Iters: 586700/[13], loss: 2.7706, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-05:30:29 Iters: 586800/[13], loss: 2.6295, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-05:32:32 Iters: 586900/[13], loss: 3.4240, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-05:34:35 Iters: 587000/[13], loss: 3.3017, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-05:36:38 Iters: 587100/[13], loss: 3.2638, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-05:38:41 Iters: 587200/[13], loss: 2.9017, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-05:40:43 Iters: 587300/[13], loss: 3.7549, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-05:42:46 Iters: 587400/[13], loss: 3.2549, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-05:44:49 Iters: 587500/[13], loss: 3.2113, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-05:46:52 Iters: 587600/[13], loss: 2.5497, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-05:48:55 Iters: 587700/[13], loss: 3.1278, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-05:50:58 Iters: 587800/[13], loss: 2.8169, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-05:53:01 Iters: 587900/[13], loss: 3.1486, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220709-05:55:04 Iters: 588000/[13], loss: 3.1668, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-05:57:08 Iters: 588100/[13], loss: 3.4026, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-05:59:11 Iters: 588200/[13], loss: 3.1051, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-06:01:14 Iters: 588300/[13], loss: 3.5573, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-06:03:18 Iters: 588400/[13], loss: 3.0497, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-06:05:21 Iters: 588500/[13], loss: 2.9619, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-06:07:24 Iters: 588600/[13], loss: 3.2176, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-06:09:27 Iters: 588700/[13], loss: 3.0630, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-06:11:30 Iters: 588800/[13], loss: 3.0236, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-06:13:33 Iters: 588900/[13], loss: 3.2936, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-06:15:36 Iters: 589000/[13], loss: 3.5114, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-06:17:39 Iters: 589100/[13], loss: 2.9218, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-06:19:42 Iters: 589200/[13], loss: 2.7707, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-06:21:45 Iters: 589300/[13], loss: 3.8978, train_accuracy: 0.4766, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-06:23:48 Iters: 589400/[13], loss: 3.4250, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220709-06:25:51 Iters: 589500/[13], loss: 3.1138, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-06:27:54 Iters: 589600/[13], loss: 3.2626, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-06:29:58 Iters: 589700/[13], loss: 3.4273, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-06:32:01 Iters: 589800/[13], loss: 3.6746, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-06:34:04 Iters: 589900/[13], loss: 2.6497, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-06:36:07 Iters: 590000/[13], loss: 2.9710, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-06:36:07 Saving checkpoint: 590000 -20220709-06:37:26 LFW Ave Accuracy: 99.6500 -20220709-06:38:45 AgeDB-30 Ave Accuracy: 97.1667 -20220709-06:40:17 CFP-FP Ave Accuracy: 94.8714 -20220709-06:40:17 Current Best Accuracy: LFW: 99.6500 in iters: 590000, AgeDB-30: 97.2167 in iters: 570000 and CFP-FP: 95.0714 in iters: 560000 -20220709-06:42:20 Iters: 590100/[13], loss: 3.7771, train_accuracy: 0.5312, time: 3.73 s/iter, learning rate: 0.0005000000000000001 -20220709-06:44:23 Iters: 590200/[13], loss: 3.3010, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-06:46:27 Iters: 590300/[13], loss: 3.1683, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-06:48:30 Iters: 590400/[13], loss: 3.2930, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-06:50:33 Iters: 590500/[13], loss: 3.1827, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-06:52:36 Iters: 590600/[13], loss: 3.3761, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-06:54:39 Iters: 590700/[13], loss: 2.4834, train_accuracy: 
0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-06:56:42 Iters: 590800/[13], loss: 3.7662, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-06:58:45 Iters: 590900/[13], loss: 3.1639, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-07:00:49 Iters: 591000/[13], loss: 3.8619, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-07:02:52 Iters: 591100/[13], loss: 3.0734, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-07:04:55 Iters: 591200/[13], loss: 3.1408, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-07:06:58 Iters: 591300/[13], loss: 2.8131, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-07:08:24 Train Epoch: 14/18 ... -20220709-07:09:01 Iters: 591400/[14], loss: 2.6771, train_accuracy: 0.6328, time: 0.38 s/iter, learning rate: 0.0005000000000000001 -20220709-07:11:05 Iters: 591500/[14], loss: 2.8399, train_accuracy: 0.6641, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-07:13:08 Iters: 591600/[14], loss: 3.6744, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-07:15:11 Iters: 591700/[14], loss: 3.2697, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-07:17:14 Iters: 591800/[14], loss: 3.5016, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-07:19:18 Iters: 591900/[14], loss: 3.1012, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-07:21:21 Iters: 592000/[14], loss: 2.9844, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-07:23:24 Iters: 592100/[14], loss: 3.2107, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-07:25:27 Iters: 
592200/[14], loss: 3.3858, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-07:27:31 Iters: 592300/[14], loss: 2.8368, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-07:29:34 Iters: 592400/[14], loss: 3.1326, train_accuracy: 0.5391, time: 1.24 s/iter, learning rate: 0.0005000000000000001 -20220709-07:31:37 Iters: 592500/[14], loss: 2.7969, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-07:33:41 Iters: 592600/[14], loss: 3.1048, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-07:35:44 Iters: 592700/[14], loss: 2.9726, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-07:37:47 Iters: 592800/[14], loss: 3.6476, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-07:39:50 Iters: 592900/[14], loss: 3.5173, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-07:41:53 Iters: 593000/[14], loss: 2.8491, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-07:43:56 Iters: 593100/[14], loss: 2.7434, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-07:45:59 Iters: 593200/[14], loss: 2.9499, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-07:48:02 Iters: 593300/[14], loss: 2.1376, train_accuracy: 0.6953, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-07:50:06 Iters: 593400/[14], loss: 2.9579, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-07:52:09 Iters: 593500/[14], loss: 3.2884, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-07:54:12 Iters: 593600/[14], loss: 3.1248, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-07:56:15 Iters: 
593700/[14], loss: 2.7254, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-07:58:18 Iters: 593800/[14], loss: 3.3039, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-08:00:21 Iters: 593900/[14], loss: 2.9137, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-08:02:24 Iters: 594000/[14], loss: 3.8798, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-08:04:27 Iters: 594100/[14], loss: 2.8208, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-08:06:30 Iters: 594200/[14], loss: 3.3653, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-08:08:33 Iters: 594300/[14], loss: 2.7797, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-08:10:36 Iters: 594400/[14], loss: 2.8489, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-08:12:39 Iters: 594500/[14], loss: 3.0824, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-08:14:42 Iters: 594600/[14], loss: 3.6395, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-08:16:45 Iters: 594700/[14], loss: 3.4120, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-08:18:49 Iters: 594800/[14], loss: 3.1523, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-08:20:52 Iters: 594900/[14], loss: 2.8651, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-08:22:55 Iters: 595000/[14], loss: 2.9520, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-08:24:58 Iters: 595100/[14], loss: 3.2539, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-08:27:01 Iters: 
595200/[14], loss: 2.4288, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-08:29:04 Iters: 595300/[14], loss: 3.1235, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-08:31:07 Iters: 595400/[14], loss: 2.9779, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-08:33:11 Iters: 595500/[14], loss: 2.7932, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-08:35:14 Iters: 595600/[14], loss: 3.0092, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-08:37:17 Iters: 595700/[14], loss: 2.8079, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-08:39:20 Iters: 595800/[14], loss: 2.9847, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-08:41:23 Iters: 595900/[14], loss: 2.9674, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-08:43:26 Iters: 596000/[14], loss: 3.2368, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-08:45:29 Iters: 596100/[14], loss: 2.9130, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-08:47:32 Iters: 596200/[14], loss: 3.6132, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-08:49:35 Iters: 596300/[14], loss: 3.2148, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-08:51:38 Iters: 596400/[14], loss: 2.8245, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-08:53:41 Iters: 596500/[14], loss: 3.4488, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-08:55:44 Iters: 596600/[14], loss: 3.0894, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-08:57:47 Iters: 
596700/[14], loss: 3.6485, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-08:59:50 Iters: 596800/[14], loss: 2.6241, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-09:01:54 Iters: 596900/[14], loss: 3.2394, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-09:03:57 Iters: 597000/[14], loss: 2.5949, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-09:06:00 Iters: 597100/[14], loss: 3.0232, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-09:08:03 Iters: 597200/[14], loss: 2.9142, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-09:10:06 Iters: 597300/[14], loss: 2.7734, train_accuracy: 0.6641, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-09:12:09 Iters: 597400/[14], loss: 3.3594, train_accuracy: 0.4766, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-09:14:12 Iters: 597500/[14], loss: 3.2097, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-09:16:15 Iters: 597600/[14], loss: 2.7069, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-09:18:18 Iters: 597700/[14], loss: 3.2376, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-09:20:21 Iters: 597800/[14], loss: 2.9281, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-09:22:24 Iters: 597900/[14], loss: 3.2055, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-09:24:27 Iters: 598000/[14], loss: 2.4608, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-09:26:30 Iters: 598100/[14], loss: 2.9603, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-09:28:33 Iters: 
598200/[14], loss: 2.9800, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-09:30:36 Iters: 598300/[14], loss: 2.4410, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-09:32:39 Iters: 598400/[14], loss: 3.1383, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-09:34:42 Iters: 598500/[14], loss: 3.5748, train_accuracy: 0.4688, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-09:36:45 Iters: 598600/[14], loss: 3.2090, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-09:38:48 Iters: 598700/[14], loss: 3.0295, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-09:40:51 Iters: 598800/[14], loss: 3.4384, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-09:42:54 Iters: 598900/[14], loss: 2.9461, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-09:44:57 Iters: 599000/[14], loss: 3.4736, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-09:47:00 Iters: 599100/[14], loss: 3.2519, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-09:49:04 Iters: 599200/[14], loss: 3.2472, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-09:51:07 Iters: 599300/[14], loss: 3.3813, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-09:53:10 Iters: 599400/[14], loss: 3.3245, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-09:55:14 Iters: 599500/[14], loss: 2.9510, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-09:57:17 Iters: 599600/[14], loss: 3.0432, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-09:59:20 Iters: 
599700/[14], loss: 3.0715, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-10:01:24 Iters: 599800/[14], loss: 3.5193, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-10:03:27 Iters: 599900/[14], loss: 3.3485, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-10:05:30 Iters: 600000/[14], loss: 3.0502, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-10:05:30 Saving checkpoint: 600000 -20220709-10:06:47 LFW Ave Accuracy: 99.6666 -20220709-10:08:02 AgeDB-30 Ave Accuracy: 97.2833 -20220709-10:09:29 CFP-FP Ave Accuracy: 95.1286 -20220709-10:09:29 Current Best Accuracy: LFW: 99.6666 in iters: 600000, AgeDB-30: 97.2833 in iters: 600000 and CFP-FP: 95.1286 in iters: 600000 -20220709-10:11:32 Iters: 600100/[14], loss: 3.4286, train_accuracy: 0.5703, time: 3.62 s/iter, learning rate: 0.0005000000000000001 -20220709-10:13:35 Iters: 600200/[14], loss: 2.5018, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-10:15:39 Iters: 600300/[14], loss: 3.0295, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-10:17:42 Iters: 600400/[14], loss: 3.5669, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-10:19:45 Iters: 600500/[14], loss: 3.1980, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-10:21:49 Iters: 600600/[14], loss: 3.4768, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-10:23:52 Iters: 600700/[14], loss: 3.5689, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-10:25:55 Iters: 600800/[14], loss: 2.4753, train_accuracy: 0.6641, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-10:27:59 Iters: 600900/[14], loss: 3.2682, train_accuracy: 0.5469, time: 1.23 s/iter, 
learning rate: 0.0005000000000000001 -20220709-10:30:02 Iters: 601000/[14], loss: 3.4180, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-10:32:05 Iters: 601100/[14], loss: 2.7064, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-10:34:08 Iters: 601200/[14], loss: 3.1104, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-10:36:12 Iters: 601300/[14], loss: 2.6096, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-10:38:15 Iters: 601400/[14], loss: 2.9503, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-10:40:18 Iters: 601500/[14], loss: 3.6450, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-10:42:22 Iters: 601600/[14], loss: 3.4333, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-10:44:25 Iters: 601700/[14], loss: 3.5753, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-10:46:28 Iters: 601800/[14], loss: 3.1425, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-10:48:32 Iters: 601900/[14], loss: 3.3012, train_accuracy: 0.4766, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-10:50:35 Iters: 602000/[14], loss: 3.3202, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-10:52:38 Iters: 602100/[14], loss: 2.9457, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-10:54:42 Iters: 602200/[14], loss: 3.1182, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-10:56:45 Iters: 602300/[14], loss: 2.6721, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-10:58:49 Iters: 602400/[14], loss: 2.8463, train_accuracy: 0.5938, time: 1.23 s/iter, 
learning rate: 0.0005000000000000001 -20220709-11:00:52 Iters: 602500/[14], loss: 3.1204, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-11:02:55 Iters: 602600/[14], loss: 2.8839, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-11:04:59 Iters: 602700/[14], loss: 2.6780, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-11:07:02 Iters: 602800/[14], loss: 2.9013, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-11:09:05 Iters: 602900/[14], loss: 2.6891, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-11:11:09 Iters: 603000/[14], loss: 2.6576, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-11:13:12 Iters: 603100/[14], loss: 2.7662, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-11:15:15 Iters: 603200/[14], loss: 3.0377, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-11:17:19 Iters: 603300/[14], loss: 2.6150, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-11:19:22 Iters: 603400/[14], loss: 2.8605, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-11:21:25 Iters: 603500/[14], loss: 2.7132, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-11:23:29 Iters: 603600/[14], loss: 2.8352, train_accuracy: 0.5938, time: 1.24 s/iter, learning rate: 0.0005000000000000001 -20220709-11:25:32 Iters: 603700/[14], loss: 3.1467, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-11:27:36 Iters: 603800/[14], loss: 3.2065, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-11:29:39 Iters: 603900/[14], loss: 3.1367, train_accuracy: 0.5703, time: 1.23 s/iter, 
learning rate: 0.0005000000000000001 -20220709-11:31:42 Iters: 604000/[14], loss: 2.8090, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-11:33:46 Iters: 604100/[14], loss: 3.3177, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-11:35:49 Iters: 604200/[14], loss: 3.2600, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-11:37:52 Iters: 604300/[14], loss: 2.4912, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-11:39:55 Iters: 604400/[14], loss: 2.7985, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-11:41:59 Iters: 604500/[14], loss: 2.8401, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-11:44:02 Iters: 604600/[14], loss: 3.1397, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-11:46:05 Iters: 604700/[14], loss: 3.0959, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-11:48:09 Iters: 604800/[14], loss: 2.8924, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-11:50:12 Iters: 604900/[14], loss: 3.3947, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-11:52:15 Iters: 605000/[14], loss: 3.1544, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-11:54:19 Iters: 605100/[14], loss: 3.0536, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-11:56:22 Iters: 605200/[14], loss: 2.7900, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-11:58:25 Iters: 605300/[14], loss: 2.9996, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-12:00:29 Iters: 605400/[14], loss: 2.6793, train_accuracy: 0.6172, time: 1.23 s/iter, 
learning rate: 0.0005000000000000001 -20220709-12:02:32 Iters: 605500/[14], loss: 3.1633, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-12:04:35 Iters: 605600/[14], loss: 3.3813, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-12:06:39 Iters: 605700/[14], loss: 3.1408, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-12:08:42 Iters: 605800/[14], loss: 2.8813, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-12:10:45 Iters: 605900/[14], loss: 2.5866, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-12:12:49 Iters: 606000/[14], loss: 3.5322, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-12:14:52 Iters: 606100/[14], loss: 2.7192, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-12:16:55 Iters: 606200/[14], loss: 3.0285, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-12:18:59 Iters: 606300/[14], loss: 3.5989, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-12:21:02 Iters: 606400/[14], loss: 3.3536, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-12:23:05 Iters: 606500/[14], loss: 3.0833, train_accuracy: 0.5938, time: 1.24 s/iter, learning rate: 0.0005000000000000001 -20220709-12:25:09 Iters: 606600/[14], loss: 2.9272, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-12:27:12 Iters: 606700/[14], loss: 2.7626, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-12:29:15 Iters: 606800/[14], loss: 2.9860, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-12:31:19 Iters: 606900/[14], loss: 3.4734, train_accuracy: 0.5547, time: 1.23 s/iter, 
learning rate: 0.0005000000000000001 -20220709-12:33:22 Iters: 607000/[14], loss: 2.6543, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-12:35:26 Iters: 607100/[14], loss: 3.2918, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-12:37:29 Iters: 607200/[14], loss: 2.5361, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-12:39:32 Iters: 607300/[14], loss: 2.9315, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-12:41:36 Iters: 607400/[14], loss: 3.5520, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-12:43:39 Iters: 607500/[14], loss: 3.2783, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-12:45:42 Iters: 607600/[14], loss: 4.0574, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-12:47:46 Iters: 607700/[14], loss: 2.8459, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-12:49:49 Iters: 607800/[14], loss: 3.2417, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-12:51:52 Iters: 607900/[14], loss: 3.3291, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-12:53:55 Iters: 608000/[14], loss: 2.9709, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-12:55:59 Iters: 608100/[14], loss: 2.8459, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-12:58:02 Iters: 608200/[14], loss: 2.8771, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-13:00:05 Iters: 608300/[14], loss: 3.1411, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-13:02:09 Iters: 608400/[14], loss: 2.7864, train_accuracy: 0.5469, time: 1.23 s/iter, 
learning rate: 0.0005000000000000001 -20220709-13:04:12 Iters: 608500/[14], loss: 3.0132, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-13:06:15 Iters: 608600/[14], loss: 2.5683, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-13:08:19 Iters: 608700/[14], loss: 2.8128, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-13:10:22 Iters: 608800/[14], loss: 3.1193, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-13:12:25 Iters: 608900/[14], loss: 2.9749, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-13:14:29 Iters: 609000/[14], loss: 3.7638, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-13:16:32 Iters: 609100/[14], loss: 3.0866, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-13:18:35 Iters: 609200/[14], loss: 2.9505, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-13:20:39 Iters: 609300/[14], loss: 3.1841, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-13:22:42 Iters: 609400/[14], loss: 3.4253, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-13:24:45 Iters: 609500/[14], loss: 2.9002, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-13:26:48 Iters: 609600/[14], loss: 2.6845, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-13:28:52 Iters: 609700/[14], loss: 2.7861, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-13:30:55 Iters: 609800/[14], loss: 3.4582, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-13:32:59 Iters: 609900/[14], loss: 3.4207, train_accuracy: 0.5312, time: 1.23 s/iter, 
learning rate: 0.0005000000000000001 -20220709-13:35:02 Iters: 610000/[14], loss: 3.5836, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-13:35:02 Saving checkpoint: 610000 -20220709-13:36:21 LFW Ave Accuracy: 99.6500 -20220709-13:37:37 AgeDB-30 Ave Accuracy: 97.2500 -20220709-13:39:05 CFP-FP Ave Accuracy: 95.0571 -20220709-13:39:05 Current Best Accuracy: LFW: 99.6666 in iters: 600000, AgeDB-30: 97.2833 in iters: 600000 and CFP-FP: 95.1286 in iters: 600000 -20220709-13:41:08 Iters: 610100/[14], loss: 3.2902, train_accuracy: 0.5859, time: 3.66 s/iter, learning rate: 0.0005000000000000001 -20220709-13:43:11 Iters: 610200/[14], loss: 2.8781, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-13:45:15 Iters: 610300/[14], loss: 3.1686, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-13:47:18 Iters: 610400/[14], loss: 2.6364, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-13:49:21 Iters: 610500/[14], loss: 2.6420, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-13:51:25 Iters: 610600/[14], loss: 2.8623, train_accuracy: 0.5391, time: 1.24 s/iter, learning rate: 0.0005000000000000001 -20220709-13:53:28 Iters: 610700/[14], loss: 3.1295, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-13:55:32 Iters: 610800/[14], loss: 3.1396, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-13:57:35 Iters: 610900/[14], loss: 2.9753, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-13:59:38 Iters: 611000/[14], loss: 2.8950, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-14:01:41 Iters: 611100/[14], loss: 2.6832, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-14:03:44 Iters: 
611200/[14], loss: 3.3263, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-14:05:48 Iters: 611300/[14], loss: 2.9206, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-14:07:51 Iters: 611400/[14], loss: 3.0832, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-14:09:54 Iters: 611500/[14], loss: 2.4560, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-14:11:57 Iters: 611600/[14], loss: 3.5505, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-14:14:00 Iters: 611700/[14], loss: 2.9564, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-14:16:03 Iters: 611800/[14], loss: 3.0118, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-14:18:06 Iters: 611900/[14], loss: 3.3578, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-14:20:08 Iters: 612000/[14], loss: 3.0957, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-14:22:11 Iters: 612100/[14], loss: 3.4378, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-14:24:14 Iters: 612200/[14], loss: 2.9082, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-14:26:17 Iters: 612300/[14], loss: 3.1910, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-14:28:20 Iters: 612400/[14], loss: 2.5100, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-14:30:23 Iters: 612500/[14], loss: 2.7507, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-14:32:26 Iters: 612600/[14], loss: 2.6722, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-14:34:28 Iters: 
612700/[14], loss: 3.6106, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-14:36:31 Iters: 612800/[14], loss: 3.3498, train_accuracy: 0.4609, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-14:38:34 Iters: 612900/[14], loss: 3.0842, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-14:40:37 Iters: 613000/[14], loss: 3.5989, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-14:42:40 Iters: 613100/[14], loss: 2.9438, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-14:44:43 Iters: 613200/[14], loss: 3.2017, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-14:46:46 Iters: 613300/[14], loss: 2.7175, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-14:48:49 Iters: 613400/[14], loss: 2.7700, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-14:50:51 Iters: 613500/[14], loss: 4.0367, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-14:52:54 Iters: 613600/[14], loss: 2.7030, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-14:54:57 Iters: 613700/[14], loss: 3.0543, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-14:57:00 Iters: 613800/[14], loss: 3.1826, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-14:59:03 Iters: 613900/[14], loss: 3.4679, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-15:01:06 Iters: 614000/[14], loss: 3.2217, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-15:03:09 Iters: 614100/[14], loss: 3.2831, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-15:05:12 Iters: 
614200/[14], loss: 2.6711, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-15:07:14 Iters: 614300/[14], loss: 2.9125, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-15:09:17 Iters: 614400/[14], loss: 2.9492, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-15:11:20 Iters: 614500/[14], loss: 3.3908, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-15:13:23 Iters: 614600/[14], loss: 3.2771, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-15:15:26 Iters: 614700/[14], loss: 2.9109, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-15:17:29 Iters: 614800/[14], loss: 3.5405, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-15:19:32 Iters: 614900/[14], loss: 2.8019, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-15:21:35 Iters: 615000/[14], loss: 2.7127, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-15:23:38 Iters: 615100/[14], loss: 3.2471, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-15:25:41 Iters: 615200/[14], loss: 3.7511, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-15:27:43 Iters: 615300/[14], loss: 2.8175, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-15:29:46 Iters: 615400/[14], loss: 3.7329, train_accuracy: 0.4844, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-15:31:49 Iters: 615500/[14], loss: 3.3674, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-15:33:52 Iters: 615600/[14], loss: 3.4473, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-15:35:54 Iters: 
615700/[14], loss: 2.5858, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-15:37:57 Iters: 615800/[14], loss: 3.3044, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-15:40:00 Iters: 615900/[14], loss: 2.7334, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-15:42:03 Iters: 616000/[14], loss: 3.4520, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-15:44:05 Iters: 616100/[14], loss: 3.3198, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-15:46:08 Iters: 616200/[14], loss: 3.0871, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-15:48:11 Iters: 616300/[14], loss: 3.1834, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-15:50:14 Iters: 616400/[14], loss: 2.8872, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-15:52:17 Iters: 616500/[14], loss: 3.3014, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-15:54:19 Iters: 616600/[14], loss: 4.0065, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-15:56:22 Iters: 616700/[14], loss: 2.8298, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-15:58:25 Iters: 616800/[14], loss: 3.4891, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-16:00:28 Iters: 616900/[14], loss: 2.6257, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-16:02:31 Iters: 617000/[14], loss: 3.9859, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-16:04:33 Iters: 617100/[14], loss: 2.7410, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-16:06:36 Iters: 
617200/[14], loss: 3.3278, train_accuracy: 0.4844, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-16:08:39 Iters: 617300/[14], loss: 2.8714, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-16:10:42 Iters: 617400/[14], loss: 3.8549, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-16:12:45 Iters: 617500/[14], loss: 2.8546, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-16:14:48 Iters: 617600/[14], loss: 3.0866, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-16:16:51 Iters: 617700/[14], loss: 2.7664, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-16:18:53 Iters: 617800/[14], loss: 2.9929, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-16:20:56 Iters: 617900/[14], loss: 2.6761, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-16:22:59 Iters: 618000/[14], loss: 3.1066, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-16:25:02 Iters: 618100/[14], loss: 3.2617, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-16:27:05 Iters: 618200/[14], loss: 2.8930, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-16:29:08 Iters: 618300/[14], loss: 2.6065, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-16:31:10 Iters: 618400/[14], loss: 2.5837, train_accuracy: 0.6719, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-16:33:13 Iters: 618500/[14], loss: 2.8873, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-16:35:16 Iters: 618600/[14], loss: 2.8427, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-16:37:19 Iters: 
618700/[14], loss: 2.7079, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-16:39:22 Iters: 618800/[14], loss: 3.4230, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-16:41:24 Iters: 618900/[14], loss: 3.2699, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-16:43:27 Iters: 619000/[14], loss: 2.9234, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-16:45:30 Iters: 619100/[14], loss: 3.0598, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-16:47:33 Iters: 619200/[14], loss: 2.8822, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-16:49:36 Iters: 619300/[14], loss: 3.1070, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-16:51:39 Iters: 619400/[14], loss: 2.6427, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-16:53:42 Iters: 619500/[14], loss: 3.0712, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-16:55:44 Iters: 619600/[14], loss: 3.3505, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-16:57:47 Iters: 619700/[14], loss: 3.3540, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-16:59:50 Iters: 619800/[14], loss: 2.8394, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-17:01:53 Iters: 619900/[14], loss: 2.2169, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-17:03:56 Iters: 620000/[14], loss: 3.0551, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-17:03:56 Saving checkpoint: 620000 -20220709-17:05:14 LFW Ave Accuracy: 99.6500 -20220709-17:06:31 AgeDB-30 Ave Accuracy: 97.2500 -20220709-17:08:01 
CFP-FP Ave Accuracy: 94.8286 -20220709-17:08:01 Current Best Accuracy: LFW: 99.6666 in iters: 600000, AgeDB-30: 97.2833 in iters: 600000 and CFP-FP: 95.1286 in iters: 600000 -20220709-17:10:03 Iters: 620100/[14], loss: 3.6303, train_accuracy: 0.5000, time: 3.68 s/iter, learning rate: 0.0005000000000000001 -20220709-17:12:06 Iters: 620200/[14], loss: 3.1588, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-17:14:09 Iters: 620300/[14], loss: 2.7426, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-17:16:12 Iters: 620400/[14], loss: 3.5926, train_accuracy: 0.4844, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-17:18:15 Iters: 620500/[14], loss: 3.6818, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-17:20:18 Iters: 620600/[14], loss: 3.4750, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-17:22:21 Iters: 620700/[14], loss: 2.9616, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-17:24:24 Iters: 620800/[14], loss: 3.8640, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-17:26:26 Iters: 620900/[14], loss: 3.2488, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-17:28:29 Iters: 621000/[14], loss: 3.0671, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-17:30:32 Iters: 621100/[14], loss: 3.2244, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-17:32:35 Iters: 621200/[14], loss: 3.0139, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-17:34:38 Iters: 621300/[14], loss: 2.2686, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-17:36:41 Iters: 621400/[14], loss: 3.6168, train_accuracy: 0.5078, time: 1.23 s/iter, 
learning rate: 0.0005000000000000001 -20220709-17:38:44 Iters: 621500/[14], loss: 2.9380, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-17:40:47 Iters: 621600/[14], loss: 2.9200, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-17:42:50 Iters: 621700/[14], loss: 3.4899, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-17:44:53 Iters: 621800/[14], loss: 2.7551, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-17:46:56 Iters: 621900/[14], loss: 3.0364, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-17:48:58 Iters: 622000/[14], loss: 3.2374, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-17:51:01 Iters: 622100/[14], loss: 3.0914, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-17:53:04 Iters: 622200/[14], loss: 3.4377, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-17:55:07 Iters: 622300/[14], loss: 2.9979, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-17:57:10 Iters: 622400/[14], loss: 3.8193, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-17:59:13 Iters: 622500/[14], loss: 3.3280, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-18:01:15 Iters: 622600/[14], loss: 3.2725, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-18:03:18 Iters: 622700/[14], loss: 2.8892, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-18:05:21 Iters: 622800/[14], loss: 2.5863, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-18:07:24 Iters: 622900/[14], loss: 2.8094, train_accuracy: 0.6016, time: 1.23 s/iter, 
learning rate: 0.0005000000000000001 -20220709-18:09:27 Iters: 623000/[14], loss: 3.3163, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-18:11:30 Iters: 623100/[14], loss: 3.5893, train_accuracy: 0.4844, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-18:13:33 Iters: 623200/[14], loss: 3.0952, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-18:15:35 Iters: 623300/[14], loss: 3.1344, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-18:17:38 Iters: 623400/[14], loss: 3.3117, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-18:19:41 Iters: 623500/[14], loss: 3.7030, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-18:21:44 Iters: 623600/[14], loss: 2.7511, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-18:23:47 Iters: 623700/[14], loss: 3.1918, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-18:25:50 Iters: 623800/[14], loss: 2.9052, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-18:27:52 Iters: 623900/[14], loss: 3.2642, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-18:29:55 Iters: 624000/[14], loss: 3.3692, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-18:31:58 Iters: 624100/[14], loss: 3.2856, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-18:34:01 Iters: 624200/[14], loss: 2.9088, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-18:36:04 Iters: 624300/[14], loss: 2.9715, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-18:38:06 Iters: 624400/[14], loss: 3.6071, train_accuracy: 0.5234, time: 1.23 s/iter, 
learning rate: 0.0005000000000000001 -20220709-18:40:09 Iters: 624500/[14], loss: 2.9206, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-18:42:12 Iters: 624600/[14], loss: 2.5788, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-18:44:15 Iters: 624700/[14], loss: 3.3062, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-18:46:18 Iters: 624800/[14], loss: 2.9907, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-18:48:21 Iters: 624900/[14], loss: 3.5391, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-18:50:23 Iters: 625000/[14], loss: 3.5138, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-18:52:26 Iters: 625100/[14], loss: 2.9929, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-18:54:29 Iters: 625200/[14], loss: 3.1280, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-18:56:32 Iters: 625300/[14], loss: 3.5068, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-18:58:35 Iters: 625400/[14], loss: 3.1206, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-19:00:38 Iters: 625500/[14], loss: 3.3528, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-19:02:41 Iters: 625600/[14], loss: 2.7165, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-19:04:44 Iters: 625700/[14], loss: 2.9732, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-19:06:46 Iters: 625800/[14], loss: 2.7716, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-19:08:49 Iters: 625900/[14], loss: 2.9763, train_accuracy: 0.5469, time: 1.23 s/iter, 
learning rate: 0.0005000000000000001 -20220709-19:10:52 Iters: 626000/[14], loss: 3.9800, train_accuracy: 0.4609, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-19:12:55 Iters: 626100/[14], loss: 3.1047, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-19:14:58 Iters: 626200/[14], loss: 3.1972, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-19:17:01 Iters: 626300/[14], loss: 2.5844, train_accuracy: 0.6875, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-19:19:04 Iters: 626400/[14], loss: 3.0317, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-19:21:07 Iters: 626500/[14], loss: 2.6909, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-19:23:09 Iters: 626600/[14], loss: 3.7200, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-19:25:12 Iters: 626700/[14], loss: 2.9672, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-19:27:15 Iters: 626800/[14], loss: 3.8378, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-19:29:18 Iters: 626900/[14], loss: 3.4753, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-19:31:21 Iters: 627000/[14], loss: 3.1979, train_accuracy: 0.4766, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-19:33:24 Iters: 627100/[14], loss: 2.6625, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-19:35:27 Iters: 627200/[14], loss: 2.7665, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-19:37:30 Iters: 627300/[14], loss: 2.9628, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-19:39:33 Iters: 627400/[14], loss: 3.3627, train_accuracy: 0.5547, time: 1.23 s/iter, 
learning rate: 0.0005000000000000001 -20220709-19:41:35 Iters: 627500/[14], loss: 3.2260, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-19:43:38 Iters: 627600/[14], loss: 2.9293, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-19:45:41 Iters: 627700/[14], loss: 2.7873, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-19:47:44 Iters: 627800/[14], loss: 3.5277, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-19:49:47 Iters: 627900/[14], loss: 2.8608, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-19:51:50 Iters: 628000/[14], loss: 3.4370, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-19:53:53 Iters: 628100/[14], loss: 2.9459, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-19:55:56 Iters: 628200/[14], loss: 3.2557, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-19:57:59 Iters: 628300/[14], loss: 2.9600, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-20:00:02 Iters: 628400/[14], loss: 3.1652, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-20:02:05 Iters: 628500/[14], loss: 3.1422, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-20:04:07 Iters: 628600/[14], loss: 3.0738, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-20:06:10 Iters: 628700/[14], loss: 3.2099, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-20:08:13 Iters: 628800/[14], loss: 3.5107, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-20:10:16 Iters: 628900/[14], loss: 2.7066, train_accuracy: 0.6562, time: 1.23 s/iter, 
learning rate: 0.0005000000000000001 -20220709-20:12:19 Iters: 629000/[14], loss: 2.6727, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-20:14:22 Iters: 629100/[14], loss: 2.3250, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-20:16:24 Iters: 629200/[14], loss: 3.3360, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-20:18:27 Iters: 629300/[14], loss: 2.7638, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-20:20:30 Iters: 629400/[14], loss: 3.9672, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-20:22:33 Iters: 629500/[14], loss: 2.6808, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-20:24:37 Iters: 629600/[14], loss: 3.1298, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-20:26:40 Iters: 629700/[14], loss: 3.5143, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-20:28:43 Iters: 629800/[14], loss: 2.9186, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-20:30:45 Iters: 629900/[14], loss: 3.3004, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-20:32:48 Iters: 630000/[14], loss: 2.8684, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-20:32:48 Saving checkpoint: 630000 -20220709-20:34:06 LFW Ave Accuracy: 99.5833 -20220709-20:35:23 AgeDB-30 Ave Accuracy: 97.3333 -20220709-20:36:53 CFP-FP Ave Accuracy: 95.2000 -20220709-20:36:53 Current Best Accuracy: LFW: 99.6666 in iters: 600000, AgeDB-30: 97.3333 in iters: 630000 and CFP-FP: 95.2000 in iters: 630000 -20220709-20:38:56 Iters: 630100/[14], loss: 3.1870, train_accuracy: 0.5625, time: 3.67 s/iter, learning rate: 0.0005000000000000001 -20220709-20:40:59 Iters: 
630200/[14], loss: 2.9040, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-20:43:02 Iters: 630300/[14], loss: 3.3034, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-20:45:05 Iters: 630400/[14], loss: 3.8878, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-20:47:08 Iters: 630500/[14], loss: 3.0231, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-20:49:11 Iters: 630600/[14], loss: 3.4556, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-20:51:14 Iters: 630700/[14], loss: 2.9903, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-20:53:17 Iters: 630800/[14], loss: 3.6984, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-20:55:20 Iters: 630900/[14], loss: 3.3739, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-20:57:23 Iters: 631000/[14], loss: 3.1628, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-20:59:26 Iters: 631100/[14], loss: 3.4982, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-21:01:29 Iters: 631200/[14], loss: 2.6830, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-21:03:33 Iters: 631300/[14], loss: 3.5960, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-21:05:36 Iters: 631400/[14], loss: 3.0488, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-21:07:39 Iters: 631500/[14], loss: 3.3228, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-21:09:43 Iters: 631600/[14], loss: 3.5531, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-21:11:46 Iters: 
631700/[14], loss: 2.9649, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-21:13:49 Iters: 631800/[14], loss: 3.3182, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-21:15:52 Iters: 631900/[14], loss: 2.8685, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-21:17:56 Iters: 632000/[14], loss: 2.5408, train_accuracy: 0.6719, time: 1.24 s/iter, learning rate: 0.0005000000000000001 -20220709-21:19:59 Iters: 632100/[14], loss: 3.2175, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-21:22:02 Iters: 632200/[14], loss: 3.1819, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-21:24:05 Iters: 632300/[14], loss: 2.9580, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-21:26:09 Iters: 632400/[14], loss: 3.3545, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-21:28:12 Iters: 632500/[14], loss: 3.2112, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-21:30:15 Iters: 632600/[14], loss: 3.3029, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-21:32:18 Iters: 632700/[14], loss: 2.6243, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-21:34:22 Iters: 632800/[14], loss: 3.3560, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-21:36:25 Iters: 632900/[14], loss: 3.2001, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-21:38:28 Iters: 633000/[14], loss: 3.5171, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-21:40:31 Iters: 633100/[14], loss: 2.8612, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-21:42:34 Iters: 
633200/[14], loss: 3.4312, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-21:44:37 Iters: 633300/[14], loss: 2.9575, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-21:46:40 Iters: 633400/[14], loss: 2.8178, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-21:48:44 Iters: 633500/[14], loss: 3.4072, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-21:50:47 Iters: 633600/[14], loss: 3.1285, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-21:52:50 Iters: 633700/[14], loss: 3.3825, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-21:54:53 Iters: 633800/[14], loss: 3.3599, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-21:56:56 Iters: 633900/[14], loss: 3.2026, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-21:58:59 Iters: 634000/[14], loss: 2.7360, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-22:01:03 Iters: 634100/[14], loss: 3.7904, train_accuracy: 0.4844, time: 1.24 s/iter, learning rate: 0.0005000000000000001 -20220709-22:03:06 Iters: 634200/[14], loss: 2.7909, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-22:05:09 Iters: 634300/[14], loss: 3.0340, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-22:07:13 Iters: 634400/[14], loss: 2.9133, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-22:09:16 Iters: 634500/[14], loss: 2.7528, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-22:11:19 Iters: 634600/[14], loss: 3.1957, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-22:13:22 Iters: 
634700/[14], loss: 3.0990, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-22:15:25 Iters: 634800/[14], loss: 2.9897, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-22:17:28 Iters: 634900/[14], loss: 3.1399, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-22:19:32 Iters: 635000/[14], loss: 3.1947, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-22:21:35 Iters: 635100/[14], loss: 3.5715, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-22:23:38 Iters: 635200/[14], loss: 3.1614, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-22:25:41 Iters: 635300/[14], loss: 2.7525, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-22:27:45 Iters: 635400/[14], loss: 3.2669, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-22:29:48 Iters: 635500/[14], loss: 2.6636, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-22:31:51 Iters: 635600/[14], loss: 3.0415, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-22:33:54 Iters: 635700/[14], loss: 3.7882, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-22:35:58 Iters: 635800/[14], loss: 2.9856, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-22:38:01 Iters: 635900/[14], loss: 3.9160, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-22:40:04 Iters: 636000/[14], loss: 3.7077, train_accuracy: 0.4688, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-22:42:07 Iters: 636100/[14], loss: 3.1813, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-22:44:10 Iters: 
636200/[14], loss: 2.9054, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-22:46:14 Iters: 636300/[14], loss: 3.1857, train_accuracy: 0.4688, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-22:48:17 Iters: 636400/[14], loss: 2.8826, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-22:50:20 Iters: 636500/[14], loss: 3.1044, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-22:52:24 Iters: 636600/[14], loss: 2.5784, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-22:54:27 Iters: 636700/[14], loss: 3.2868, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-22:56:30 Iters: 636800/[14], loss: 3.1654, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-22:57:43 Train Epoch: 15/18 ... -20220709-22:58:33 Iters: 636900/[15], loss: 2.0534, train_accuracy: 0.6406, time: 0.50 s/iter, learning rate: 0.0005000000000000001 -20220709-23:00:36 Iters: 637000/[15], loss: 3.2710, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-23:02:39 Iters: 637100/[15], loss: 2.9517, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-23:04:43 Iters: 637200/[15], loss: 3.2644, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-23:06:46 Iters: 637300/[15], loss: 2.4244, train_accuracy: 0.6875, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-23:08:49 Iters: 637400/[15], loss: 3.4143, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-23:10:52 Iters: 637500/[15], loss: 3.7499, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-23:12:55 Iters: 637600/[15], loss: 2.5760, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 
0.0005000000000000001 -20220709-23:14:59 Iters: 637700/[15], loss: 2.8924, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-23:17:02 Iters: 637800/[15], loss: 2.9731, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-23:19:05 Iters: 637900/[15], loss: 2.4691, train_accuracy: 0.6953, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-23:21:08 Iters: 638000/[15], loss: 3.3839, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-23:23:11 Iters: 638100/[15], loss: 4.1004, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-23:25:14 Iters: 638200/[15], loss: 3.2666, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-23:27:17 Iters: 638300/[15], loss: 2.7844, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-23:29:20 Iters: 638400/[15], loss: 3.0100, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-23:31:23 Iters: 638500/[15], loss: 2.1732, train_accuracy: 0.7109, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-23:33:26 Iters: 638600/[15], loss: 2.9902, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-23:35:29 Iters: 638700/[15], loss: 3.9692, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-23:37:32 Iters: 638800/[15], loss: 2.5629, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-23:39:35 Iters: 638900/[15], loss: 2.8481, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-23:41:39 Iters: 639000/[15], loss: 3.1068, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-23:43:42 Iters: 639100/[15], loss: 2.5182, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 
0.0005000000000000001 -20220709-23:45:45 Iters: 639200/[15], loss: 3.4404, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-23:47:48 Iters: 639300/[15], loss: 3.0820, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-23:49:51 Iters: 639400/[15], loss: 3.1689, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-23:51:54 Iters: 639500/[15], loss: 2.7047, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-23:53:57 Iters: 639600/[15], loss: 2.6657, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-23:56:00 Iters: 639700/[15], loss: 2.3375, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220709-23:58:03 Iters: 639800/[15], loss: 2.7698, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-00:00:06 Iters: 639900/[15], loss: 2.5542, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-00:02:09 Iters: 640000/[15], loss: 3.4961, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-00:02:09 Saving checkpoint: 640000 -20220710-00:03:25 LFW Ave Accuracy: 99.6666 -20220710-00:04:40 AgeDB-30 Ave Accuracy: 97.2167 -20220710-00:06:06 CFP-FP Ave Accuracy: 95.1714 -20220710-00:06:06 Current Best Accuracy: LFW: 99.6666 in iters: 640000, AgeDB-30: 97.3333 in iters: 630000 and CFP-FP: 95.2000 in iters: 630000 -20220710-00:08:09 Iters: 640100/[15], loss: 3.6250, train_accuracy: 0.5156, time: 3.60 s/iter, learning rate: 0.0005000000000000001 -20220710-00:10:12 Iters: 640200/[15], loss: 3.2572, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-00:12:15 Iters: 640300/[15], loss: 2.7236, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-00:14:19 Iters: 640400/[15], loss: 
3.1219, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-00:16:22 Iters: 640500/[15], loss: 2.7345, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-00:18:25 Iters: 640600/[15], loss: 3.2594, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-00:20:28 Iters: 640700/[15], loss: 2.6509, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-00:22:32 Iters: 640800/[15], loss: 3.0248, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-00:24:35 Iters: 640900/[15], loss: 2.9236, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-00:26:38 Iters: 641000/[15], loss: 2.4488, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-00:28:41 Iters: 641100/[15], loss: 3.0045, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-00:30:45 Iters: 641200/[15], loss: 3.3930, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-00:32:48 Iters: 641300/[15], loss: 2.6016, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-00:34:51 Iters: 641400/[15], loss: 2.8885, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-00:36:54 Iters: 641500/[15], loss: 3.0862, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-00:38:58 Iters: 641600/[15], loss: 2.8037, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-00:41:01 Iters: 641700/[15], loss: 2.8874, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-00:43:04 Iters: 641800/[15], loss: 2.7923, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-00:45:07 Iters: 641900/[15], loss: 
3.4639, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-00:47:11 Iters: 642000/[15], loss: 2.5083, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-00:49:14 Iters: 642100/[15], loss: 3.5048, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-00:51:17 Iters: 642200/[15], loss: 3.4265, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-00:53:20 Iters: 642300/[15], loss: 2.7980, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-00:55:23 Iters: 642400/[15], loss: 3.3365, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-00:57:26 Iters: 642500/[15], loss: 2.6658, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-00:59:30 Iters: 642600/[15], loss: 2.9408, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-01:01:33 Iters: 642700/[15], loss: 3.2226, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-01:03:36 Iters: 642800/[15], loss: 2.9657, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-01:05:39 Iters: 642900/[15], loss: 3.2155, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-01:07:42 Iters: 643000/[15], loss: 3.0792, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-01:09:46 Iters: 643100/[15], loss: 3.6962, train_accuracy: 0.4844, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-01:11:49 Iters: 643200/[15], loss: 2.8649, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-01:13:52 Iters: 643300/[15], loss: 3.7558, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-01:15:55 Iters: 643400/[15], loss: 
3.5643, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-01:17:59 Iters: 643500/[15], loss: 2.8690, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-01:20:02 Iters: 643600/[15], loss: 3.2660, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-01:22:05 Iters: 643700/[15], loss: 3.2294, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-01:24:08 Iters: 643800/[15], loss: 2.6654, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-01:26:11 Iters: 643900/[15], loss: 2.9990, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-01:28:14 Iters: 644000/[15], loss: 3.2658, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-01:30:17 Iters: 644100/[15], loss: 3.8156, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-01:32:21 Iters: 644200/[15], loss: 2.9607, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-01:34:24 Iters: 644300/[15], loss: 2.6361, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-01:36:27 Iters: 644400/[15], loss: 2.7576, train_accuracy: 0.6641, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-01:38:30 Iters: 644500/[15], loss: 3.6456, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-01:40:33 Iters: 644600/[15], loss: 2.9512, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-01:42:37 Iters: 644700/[15], loss: 2.8461, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-01:44:40 Iters: 644800/[15], loss: 2.6973, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-01:46:43 Iters: 644900/[15], loss: 
2.6549, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-01:48:46 Iters: 645000/[15], loss: 2.4489, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-01:50:49 Iters: 645100/[15], loss: 3.4870, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-01:52:53 Iters: 645200/[15], loss: 3.1217, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-01:54:56 Iters: 645300/[15], loss: 3.4310, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-01:56:59 Iters: 645400/[15], loss: 3.7197, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-01:59:02 Iters: 645500/[15], loss: 2.5509, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-02:01:06 Iters: 645600/[15], loss: 3.0520, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-02:03:09 Iters: 645700/[15], loss: 3.2806, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-02:05:12 Iters: 645800/[15], loss: 2.6815, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-02:07:15 Iters: 645900/[15], loss: 3.2258, train_accuracy: 0.4766, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-02:09:18 Iters: 646000/[15], loss: 3.7008, train_accuracy: 0.4766, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-02:11:22 Iters: 646100/[15], loss: 3.4281, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-02:13:25 Iters: 646200/[15], loss: 2.6242, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-02:15:28 Iters: 646300/[15], loss: 3.3786, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-02:17:31 Iters: 646400/[15], loss: 
2.9982, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-02:19:35 Iters: 646500/[15], loss: 2.7587, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-02:21:38 Iters: 646600/[15], loss: 3.1923, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-02:23:41 Iters: 646700/[15], loss: 3.4028, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-02:25:45 Iters: 646800/[15], loss: 3.2087, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-02:27:48 Iters: 646900/[15], loss: 2.6128, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-02:29:51 Iters: 647000/[15], loss: 3.0494, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-02:31:54 Iters: 647100/[15], loss: 2.5992, train_accuracy: 0.6797, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-02:33:58 Iters: 647200/[15], loss: 3.0937, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-02:36:01 Iters: 647300/[15], loss: 3.1759, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-02:38:04 Iters: 647400/[15], loss: 3.2787, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-02:40:07 Iters: 647500/[15], loss: 2.7310, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-02:42:10 Iters: 647600/[15], loss: 2.9664, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-02:44:14 Iters: 647700/[15], loss: 3.2336, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-02:46:17 Iters: 647800/[15], loss: 2.7647, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-02:48:20 Iters: 647900/[15], loss: 
3.1066, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-02:50:23 Iters: 648000/[15], loss: 3.4906, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-02:52:26 Iters: 648100/[15], loss: 2.8935, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-02:54:30 Iters: 648200/[15], loss: 3.4905, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-02:56:33 Iters: 648300/[15], loss: 2.4945, train_accuracy: 0.6641, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-02:58:36 Iters: 648400/[15], loss: 3.1154, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-03:00:39 Iters: 648500/[15], loss: 2.9620, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-03:02:42 Iters: 648600/[15], loss: 2.7259, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-03:04:45 Iters: 648700/[15], loss: 2.7562, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-03:06:49 Iters: 648800/[15], loss: 2.6585, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-03:08:52 Iters: 648900/[15], loss: 2.6399, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-03:10:55 Iters: 649000/[15], loss: 3.2605, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-03:12:58 Iters: 649100/[15], loss: 3.1669, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-03:15:01 Iters: 649200/[15], loss: 2.8579, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-03:17:04 Iters: 649300/[15], loss: 2.8684, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-03:19:07 Iters: 649400/[15], loss: 
2.7674, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-03:21:10 Iters: 649500/[15], loss: 2.7326, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-03:23:13 Iters: 649600/[15], loss: 3.5302, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-03:25:16 Iters: 649700/[15], loss: 3.6700, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-03:27:19 Iters: 649800/[15], loss: 2.9042, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-03:29:22 Iters: 649900/[15], loss: 2.4018, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-03:31:25 Iters: 650000/[15], loss: 3.4015, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-03:31:25 Saving checkpoint: 650000 -20220710-03:32:42 LFW Ave Accuracy: 99.6666 -20220710-03:33:57 AgeDB-30 Ave Accuracy: 97.2000 -20220710-03:35:24 CFP-FP Ave Accuracy: 95.2143 -20220710-03:35:24 Current Best Accuracy: LFW: 99.6666 in iters: 640000, AgeDB-30: 97.3333 in iters: 630000 and CFP-FP: 95.2143 in iters: 650000 -20220710-03:37:27 Iters: 650100/[15], loss: 2.8568, train_accuracy: 0.5703, time: 3.61 s/iter, learning rate: 0.0005000000000000001 -20220710-03:39:30 Iters: 650200/[15], loss: 3.7349, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-03:41:33 Iters: 650300/[15], loss: 2.9174, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-03:43:36 Iters: 650400/[15], loss: 3.2063, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-03:45:39 Iters: 650500/[15], loss: 2.6444, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-03:47:42 Iters: 650600/[15], loss: 3.3917, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 
0.0005000000000000001 -20220710-03:49:45 Iters: 650700/[15], loss: 2.1337, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-03:51:48 Iters: 650800/[15], loss: 2.9572, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-03:53:51 Iters: 650900/[15], loss: 2.8896, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-03:55:53 Iters: 651000/[15], loss: 2.9354, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-03:57:56 Iters: 651100/[15], loss: 3.4927, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-03:59:59 Iters: 651200/[15], loss: 3.8510, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-04:02:02 Iters: 651300/[15], loss: 3.0057, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-04:04:05 Iters: 651400/[15], loss: 3.1854, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-04:06:08 Iters: 651500/[15], loss: 3.8548, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-04:08:11 Iters: 651600/[15], loss: 2.9388, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-04:10:14 Iters: 651700/[15], loss: 3.8737, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-04:12:17 Iters: 651800/[15], loss: 2.9808, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-04:14:20 Iters: 651900/[15], loss: 2.7073, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-04:16:23 Iters: 652000/[15], loss: 3.0451, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-04:18:27 Iters: 652100/[15], loss: 2.8520, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 
0.0005000000000000001 -20220710-04:20:30 Iters: 652200/[15], loss: 3.5143, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-04:22:33 Iters: 652300/[15], loss: 3.3163, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-04:24:36 Iters: 652400/[15], loss: 3.0108, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-04:26:38 Iters: 652500/[15], loss: 2.9450, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-04:28:41 Iters: 652600/[15], loss: 2.7141, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-04:30:44 Iters: 652700/[15], loss: 2.9970, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-04:32:47 Iters: 652800/[15], loss: 2.9094, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-04:34:50 Iters: 652900/[15], loss: 2.7881, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-04:36:53 Iters: 653000/[15], loss: 2.9369, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-04:38:56 Iters: 653100/[15], loss: 3.1210, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-04:40:59 Iters: 653200/[15], loss: 3.0963, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-04:43:02 Iters: 653300/[15], loss: 3.9268, train_accuracy: 0.4844, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-04:45:05 Iters: 653400/[15], loss: 3.0608, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-04:47:08 Iters: 653500/[15], loss: 2.9435, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-04:49:11 Iters: 653600/[15], loss: 2.8257, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 
0.0005000000000000001 -20220710-04:51:14 Iters: 653700/[15], loss: 3.2065, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-04:53:17 Iters: 653800/[15], loss: 2.7566, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-04:55:20 Iters: 653900/[15], loss: 2.9258, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-04:57:23 Iters: 654000/[15], loss: 2.8742, train_accuracy: 0.6719, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-04:59:26 Iters: 654100/[15], loss: 3.1402, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-05:01:29 Iters: 654200/[15], loss: 3.1546, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-05:03:32 Iters: 654300/[15], loss: 3.6824, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-05:05:35 Iters: 654400/[15], loss: 3.0525, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-05:07:38 Iters: 654500/[15], loss: 2.7050, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-05:09:41 Iters: 654600/[15], loss: 3.1299, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-05:11:44 Iters: 654700/[15], loss: 3.5471, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-05:13:47 Iters: 654800/[15], loss: 2.6380, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-05:15:50 Iters: 654900/[15], loss: 3.1755, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-05:17:54 Iters: 655000/[15], loss: 2.9981, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-05:19:57 Iters: 655100/[15], loss: 3.2991, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 
0.0005000000000000001 -20220710-05:22:00 Iters: 655200/[15], loss: 3.4199, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-05:24:03 Iters: 655300/[15], loss: 2.9370, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-05:26:06 Iters: 655400/[15], loss: 3.6490, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-05:28:09 Iters: 655500/[15], loss: 2.7979, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-05:30:13 Iters: 655600/[15], loss: 3.4902, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-05:32:16 Iters: 655700/[15], loss: 3.3463, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-05:34:19 Iters: 655800/[15], loss: 3.0096, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-05:36:22 Iters: 655900/[15], loss: 3.3527, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-05:38:25 Iters: 656000/[15], loss: 3.1397, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-05:40:28 Iters: 656100/[15], loss: 3.5201, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-05:42:31 Iters: 656200/[15], loss: 3.0602, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-05:44:34 Iters: 656300/[15], loss: 3.1421, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-05:46:38 Iters: 656400/[15], loss: 2.6490, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-05:48:41 Iters: 656500/[15], loss: 2.5134, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-05:50:44 Iters: 656600/[15], loss: 2.5819, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 
0.0005000000000000001 -20220710-05:52:47 Iters: 656700/[15], loss: 3.5414, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-05:54:50 Iters: 656800/[15], loss: 3.1607, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-05:56:53 Iters: 656900/[15], loss: 2.7015, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-05:58:56 Iters: 657000/[15], loss: 3.0761, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-06:00:59 Iters: 657100/[15], loss: 2.4683, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-06:03:02 Iters: 657200/[15], loss: 2.9371, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-06:05:06 Iters: 657300/[15], loss: 3.0022, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-06:07:09 Iters: 657400/[15], loss: 3.2613, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-06:09:12 Iters: 657500/[15], loss: 3.2323, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-06:11:15 Iters: 657600/[15], loss: 3.4795, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-06:13:18 Iters: 657700/[15], loss: 2.9786, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-06:15:21 Iters: 657800/[15], loss: 3.2875, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-06:17:24 Iters: 657900/[15], loss: 3.4846, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-06:19:27 Iters: 658000/[15], loss: 3.1802, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-06:21:30 Iters: 658100/[15], loss: 2.7421, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 
0.0005000000000000001 -20220710-06:23:34 Iters: 658200/[15], loss: 2.9387, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-06:25:37 Iters: 658300/[15], loss: 3.0271, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-06:27:40 Iters: 658400/[15], loss: 3.1828, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-06:29:43 Iters: 658500/[15], loss: 4.0962, train_accuracy: 0.4688, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-06:31:46 Iters: 658600/[15], loss: 3.2946, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-06:33:49 Iters: 658700/[15], loss: 3.2093, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-06:35:52 Iters: 658800/[15], loss: 2.8201, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-06:37:55 Iters: 658900/[15], loss: 3.6480, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-06:39:58 Iters: 659000/[15], loss: 3.2572, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-06:42:01 Iters: 659100/[15], loss: 4.2607, train_accuracy: 0.4531, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-06:44:04 Iters: 659200/[15], loss: 3.2901, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-06:46:08 Iters: 659300/[15], loss: 3.5502, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-06:48:11 Iters: 659400/[15], loss: 3.0628, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-06:50:14 Iters: 659500/[15], loss: 3.2552, train_accuracy: 0.4688, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-06:52:17 Iters: 659600/[15], loss: 2.7063, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 
0.0005000000000000001 -20220710-06:54:20 Iters: 659700/[15], loss: 2.7815, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-06:56:23 Iters: 659800/[15], loss: 2.7724, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-06:58:26 Iters: 659900/[15], loss: 3.4058, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-07:00:29 Iters: 660000/[15], loss: 3.1693, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-07:00:29 Saving checkpoint: 660000 -20220710-07:01:46 LFW Ave Accuracy: 99.5832 -20220710-07:03:02 AgeDB-30 Ave Accuracy: 97.3000 -20220710-07:04:29 CFP-FP Ave Accuracy: 94.8571 -20220710-07:04:29 Current Best Accuracy: LFW: 99.6666 in iters: 640000, AgeDB-30: 97.3333 in iters: 630000 and CFP-FP: 95.2143 in iters: 650000 -20220710-07:06:31 Iters: 660100/[15], loss: 3.1393, train_accuracy: 0.5312, time: 3.62 s/iter, learning rate: 0.0005000000000000001 -20220710-07:08:34 Iters: 660200/[15], loss: 2.9099, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-07:10:38 Iters: 660300/[15], loss: 3.0883, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-07:12:41 Iters: 660400/[15], loss: 3.3934, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-07:14:44 Iters: 660500/[15], loss: 3.6151, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-07:16:47 Iters: 660600/[15], loss: 2.9474, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-07:18:50 Iters: 660700/[15], loss: 2.9344, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-07:20:53 Iters: 660800/[15], loss: 2.9881, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-07:22:57 Iters: 660900/[15], loss: 
2.9268, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-07:25:00 Iters: 661000/[15], loss: 3.6674, train_accuracy: 0.4766, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-07:27:03 Iters: 661100/[15], loss: 2.9317, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-07:29:06 Iters: 661200/[15], loss: 3.3236, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-07:31:10 Iters: 661300/[15], loss: 3.0529, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-07:33:13 Iters: 661400/[15], loss: 2.7922, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-07:35:16 Iters: 661500/[15], loss: 4.0394, train_accuracy: 0.4688, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-07:37:20 Iters: 661600/[15], loss: 3.4012, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-07:39:23 Iters: 661700/[15], loss: 2.6933, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-07:41:26 Iters: 661800/[15], loss: 3.0125, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-07:43:29 Iters: 661900/[15], loss: 2.9849, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-07:45:33 Iters: 662000/[15], loss: 3.2609, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-07:47:36 Iters: 662100/[15], loss: 3.5273, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-07:49:39 Iters: 662200/[15], loss: 3.4153, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-07:51:43 Iters: 662300/[15], loss: 3.1367, train_accuracy: 0.4844, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-07:53:46 Iters: 662400/[15], loss: 
2.8657, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-07:55:49 Iters: 662500/[15], loss: 3.1123, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-07:57:52 Iters: 662600/[15], loss: 3.4455, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-07:59:56 Iters: 662700/[15], loss: 2.8913, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-08:01:59 Iters: 662800/[15], loss: 3.3790, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-08:04:02 Iters: 662900/[15], loss: 2.9751, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-08:06:05 Iters: 663000/[15], loss: 3.3568, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-08:08:08 Iters: 663100/[15], loss: 2.9429, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-08:10:12 Iters: 663200/[15], loss: 3.5059, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-08:12:15 Iters: 663300/[15], loss: 3.2912, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-08:14:18 Iters: 663400/[15], loss: 2.7517, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-08:16:21 Iters: 663500/[15], loss: 3.3957, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-08:18:25 Iters: 663600/[15], loss: 3.0837, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-08:20:28 Iters: 663700/[15], loss: 3.4693, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-08:22:32 Iters: 663800/[15], loss: 3.1576, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-08:24:35 Iters: 663900/[15], loss: 
3.4004, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-08:26:38 Iters: 664000/[15], loss: 3.7998, train_accuracy: 0.4375, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-08:28:42 Iters: 664100/[15], loss: 3.0824, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-08:30:45 Iters: 664200/[15], loss: 3.1602, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-08:32:48 Iters: 664300/[15], loss: 3.5099, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-08:34:51 Iters: 664400/[15], loss: 3.1313, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-08:36:55 Iters: 664500/[15], loss: 2.5981, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-08:38:58 Iters: 664600/[15], loss: 2.9155, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-08:41:01 Iters: 664700/[15], loss: 3.1946, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-08:43:05 Iters: 664800/[15], loss: 3.0907, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-08:45:08 Iters: 664900/[15], loss: 2.8849, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-08:47:11 Iters: 665000/[15], loss: 3.0023, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-08:49:14 Iters: 665100/[15], loss: 3.3426, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-08:51:18 Iters: 665200/[15], loss: 3.2007, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-08:53:21 Iters: 665300/[15], loss: 2.8696, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-08:55:24 Iters: 665400/[15], loss: 
3.1633, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-08:57:28 Iters: 665500/[15], loss: 2.6425, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-08:59:31 Iters: 665600/[15], loss: 2.9529, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-09:01:34 Iters: 665700/[15], loss: 3.7476, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-09:03:38 Iters: 665800/[15], loss: 3.3836, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-09:05:41 Iters: 665900/[15], loss: 3.3943, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-09:07:44 Iters: 666000/[15], loss: 3.5803, train_accuracy: 0.4531, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-09:09:48 Iters: 666100/[15], loss: 2.8776, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-09:11:51 Iters: 666200/[15], loss: 2.8139, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-09:13:54 Iters: 666300/[15], loss: 2.8947, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-09:15:58 Iters: 666400/[15], loss: 2.7281, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-09:18:01 Iters: 666500/[15], loss: 3.2084, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-09:20:04 Iters: 666600/[15], loss: 3.3508, train_accuracy: 0.5859, time: 1.24 s/iter, learning rate: 0.0005000000000000001 -20220710-09:22:08 Iters: 666700/[15], loss: 2.9232, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-09:24:11 Iters: 666800/[15], loss: 2.9796, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-09:26:15 Iters: 666900/[15], loss: 
2.9861, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-09:28:18 Iters: 667000/[15], loss: 2.5244, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-09:30:21 Iters: 667100/[15], loss: 3.2814, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-09:32:25 Iters: 667200/[15], loss: 3.6935, train_accuracy: 0.4922, time: 1.24 s/iter, learning rate: 0.0005000000000000001 -20220710-09:34:28 Iters: 667300/[15], loss: 3.1404, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-09:36:32 Iters: 667400/[15], loss: 2.7201, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-09:38:35 Iters: 667500/[15], loss: 3.7789, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-09:40:38 Iters: 667600/[15], loss: 2.8969, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-09:42:41 Iters: 667700/[15], loss: 3.2782, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-09:44:45 Iters: 667800/[15], loss: 3.0815, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-09:46:48 Iters: 667900/[15], loss: 3.8428, train_accuracy: 0.4531, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-09:48:51 Iters: 668000/[15], loss: 3.1408, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-09:50:55 Iters: 668100/[15], loss: 2.5349, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-09:52:58 Iters: 668200/[15], loss: 3.0037, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-09:55:01 Iters: 668300/[15], loss: 3.0203, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-09:57:04 Iters: 668400/[15], loss: 
2.9433, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-09:59:07 Iters: 668500/[15], loss: 3.2938, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-10:01:11 Iters: 668600/[15], loss: 3.5713, train_accuracy: 0.4609, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-10:03:14 Iters: 668700/[15], loss: 3.3234, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-10:05:17 Iters: 668800/[15], loss: 3.0625, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-10:07:20 Iters: 668900/[15], loss: 3.3237, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-10:09:24 Iters: 669000/[15], loss: 2.8800, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-10:11:27 Iters: 669100/[15], loss: 2.7194, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-10:13:30 Iters: 669200/[15], loss: 3.4058, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-10:15:33 Iters: 669300/[15], loss: 3.7103, train_accuracy: 0.4844, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-10:17:37 Iters: 669400/[15], loss: 3.4548, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-10:19:40 Iters: 669500/[15], loss: 3.1027, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-10:21:43 Iters: 669600/[15], loss: 3.5006, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-10:23:46 Iters: 669700/[15], loss: 3.1975, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-10:25:50 Iters: 669800/[15], loss: 2.6742, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-10:27:53 Iters: 669900/[15], loss: 
3.6233, train_accuracy: 0.4766, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-10:29:56 Iters: 670000/[15], loss: 3.3522, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-10:29:56 Saving checkpoint: 670000 -20220710-10:31:14 LFW Ave Accuracy: 99.6333 -20220710-10:32:31 AgeDB-30 Ave Accuracy: 97.2500 -20220710-10:34:01 CFP-FP Ave Accuracy: 95.0286 -20220710-10:34:01 Current Best Accuracy: LFW: 99.6666 in iters: 640000, AgeDB-30: 97.3333 in iters: 630000 and CFP-FP: 95.2143 in iters: 650000 -20220710-10:36:04 Iters: 670100/[15], loss: 3.3208, train_accuracy: 0.5312, time: 3.68 s/iter, learning rate: 0.0005000000000000001 -20220710-10:38:07 Iters: 670200/[15], loss: 3.7130, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-10:40:10 Iters: 670300/[15], loss: 2.6806, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-10:42:14 Iters: 670400/[15], loss: 3.6708, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-10:44:17 Iters: 670500/[15], loss: 3.9205, train_accuracy: 0.4453, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-10:46:21 Iters: 670600/[15], loss: 3.6679, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-10:48:24 Iters: 670700/[15], loss: 3.7787, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-10:50:27 Iters: 670800/[15], loss: 3.1394, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-10:52:30 Iters: 670900/[15], loss: 3.4652, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-10:54:34 Iters: 671000/[15], loss: 3.3619, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-10:56:37 Iters: 671100/[15], loss: 3.5077, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 
0.0005000000000000001 -20220710-10:58:40 Iters: 671200/[15], loss: 2.7342, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-11:00:43 Iters: 671300/[15], loss: 3.6138, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-11:02:47 Iters: 671400/[15], loss: 4.4781, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-11:04:50 Iters: 671500/[15], loss: 3.3466, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-11:06:53 Iters: 671600/[15], loss: 3.4014, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-11:08:56 Iters: 671700/[15], loss: 4.1017, train_accuracy: 0.4609, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-11:11:00 Iters: 671800/[15], loss: 3.4229, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-11:13:03 Iters: 671900/[15], loss: 3.1172, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-11:15:06 Iters: 672000/[15], loss: 3.3352, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-11:17:09 Iters: 672100/[15], loss: 2.9260, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-11:19:12 Iters: 672200/[15], loss: 3.0336, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-11:21:16 Iters: 672300/[15], loss: 3.2268, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-11:23:19 Iters: 672400/[15], loss: 3.1395, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-11:25:22 Iters: 672500/[15], loss: 3.5186, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-11:27:25 Iters: 672600/[15], loss: 2.6605, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 
0.0005000000000000001 -20220710-11:29:29 Iters: 672700/[15], loss: 3.2029, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-11:31:32 Iters: 672800/[15], loss: 2.9569, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-11:33:35 Iters: 672900/[15], loss: 3.4347, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-11:35:39 Iters: 673000/[15], loss: 3.1489, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-11:37:42 Iters: 673100/[15], loss: 2.9542, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-11:39:45 Iters: 673200/[15], loss: 3.5444, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-11:41:48 Iters: 673300/[15], loss: 2.9689, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-11:43:52 Iters: 673400/[15], loss: 2.9661, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-11:45:55 Iters: 673500/[15], loss: 3.1494, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-11:47:58 Iters: 673600/[15], loss: 3.1613, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-11:50:01 Iters: 673700/[15], loss: 3.0062, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-11:52:05 Iters: 673800/[15], loss: 3.4693, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-11:54:08 Iters: 673900/[15], loss: 3.7216, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-11:56:11 Iters: 674000/[15], loss: 3.2504, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-11:58:14 Iters: 674100/[15], loss: 3.1780, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 
0.0005000000000000001 -20220710-12:00:18 Iters: 674200/[15], loss: 3.2031, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-12:02:21 Iters: 674300/[15], loss: 3.2009, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-12:04:24 Iters: 674400/[15], loss: 2.7969, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-12:06:27 Iters: 674500/[15], loss: 2.6046, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-12:08:31 Iters: 674600/[15], loss: 2.9693, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-12:10:34 Iters: 674700/[15], loss: 3.1509, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-12:12:37 Iters: 674800/[15], loss: 3.0610, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-12:14:40 Iters: 674900/[15], loss: 2.9879, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-12:16:44 Iters: 675000/[15], loss: 3.0569, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-12:18:47 Iters: 675100/[15], loss: 2.9669, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-12:20:50 Iters: 675200/[15], loss: 2.9916, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-12:22:53 Iters: 675300/[15], loss: 3.1262, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-12:24:57 Iters: 675400/[15], loss: 3.1046, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-12:27:00 Iters: 675500/[15], loss: 3.3364, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-12:29:03 Iters: 675600/[15], loss: 3.0541, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 
0.0005000000000000001 -20220710-12:31:07 Iters: 675700/[15], loss: 3.0857, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-12:33:10 Iters: 675800/[15], loss: 3.6106, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-12:35:13 Iters: 675900/[15], loss: 2.7771, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-12:37:16 Iters: 676000/[15], loss: 3.1548, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-12:39:20 Iters: 676100/[15], loss: 2.9730, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-12:41:23 Iters: 676200/[15], loss: 3.3790, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-12:43:26 Iters: 676300/[15], loss: 3.1260, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-12:45:30 Iters: 676400/[15], loss: 3.1901, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-12:47:33 Iters: 676500/[15], loss: 3.1736, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-12:49:36 Iters: 676600/[15], loss: 2.8964, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-12:51:39 Iters: 676700/[15], loss: 3.3517, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-12:53:43 Iters: 676800/[15], loss: 3.3006, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-12:55:46 Iters: 676900/[15], loss: 3.1532, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-12:57:49 Iters: 677000/[15], loss: 3.0865, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-12:59:53 Iters: 677100/[15], loss: 2.6200, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 
0.0005000000000000001 -20220710-13:01:56 Iters: 677200/[15], loss: 3.4823, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-13:03:59 Iters: 677300/[15], loss: 3.2364, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-13:06:03 Iters: 677400/[15], loss: 3.3196, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-13:08:06 Iters: 677500/[15], loss: 3.3153, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-13:10:09 Iters: 677600/[15], loss: 3.1220, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-13:12:12 Iters: 677700/[15], loss: 3.1554, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-13:14:16 Iters: 677800/[15], loss: 3.2664, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-13:16:19 Iters: 677900/[15], loss: 3.1650, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-13:18:22 Iters: 678000/[15], loss: 2.8781, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-13:20:25 Iters: 678100/[15], loss: 2.7044, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-13:22:29 Iters: 678200/[15], loss: 3.0618, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-13:24:32 Iters: 678300/[15], loss: 3.2549, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-13:26:35 Iters: 678400/[15], loss: 3.1001, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-13:28:38 Iters: 678500/[15], loss: 3.2030, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-13:30:42 Iters: 678600/[15], loss: 3.3499, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 
0.0005000000000000001 -20220710-13:32:45 Iters: 678700/[15], loss: 3.1800, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-13:34:48 Iters: 678800/[15], loss: 3.3315, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-13:36:51 Iters: 678900/[15], loss: 3.9429, train_accuracy: 0.4453, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-13:38:55 Iters: 679000/[15], loss: 2.7801, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-13:40:58 Iters: 679100/[15], loss: 3.4899, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-13:43:01 Iters: 679200/[15], loss: 3.5283, train_accuracy: 0.4766, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-13:45:04 Iters: 679300/[15], loss: 3.0653, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-13:47:07 Iters: 679400/[15], loss: 2.9869, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-13:49:11 Iters: 679500/[15], loss: 2.9491, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-13:51:14 Iters: 679600/[15], loss: 2.6835, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-13:53:17 Iters: 679700/[15], loss: 2.9142, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-13:55:20 Iters: 679800/[15], loss: 2.9766, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-13:57:24 Iters: 679900/[15], loss: 4.0207, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-13:59:27 Iters: 680000/[15], loss: 2.8391, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-13:59:27 Saving checkpoint: 680000 -20220710-14:00:46 LFW Ave Accuracy: 99.6833 -20220710-14:02:03 
AgeDB-30 Ave Accuracy: 97.3333 -20220710-14:03:33 CFP-FP Ave Accuracy: 95.2571 -20220710-14:03:33 Current Best Accuracy: LFW: 99.6833 in iters: 680000, AgeDB-30: 97.3333 in iters: 680000 and CFP-FP: 95.2571 in iters: 680000 -20220710-14:05:36 Iters: 680100/[15], loss: 2.5498, train_accuracy: 0.6016, time: 3.68 s/iter, learning rate: 0.0005000000000000001 -20220710-14:07:39 Iters: 680200/[15], loss: 3.4237, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-14:09:42 Iters: 680300/[15], loss: 3.2614, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-14:11:45 Iters: 680400/[15], loss: 3.1607, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-14:13:48 Iters: 680500/[15], loss: 2.8456, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-14:15:51 Iters: 680600/[15], loss: 3.1369, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-14:17:55 Iters: 680700/[15], loss: 3.1578, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-14:19:58 Iters: 680800/[15], loss: 3.4368, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-14:22:01 Iters: 680900/[15], loss: 3.4771, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-14:24:04 Iters: 681000/[15], loss: 3.0897, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-14:26:07 Iters: 681100/[15], loss: 3.2995, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-14:28:11 Iters: 681200/[15], loss: 3.2713, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-14:30:14 Iters: 681300/[15], loss: 3.2538, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-14:32:17 Iters: 681400/[15], loss: 
3.9775, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-14:34:20 Iters: 681500/[15], loss: 3.8621, train_accuracy: 0.4766, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-14:36:23 Iters: 681600/[15], loss: 3.6311, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-14:38:27 Iters: 681700/[15], loss: 3.3274, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-14:40:30 Iters: 681800/[15], loss: 3.3418, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-14:42:33 Iters: 681900/[15], loss: 2.8542, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-14:44:36 Iters: 682000/[15], loss: 3.3927, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-14:46:39 Iters: 682100/[15], loss: 3.7238, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-14:48:42 Iters: 682200/[15], loss: 3.2195, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-14:50:45 Iters: 682300/[15], loss: 2.9356, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-14:51:46 Train Epoch: 16/18 ... 
-20220710-14:52:49 Iters: 682400/[16], loss: 2.7417, train_accuracy: 0.5625, time: 0.62 s/iter, learning rate: 0.0005000000000000001 -20220710-14:54:52 Iters: 682500/[16], loss: 3.1591, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-14:56:55 Iters: 682600/[16], loss: 2.6614, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-14:58:59 Iters: 682700/[16], loss: 2.8570, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-15:01:02 Iters: 682800/[16], loss: 2.9941, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-15:03:05 Iters: 682900/[16], loss: 2.4171, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-15:05:08 Iters: 683000/[16], loss: 2.9163, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-15:07:12 Iters: 683100/[16], loss: 2.8944, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-15:09:15 Iters: 683200/[16], loss: 2.5007, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-15:11:18 Iters: 683300/[16], loss: 2.6667, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-15:13:21 Iters: 683400/[16], loss: 2.5361, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-15:15:24 Iters: 683500/[16], loss: 2.5474, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-15:17:28 Iters: 683600/[16], loss: 3.1226, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-15:19:31 Iters: 683700/[16], loss: 2.4143, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-15:21:34 Iters: 683800/[16], loss: 2.9842, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220710-15:23:37 Iters: 683900/[16], loss: 3.1083, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-15:25:40 Iters: 684000/[16], loss: 3.2818, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-15:27:43 Iters: 684100/[16], loss: 2.6729, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-15:29:46 Iters: 684200/[16], loss: 3.0342, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-15:31:50 Iters: 684300/[16], loss: 2.4287, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-15:33:53 Iters: 684400/[16], loss: 2.3637, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-15:35:56 Iters: 684500/[16], loss: 2.4466, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-15:37:59 Iters: 684600/[16], loss: 2.0935, train_accuracy: 0.6797, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-15:40:02 Iters: 684700/[16], loss: 2.3839, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-15:42:05 Iters: 684800/[16], loss: 2.7988, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-15:44:08 Iters: 684900/[16], loss: 3.1355, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-15:46:11 Iters: 685000/[16], loss: 2.6073, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-15:48:15 Iters: 685100/[16], loss: 2.7863, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-15:50:18 Iters: 685200/[16], loss: 3.2908, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-15:52:21 Iters: 685300/[16], loss: 3.4203, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220710-15:54:24 Iters: 685400/[16], loss: 2.8362, train_accuracy: 0.6953, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-15:56:27 Iters: 685500/[16], loss: 2.1226, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-15:58:30 Iters: 685600/[16], loss: 2.9338, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-16:00:34 Iters: 685700/[16], loss: 2.7758, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-16:02:37 Iters: 685800/[16], loss: 2.8744, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-16:04:40 Iters: 685900/[16], loss: 2.9446, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-16:06:43 Iters: 686000/[16], loss: 2.9405, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-16:08:47 Iters: 686100/[16], loss: 2.8308, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-16:10:50 Iters: 686200/[16], loss: 3.0661, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-16:12:53 Iters: 686300/[16], loss: 2.7632, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-16:14:56 Iters: 686400/[16], loss: 3.0098, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-16:17:00 Iters: 686500/[16], loss: 2.9077, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-16:19:03 Iters: 686600/[16], loss: 3.2621, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-16:21:06 Iters: 686700/[16], loss: 2.6130, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-16:23:10 Iters: 686800/[16], loss: 2.9398, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220710-16:25:13 Iters: 686900/[16], loss: 2.8715, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-16:27:16 Iters: 687000/[16], loss: 3.2372, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-16:29:19 Iters: 687100/[16], loss: 3.3146, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-16:31:23 Iters: 687200/[16], loss: 2.7487, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-16:33:26 Iters: 687300/[16], loss: 3.1247, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-16:35:29 Iters: 687400/[16], loss: 2.9265, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-16:37:32 Iters: 687500/[16], loss: 2.7523, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-16:39:36 Iters: 687600/[16], loss: 3.1839, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-16:41:39 Iters: 687700/[16], loss: 3.2200, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-16:43:42 Iters: 687800/[16], loss: 2.4260, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-16:45:45 Iters: 687900/[16], loss: 2.4572, train_accuracy: 0.6719, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-16:47:48 Iters: 688000/[16], loss: 3.0326, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-16:49:52 Iters: 688100/[16], loss: 2.6529, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-16:51:55 Iters: 688200/[16], loss: 2.8997, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-16:53:59 Iters: 688300/[16], loss: 3.2718, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220710-16:56:02 Iters: 688400/[16], loss: 2.6646, train_accuracy: 0.6875, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-16:58:05 Iters: 688500/[16], loss: 2.6085, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-17:00:09 Iters: 688600/[16], loss: 2.8177, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-17:02:12 Iters: 688700/[16], loss: 2.7774, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-17:04:16 Iters: 688800/[16], loss: 3.3576, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-17:06:19 Iters: 688900/[16], loss: 3.0168, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-17:08:22 Iters: 689000/[16], loss: 2.7130, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-17:10:26 Iters: 689100/[16], loss: 2.8302, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-17:12:29 Iters: 689200/[16], loss: 2.9724, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-17:14:32 Iters: 689300/[16], loss: 3.5249, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-17:16:35 Iters: 689400/[16], loss: 3.1709, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-17:18:39 Iters: 689500/[16], loss: 3.0663, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-17:20:42 Iters: 689600/[16], loss: 3.0265, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-17:22:45 Iters: 689700/[16], loss: 3.0870, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-17:24:48 Iters: 689800/[16], loss: 2.7918, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220710-17:26:52 Iters: 689900/[16], loss: 2.9507, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-17:28:55 Iters: 690000/[16], loss: 3.7372, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-17:28:55 Saving checkpoint: 690000 -20220710-17:30:11 LFW Ave Accuracy: 99.7000 -20220710-17:31:26 AgeDB-30 Ave Accuracy: 97.3167 -20220710-17:32:53 CFP-FP Ave Accuracy: 94.9714 -20220710-17:32:53 Current Best Accuracy: LFW: 99.7000 in iters: 690000, AgeDB-30: 97.3333 in iters: 680000 and CFP-FP: 95.2571 in iters: 680000 -20220710-17:34:56 Iters: 690100/[16], loss: 3.2736, train_accuracy: 0.5234, time: 3.61 s/iter, learning rate: 0.0005000000000000001 -20220710-17:36:59 Iters: 690200/[16], loss: 2.3429, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-17:39:03 Iters: 690300/[16], loss: 3.0335, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-17:41:06 Iters: 690400/[16], loss: 3.0812, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-17:43:09 Iters: 690500/[16], loss: 2.5672, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-17:45:12 Iters: 690600/[16], loss: 3.0245, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-17:47:16 Iters: 690700/[16], loss: 2.9025, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-17:49:19 Iters: 690800/[16], loss: 3.4295, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-17:51:22 Iters: 690900/[16], loss: 3.3319, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-17:53:25 Iters: 691000/[16], loss: 3.3311, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-17:55:29 Iters: 691100/[16], loss: 3.1536, train_accuracy: 
0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-17:57:32 Iters: 691200/[16], loss: 2.8877, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-17:59:35 Iters: 691300/[16], loss: 2.9429, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-18:01:38 Iters: 691400/[16], loss: 3.5587, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-18:03:42 Iters: 691500/[16], loss: 2.7675, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-18:05:45 Iters: 691600/[16], loss: 3.0580, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-18:07:48 Iters: 691700/[16], loss: 3.4620, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-18:09:51 Iters: 691800/[16], loss: 3.2843, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-18:11:55 Iters: 691900/[16], loss: 3.0415, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-18:13:58 Iters: 692000/[16], loss: 3.0942, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-18:16:01 Iters: 692100/[16], loss: 2.7330, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-18:18:04 Iters: 692200/[16], loss: 2.8187, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-18:20:08 Iters: 692300/[16], loss: 2.7379, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-18:22:11 Iters: 692400/[16], loss: 3.1795, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-18:24:14 Iters: 692500/[16], loss: 3.0053, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-18:26:17 Iters: 692600/[16], loss: 3.0826, train_accuracy: 
0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-18:28:21 Iters: 692700/[16], loss: 3.2187, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-18:30:24 Iters: 692800/[16], loss: 2.8360, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-18:32:27 Iters: 692900/[16], loss: 3.9544, train_accuracy: 0.4766, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-18:34:31 Iters: 693000/[16], loss: 3.3527, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-18:36:34 Iters: 693100/[16], loss: 3.0591, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-18:38:37 Iters: 693200/[16], loss: 3.4312, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-18:40:40 Iters: 693300/[16], loss: 3.6218, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-18:42:44 Iters: 693400/[16], loss: 3.2824, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-18:44:47 Iters: 693500/[16], loss: 3.2129, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-18:46:50 Iters: 693600/[16], loss: 2.4604, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-18:48:53 Iters: 693700/[16], loss: 2.9989, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-18:50:56 Iters: 693800/[16], loss: 3.2507, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-18:52:59 Iters: 693900/[16], loss: 2.7075, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-18:55:03 Iters: 694000/[16], loss: 2.8959, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-18:57:06 Iters: 694100/[16], loss: 3.0794, train_accuracy: 
0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-18:59:09 Iters: 694200/[16], loss: 3.1101, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-19:01:12 Iters: 694300/[16], loss: 3.0883, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-19:03:15 Iters: 694400/[16], loss: 2.6882, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-19:05:19 Iters: 694500/[16], loss: 2.9724, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-19:07:22 Iters: 694600/[16], loss: 4.0557, train_accuracy: 0.4531, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-19:09:25 Iters: 694700/[16], loss: 2.8393, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-19:11:29 Iters: 694800/[16], loss: 2.8100, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-19:13:32 Iters: 694900/[16], loss: 2.5912, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-19:15:35 Iters: 695000/[16], loss: 2.5900, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-19:17:39 Iters: 695100/[16], loss: 2.9386, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-19:19:42 Iters: 695200/[16], loss: 2.3948, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-19:21:45 Iters: 695300/[16], loss: 2.8553, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-19:23:48 Iters: 695400/[16], loss: 3.9454, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-19:25:52 Iters: 695500/[16], loss: 3.0326, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-19:27:55 Iters: 695600/[16], loss: 3.1601, train_accuracy: 
0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-19:29:58 Iters: 695700/[16], loss: 3.4916, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-19:32:01 Iters: 695800/[16], loss: 2.7554, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-19:34:05 Iters: 695900/[16], loss: 3.5359, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-19:36:08 Iters: 696000/[16], loss: 3.7036, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-19:38:12 Iters: 696100/[16], loss: 3.4194, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-19:40:15 Iters: 696200/[16], loss: 2.2899, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-19:42:18 Iters: 696300/[16], loss: 3.4181, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-19:44:22 Iters: 696400/[16], loss: 3.0149, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-19:46:25 Iters: 696500/[16], loss: 2.9311, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-19:48:28 Iters: 696600/[16], loss: 3.3166, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-19:50:32 Iters: 696700/[16], loss: 2.6449, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-19:52:35 Iters: 696800/[16], loss: 3.4040, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-19:54:38 Iters: 696900/[16], loss: 3.0117, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-19:56:42 Iters: 697000/[16], loss: 3.1772, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-19:58:45 Iters: 697100/[16], loss: 2.7061, train_accuracy: 
0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-20:00:48 Iters: 697200/[16], loss: 2.9252, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-20:02:52 Iters: 697300/[16], loss: 3.0984, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-20:04:55 Iters: 697400/[16], loss: 2.7035, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-20:06:58 Iters: 697500/[16], loss: 2.6726, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-20:09:01 Iters: 697600/[16], loss: 3.0990, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-20:11:05 Iters: 697700/[16], loss: 2.6559, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-20:13:08 Iters: 697800/[16], loss: 2.9615, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-20:15:11 Iters: 697900/[16], loss: 3.3416, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-20:17:14 Iters: 698000/[16], loss: 3.2806, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-20:19:17 Iters: 698100/[16], loss: 3.5887, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-20:21:21 Iters: 698200/[16], loss: 2.9521, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-20:23:24 Iters: 698300/[16], loss: 3.2224, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-20:25:27 Iters: 698400/[16], loss: 2.9136, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-20:27:30 Iters: 698500/[16], loss: 3.6196, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-20:29:33 Iters: 698600/[16], loss: 3.0962, train_accuracy: 
0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-20:31:36 Iters: 698700/[16], loss: 3.3416, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-20:33:39 Iters: 698800/[16], loss: 3.5148, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-20:35:42 Iters: 698900/[16], loss: 2.8496, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-20:37:45 Iters: 699000/[16], loss: 2.8830, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-20:39:48 Iters: 699100/[16], loss: 2.7676, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-20:41:51 Iters: 699200/[16], loss: 3.5135, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-20:43:55 Iters: 699300/[16], loss: 2.8563, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-20:45:58 Iters: 699400/[16], loss: 2.9519, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-20:48:01 Iters: 699500/[16], loss: 3.3364, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-20:50:04 Iters: 699600/[16], loss: 3.0066, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-20:52:07 Iters: 699700/[16], loss: 2.8937, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-20:54:10 Iters: 699800/[16], loss: 2.9625, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-20:56:13 Iters: 699900/[16], loss: 2.9332, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-20:58:16 Iters: 700000/[16], loss: 3.9583, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-20:58:16 Saving checkpoint: 700000 -20220710-20:59:33 LFW Ave 
Accuracy: 99.6166 -20220710-21:00:48 AgeDB-30 Ave Accuracy: 97.2667 -20220710-21:02:15 CFP-FP Ave Accuracy: 95.1286 -20220710-21:02:15 Current Best Accuracy: LFW: 99.7000 in iters: 690000, AgeDB-30: 97.3333 in iters: 680000 and CFP-FP: 95.2571 in iters: 680000 -20220710-21:04:18 Iters: 700100/[16], loss: 3.4950, train_accuracy: 0.5156, time: 3.62 s/iter, learning rate: 0.0005000000000000001 -20220710-21:06:21 Iters: 700200/[16], loss: 2.7377, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-21:08:24 Iters: 700300/[16], loss: 3.5238, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-21:10:27 Iters: 700400/[16], loss: 2.7167, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-21:12:30 Iters: 700500/[16], loss: 3.1055, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-21:14:32 Iters: 700600/[16], loss: 3.1513, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-21:16:35 Iters: 700700/[16], loss: 3.5451, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-21:18:38 Iters: 700800/[16], loss: 3.7323, train_accuracy: 0.4844, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-21:20:41 Iters: 700900/[16], loss: 3.4097, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-21:22:44 Iters: 701000/[16], loss: 3.4348, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-21:24:47 Iters: 701100/[16], loss: 3.4230, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-21:26:50 Iters: 701200/[16], loss: 2.9943, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-21:28:53 Iters: 701300/[16], loss: 2.8651, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220710-21:30:56 Iters: 701400/[16], loss: 3.3163, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-21:32:59 Iters: 701500/[16], loss: 2.6782, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-21:35:02 Iters: 701600/[16], loss: 3.7666, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-21:37:05 Iters: 701700/[16], loss: 2.8525, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-21:39:08 Iters: 701800/[16], loss: 3.2673, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-21:41:11 Iters: 701900/[16], loss: 2.5001, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-21:43:14 Iters: 702000/[16], loss: 2.8561, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-21:45:17 Iters: 702100/[16], loss: 2.8431, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-21:47:20 Iters: 702200/[16], loss: 3.3348, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-21:49:23 Iters: 702300/[16], loss: 3.3791, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-21:51:26 Iters: 702400/[16], loss: 3.3828, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-21:53:29 Iters: 702500/[16], loss: 2.9646, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-21:55:32 Iters: 702600/[16], loss: 3.0793, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-21:57:35 Iters: 702700/[16], loss: 2.7754, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-21:59:38 Iters: 702800/[16], loss: 3.4089, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220710-22:01:41 Iters: 702900/[16], loss: 3.9797, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-22:03:44 Iters: 703000/[16], loss: 3.2074, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-22:05:47 Iters: 703100/[16], loss: 3.2266, train_accuracy: 0.4531, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-22:07:49 Iters: 703200/[16], loss: 3.2323, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-22:09:52 Iters: 703300/[16], loss: 3.7890, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-22:11:55 Iters: 703400/[16], loss: 3.1525, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-22:13:58 Iters: 703500/[16], loss: 2.5188, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-22:16:01 Iters: 703600/[16], loss: 3.2067, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-22:18:04 Iters: 703700/[16], loss: 3.4833, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-22:20:07 Iters: 703800/[16], loss: 2.3656, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-22:22:10 Iters: 703900/[16], loss: 3.1661, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-22:24:13 Iters: 704000/[16], loss: 2.8968, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-22:26:16 Iters: 704100/[16], loss: 2.4238, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-22:28:19 Iters: 704200/[16], loss: 3.0643, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-22:30:22 Iters: 704300/[16], loss: 3.6926, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220710-22:32:25 Iters: 704400/[16], loss: 2.7514, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-22:34:27 Iters: 704500/[16], loss: 3.2682, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-22:36:30 Iters: 704600/[16], loss: 3.3148, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-22:38:33 Iters: 704700/[16], loss: 2.9632, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-22:40:36 Iters: 704800/[16], loss: 2.9191, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-22:42:39 Iters: 704900/[16], loss: 3.6481, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-22:44:42 Iters: 705000/[16], loss: 3.2820, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-22:46:45 Iters: 705100/[16], loss: 2.7316, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-22:48:48 Iters: 705200/[16], loss: 3.1914, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-22:50:51 Iters: 705300/[16], loss: 3.0300, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-22:52:54 Iters: 705400/[16], loss: 2.9541, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-22:54:57 Iters: 705500/[16], loss: 2.9045, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-22:57:00 Iters: 705600/[16], loss: 3.4059, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-22:59:03 Iters: 705700/[16], loss: 2.4885, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-23:01:06 Iters: 705800/[16], loss: 2.8930, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220710-23:03:09 Iters: 705900/[16], loss: 3.2914, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-23:05:11 Iters: 706000/[16], loss: 3.0132, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-23:07:14 Iters: 706100/[16], loss: 3.3321, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-23:09:17 Iters: 706200/[16], loss: 3.4236, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-23:11:20 Iters: 706300/[16], loss: 2.7539, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-23:13:23 Iters: 706400/[16], loss: 3.5814, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-23:15:26 Iters: 706500/[16], loss: 3.3640, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-23:17:29 Iters: 706600/[16], loss: 2.9646, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-23:19:32 Iters: 706700/[16], loss: 3.1134, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-23:21:35 Iters: 706800/[16], loss: 3.6356, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-23:23:38 Iters: 706900/[16], loss: 3.6075, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-23:25:41 Iters: 707000/[16], loss: 3.2501, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-23:27:44 Iters: 707100/[16], loss: 3.2860, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-23:29:47 Iters: 707200/[16], loss: 2.8973, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-23:31:50 Iters: 707300/[16], loss: 3.2678, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220710-23:33:52 Iters: 707400/[16], loss: 3.6798, train_accuracy: 0.4531, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-23:35:55 Iters: 707500/[16], loss: 3.4164, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-23:37:58 Iters: 707600/[16], loss: 3.7051, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-23:40:01 Iters: 707700/[16], loss: 3.5561, train_accuracy: 0.4375, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-23:42:04 Iters: 707800/[16], loss: 3.3320, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-23:44:07 Iters: 707900/[16], loss: 3.4380, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-23:46:10 Iters: 708000/[16], loss: 3.4407, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-23:48:13 Iters: 708100/[16], loss: 3.5403, train_accuracy: 0.4766, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-23:50:16 Iters: 708200/[16], loss: 3.3328, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-23:52:19 Iters: 708300/[16], loss: 3.4899, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-23:54:22 Iters: 708400/[16], loss: 2.8661, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-23:56:24 Iters: 708500/[16], loss: 3.1781, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220710-23:58:27 Iters: 708600/[16], loss: 3.0923, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-00:00:30 Iters: 708700/[16], loss: 2.9496, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-00:02:33 Iters: 708800/[16], loss: 3.4234, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220711-00:04:36 Iters: 708900/[16], loss: 3.1878, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-00:06:39 Iters: 709000/[16], loss: 2.7046, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-00:08:42 Iters: 709100/[16], loss: 4.0455, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-00:10:45 Iters: 709200/[16], loss: 2.9139, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-00:12:48 Iters: 709300/[16], loss: 3.1613, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-00:14:51 Iters: 709400/[16], loss: 3.7895, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-00:16:54 Iters: 709500/[16], loss: 3.0589, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-00:18:57 Iters: 709600/[16], loss: 3.8428, train_accuracy: 0.4844, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-00:20:59 Iters: 709700/[16], loss: 3.0023, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-00:23:02 Iters: 709800/[16], loss: 2.8136, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-00:25:05 Iters: 709900/[16], loss: 3.0171, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-00:27:08 Iters: 710000/[16], loss: 3.0092, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-00:27:08 Saving checkpoint: 710000 -20220711-00:28:25 LFW Ave Accuracy: 99.7166 -20220711-00:29:40 AgeDB-30 Ave Accuracy: 97.1667 -20220711-00:31:07 CFP-FP Ave Accuracy: 95.1429 -20220711-00:31:07 Current Best Accuracy: LFW: 99.7166 in iters: 710000, AgeDB-30: 97.3333 in iters: 680000 and CFP-FP: 95.2571 in iters: 680000 -20220711-00:33:10 Iters: 710100/[16], loss: 3.1424, train_accuracy: 
0.5625, time: 3.61 s/iter, learning rate: 0.0005000000000000001 -20220711-00:35:13 Iters: 710200/[16], loss: 2.6797, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-00:37:16 Iters: 710300/[16], loss: 3.2263, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-00:39:18 Iters: 710400/[16], loss: 2.9390, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-00:41:21 Iters: 710500/[16], loss: 2.8591, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-00:43:24 Iters: 710600/[16], loss: 2.5997, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-00:45:27 Iters: 710700/[16], loss: 3.3307, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-00:47:30 Iters: 710800/[16], loss: 3.3007, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-00:49:33 Iters: 710900/[16], loss: 3.0090, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-00:51:36 Iters: 711000/[16], loss: 3.6353, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-00:53:39 Iters: 711100/[16], loss: 2.8884, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-00:55:42 Iters: 711200/[16], loss: 3.1405, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-00:57:45 Iters: 711300/[16], loss: 3.7035, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-00:59:48 Iters: 711400/[16], loss: 3.4586, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-01:01:51 Iters: 711500/[16], loss: 2.6624, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-01:03:54 Iters: 711600/[16], loss: 3.2190, train_accuracy: 
0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-01:05:57 Iters: 711700/[16], loss: 3.4571, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-01:08:00 Iters: 711800/[16], loss: 2.9286, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-01:10:03 Iters: 711900/[16], loss: 3.5764, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-01:12:06 Iters: 712000/[16], loss: 3.0634, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-01:14:09 Iters: 712100/[16], loss: 3.7994, train_accuracy: 0.4766, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-01:16:12 Iters: 712200/[16], loss: 2.7027, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-01:18:14 Iters: 712300/[16], loss: 3.3756, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-01:20:17 Iters: 712400/[16], loss: 3.0357, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-01:22:20 Iters: 712500/[16], loss: 3.2060, train_accuracy: 0.4766, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-01:24:23 Iters: 712600/[16], loss: 2.8972, train_accuracy: 0.4531, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-01:26:26 Iters: 712700/[16], loss: 2.8917, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-01:28:29 Iters: 712800/[16], loss: 3.1338, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-01:30:32 Iters: 712900/[16], loss: 2.8542, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-01:32:35 Iters: 713000/[16], loss: 3.7481, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-01:34:38 Iters: 713100/[16], loss: 3.9797, train_accuracy: 
0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-01:36:41 Iters: 713200/[16], loss: 2.9722, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-01:38:44 Iters: 713300/[16], loss: 3.9309, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-01:40:47 Iters: 713400/[16], loss: 3.3608, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-01:42:50 Iters: 713500/[16], loss: 3.2910, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-01:44:53 Iters: 713600/[16], loss: 2.6197, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-01:46:56 Iters: 713700/[16], loss: 3.0128, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-01:48:59 Iters: 713800/[16], loss: 2.9390, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-01:51:02 Iters: 713900/[16], loss: 3.4724, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-01:53:05 Iters: 714000/[16], loss: 3.2775, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-01:55:08 Iters: 714100/[16], loss: 4.1425, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-01:57:11 Iters: 714200/[16], loss: 3.5841, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-01:59:14 Iters: 714300/[16], loss: 2.5357, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-02:01:17 Iters: 714400/[16], loss: 3.4820, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-02:03:20 Iters: 714500/[16], loss: 3.5196, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-02:05:23 Iters: 714600/[16], loss: 3.1931, train_accuracy: 
0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-02:07:26 Iters: 714700/[16], loss: 3.0306, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-02:09:29 Iters: 714800/[16], loss: 3.3448, train_accuracy: 0.4844, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-02:11:32 Iters: 714900/[16], loss: 3.4030, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-02:13:36 Iters: 715000/[16], loss: 2.9575, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-02:15:39 Iters: 715100/[16], loss: 2.9355, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-02:17:42 Iters: 715200/[16], loss: 3.5185, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-02:19:45 Iters: 715300/[16], loss: 3.6707, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-02:21:48 Iters: 715400/[16], loss: 3.0359, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-02:23:51 Iters: 715500/[16], loss: 3.6695, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-02:25:55 Iters: 715600/[16], loss: 2.6424, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-02:27:58 Iters: 715700/[16], loss: 3.3281, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-02:30:01 Iters: 715800/[16], loss: 3.4725, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-02:32:05 Iters: 715900/[16], loss: 3.5073, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-02:34:08 Iters: 716000/[16], loss: 3.2059, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-02:36:11 Iters: 716100/[16], loss: 2.4395, train_accuracy: 
0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-02:38:14 Iters: 716200/[16], loss: 2.9080, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-02:40:18 Iters: 716300/[16], loss: 3.8937, train_accuracy: 0.4844, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-02:42:21 Iters: 716400/[16], loss: 3.2887, train_accuracy: 0.4922, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-02:44:24 Iters: 716500/[16], loss: 3.1057, train_accuracy: 0.5547, time: 1.24 s/iter, learning rate: 0.0005000000000000001 -20220711-02:46:28 Iters: 716600/[16], loss: 3.2835, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-02:48:31 Iters: 716700/[16], loss: 3.6186, train_accuracy: 0.5234, time: 1.24 s/iter, learning rate: 0.0005000000000000001 -20220711-02:50:35 Iters: 716800/[16], loss: 3.6730, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-02:52:39 Iters: 716900/[16], loss: 3.3659, train_accuracy: 0.5000, time: 1.24 s/iter, learning rate: 0.0005000000000000001 -20220711-02:54:42 Iters: 717000/[16], loss: 3.1570, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-02:56:45 Iters: 717100/[16], loss: 2.6900, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-02:58:49 Iters: 717200/[16], loss: 2.8518, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-03:00:52 Iters: 717300/[16], loss: 3.6993, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-03:02:55 Iters: 717400/[16], loss: 2.7264, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-03:04:59 Iters: 717500/[16], loss: 3.1016, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-03:07:02 Iters: 717600/[16], loss: 2.9811, train_accuracy: 
0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-03:09:05 Iters: 717700/[16], loss: 3.1438, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-03:11:09 Iters: 717800/[16], loss: 2.9942, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-03:13:12 Iters: 717900/[16], loss: 3.4486, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-03:15:15 Iters: 718000/[16], loss: 2.9018, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-03:17:19 Iters: 718100/[16], loss: 3.4513, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-03:19:22 Iters: 718200/[16], loss: 2.9258, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-03:21:25 Iters: 718300/[16], loss: 3.1753, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-03:23:29 Iters: 718400/[16], loss: 2.8069, train_accuracy: 0.5938, time: 1.24 s/iter, learning rate: 0.0005000000000000001 -20220711-03:25:32 Iters: 718500/[16], loss: 3.5456, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-03:27:35 Iters: 718600/[16], loss: 2.8924, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-03:29:39 Iters: 718700/[16], loss: 3.4896, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-03:31:42 Iters: 718800/[16], loss: 3.1490, train_accuracy: 0.6641, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-03:33:45 Iters: 718900/[16], loss: 3.1091, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-03:35:48 Iters: 719000/[16], loss: 2.3254, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-03:37:52 Iters: 719100/[16], loss: 3.0192, train_accuracy: 
0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-03:39:55 Iters: 719200/[16], loss: 3.0371, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-03:41:58 Iters: 719300/[16], loss: 3.4066, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-03:44:02 Iters: 719400/[16], loss: 3.1059, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-03:46:05 Iters: 719500/[16], loss: 2.9315, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-03:48:09 Iters: 719600/[16], loss: 3.3305, train_accuracy: 0.5469, time: 1.24 s/iter, learning rate: 0.0005000000000000001 -20220711-03:50:12 Iters: 719700/[16], loss: 4.1641, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-03:52:15 Iters: 719800/[16], loss: 3.4594, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-03:54:19 Iters: 719900/[16], loss: 3.2565, train_accuracy: 0.6016, time: 1.24 s/iter, learning rate: 0.0005000000000000001 -20220711-03:56:22 Iters: 720000/[16], loss: 2.8587, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-03:56:22 Saving checkpoint: 720000 -20220711-03:57:40 LFW Ave Accuracy: 99.6333 -20220711-03:58:59 AgeDB-30 Ave Accuracy: 97.1333 -20220711-04:00:29 CFP-FP Ave Accuracy: 94.9571 -20220711-04:00:29 Current Best Accuracy: LFW: 99.7166 in iters: 710000, AgeDB-30: 97.3333 in iters: 680000 and CFP-FP: 95.2571 in iters: 680000 -20220711-04:02:32 Iters: 720100/[16], loss: 3.6217, train_accuracy: 0.5391, time: 3.70 s/iter, learning rate: 0.0005000000000000001 -20220711-04:04:35 Iters: 720200/[16], loss: 3.6993, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-04:06:38 Iters: 720300/[16], loss: 3.3762, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220711-04:08:42 Iters: 720400/[16], loss: 3.0997, train_accuracy: 0.5000, time: 1.24 s/iter, learning rate: 0.0005000000000000001 -20220711-04:10:46 Iters: 720500/[16], loss: 2.9160, train_accuracy: 0.5938, time: 1.24 s/iter, learning rate: 0.0005000000000000001 -20220711-04:12:49 Iters: 720600/[16], loss: 2.9169, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-04:14:52 Iters: 720700/[16], loss: 3.1918, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-04:16:56 Iters: 720800/[16], loss: 2.7522, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-04:18:59 Iters: 720900/[16], loss: 3.2319, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-04:21:02 Iters: 721000/[16], loss: 3.3527, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-04:23:05 Iters: 721100/[16], loss: 3.1033, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-04:25:08 Iters: 721200/[16], loss: 2.7512, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-04:27:12 Iters: 721300/[16], loss: 2.9996, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-04:29:15 Iters: 721400/[16], loss: 3.2974, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-04:31:18 Iters: 721500/[16], loss: 2.6937, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-04:33:21 Iters: 721600/[16], loss: 3.6691, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-04:35:24 Iters: 721700/[16], loss: 2.8210, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-04:37:27 Iters: 721800/[16], loss: 3.2595, train_accuracy: 0.4688, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220711-04:39:31 Iters: 721900/[16], loss: 2.8647, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-04:41:34 Iters: 722000/[16], loss: 3.0117, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-04:43:37 Iters: 722100/[16], loss: 3.2525, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-04:45:40 Iters: 722200/[16], loss: 3.5464, train_accuracy: 0.5078, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-04:47:43 Iters: 722300/[16], loss: 3.3948, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-04:49:46 Iters: 722400/[16], loss: 2.7509, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-04:51:49 Iters: 722500/[16], loss: 3.0608, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-04:53:52 Iters: 722600/[16], loss: 3.6648, train_accuracy: 0.4531, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-04:55:55 Iters: 722700/[16], loss: 2.3930, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-04:57:58 Iters: 722800/[16], loss: 3.2431, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-05:00:01 Iters: 722900/[16], loss: 2.9766, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-05:02:04 Iters: 723000/[16], loss: 2.8206, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-05:04:07 Iters: 723100/[16], loss: 3.1874, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-05:06:10 Iters: 723200/[16], loss: 3.5668, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-05:08:13 Iters: 723300/[16], loss: 2.8610, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220711-05:10:15 Iters: 723400/[16], loss: 2.9548, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-05:12:18 Iters: 723500/[16], loss: 2.6494, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-05:14:21 Iters: 723600/[16], loss: 3.3500, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-05:16:24 Iters: 723700/[16], loss: 3.0846, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-05:18:27 Iters: 723800/[16], loss: 2.9213, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-05:20:30 Iters: 723900/[16], loss: 2.8600, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-05:22:33 Iters: 724000/[16], loss: 3.0050, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-05:24:36 Iters: 724100/[16], loss: 2.9292, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-05:26:39 Iters: 724200/[16], loss: 3.5839, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-05:28:42 Iters: 724300/[16], loss: 3.9643, train_accuracy: 0.4688, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-05:30:45 Iters: 724400/[16], loss: 2.8482, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-05:32:48 Iters: 724500/[16], loss: 3.7340, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-05:34:51 Iters: 724600/[16], loss: 2.6115, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-05:36:54 Iters: 724700/[16], loss: 3.3123, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-05:38:57 Iters: 724800/[16], loss: 2.9777, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220711-05:41:00 Iters: 724900/[16], loss: 2.8698, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-05:43:03 Iters: 725000/[16], loss: 3.9968, train_accuracy: 0.4688, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-05:45:06 Iters: 725100/[16], loss: 2.9701, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-05:47:09 Iters: 725200/[16], loss: 3.7822, train_accuracy: 0.4844, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-05:49:12 Iters: 725300/[16], loss: 2.7515, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-05:51:15 Iters: 725400/[16], loss: 3.0667, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-05:53:18 Iters: 725500/[16], loss: 3.6583, train_accuracy: 0.4688, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-05:55:21 Iters: 725600/[16], loss: 3.0936, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-05:57:24 Iters: 725700/[16], loss: 2.7186, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-05:59:27 Iters: 725800/[16], loss: 3.1251, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-06:01:30 Iters: 725900/[16], loss: 3.1774, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-06:03:33 Iters: 726000/[16], loss: 3.7343, train_accuracy: 0.4844, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-06:05:36 Iters: 726100/[16], loss: 3.0757, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-06:07:39 Iters: 726200/[16], loss: 4.0419, train_accuracy: 0.4766, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-06:09:42 Iters: 726300/[16], loss: 2.9265, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220711-06:11:45 Iters: 726400/[16], loss: 3.0894, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-06:13:48 Iters: 726500/[16], loss: 2.9963, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-06:15:51 Iters: 726600/[16], loss: 3.2804, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-06:17:54 Iters: 726700/[16], loss: 3.4473, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-06:19:57 Iters: 726800/[16], loss: 2.9914, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-06:22:00 Iters: 726900/[16], loss: 3.1281, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-06:24:03 Iters: 727000/[16], loss: 3.3112, train_accuracy: 0.4531, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-06:26:06 Iters: 727100/[16], loss: 3.5331, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-06:28:09 Iters: 727200/[16], loss: 3.6456, train_accuracy: 0.4766, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-06:30:12 Iters: 727300/[16], loss: 3.4604, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-06:32:15 Iters: 727400/[16], loss: 3.4778, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-06:34:18 Iters: 727500/[16], loss: 3.8425, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-06:36:21 Iters: 727600/[16], loss: 2.7236, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-06:38:24 Iters: 727700/[16], loss: 2.6103, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 0.0005000000000000001 -20220711-06:40:27 Iters: 727800/[16], loss: 2.7759, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 0.0005000000000000001 
-20220711-06:41:16 Train Epoch: 17/18 ... -20220711-06:42:31 Iters: 727900/[17], loss: 3.2690, train_accuracy: 0.5000, time: 0.74 s/iter, learning rate: 5.000000000000002e-06 -20220711-06:44:34 Iters: 728000/[17], loss: 2.6418, train_accuracy: 0.6875, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-06:46:37 Iters: 728100/[17], loss: 2.9927, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-06:48:40 Iters: 728200/[17], loss: 3.0187, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-06:50:44 Iters: 728300/[17], loss: 3.2666, train_accuracy: 0.6094, time: 1.24 s/iter, learning rate: 5.000000000000002e-06 -20220711-06:52:47 Iters: 728400/[17], loss: 2.8236, train_accuracy: 0.6172, time: 1.24 s/iter, learning rate: 5.000000000000002e-06 -20220711-06:54:51 Iters: 728500/[17], loss: 2.9781, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-06:56:54 Iters: 728600/[17], loss: 3.2324, train_accuracy: 0.5000, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-06:58:57 Iters: 728700/[17], loss: 3.1669, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-07:01:00 Iters: 728800/[17], loss: 2.6054, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-07:03:03 Iters: 728900/[17], loss: 3.3539, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-07:05:06 Iters: 729000/[17], loss: 2.2065, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-07:07:09 Iters: 729100/[17], loss: 2.0266, train_accuracy: 0.6875, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-07:09:12 Iters: 729200/[17], loss: 3.4417, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-07:11:15 Iters: 729300/[17], loss: 2.3797, train_accuracy: 0.6406, time: 1.23 s/iter, 
learning rate: 5.000000000000002e-06 -20220711-07:13:18 Iters: 729400/[17], loss: 2.8833, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-07:15:21 Iters: 729500/[17], loss: 3.3240, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-07:17:24 Iters: 729600/[17], loss: 2.8271, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-07:19:27 Iters: 729700/[17], loss: 2.7073, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-07:21:30 Iters: 729800/[17], loss: 3.1092, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-07:23:33 Iters: 729900/[17], loss: 2.1319, train_accuracy: 0.7031, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-07:25:36 Iters: 730000/[17], loss: 2.9404, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-07:25:36 Saving checkpoint: 730000 -20220711-07:26:52 LFW Ave Accuracy: 99.6666 -20220711-07:28:07 AgeDB-30 Ave Accuracy: 97.3500 -20220711-07:29:33 CFP-FP Ave Accuracy: 95.3429 -20220711-07:29:33 Current Best Accuracy: LFW: 99.7166 in iters: 710000, AgeDB-30: 97.3500 in iters: 730000 and CFP-FP: 95.3429 in iters: 730000 -20220711-07:31:36 Iters: 730100/[17], loss: 3.4295, train_accuracy: 0.5781, time: 3.60 s/iter, learning rate: 5.000000000000002e-06 -20220711-07:33:39 Iters: 730200/[17], loss: 2.4830, train_accuracy: 0.7031, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-07:35:42 Iters: 730300/[17], loss: 3.3630, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-07:37:45 Iters: 730400/[17], loss: 2.7323, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-07:39:48 Iters: 730500/[17], loss: 2.7555, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-07:41:51 Iters: 
730600/[17], loss: 2.5275, train_accuracy: 0.6719, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-07:43:54 Iters: 730700/[17], loss: 2.4747, train_accuracy: 0.6641, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-07:45:57 Iters: 730800/[17], loss: 2.5876, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-07:48:00 Iters: 730900/[17], loss: 2.5949, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-07:50:03 Iters: 731000/[17], loss: 2.3175, train_accuracy: 0.6641, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-07:52:06 Iters: 731100/[17], loss: 3.2775, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-07:54:09 Iters: 731200/[17], loss: 3.0138, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-07:56:12 Iters: 731300/[17], loss: 2.5317, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-07:58:15 Iters: 731400/[17], loss: 2.6807, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-08:00:18 Iters: 731500/[17], loss: 2.5098, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-08:02:21 Iters: 731600/[17], loss: 2.8521, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-08:04:24 Iters: 731700/[17], loss: 3.7340, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-08:06:27 Iters: 731800/[17], loss: 2.3405, train_accuracy: 0.7109, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-08:08:30 Iters: 731900/[17], loss: 2.3937, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-08:10:33 Iters: 732000/[17], loss: 2.4258, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-08:12:37 Iters: 
732100/[17], loss: 2.7034, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-08:14:40 Iters: 732200/[17], loss: 2.4925, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-08:16:43 Iters: 732300/[17], loss: 2.9853, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-08:18:46 Iters: 732400/[17], loss: 3.1425, train_accuracy: 0.6641, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-08:20:49 Iters: 732500/[17], loss: 2.9315, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-08:22:53 Iters: 732600/[17], loss: 3.1044, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-08:24:56 Iters: 732700/[17], loss: 2.9521, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-08:26:59 Iters: 732800/[17], loss: 3.0075, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-08:29:02 Iters: 732900/[17], loss: 2.4999, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-08:31:06 Iters: 733000/[17], loss: 3.2586, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-08:33:09 Iters: 733100/[17], loss: 2.5606, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-08:35:12 Iters: 733200/[17], loss: 3.0659, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-08:37:15 Iters: 733300/[17], loss: 2.0554, train_accuracy: 0.7188, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-08:39:18 Iters: 733400/[17], loss: 2.2353, train_accuracy: 0.7188, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-08:41:22 Iters: 733500/[17], loss: 2.4868, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-08:43:25 Iters: 
733600/[17], loss: 3.0602, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-08:45:28 Iters: 733700/[17], loss: 2.9513, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-08:47:31 Iters: 733800/[17], loss: 2.3516, train_accuracy: 0.6719, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-08:49:34 Iters: 733900/[17], loss: 2.7151, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-08:51:37 Iters: 734000/[17], loss: 3.0088, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-08:53:40 Iters: 734100/[17], loss: 2.4302, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-08:55:44 Iters: 734200/[17], loss: 3.3631, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-08:57:47 Iters: 734300/[17], loss: 2.2405, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-08:59:50 Iters: 734400/[17], loss: 2.3564, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-09:01:53 Iters: 734500/[17], loss: 2.3368, train_accuracy: 0.7422, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-09:03:56 Iters: 734600/[17], loss: 3.0353, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-09:05:59 Iters: 734700/[17], loss: 2.8386, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-09:08:02 Iters: 734800/[17], loss: 2.5901, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-09:10:05 Iters: 734900/[17], loss: 2.5011, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-09:12:08 Iters: 735000/[17], loss: 3.0110, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-09:14:11 Iters: 
735100/[17], loss: 3.0860, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-09:16:14 Iters: 735200/[17], loss: 2.9678, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-09:18:17 Iters: 735300/[17], loss: 2.8352, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-09:20:20 Iters: 735400/[17], loss: 3.0171, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-09:22:23 Iters: 735500/[17], loss: 2.4248, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-09:24:27 Iters: 735600/[17], loss: 2.4435, train_accuracy: 0.6797, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-09:26:30 Iters: 735700/[17], loss: 2.5905, train_accuracy: 0.7031, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-09:28:33 Iters: 735800/[17], loss: 2.3268, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-09:30:36 Iters: 735900/[17], loss: 2.9254, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-09:32:40 Iters: 736000/[17], loss: 2.4324, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-09:34:43 Iters: 736100/[17], loss: 2.4983, train_accuracy: 0.6875, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-09:36:46 Iters: 736200/[17], loss: 3.6317, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-09:38:49 Iters: 736300/[17], loss: 2.8309, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-09:40:52 Iters: 736400/[17], loss: 3.7482, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-09:42:55 Iters: 736500/[17], loss: 2.4473, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-09:44:58 Iters: 
736600/[17], loss: 2.9905, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-09:47:01 Iters: 736700/[17], loss: 3.1050, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-09:49:05 Iters: 736800/[17], loss: 2.8678, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-09:51:08 Iters: 736900/[17], loss: 2.2224, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-09:53:11 Iters: 737000/[17], loss: 3.0374, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-09:55:14 Iters: 737100/[17], loss: 2.5690, train_accuracy: 0.6641, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-09:57:17 Iters: 737200/[17], loss: 2.4990, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-09:59:20 Iters: 737300/[17], loss: 2.4150, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-10:01:24 Iters: 737400/[17], loss: 2.5627, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-10:03:27 Iters: 737500/[17], loss: 2.6382, train_accuracy: 0.6641, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-10:05:30 Iters: 737600/[17], loss: 2.7167, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-10:07:33 Iters: 737700/[17], loss: 2.4063, train_accuracy: 0.6875, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-10:09:36 Iters: 737800/[17], loss: 2.9214, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-10:11:39 Iters: 737900/[17], loss: 2.5347, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-10:13:42 Iters: 738000/[17], loss: 2.0738, train_accuracy: 0.7031, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-10:15:45 Iters: 
738100/[17], loss: 3.1964, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-10:17:48 Iters: 738200/[17], loss: 2.2653, train_accuracy: 0.6797, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-10:19:51 Iters: 738300/[17], loss: 2.3152, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-10:21:54 Iters: 738400/[17], loss: 2.2302, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-10:23:57 Iters: 738500/[17], loss: 2.7853, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-10:26:01 Iters: 738600/[17], loss: 2.7654, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-10:28:04 Iters: 738700/[17], loss: 2.2561, train_accuracy: 0.6875, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-10:30:07 Iters: 738800/[17], loss: 3.1415, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-10:32:10 Iters: 738900/[17], loss: 2.9061, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-10:34:13 Iters: 739000/[17], loss: 2.4799, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-10:36:16 Iters: 739100/[17], loss: 2.5313, train_accuracy: 0.6641, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-10:38:20 Iters: 739200/[17], loss: 2.4674, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-10:40:23 Iters: 739300/[17], loss: 2.6397, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-10:42:26 Iters: 739400/[17], loss: 2.2482, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-10:44:29 Iters: 739500/[17], loss: 2.9587, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-10:46:33 Iters: 
739600/[17], loss: 2.4749, train_accuracy: 0.6641, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-10:48:36 Iters: 739700/[17], loss: 2.7810, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-10:50:39 Iters: 739800/[17], loss: 2.9109, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-10:52:42 Iters: 739900/[17], loss: 2.6459, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-10:54:46 Iters: 740000/[17], loss: 2.7614, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-10:54:46 Saving checkpoint: 740000 -20220711-10:56:03 LFW Ave Accuracy: 99.7333 -20220711-10:57:18 AgeDB-30 Ave Accuracy: 97.4500 -20220711-10:58:46 CFP-FP Ave Accuracy: 95.4143 -20220711-10:58:46 Current Best Accuracy: LFW: 99.7333 in iters: 740000, AgeDB-30: 97.4500 in iters: 740000 and CFP-FP: 95.4143 in iters: 740000 -20220711-11:00:49 Iters: 740100/[17], loss: 2.6820, train_accuracy: 0.6406, time: 3.63 s/iter, learning rate: 5.000000000000002e-06 -20220711-11:02:52 Iters: 740200/[17], loss: 3.1927, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-11:04:56 Iters: 740300/[17], loss: 3.0487, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-11:06:59 Iters: 740400/[17], loss: 1.8992, train_accuracy: 0.6641, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-11:09:02 Iters: 740500/[17], loss: 2.4030, train_accuracy: 0.6875, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-11:11:05 Iters: 740600/[17], loss: 2.1179, train_accuracy: 0.6797, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-11:13:08 Iters: 740700/[17], loss: 2.9770, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-11:15:12 Iters: 740800/[17], loss: 2.7247, train_accuracy: 0.6172, time: 1.23 s/iter, 
learning rate: 5.000000000000002e-06 -20220711-11:17:15 Iters: 740900/[17], loss: 2.9432, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-11:19:18 Iters: 741000/[17], loss: 2.3416, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-11:21:22 Iters: 741100/[17], loss: 2.6403, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-11:23:25 Iters: 741200/[17], loss: 2.2465, train_accuracy: 0.6875, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-11:25:28 Iters: 741300/[17], loss: 3.0495, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-11:27:31 Iters: 741400/[17], loss: 3.1531, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-11:29:34 Iters: 741500/[17], loss: 3.0751, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-11:31:37 Iters: 741600/[17], loss: 2.1185, train_accuracy: 0.7422, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-11:33:40 Iters: 741700/[17], loss: 3.5330, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-11:35:43 Iters: 741800/[17], loss: 2.6247, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-11:37:46 Iters: 741900/[17], loss: 3.0747, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-11:39:49 Iters: 742000/[17], loss: 2.4493, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-11:41:53 Iters: 742100/[17], loss: 2.5713, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-11:43:56 Iters: 742200/[17], loss: 2.8115, train_accuracy: 0.6641, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-11:45:59 Iters: 742300/[17], loss: 2.7893, train_accuracy: 0.5625, time: 1.23 s/iter, 
learning rate: 5.000000000000002e-06 -20220711-11:48:02 Iters: 742400/[17], loss: 2.8194, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-11:50:05 Iters: 742500/[17], loss: 3.1952, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-11:52:09 Iters: 742600/[17], loss: 2.5470, train_accuracy: 0.6641, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-11:54:12 Iters: 742700/[17], loss: 3.4220, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-11:56:15 Iters: 742800/[17], loss: 2.9292, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-11:58:18 Iters: 742900/[17], loss: 3.0587, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-12:00:22 Iters: 743000/[17], loss: 2.1979, train_accuracy: 0.6875, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-12:02:25 Iters: 743100/[17], loss: 2.4799, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-12:04:28 Iters: 743200/[17], loss: 2.7785, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-12:06:31 Iters: 743300/[17], loss: 2.5883, train_accuracy: 0.6719, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-12:08:34 Iters: 743400/[17], loss: 3.0491, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-12:10:37 Iters: 743500/[17], loss: 2.6242, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-12:12:40 Iters: 743600/[17], loss: 3.0593, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-12:14:43 Iters: 743700/[17], loss: 2.4828, train_accuracy: 0.6719, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-12:16:46 Iters: 743800/[17], loss: 2.7171, train_accuracy: 0.6406, time: 1.23 s/iter, 
learning rate: 5.000000000000002e-06 -20220711-12:18:49 Iters: 743900/[17], loss: 3.0783, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-12:20:53 Iters: 744000/[17], loss: 2.6555, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-12:22:56 Iters: 744100/[17], loss: 3.1378, train_accuracy: 0.6250, time: 1.24 s/iter, learning rate: 5.000000000000002e-06 -20220711-12:25:00 Iters: 744200/[17], loss: 3.2526, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-12:27:03 Iters: 744300/[17], loss: 2.8991, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-12:29:07 Iters: 744400/[17], loss: 2.5862, train_accuracy: 0.6797, time: 1.24 s/iter, learning rate: 5.000000000000002e-06 -20220711-12:31:10 Iters: 744500/[17], loss: 2.3062, train_accuracy: 0.7188, time: 1.24 s/iter, learning rate: 5.000000000000002e-06 -20220711-12:33:14 Iters: 744600/[17], loss: 2.9273, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-12:35:17 Iters: 744700/[17], loss: 2.5588, train_accuracy: 0.6562, time: 1.24 s/iter, learning rate: 5.000000000000002e-06 -20220711-12:37:21 Iters: 744800/[17], loss: 2.2707, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-12:39:24 Iters: 744900/[17], loss: 2.9097, train_accuracy: 0.5703, time: 1.24 s/iter, learning rate: 5.000000000000002e-06 -20220711-12:41:28 Iters: 745000/[17], loss: 2.6870, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-12:43:31 Iters: 745100/[17], loss: 2.9357, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-12:45:34 Iters: 745200/[17], loss: 2.6063, train_accuracy: 0.6797, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-12:47:37 Iters: 745300/[17], loss: 2.4911, train_accuracy: 0.6641, time: 1.23 s/iter, 
learning rate: 5.000000000000002e-06 -20220711-12:49:41 Iters: 745400/[17], loss: 2.2897, train_accuracy: 0.6953, time: 1.24 s/iter, learning rate: 5.000000000000002e-06 -20220711-12:51:44 Iters: 745500/[17], loss: 2.4005, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-12:53:48 Iters: 745600/[17], loss: 2.2967, train_accuracy: 0.6641, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-12:55:51 Iters: 745700/[17], loss: 2.3990, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-12:57:54 Iters: 745800/[17], loss: 2.7938, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-12:59:57 Iters: 745900/[17], loss: 3.1868, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-13:02:00 Iters: 746000/[17], loss: 2.0176, train_accuracy: 0.7109, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-13:04:03 Iters: 746100/[17], loss: 2.4308, train_accuracy: 0.6797, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-13:06:06 Iters: 746200/[17], loss: 2.8707, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-13:08:10 Iters: 746300/[17], loss: 2.1776, train_accuracy: 0.6875, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-13:10:13 Iters: 746400/[17], loss: 2.6591, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-13:12:16 Iters: 746500/[17], loss: 2.8637, train_accuracy: 0.6641, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-13:14:19 Iters: 746600/[17], loss: 2.8575, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-13:16:22 Iters: 746700/[17], loss: 2.0643, train_accuracy: 0.7188, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-13:18:25 Iters: 746800/[17], loss: 2.8390, train_accuracy: 0.6094, time: 1.23 s/iter, 
learning rate: 5.000000000000002e-06 -20220711-13:20:28 Iters: 746900/[17], loss: 2.5629, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-13:22:32 Iters: 747000/[17], loss: 2.5730, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-13:24:35 Iters: 747100/[17], loss: 2.4717, train_accuracy: 0.6797, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-13:26:38 Iters: 747200/[17], loss: 2.2915, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-13:28:42 Iters: 747300/[17], loss: 3.1243, train_accuracy: 0.5781, time: 1.24 s/iter, learning rate: 5.000000000000002e-06 -20220711-13:30:45 Iters: 747400/[17], loss: 3.2985, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-13:32:49 Iters: 747500/[17], loss: 2.4576, train_accuracy: 0.6562, time: 1.24 s/iter, learning rate: 5.000000000000002e-06 -20220711-13:34:52 Iters: 747600/[17], loss: 2.3545, train_accuracy: 0.6875, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-13:36:55 Iters: 747700/[17], loss: 2.2964, train_accuracy: 0.6641, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-13:38:58 Iters: 747800/[17], loss: 2.3473, train_accuracy: 0.6797, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-13:41:02 Iters: 747900/[17], loss: 3.0951, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-13:43:05 Iters: 748000/[17], loss: 3.2954, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-13:45:08 Iters: 748100/[17], loss: 2.4153, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-13:47:11 Iters: 748200/[17], loss: 2.9510, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-13:49:14 Iters: 748300/[17], loss: 2.4933, train_accuracy: 0.6562, time: 1.23 s/iter, 
learning rate: 5.000000000000002e-06 -20220711-13:51:17 Iters: 748400/[17], loss: 2.6721, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-13:53:20 Iters: 748500/[17], loss: 2.7967, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-13:55:23 Iters: 748600/[17], loss: 2.9556, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-13:57:27 Iters: 748700/[17], loss: 2.3341, train_accuracy: 0.6953, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-13:59:30 Iters: 748800/[17], loss: 2.4444, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-14:01:33 Iters: 748900/[17], loss: 2.9177, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-14:03:36 Iters: 749000/[17], loss: 2.6252, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-14:05:39 Iters: 749100/[17], loss: 2.1245, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-14:07:42 Iters: 749200/[17], loss: 2.7271, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-14:09:46 Iters: 749300/[17], loss: 2.3756, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-14:11:49 Iters: 749400/[17], loss: 2.5979, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-14:13:52 Iters: 749500/[17], loss: 2.0201, train_accuracy: 0.7344, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-14:15:55 Iters: 749600/[17], loss: 2.5731, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-14:17:58 Iters: 749700/[17], loss: 2.4154, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-14:20:01 Iters: 749800/[17], loss: 2.3637, train_accuracy: 0.6641, time: 1.23 s/iter, 
learning rate: 5.000000000000002e-06 -20220711-14:22:04 Iters: 749900/[17], loss: 2.6283, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-14:24:08 Iters: 750000/[17], loss: 2.7085, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-14:24:08 Saving checkpoint: 750000 -20220711-14:25:26 LFW Ave Accuracy: 99.7000 -20220711-14:26:43 AgeDB-30 Ave Accuracy: 97.3000 -20220711-14:28:12 CFP-FP Ave Accuracy: 95.3143 -20220711-14:28:12 Current Best Accuracy: LFW: 99.7333 in iters: 740000, AgeDB-30: 97.4500 in iters: 740000 and CFP-FP: 95.4143 in iters: 740000 -20220711-14:30:15 Iters: 750100/[17], loss: 2.1122, train_accuracy: 0.7109, time: 3.67 s/iter, learning rate: 5.000000000000002e-06 -20220711-14:32:18 Iters: 750200/[17], loss: 2.7060, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-14:34:21 Iters: 750300/[17], loss: 2.8239, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-14:36:24 Iters: 750400/[17], loss: 2.6361, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-14:38:27 Iters: 750500/[17], loss: 2.0563, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-14:40:31 Iters: 750600/[17], loss: 1.9620, train_accuracy: 0.7188, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-14:42:34 Iters: 750700/[17], loss: 2.4592, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-14:44:37 Iters: 750800/[17], loss: 2.1366, train_accuracy: 0.6797, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-14:46:40 Iters: 750900/[17], loss: 2.4395, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-14:48:43 Iters: 751000/[17], loss: 2.7008, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-14:50:46 Iters: 
751100/[17], loss: 2.8965, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-14:52:49 Iters: 751200/[17], loss: 2.9480, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-14:54:52 Iters: 751300/[17], loss: 3.4041, train_accuracy: 0.5156, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-14:56:55 Iters: 751400/[17], loss: 3.1158, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-14:58:58 Iters: 751500/[17], loss: 3.4827, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-15:01:01 Iters: 751600/[17], loss: 2.4723, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-15:03:04 Iters: 751700/[17], loss: 2.8412, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-15:05:07 Iters: 751800/[17], loss: 2.6203, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-15:07:10 Iters: 751900/[17], loss: 2.9273, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-15:09:13 Iters: 752000/[17], loss: 2.8107, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-15:11:16 Iters: 752100/[17], loss: 3.2888, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-15:13:19 Iters: 752200/[17], loss: 2.2701, train_accuracy: 0.6797, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-15:15:22 Iters: 752300/[17], loss: 2.2060, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-15:17:25 Iters: 752400/[17], loss: 3.0411, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-15:19:28 Iters: 752500/[17], loss: 2.7500, train_accuracy: 0.6641, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-15:21:31 Iters: 
752600/[17], loss: 3.6339, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-15:23:34 Iters: 752700/[17], loss: 2.2376, train_accuracy: 0.7188, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-15:25:37 Iters: 752800/[17], loss: 3.0094, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-15:27:40 Iters: 752900/[17], loss: 3.0834, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-15:29:43 Iters: 753000/[17], loss: 2.7433, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-15:31:46 Iters: 753100/[17], loss: 2.4460, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-15:33:49 Iters: 753200/[17], loss: 2.6229, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-15:35:52 Iters: 753300/[17], loss: 2.8301, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-15:37:55 Iters: 753400/[17], loss: 2.2365, train_accuracy: 0.6719, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-15:39:58 Iters: 753500/[17], loss: 2.1757, train_accuracy: 0.6875, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-15:42:01 Iters: 753600/[17], loss: 2.2408, train_accuracy: 0.6797, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-15:44:04 Iters: 753700/[17], loss: 2.9418, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-15:46:07 Iters: 753800/[17], loss: 2.6153, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-15:48:10 Iters: 753900/[17], loss: 2.4580, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-15:50:13 Iters: 754000/[17], loss: 1.9752, train_accuracy: 0.7109, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-15:52:16 Iters: 
754100/[17], loss: 2.5511, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-15:54:19 Iters: 754200/[17], loss: 2.5353, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-15:56:22 Iters: 754300/[17], loss: 2.2230, train_accuracy: 0.6719, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-15:58:25 Iters: 754400/[17], loss: 2.3769, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-16:00:28 Iters: 754500/[17], loss: 2.8392, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-16:02:31 Iters: 754600/[17], loss: 2.6214, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-16:04:34 Iters: 754700/[17], loss: 2.5856, train_accuracy: 0.6797, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-16:06:37 Iters: 754800/[17], loss: 2.6406, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-16:08:40 Iters: 754900/[17], loss: 2.9253, train_accuracy: 0.6641, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-16:10:43 Iters: 755000/[17], loss: 2.5476, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-16:12:46 Iters: 755100/[17], loss: 3.1928, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-16:14:49 Iters: 755200/[17], loss: 2.5428, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-16:16:52 Iters: 755300/[17], loss: 2.9424, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-16:18:56 Iters: 755400/[17], loss: 2.6536, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-16:20:59 Iters: 755500/[17], loss: 2.4412, train_accuracy: 0.6641, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-16:23:02 Iters: 
755600/[17], loss: 2.7395, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-16:25:04 Iters: 755700/[17], loss: 2.5901, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-16:27:07 Iters: 755800/[17], loss: 2.0102, train_accuracy: 0.6797, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-16:29:10 Iters: 755900/[17], loss: 2.4991, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-16:31:13 Iters: 756000/[17], loss: 2.5750, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-16:33:16 Iters: 756100/[17], loss: 2.6408, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-16:35:19 Iters: 756200/[17], loss: 2.7091, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-16:37:22 Iters: 756300/[17], loss: 2.7861, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-16:39:25 Iters: 756400/[17], loss: 2.4819, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-16:41:28 Iters: 756500/[17], loss: 2.2446, train_accuracy: 0.6641, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-16:43:31 Iters: 756600/[17], loss: 2.5601, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-16:45:34 Iters: 756700/[17], loss: 2.4097, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-16:47:37 Iters: 756800/[17], loss: 2.9232, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-16:49:40 Iters: 756900/[17], loss: 3.3427, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-16:51:43 Iters: 757000/[17], loss: 2.2374, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-16:53:46 Iters: 
757100/[17], loss: 2.8841, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-16:55:49 Iters: 757200/[17], loss: 3.1921, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-16:57:52 Iters: 757300/[17], loss: 3.1882, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-16:59:55 Iters: 757400/[17], loss: 2.4288, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-17:01:58 Iters: 757500/[17], loss: 3.3814, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-17:04:01 Iters: 757600/[17], loss: 2.0481, train_accuracy: 0.6797, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-17:06:04 Iters: 757700/[17], loss: 2.5485, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-17:08:07 Iters: 757800/[17], loss: 2.1709, train_accuracy: 0.6719, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-17:10:09 Iters: 757900/[17], loss: 2.7287, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-17:12:13 Iters: 758000/[17], loss: 2.7542, train_accuracy: 0.5234, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-17:14:16 Iters: 758100/[17], loss: 2.7783, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-17:16:19 Iters: 758200/[17], loss: 2.6402, train_accuracy: 0.6719, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-17:18:22 Iters: 758300/[17], loss: 3.2044, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-17:20:26 Iters: 758400/[17], loss: 3.4020, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-17:22:29 Iters: 758500/[17], loss: 2.7434, train_accuracy: 0.6406, time: 1.24 s/iter, learning rate: 5.000000000000002e-06 -20220711-17:24:33 Iters: 
758600/[17], loss: 2.4020, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-17:26:36 Iters: 758700/[17], loss: 2.8839, train_accuracy: 0.5156, time: 1.24 s/iter, learning rate: 5.000000000000002e-06 -20220711-17:28:39 Iters: 758800/[17], loss: 2.6510, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-17:30:43 Iters: 758900/[17], loss: 2.3704, train_accuracy: 0.6953, time: 1.24 s/iter, learning rate: 5.000000000000002e-06 -20220711-17:32:47 Iters: 759000/[17], loss: 2.8913, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-17:34:50 Iters: 759100/[17], loss: 2.4601, train_accuracy: 0.6250, time: 1.24 s/iter, learning rate: 5.000000000000002e-06 -20220711-17:36:54 Iters: 759200/[17], loss: 2.5718, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-17:38:57 Iters: 759300/[17], loss: 2.6228, train_accuracy: 0.6641, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-17:41:01 Iters: 759400/[17], loss: 2.7397, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-17:43:04 Iters: 759500/[17], loss: 2.6314, train_accuracy: 0.5625, time: 1.24 s/iter, learning rate: 5.000000000000002e-06 -20220711-17:45:07 Iters: 759600/[17], loss: 2.5986, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-17:47:11 Iters: 759700/[17], loss: 2.9701, train_accuracy: 0.5469, time: 1.24 s/iter, learning rate: 5.000000000000002e-06 -20220711-17:49:15 Iters: 759800/[17], loss: 2.2963, train_accuracy: 0.6797, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-17:51:18 Iters: 759900/[17], loss: 2.2383, train_accuracy: 0.6719, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-17:53:22 Iters: 760000/[17], loss: 2.4210, train_accuracy: 0.6641, time: 1.24 s/iter, learning rate: 5.000000000000002e-06 -20220711-17:53:22 Saving 
checkpoint: 760000 -20220711-17:54:38 LFW Ave Accuracy: 99.6666 -20220711-17:55:55 AgeDB-30 Ave Accuracy: 97.3167 -20220711-17:57:25 CFP-FP Ave Accuracy: 95.4000 -20220711-17:57:25 Current Best Accuracy: LFW: 99.7333 in iters: 740000, AgeDB-30: 97.4500 in iters: 740000 and CFP-FP: 95.4143 in iters: 740000 -20220711-17:59:28 Iters: 760100/[17], loss: 2.2367, train_accuracy: 0.6875, time: 3.66 s/iter, learning rate: 5.000000000000002e-06 -20220711-18:01:31 Iters: 760200/[17], loss: 3.1496, train_accuracy: 0.6016, time: 1.24 s/iter, learning rate: 5.000000000000002e-06 -20220711-18:03:35 Iters: 760300/[17], loss: 2.1695, train_accuracy: 0.6953, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-18:05:38 Iters: 760400/[17], loss: 3.1232, train_accuracy: 0.6172, time: 1.24 s/iter, learning rate: 5.000000000000002e-06 -20220711-18:07:42 Iters: 760500/[17], loss: 3.2416, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-18:09:45 Iters: 760600/[17], loss: 2.4285, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-18:11:49 Iters: 760700/[17], loss: 2.7107, train_accuracy: 0.6250, time: 1.24 s/iter, learning rate: 5.000000000000002e-06 -20220711-18:13:52 Iters: 760800/[17], loss: 2.4969, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-18:15:55 Iters: 760900/[17], loss: 3.1641, train_accuracy: 0.5391, time: 1.24 s/iter, learning rate: 5.000000000000002e-06 -20220711-18:17:59 Iters: 761000/[17], loss: 1.9997, train_accuracy: 0.6484, time: 1.24 s/iter, learning rate: 5.000000000000002e-06 -20220711-18:20:03 Iters: 761100/[17], loss: 2.7646, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-18:22:06 Iters: 761200/[17], loss: 2.3304, train_accuracy: 0.6641, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-18:24:09 Iters: 761300/[17], loss: 2.1461, train_accuracy: 0.6484, time: 1.23 s/iter, 
learning rate: 5.000000000000002e-06 -20220711-18:26:13 Iters: 761400/[17], loss: 2.1850, train_accuracy: 0.6641, time: 1.24 s/iter, learning rate: 5.000000000000002e-06 -20220711-18:28:16 Iters: 761500/[17], loss: 2.8477, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-18:30:20 Iters: 761600/[17], loss: 2.7526, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-18:32:23 Iters: 761700/[17], loss: 2.4264, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-18:34:26 Iters: 761800/[17], loss: 2.9456, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-18:36:30 Iters: 761900/[17], loss: 2.8521, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-18:38:33 Iters: 762000/[17], loss: 2.6268, train_accuracy: 0.6875, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-18:40:36 Iters: 762100/[17], loss: 2.7986, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-18:42:39 Iters: 762200/[17], loss: 2.7097, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-18:44:42 Iters: 762300/[17], loss: 2.3206, train_accuracy: 0.6797, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-18:46:46 Iters: 762400/[17], loss: 2.4940, train_accuracy: 0.6953, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-18:48:49 Iters: 762500/[17], loss: 3.5917, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-18:50:52 Iters: 762600/[17], loss: 2.7724, train_accuracy: 0.6719, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-18:52:55 Iters: 762700/[17], loss: 2.9039, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-18:54:59 Iters: 762800/[17], loss: 2.9517, train_accuracy: 0.6250, time: 1.23 s/iter, 
learning rate: 5.000000000000002e-06 -20220711-18:57:02 Iters: 762900/[17], loss: 2.4485, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-18:59:05 Iters: 763000/[17], loss: 2.4441, train_accuracy: 0.6797, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-19:01:08 Iters: 763100/[17], loss: 2.8360, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-19:03:11 Iters: 763200/[17], loss: 2.9196, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-19:05:14 Iters: 763300/[17], loss: 2.5783, train_accuracy: 0.6797, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-19:07:17 Iters: 763400/[17], loss: 2.8014, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-19:09:21 Iters: 763500/[17], loss: 2.9450, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-19:11:24 Iters: 763600/[17], loss: 2.0623, train_accuracy: 0.7031, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-19:13:27 Iters: 763700/[17], loss: 2.6637, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-19:15:30 Iters: 763800/[17], loss: 3.2063, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-19:17:33 Iters: 763900/[17], loss: 2.2711, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-19:19:36 Iters: 764000/[17], loss: 2.2645, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-19:21:39 Iters: 764100/[17], loss: 3.0605, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-19:23:42 Iters: 764200/[17], loss: 2.7033, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-19:25:45 Iters: 764300/[17], loss: 2.9293, train_accuracy: 0.6172, time: 1.23 s/iter, 
learning rate: 5.000000000000002e-06 -20220711-19:27:48 Iters: 764400/[17], loss: 3.0285, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-19:29:51 Iters: 764500/[17], loss: 2.6620, train_accuracy: 0.6797, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-19:31:54 Iters: 764600/[17], loss: 2.3282, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-19:33:57 Iters: 764700/[17], loss: 2.9650, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-19:36:00 Iters: 764800/[17], loss: 2.6716, train_accuracy: 0.6719, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-19:38:03 Iters: 764900/[17], loss: 2.8096, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-19:40:06 Iters: 765000/[17], loss: 1.8379, train_accuracy: 0.7109, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-19:42:09 Iters: 765100/[17], loss: 2.5645, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-19:44:12 Iters: 765200/[17], loss: 2.2856, train_accuracy: 0.6797, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-19:46:15 Iters: 765300/[17], loss: 3.0514, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-19:48:18 Iters: 765400/[17], loss: 2.5883, train_accuracy: 0.6719, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-19:50:21 Iters: 765500/[17], loss: 2.5532, train_accuracy: 0.6719, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-19:52:24 Iters: 765600/[17], loss: 2.6848, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-19:54:27 Iters: 765700/[17], loss: 2.2645, train_accuracy: 0.7031, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-19:56:30 Iters: 765800/[17], loss: 2.8375, train_accuracy: 0.6250, time: 1.23 s/iter, 
learning rate: 5.000000000000002e-06 -20220711-19:58:33 Iters: 765900/[17], loss: 3.2789, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-20:00:36 Iters: 766000/[17], loss: 2.3703, train_accuracy: 0.6641, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-20:02:40 Iters: 766100/[17], loss: 3.2141, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-20:04:43 Iters: 766200/[17], loss: 2.3976, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-20:06:46 Iters: 766300/[17], loss: 2.1299, train_accuracy: 0.7344, time: 1.24 s/iter, learning rate: 5.000000000000002e-06 -20220711-20:08:50 Iters: 766400/[17], loss: 2.6684, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-20:10:53 Iters: 766500/[17], loss: 2.5613, train_accuracy: 0.6641, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-20:12:56 Iters: 766600/[17], loss: 2.5739, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-20:14:59 Iters: 766700/[17], loss: 2.8895, train_accuracy: 0.5391, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-20:17:03 Iters: 766800/[17], loss: 2.5511, train_accuracy: 0.6719, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-20:19:06 Iters: 766900/[17], loss: 2.1315, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-20:21:09 Iters: 767000/[17], loss: 2.9829, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-20:23:13 Iters: 767100/[17], loss: 2.3842, train_accuracy: 0.6328, time: 1.24 s/iter, learning rate: 5.000000000000002e-06 -20220711-20:25:16 Iters: 767200/[17], loss: 2.8382, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-20:27:19 Iters: 767300/[17], loss: 3.4322, train_accuracy: 0.5781, time: 1.24 s/iter, 
learning rate: 5.000000000000002e-06 -20220711-20:29:23 Iters: 767400/[17], loss: 2.8242, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-20:31:26 Iters: 767500/[17], loss: 2.6041, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-20:33:29 Iters: 767600/[17], loss: 2.3825, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-20:35:33 Iters: 767700/[17], loss: 3.1523, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-20:37:36 Iters: 767800/[17], loss: 3.3168, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-20:39:39 Iters: 767900/[17], loss: 2.5309, train_accuracy: 0.6250, time: 1.24 s/iter, learning rate: 5.000000000000002e-06 -20220711-20:41:43 Iters: 768000/[17], loss: 2.4738, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-20:43:46 Iters: 768100/[17], loss: 2.6851, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-20:45:49 Iters: 768200/[17], loss: 3.1238, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-20:47:52 Iters: 768300/[17], loss: 2.8168, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-20:49:55 Iters: 768400/[17], loss: 2.2489, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-20:51:59 Iters: 768500/[17], loss: 2.5334, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-20:54:03 Iters: 768600/[17], loss: 2.9289, train_accuracy: 0.5469, time: 1.24 s/iter, learning rate: 5.000000000000002e-06 -20220711-20:56:06 Iters: 768700/[17], loss: 3.2173, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-20:58:10 Iters: 768800/[17], loss: 2.6229, train_accuracy: 0.6328, time: 1.24 s/iter, 
learning rate: 5.000000000000002e-06 -20220711-21:00:13 Iters: 768900/[17], loss: 2.5278, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-21:02:16 Iters: 769000/[17], loss: 2.8458, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-21:04:20 Iters: 769100/[17], loss: 2.8865, train_accuracy: 0.5703, time: 1.24 s/iter, learning rate: 5.000000000000002e-06 -20220711-21:06:23 Iters: 769200/[17], loss: 1.8985, train_accuracy: 0.7031, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-21:08:27 Iters: 769300/[17], loss: 2.6479, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-21:10:30 Iters: 769400/[17], loss: 2.8645, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-21:12:33 Iters: 769500/[17], loss: 2.6036, train_accuracy: 0.6172, time: 1.24 s/iter, learning rate: 5.000000000000002e-06 -20220711-21:14:36 Iters: 769600/[17], loss: 2.9669, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-21:16:40 Iters: 769700/[17], loss: 2.8332, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-21:18:43 Iters: 769800/[17], loss: 2.5719, train_accuracy: 0.5938, time: 1.24 s/iter, learning rate: 5.000000000000002e-06 -20220711-21:20:46 Iters: 769900/[17], loss: 2.8854, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-21:22:50 Iters: 770000/[17], loss: 2.7828, train_accuracy: 0.5938, time: 1.24 s/iter, learning rate: 5.000000000000002e-06 -20220711-21:22:50 Saving checkpoint: 770000 -20220711-21:24:09 LFW Ave Accuracy: 99.6833 -20220711-21:25:26 AgeDB-30 Ave Accuracy: 97.3833 -20220711-21:26:57 CFP-FP Ave Accuracy: 95.3714 -20220711-21:26:57 Current Best Accuracy: LFW: 99.7333 in iters: 740000, AgeDB-30: 97.4500 in iters: 740000 and CFP-FP: 95.4143 in iters: 740000 -20220711-21:29:00 Iters: 
770100/[17], loss: 2.4442, train_accuracy: 0.6406, time: 3.70 s/iter, learning rate: 5.000000000000002e-06 -20220711-21:31:03 Iters: 770200/[17], loss: 2.2874, train_accuracy: 0.7188, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-21:33:07 Iters: 770300/[17], loss: 2.6227, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-21:35:10 Iters: 770400/[17], loss: 2.8160, train_accuracy: 0.6719, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-21:37:13 Iters: 770500/[17], loss: 2.4745, train_accuracy: 0.6562, time: 1.24 s/iter, learning rate: 5.000000000000002e-06 -20220711-21:39:17 Iters: 770600/[17], loss: 2.6451, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-21:41:20 Iters: 770700/[17], loss: 2.7719, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-21:43:24 Iters: 770800/[17], loss: 2.7878, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-21:45:27 Iters: 770900/[17], loss: 2.7160, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-21:47:31 Iters: 771000/[17], loss: 2.9438, train_accuracy: 0.5703, time: 1.24 s/iter, learning rate: 5.000000000000002e-06 -20220711-21:49:34 Iters: 771100/[17], loss: 2.9120, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-21:51:37 Iters: 771200/[17], loss: 2.4275, train_accuracy: 0.6875, time: 1.24 s/iter, learning rate: 5.000000000000002e-06 -20220711-21:53:41 Iters: 771300/[17], loss: 2.7344, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-21:55:44 Iters: 771400/[17], loss: 2.3126, train_accuracy: 0.6250, time: 1.24 s/iter, learning rate: 5.000000000000002e-06 -20220711-21:57:47 Iters: 771500/[17], loss: 2.2763, train_accuracy: 0.6719, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-21:59:51 Iters: 
771600/[17], loss: 2.7522, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-22:01:54 Iters: 771700/[17], loss: 2.9951, train_accuracy: 0.6484, time: 1.24 s/iter, learning rate: 5.000000000000002e-06 -20220711-22:03:58 Iters: 771800/[17], loss: 3.0275, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-22:06:01 Iters: 771900/[17], loss: 2.6267, train_accuracy: 0.6094, time: 1.24 s/iter, learning rate: 5.000000000000002e-06 -20220711-22:08:04 Iters: 772000/[17], loss: 2.1920, train_accuracy: 0.6641, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-22:10:07 Iters: 772100/[17], loss: 3.1847, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-22:12:11 Iters: 772200/[17], loss: 2.2697, train_accuracy: 0.7031, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-22:14:13 Iters: 772300/[17], loss: 2.5992, train_accuracy: 0.6641, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-22:16:17 Iters: 772400/[17], loss: 2.7836, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-22:18:20 Iters: 772500/[17], loss: 2.8318, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-22:20:23 Iters: 772600/[17], loss: 2.5738, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-22:22:27 Iters: 772700/[17], loss: 3.3550, train_accuracy: 0.6172, time: 1.24 s/iter, learning rate: 5.000000000000002e-06 -20220711-22:24:30 Iters: 772800/[17], loss: 2.6734, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-22:26:33 Iters: 772900/[17], loss: 2.2645, train_accuracy: 0.6328, time: 1.24 s/iter, learning rate: 5.000000000000002e-06 -20220711-22:28:37 Iters: 773000/[17], loss: 2.5827, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-22:30:40 Iters: 
773100/[17], loss: 2.7672, train_accuracy: 0.6094, time: 1.24 s/iter, learning rate: 5.000000000000002e-06 -20220711-22:32:43 Iters: 773200/[17], loss: 2.2053, train_accuracy: 0.6797, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-22:34:47 Iters: 773300/[17], loss: 2.9084, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 5.000000000000002e-06 -20220711-22:35:23 Train Epoch: 18/18 ... -20220711-22:36:50 Iters: 773400/[18], loss: 2.4624, train_accuracy: 0.6641, time: 0.87 s/iter, learning rate: 5.0000000000000016e-05 -20220711-22:38:54 Iters: 773500/[18], loss: 3.0891, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220711-22:40:57 Iters: 773600/[18], loss: 2.5619, train_accuracy: 0.6641, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220711-22:43:01 Iters: 773700/[18], loss: 2.1583, train_accuracy: 0.6641, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220711-22:45:05 Iters: 773800/[18], loss: 3.1960, train_accuracy: 0.5703, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220711-22:47:08 Iters: 773900/[18], loss: 3.1922, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220711-22:49:11 Iters: 774000/[18], loss: 2.6370, train_accuracy: 0.6094, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220711-22:51:15 Iters: 774100/[18], loss: 2.7603, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220711-22:53:19 Iters: 774200/[18], loss: 3.0467, train_accuracy: 0.5547, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220711-22:55:22 Iters: 774300/[18], loss: 2.8392, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220711-22:57:26 Iters: 774400/[18], loss: 2.7453, train_accuracy: 0.6328, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220711-22:59:29 Iters: 774500/[18], loss: 2.8572, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 
5.0000000000000016e-05 -20220711-23:01:32 Iters: 774600/[18], loss: 2.7591, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220711-23:03:35 Iters: 774700/[18], loss: 3.1308, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220711-23:05:38 Iters: 774800/[18], loss: 2.7602, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220711-23:07:41 Iters: 774900/[18], loss: 2.3658, train_accuracy: 0.7031, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220711-23:09:44 Iters: 775000/[18], loss: 2.3242, train_accuracy: 0.7031, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220711-23:11:47 Iters: 775100/[18], loss: 2.8495, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220711-23:13:50 Iters: 775200/[18], loss: 2.4857, train_accuracy: 0.6719, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220711-23:15:53 Iters: 775300/[18], loss: 3.4163, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220711-23:17:57 Iters: 775400/[18], loss: 2.6981, train_accuracy: 0.6172, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220711-23:20:00 Iters: 775500/[18], loss: 2.6682, train_accuracy: 0.6172, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220711-23:22:04 Iters: 775600/[18], loss: 2.9609, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220711-23:24:07 Iters: 775700/[18], loss: 2.6974, train_accuracy: 0.5938, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220711-23:26:11 Iters: 775800/[18], loss: 2.7654, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220711-23:28:14 Iters: 775900/[18], loss: 2.7827, train_accuracy: 0.6406, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220711-23:30:18 Iters: 776000/[18], loss: 2.4830, train_accuracy: 0.7031, time: 1.23 s/iter, 
learning rate: 5.0000000000000016e-05 -20220711-23:32:21 Iters: 776100/[18], loss: 3.0311, train_accuracy: 0.5859, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220711-23:34:24 Iters: 776200/[18], loss: 2.3848, train_accuracy: 0.6953, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220711-23:36:28 Iters: 776300/[18], loss: 2.2990, train_accuracy: 0.6484, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220711-23:38:32 Iters: 776400/[18], loss: 2.4043, train_accuracy: 0.6719, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220711-23:40:35 Iters: 776500/[18], loss: 2.7383, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220711-23:42:39 Iters: 776600/[18], loss: 3.3184, train_accuracy: 0.5703, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220711-23:44:42 Iters: 776700/[18], loss: 2.6377, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220711-23:46:46 Iters: 776800/[18], loss: 2.6630, train_accuracy: 0.6406, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220711-23:48:49 Iters: 776900/[18], loss: 2.1062, train_accuracy: 0.6797, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220711-23:50:53 Iters: 777000/[18], loss: 2.4695, train_accuracy: 0.6484, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220711-23:52:56 Iters: 777100/[18], loss: 2.4223, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220711-23:54:59 Iters: 777200/[18], loss: 2.7963, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220711-23:57:03 Iters: 777300/[18], loss: 2.2598, train_accuracy: 0.6719, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220711-23:59:06 Iters: 777400/[18], loss: 2.5295, train_accuracy: 0.6719, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-00:01:09 Iters: 777500/[18], loss: 2.8030, train_accuracy: 0.6406, time: 
1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-00:03:13 Iters: 777600/[18], loss: 2.1964, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-00:05:16 Iters: 777700/[18], loss: 3.2218, train_accuracy: 0.5547, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-00:07:19 Iters: 777800/[18], loss: 2.4784, train_accuracy: 0.6797, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-00:09:23 Iters: 777900/[18], loss: 2.8614, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-00:11:26 Iters: 778000/[18], loss: 2.7092, train_accuracy: 0.6250, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-00:13:30 Iters: 778100/[18], loss: 2.4625, train_accuracy: 0.6797, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-00:15:33 Iters: 778200/[18], loss: 2.7424, train_accuracy: 0.6406, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-00:17:37 Iters: 778300/[18], loss: 2.4594, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-00:19:40 Iters: 778400/[18], loss: 2.8588, train_accuracy: 0.6484, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-00:21:44 Iters: 778500/[18], loss: 2.3978, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-00:23:47 Iters: 778600/[18], loss: 2.6034, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-00:25:51 Iters: 778700/[18], loss: 2.4374, train_accuracy: 0.6641, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-00:27:55 Iters: 778800/[18], loss: 2.5439, train_accuracy: 0.6562, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-00:29:58 Iters: 778900/[18], loss: 2.6517, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-00:32:02 Iters: 779000/[18], loss: 2.4658, train_accuracy: 
0.6719, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-00:34:05 Iters: 779100/[18], loss: 2.5798, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-00:36:08 Iters: 779200/[18], loss: 2.2513, train_accuracy: 0.7109, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-00:38:12 Iters: 779300/[18], loss: 2.8330, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-00:40:16 Iters: 779400/[18], loss: 2.7288, train_accuracy: 0.6172, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-00:42:19 Iters: 779500/[18], loss: 2.6524, train_accuracy: 0.6719, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-00:44:23 Iters: 779600/[18], loss: 3.2246, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-00:46:27 Iters: 779700/[18], loss: 2.7191, train_accuracy: 0.5703, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-00:48:30 Iters: 779800/[18], loss: 2.7039, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-00:50:34 Iters: 779900/[18], loss: 2.3702, train_accuracy: 0.6094, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-00:52:37 Iters: 780000/[18], loss: 2.6173, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-00:52:37 Saving checkpoint: 780000 -20220712-00:53:54 LFW Ave Accuracy: 99.7000 -20220712-00:55:10 AgeDB-30 Ave Accuracy: 97.2500 -20220712-00:56:37 CFP-FP Ave Accuracy: 95.5286 -20220712-00:56:37 Current Best Accuracy: LFW: 99.7333 in iters: 740000, AgeDB-30: 97.4500 in iters: 740000 and CFP-FP: 95.5286 in iters: 780000 -20220712-00:58:40 Iters: 780100/[18], loss: 2.6303, train_accuracy: 0.5625, time: 3.63 s/iter, learning rate: 5.0000000000000016e-05 -20220712-01:00:43 Iters: 780200/[18], loss: 2.1093, train_accuracy: 0.6953, time: 1.23 s/iter, learning rate: 
5.0000000000000016e-05 -20220712-01:02:47 Iters: 780300/[18], loss: 2.3617, train_accuracy: 0.6250, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-01:04:50 Iters: 780400/[18], loss: 3.1781, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-01:06:54 Iters: 780500/[18], loss: 2.9701, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-01:08:57 Iters: 780600/[18], loss: 2.7265, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-01:11:00 Iters: 780700/[18], loss: 2.8163, train_accuracy: 0.6641, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-01:13:03 Iters: 780800/[18], loss: 2.6920, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-01:15:07 Iters: 780900/[18], loss: 2.4192, train_accuracy: 0.6641, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-01:17:10 Iters: 781000/[18], loss: 2.5936, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-01:19:13 Iters: 781100/[18], loss: 2.8660, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-01:21:17 Iters: 781200/[18], loss: 2.4267, train_accuracy: 0.6406, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-01:23:20 Iters: 781300/[18], loss: 2.4566, train_accuracy: 0.7109, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-01:25:24 Iters: 781400/[18], loss: 2.5033, train_accuracy: 0.6641, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-01:27:27 Iters: 781500/[18], loss: 2.5797, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-01:29:31 Iters: 781600/[18], loss: 2.4838, train_accuracy: 0.6328, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-01:31:34 Iters: 781700/[18], loss: 2.2925, train_accuracy: 0.6953, time: 1.24 s/iter, 
learning rate: 5.0000000000000016e-05 -20220712-01:33:38 Iters: 781800/[18], loss: 2.4713, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-01:35:41 Iters: 781900/[18], loss: 2.4260, train_accuracy: 0.6328, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-01:37:45 Iters: 782000/[18], loss: 2.7595, train_accuracy: 0.6328, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-01:39:48 Iters: 782100/[18], loss: 2.8600, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-01:41:52 Iters: 782200/[18], loss: 2.5178, train_accuracy: 0.6641, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-01:43:55 Iters: 782300/[18], loss: 2.2054, train_accuracy: 0.6406, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-01:45:58 Iters: 782400/[18], loss: 2.2853, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-01:48:02 Iters: 782500/[18], loss: 2.6852, train_accuracy: 0.6562, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-01:50:05 Iters: 782600/[18], loss: 2.5572, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-01:52:09 Iters: 782700/[18], loss: 2.7281, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-01:54:12 Iters: 782800/[18], loss: 2.5693, train_accuracy: 0.6406, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-01:56:16 Iters: 782900/[18], loss: 2.6422, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-01:58:19 Iters: 783000/[18], loss: 2.7501, train_accuracy: 0.6641, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-02:00:23 Iters: 783100/[18], loss: 2.8480, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-02:02:26 Iters: 783200/[18], loss: 2.4204, train_accuracy: 0.6328, time: 
1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-02:04:29 Iters: 783300/[18], loss: 2.3184, train_accuracy: 0.7109, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-02:06:33 Iters: 783400/[18], loss: 3.0337, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-02:08:36 Iters: 783500/[18], loss: 2.3943, train_accuracy: 0.6562, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-02:10:39 Iters: 783600/[18], loss: 2.2098, train_accuracy: 0.6875, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-02:12:43 Iters: 783700/[18], loss: 2.2950, train_accuracy: 0.6797, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-02:14:46 Iters: 783800/[18], loss: 2.7274, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-02:16:49 Iters: 783900/[18], loss: 2.5131, train_accuracy: 0.6719, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-02:18:53 Iters: 784000/[18], loss: 2.9701, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-02:20:56 Iters: 784100/[18], loss: 2.6690, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-02:22:59 Iters: 784200/[18], loss: 2.6196, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-02:25:03 Iters: 784300/[18], loss: 3.1644, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-02:27:06 Iters: 784400/[18], loss: 2.1949, train_accuracy: 0.6797, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-02:29:10 Iters: 784500/[18], loss: 2.1968, train_accuracy: 0.6875, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-02:31:14 Iters: 784600/[18], loss: 3.1035, train_accuracy: 0.5703, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-02:33:17 Iters: 784700/[18], loss: 2.2071, train_accuracy: 
0.6641, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-02:35:20 Iters: 784800/[18], loss: 2.7197, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-02:37:24 Iters: 784900/[18], loss: 2.5290, train_accuracy: 0.6641, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-02:39:27 Iters: 785000/[18], loss: 2.0251, train_accuracy: 0.7031, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-02:41:31 Iters: 785100/[18], loss: 2.2947, train_accuracy: 0.6484, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-02:43:34 Iters: 785200/[18], loss: 2.7480, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-02:45:38 Iters: 785300/[18], loss: 2.8163, train_accuracy: 0.6328, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-02:47:41 Iters: 785400/[18], loss: 3.0999, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-02:49:44 Iters: 785500/[18], loss: 2.4729, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-02:51:48 Iters: 785600/[18], loss: 2.4261, train_accuracy: 0.7266, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-02:53:51 Iters: 785700/[18], loss: 2.6606, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-02:55:55 Iters: 785800/[18], loss: 2.4921, train_accuracy: 0.6797, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-02:57:58 Iters: 785900/[18], loss: 2.0439, train_accuracy: 0.7109, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-03:00:02 Iters: 786000/[18], loss: 2.7710, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-03:02:05 Iters: 786100/[18], loss: 2.7090, train_accuracy: 0.6016, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-03:04:09 Iters: 786200/[18], loss: 2.2757, 
train_accuracy: 0.6719, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-03:06:12 Iters: 786300/[18], loss: 2.2759, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-03:08:16 Iters: 786400/[18], loss: 2.8967, train_accuracy: 0.6328, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-03:10:20 Iters: 786500/[18], loss: 2.5152, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-03:12:23 Iters: 786600/[18], loss: 2.3426, train_accuracy: 0.6719, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-03:14:26 Iters: 786700/[18], loss: 3.1434, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-03:16:30 Iters: 786800/[18], loss: 2.6500, train_accuracy: 0.6016, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-03:18:33 Iters: 786900/[18], loss: 2.2805, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-03:20:37 Iters: 787000/[18], loss: 2.8092, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-03:22:40 Iters: 787100/[18], loss: 2.7336, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-03:24:43 Iters: 787200/[18], loss: 2.6309, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-03:26:47 Iters: 787300/[18], loss: 2.6598, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-03:28:50 Iters: 787400/[18], loss: 2.9972, train_accuracy: 0.6641, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-03:30:54 Iters: 787500/[18], loss: 3.1289, train_accuracy: 0.6094, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-03:32:57 Iters: 787600/[18], loss: 3.1276, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-03:35:00 Iters: 787700/[18], 
loss: 2.0934, train_accuracy: 0.6953, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-03:37:04 Iters: 787800/[18], loss: 3.2553, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-03:39:07 Iters: 787900/[18], loss: 2.0900, train_accuracy: 0.7344, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-03:41:11 Iters: 788000/[18], loss: 2.2067, train_accuracy: 0.6875, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-03:43:14 Iters: 788100/[18], loss: 2.9988, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-03:45:17 Iters: 788200/[18], loss: 2.1425, train_accuracy: 0.6641, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-03:47:20 Iters: 788300/[18], loss: 2.2018, train_accuracy: 0.7109, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-03:49:23 Iters: 788400/[18], loss: 3.3070, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-03:51:27 Iters: 788500/[18], loss: 3.5315, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-03:53:30 Iters: 788600/[18], loss: 2.3985, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-03:55:33 Iters: 788700/[18], loss: 2.6088, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-03:57:37 Iters: 788800/[18], loss: 2.5145, train_accuracy: 0.6641, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-03:59:40 Iters: 788900/[18], loss: 2.3261, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-04:01:43 Iters: 789000/[18], loss: 2.2863, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-04:03:47 Iters: 789100/[18], loss: 2.3696, train_accuracy: 0.7344, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-04:05:50 Iters: 
789200/[18], loss: 3.4801, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-04:07:53 Iters: 789300/[18], loss: 2.3215, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-04:09:57 Iters: 789400/[18], loss: 2.9293, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-04:12:00 Iters: 789500/[18], loss: 2.8384, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-04:14:04 Iters: 789600/[18], loss: 2.5933, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-04:16:07 Iters: 789700/[18], loss: 2.2876, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-04:18:10 Iters: 789800/[18], loss: 2.3056, train_accuracy: 0.6875, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-04:20:14 Iters: 789900/[18], loss: 2.7682, train_accuracy: 0.6875, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-04:22:17 Iters: 790000/[18], loss: 1.9631, train_accuracy: 0.7500, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-04:22:17 Saving checkpoint: 790000 -20220712-04:23:34 LFW Ave Accuracy: 99.6833 -20220712-04:24:49 AgeDB-30 Ave Accuracy: 97.3000 -20220712-04:26:16 CFP-FP Ave Accuracy: 95.5429 -20220712-04:26:16 Current Best Accuracy: LFW: 99.7333 in iters: 740000, AgeDB-30: 97.4500 in iters: 740000 and CFP-FP: 95.5429 in iters: 790000 -20220712-04:28:19 Iters: 790100/[18], loss: 2.3770, train_accuracy: 0.6875, time: 3.62 s/iter, learning rate: 5.0000000000000016e-05 -20220712-04:30:22 Iters: 790200/[18], loss: 2.1778, train_accuracy: 0.6719, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-04:32:25 Iters: 790300/[18], loss: 2.6280, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-04:34:29 Iters: 790400/[18], loss: 2.5379, train_accuracy: 0.6719, time: 
1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-04:36:32 Iters: 790500/[18], loss: 2.3643, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-04:38:35 Iters: 790600/[18], loss: 3.7178, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-04:40:38 Iters: 790700/[18], loss: 2.8458, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-04:42:42 Iters: 790800/[18], loss: 2.8736, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-04:44:45 Iters: 790900/[18], loss: 2.8658, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-04:46:48 Iters: 791000/[18], loss: 2.6625, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-04:48:52 Iters: 791100/[18], loss: 2.1132, train_accuracy: 0.6719, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-04:50:55 Iters: 791200/[18], loss: 2.8153, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-04:52:58 Iters: 791300/[18], loss: 2.7407, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-04:55:02 Iters: 791400/[18], loss: 3.1021, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-04:57:05 Iters: 791500/[18], loss: 2.5993, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-04:59:08 Iters: 791600/[18], loss: 1.9336, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-05:01:12 Iters: 791700/[18], loss: 2.6007, train_accuracy: 0.5312, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-05:03:15 Iters: 791800/[18], loss: 2.9015, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-05:05:18 Iters: 791900/[18], loss: 2.4160, train_accuracy: 
0.7188, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-05:07:22 Iters: 792000/[18], loss: 2.6337, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-05:09:25 Iters: 792100/[18], loss: 3.1111, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-05:11:28 Iters: 792200/[18], loss: 2.7884, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-05:13:32 Iters: 792300/[18], loss: 2.8264, train_accuracy: 0.5703, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-05:15:35 Iters: 792400/[18], loss: 2.8112, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-05:17:38 Iters: 792500/[18], loss: 2.2600, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-05:19:42 Iters: 792600/[18], loss: 2.4451, train_accuracy: 0.7109, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-05:21:45 Iters: 792700/[18], loss: 2.8747, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-05:23:48 Iters: 792800/[18], loss: 2.2423, train_accuracy: 0.6875, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-05:25:52 Iters: 792900/[18], loss: 2.5528, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-05:27:55 Iters: 793000/[18], loss: 2.6203, train_accuracy: 0.6875, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-05:29:58 Iters: 793100/[18], loss: 2.5671, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-05:32:02 Iters: 793200/[18], loss: 2.4426, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-05:34:05 Iters: 793300/[18], loss: 2.9977, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-05:36:09 Iters: 793400/[18], loss: 2.4363, 
train_accuracy: 0.6719, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-05:38:12 Iters: 793500/[18], loss: 3.2400, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-05:40:15 Iters: 793600/[18], loss: 2.4007, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-05:42:19 Iters: 793700/[18], loss: 2.1647, train_accuracy: 0.6953, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-05:44:22 Iters: 793800/[18], loss: 3.0213, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-05:46:25 Iters: 793900/[18], loss: 2.8064, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-05:48:29 Iters: 794000/[18], loss: 3.1835, train_accuracy: 0.5625, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-05:50:32 Iters: 794100/[18], loss: 2.0115, train_accuracy: 0.6875, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-05:52:36 Iters: 794200/[18], loss: 2.0286, train_accuracy: 0.6719, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-05:54:39 Iters: 794300/[18], loss: 2.5942, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-05:56:42 Iters: 794400/[18], loss: 3.2195, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-05:58:46 Iters: 794500/[18], loss: 2.4087, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-06:00:49 Iters: 794600/[18], loss: 2.1469, train_accuracy: 0.6875, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-06:02:53 Iters: 794700/[18], loss: 2.7389, train_accuracy: 0.5938, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-06:04:56 Iters: 794800/[18], loss: 2.2550, train_accuracy: 0.6719, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-06:07:00 Iters: 794900/[18], 
loss: 2.8935, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-06:09:03 Iters: 795000/[18], loss: 2.7127, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-06:11:06 Iters: 795100/[18], loss: 2.9050, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-06:13:10 Iters: 795200/[18], loss: 2.4791, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-06:15:13 Iters: 795300/[18], loss: 3.3729, train_accuracy: 0.5547, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-06:17:17 Iters: 795400/[18], loss: 2.1671, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-06:19:20 Iters: 795500/[18], loss: 2.5682, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-06:21:23 Iters: 795600/[18], loss: 2.2068, train_accuracy: 0.6953, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-06:23:27 Iters: 795700/[18], loss: 2.9494, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-06:25:30 Iters: 795800/[18], loss: 2.7923, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-06:27:34 Iters: 795900/[18], loss: 2.7033, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-06:29:37 Iters: 796000/[18], loss: 2.5307, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-06:31:41 Iters: 796100/[18], loss: 2.5171, train_accuracy: 0.6172, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-06:33:44 Iters: 796200/[18], loss: 1.9404, train_accuracy: 0.6875, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-06:35:48 Iters: 796300/[18], loss: 2.3375, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-06:37:51 Iters: 
796400/[18], loss: 2.4671, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-06:39:54 Iters: 796500/[18], loss: 2.1439, train_accuracy: 0.6875, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-06:41:58 Iters: 796600/[18], loss: 2.4363, train_accuracy: 0.6719, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-06:44:01 Iters: 796700/[18], loss: 3.1074, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-06:46:05 Iters: 796800/[18], loss: 2.3327, train_accuracy: 0.6641, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-06:48:08 Iters: 796900/[18], loss: 2.7311, train_accuracy: 0.5859, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-06:50:11 Iters: 797000/[18], loss: 2.8182, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-06:52:15 Iters: 797100/[18], loss: 3.0006, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-06:54:18 Iters: 797200/[18], loss: 2.8119, train_accuracy: 0.6016, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-06:56:21 Iters: 797300/[18], loss: 2.3350, train_accuracy: 0.7188, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-06:58:25 Iters: 797400/[18], loss: 2.3050, train_accuracy: 0.6641, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-07:00:28 Iters: 797500/[18], loss: 2.8323, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-07:02:32 Iters: 797600/[18], loss: 2.0280, train_accuracy: 0.7031, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-07:04:35 Iters: 797700/[18], loss: 2.4888, train_accuracy: 0.6250, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-07:06:38 Iters: 797800/[18], loss: 2.8394, train_accuracy: 0.6094, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 
-20220712-07:08:42 Iters: 797900/[18], loss: 2.4488, train_accuracy: 0.6641, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-07:10:45 Iters: 798000/[18], loss: 2.6765, train_accuracy: 0.5781, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-07:12:48 Iters: 798100/[18], loss: 2.5420, train_accuracy: 0.5469, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-07:14:52 Iters: 798200/[18], loss: 2.2580, train_accuracy: 0.6953, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-07:16:55 Iters: 798300/[18], loss: 2.0557, train_accuracy: 0.6797, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-07:18:59 Iters: 798400/[18], loss: 2.4942, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-07:21:02 Iters: 798500/[18], loss: 2.4112, train_accuracy: 0.6094, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-07:23:06 Iters: 798600/[18], loss: 2.5309, train_accuracy: 0.6953, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-07:25:10 Iters: 798700/[18], loss: 1.9215, train_accuracy: 0.7031, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-07:27:14 Iters: 798800/[18], loss: 2.7420, train_accuracy: 0.5859, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-07:29:18 Iters: 798900/[18], loss: 3.1675, train_accuracy: 0.5938, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-07:31:22 Iters: 799000/[18], loss: 2.4571, train_accuracy: 0.6797, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-07:33:26 Iters: 799100/[18], loss: 2.9601, train_accuracy: 0.5312, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-07:35:29 Iters: 799200/[18], loss: 2.6033, train_accuracy: 0.6172, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-07:37:33 Iters: 799300/[18], loss: 3.2010, train_accuracy: 0.6016, time: 1.24 s/iter, learning rate: 
5.0000000000000016e-05 -20220712-07:39:37 Iters: 799400/[18], loss: 2.5956, train_accuracy: 0.6797, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-07:41:41 Iters: 799500/[18], loss: 2.5548, train_accuracy: 0.6641, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-07:43:45 Iters: 799600/[18], loss: 2.7940, train_accuracy: 0.6250, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-07:45:49 Iters: 799700/[18], loss: 1.8502, train_accuracy: 0.7344, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-07:47:53 Iters: 799800/[18], loss: 2.5296, train_accuracy: 0.6797, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-07:49:57 Iters: 799900/[18], loss: 2.5833, train_accuracy: 0.6328, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-07:52:01 Iters: 800000/[18], loss: 1.8052, train_accuracy: 0.7266, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-07:52:01 Saving checkpoint: 800000 -20220712-07:53:20 LFW Ave Accuracy: 99.6833 -20220712-07:54:37 AgeDB-30 Ave Accuracy: 97.3000 -20220712-07:56:07 CFP-FP Ave Accuracy: 95.4286 -20220712-07:56:07 Current Best Accuracy: LFW: 99.7333 in iters: 740000, AgeDB-30: 97.4500 in iters: 740000 and CFP-FP: 95.5429 in iters: 790000 -20220712-07:58:09 Iters: 800100/[18], loss: 2.6897, train_accuracy: 0.6172, time: 3.68 s/iter, learning rate: 5.0000000000000016e-05 -20220712-08:00:13 Iters: 800200/[18], loss: 3.4410, train_accuracy: 0.5312, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-08:02:17 Iters: 800300/[18], loss: 2.9094, train_accuracy: 0.6328, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-08:04:21 Iters: 800400/[18], loss: 2.4332, train_accuracy: 0.7109, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-08:06:25 Iters: 800500/[18], loss: 2.6029, train_accuracy: 0.6328, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-08:08:29 Iters: 
800600/[18], loss: 2.1809, train_accuracy: 0.6797, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-08:10:33 Iters: 800700/[18], loss: 3.2927, train_accuracy: 0.5938, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-08:12:37 Iters: 800800/[18], loss: 2.5784, train_accuracy: 0.6641, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-08:14:41 Iters: 800900/[18], loss: 2.4748, train_accuracy: 0.6641, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-08:16:45 Iters: 801000/[18], loss: 2.7761, train_accuracy: 0.6641, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-08:18:49 Iters: 801100/[18], loss: 2.4963, train_accuracy: 0.6406, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-08:20:53 Iters: 801200/[18], loss: 2.2328, train_accuracy: 0.6719, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-08:22:57 Iters: 801300/[18], loss: 2.5711, train_accuracy: 0.5859, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-08:25:01 Iters: 801400/[18], loss: 2.0025, train_accuracy: 0.7344, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-08:27:05 Iters: 801500/[18], loss: 2.6858, train_accuracy: 0.5859, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-08:29:09 Iters: 801600/[18], loss: 3.1025, train_accuracy: 0.5703, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-08:31:13 Iters: 801700/[18], loss: 3.3070, train_accuracy: 0.5391, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-08:33:17 Iters: 801800/[18], loss: 2.8446, train_accuracy: 0.5859, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-08:35:21 Iters: 801900/[18], loss: 2.8810, train_accuracy: 0.6797, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-08:37:25 Iters: 802000/[18], loss: 2.5076, train_accuracy: 0.6484, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 
-20220712-08:39:29 Iters: 802100/[18], loss: 2.3578, train_accuracy: 0.6719, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-08:41:33 Iters: 802200/[18], loss: 2.6246, train_accuracy: 0.7109, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-08:43:37 Iters: 802300/[18], loss: 2.7740, train_accuracy: 0.5703, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-08:45:41 Iters: 802400/[18], loss: 3.1395, train_accuracy: 0.5625, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-08:47:45 Iters: 802500/[18], loss: 2.4523, train_accuracy: 0.6562, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-08:49:49 Iters: 802600/[18], loss: 2.2266, train_accuracy: 0.7031, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-08:51:53 Iters: 802700/[18], loss: 2.9749, train_accuracy: 0.6484, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-08:53:57 Iters: 802800/[18], loss: 2.5856, train_accuracy: 0.6016, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-08:56:01 Iters: 802900/[18], loss: 3.3095, train_accuracy: 0.6094, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-08:58:05 Iters: 803000/[18], loss: 2.2645, train_accuracy: 0.7031, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-09:00:09 Iters: 803100/[18], loss: 2.8955, train_accuracy: 0.6250, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-09:02:13 Iters: 803200/[18], loss: 2.1793, train_accuracy: 0.7031, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-09:04:17 Iters: 803300/[18], loss: 2.5629, train_accuracy: 0.6328, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-09:06:21 Iters: 803400/[18], loss: 2.3878, train_accuracy: 0.6875, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-09:08:25 Iters: 803500/[18], loss: 2.9923, train_accuracy: 0.5781, time: 1.24 s/iter, learning rate: 
5.0000000000000016e-05 -20220712-09:10:29 Iters: 803600/[18], loss: 2.7546, train_accuracy: 0.6172, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-09:12:33 Iters: 803700/[18], loss: 3.1250, train_accuracy: 0.6172, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-09:14:37 Iters: 803800/[18], loss: 2.9788, train_accuracy: 0.6406, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-09:16:42 Iters: 803900/[18], loss: 2.5251, train_accuracy: 0.6328, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-09:18:46 Iters: 804000/[18], loss: 2.7892, train_accuracy: 0.6797, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-09:20:50 Iters: 804100/[18], loss: 2.9605, train_accuracy: 0.6094, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-09:22:54 Iters: 804200/[18], loss: 2.7379, train_accuracy: 0.6797, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-09:24:58 Iters: 804300/[18], loss: 2.4122, train_accuracy: 0.6562, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-09:27:02 Iters: 804400/[18], loss: 2.1605, train_accuracy: 0.7344, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-09:29:07 Iters: 804500/[18], loss: 2.9212, train_accuracy: 0.6406, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-09:31:11 Iters: 804600/[18], loss: 2.1428, train_accuracy: 0.6250, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-09:33:15 Iters: 804700/[18], loss: 2.5603, train_accuracy: 0.6328, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-09:35:19 Iters: 804800/[18], loss: 2.3940, train_accuracy: 0.6328, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-09:37:23 Iters: 804900/[18], loss: 2.7061, train_accuracy: 0.6719, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-09:39:27 Iters: 805000/[18], loss: 2.3006, train_accuracy: 0.6641, time: 1.24 s/iter, 
learning rate: 5.0000000000000016e-05 -20220712-09:41:31 Iters: 805100/[18], loss: 2.2709, train_accuracy: 0.6719, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-09:43:36 Iters: 805200/[18], loss: 3.6630, train_accuracy: 0.6250, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-09:45:40 Iters: 805300/[18], loss: 2.5356, train_accuracy: 0.6250, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-09:47:44 Iters: 805400/[18], loss: 2.1683, train_accuracy: 0.6953, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-09:49:48 Iters: 805500/[18], loss: 2.3910, train_accuracy: 0.6328, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-09:51:53 Iters: 805600/[18], loss: 2.8481, train_accuracy: 0.6016, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-09:53:57 Iters: 805700/[18], loss: 2.3404, train_accuracy: 0.6719, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-09:56:01 Iters: 805800/[18], loss: 2.5464, train_accuracy: 0.6562, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-09:58:05 Iters: 805900/[18], loss: 2.4490, train_accuracy: 0.6250, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-10:00:10 Iters: 806000/[18], loss: 3.0163, train_accuracy: 0.6328, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-10:02:14 Iters: 806100/[18], loss: 2.5990, train_accuracy: 0.6562, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-10:04:18 Iters: 806200/[18], loss: 2.5441, train_accuracy: 0.6953, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-10:06:23 Iters: 806300/[18], loss: 2.6321, train_accuracy: 0.6641, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-10:08:27 Iters: 806400/[18], loss: 2.0428, train_accuracy: 0.6719, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-10:10:31 Iters: 806500/[18], loss: 2.7192, train_accuracy: 0.6797, time: 
1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-10:12:36 Iters: 806600/[18], loss: 2.7292, train_accuracy: 0.6641, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-10:14:40 Iters: 806700/[18], loss: 2.2871, train_accuracy: 0.6484, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-10:16:44 Iters: 806800/[18], loss: 2.2816, train_accuracy: 0.6719, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-10:18:49 Iters: 806900/[18], loss: 3.3862, train_accuracy: 0.5547, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-10:20:53 Iters: 807000/[18], loss: 2.4179, train_accuracy: 0.6172, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-10:22:57 Iters: 807100/[18], loss: 2.2945, train_accuracy: 0.6406, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-10:25:01 Iters: 807200/[18], loss: 3.0617, train_accuracy: 0.5156, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-10:27:06 Iters: 807300/[18], loss: 2.7359, train_accuracy: 0.6328, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-10:29:10 Iters: 807400/[18], loss: 2.6312, train_accuracy: 0.6719, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-10:31:14 Iters: 807500/[18], loss: 2.3852, train_accuracy: 0.6719, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-10:33:19 Iters: 807600/[18], loss: 2.2878, train_accuracy: 0.6719, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-10:35:23 Iters: 807700/[18], loss: 2.2161, train_accuracy: 0.6484, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-10:37:27 Iters: 807800/[18], loss: 2.1981, train_accuracy: 0.6406, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-10:39:31 Iters: 807900/[18], loss: 2.4204, train_accuracy: 0.6484, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-10:41:35 Iters: 808000/[18], loss: 2.6841, train_accuracy: 
0.6250, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-10:43:39 Iters: 808100/[18], loss: 2.5992, train_accuracy: 0.6484, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-10:45:43 Iters: 808200/[18], loss: 2.7165, train_accuracy: 0.6328, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-10:47:46 Iters: 808300/[18], loss: 2.1057, train_accuracy: 0.6875, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-10:49:50 Iters: 808400/[18], loss: 2.6630, train_accuracy: 0.5625, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-10:51:54 Iters: 808500/[18], loss: 3.5853, train_accuracy: 0.5469, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-10:53:58 Iters: 808600/[18], loss: 2.4249, train_accuracy: 0.6875, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-10:56:02 Iters: 808700/[18], loss: 2.4295, train_accuracy: 0.7031, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-10:58:06 Iters: 808800/[18], loss: 2.4436, train_accuracy: 0.6797, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-11:00:10 Iters: 808900/[18], loss: 2.7767, train_accuracy: 0.6250, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-11:02:14 Iters: 809000/[18], loss: 2.1014, train_accuracy: 0.7109, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-11:04:18 Iters: 809100/[18], loss: 2.7062, train_accuracy: 0.7031, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-11:06:22 Iters: 809200/[18], loss: 2.3179, train_accuracy: 0.6719, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-11:08:26 Iters: 809300/[18], loss: 2.7864, train_accuracy: 0.6172, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-11:10:30 Iters: 809400/[18], loss: 2.1572, train_accuracy: 0.6328, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-11:12:34 Iters: 809500/[18], loss: 2.2839, 
train_accuracy: 0.6406, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-11:14:38 Iters: 809600/[18], loss: 2.8277, train_accuracy: 0.6797, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-11:16:41 Iters: 809700/[18], loss: 2.2813, train_accuracy: 0.7031, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-11:18:45 Iters: 809800/[18], loss: 2.0044, train_accuracy: 0.7344, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-11:20:49 Iters: 809900/[18], loss: 3.0037, train_accuracy: 0.6016, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-11:22:53 Iters: 810000/[18], loss: 2.6301, train_accuracy: 0.7109, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-11:22:53 Saving checkpoint: 810000 -20220712-11:24:11 LFW Ave Accuracy: 99.6833 -20220712-11:25:27 AgeDB-30 Ave Accuracy: 97.2667 -20220712-11:26:55 CFP-FP Ave Accuracy: 95.4857 -20220712-11:26:55 Current Best Accuracy: LFW: 99.7333 in iters: 740000, AgeDB-30: 97.4500 in iters: 740000 and CFP-FP: 95.5429 in iters: 790000 -20220712-11:28:58 Iters: 810100/[18], loss: 2.9213, train_accuracy: 0.5859, time: 3.65 s/iter, learning rate: 5.0000000000000016e-05 -20220712-11:31:02 Iters: 810200/[18], loss: 3.6431, train_accuracy: 0.5781, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-11:33:06 Iters: 810300/[18], loss: 2.5250, train_accuracy: 0.6016, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-11:35:10 Iters: 810400/[18], loss: 2.3825, train_accuracy: 0.6328, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-11:37:15 Iters: 810500/[18], loss: 2.4931, train_accuracy: 0.6484, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-11:39:19 Iters: 810600/[18], loss: 2.4724, train_accuracy: 0.6406, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-11:41:23 Iters: 810700/[18], loss: 2.7633, train_accuracy: 0.6172, time: 1.24 s/iter, learning rate: 
5.0000000000000016e-05 -20220712-11:43:27 Iters: 810800/[18], loss: 2.3282, train_accuracy: 0.6797, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-11:45:31 Iters: 810900/[18], loss: 1.9732, train_accuracy: 0.6953, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-11:47:34 Iters: 811000/[18], loss: 2.5287, train_accuracy: 0.6875, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-11:49:38 Iters: 811100/[18], loss: 2.9482, train_accuracy: 0.5312, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-11:51:42 Iters: 811200/[18], loss: 2.2542, train_accuracy: 0.6875, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-11:53:46 Iters: 811300/[18], loss: 2.5218, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-11:55:49 Iters: 811400/[18], loss: 2.5169, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-11:57:52 Iters: 811500/[18], loss: 2.3466, train_accuracy: 0.6719, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-11:59:56 Iters: 811600/[18], loss: 2.4951, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-12:01:59 Iters: 811700/[18], loss: 2.9744, train_accuracy: 0.6641, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-12:04:02 Iters: 811800/[18], loss: 2.9252, train_accuracy: 0.6094, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-12:06:06 Iters: 811900/[18], loss: 3.0788, train_accuracy: 0.6328, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-12:08:09 Iters: 812000/[18], loss: 2.4658, train_accuracy: 0.6641, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-12:10:13 Iters: 812100/[18], loss: 2.2254, train_accuracy: 0.6953, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-12:12:16 Iters: 812200/[18], loss: 2.3572, train_accuracy: 0.6875, time: 1.23 s/iter, 
learning rate: 5.0000000000000016e-05 -20220712-12:14:20 Iters: 812300/[18], loss: 2.7639, train_accuracy: 0.6328, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-12:16:23 Iters: 812400/[18], loss: 2.4106, train_accuracy: 0.6875, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-12:18:26 Iters: 812500/[18], loss: 2.3333, train_accuracy: 0.6719, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-12:20:30 Iters: 812600/[18], loss: 2.7849, train_accuracy: 0.6172, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-12:22:33 Iters: 812700/[18], loss: 2.5128, train_accuracy: 0.6406, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-12:24:36 Iters: 812800/[18], loss: 2.2723, train_accuracy: 0.6641, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-12:26:40 Iters: 812900/[18], loss: 2.3952, train_accuracy: 0.6484, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-12:28:43 Iters: 813000/[18], loss: 2.4947, train_accuracy: 0.6641, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-12:30:46 Iters: 813100/[18], loss: 2.5426, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-12:32:50 Iters: 813200/[18], loss: 2.5677, train_accuracy: 0.6562, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-12:34:53 Iters: 813300/[18], loss: 2.1349, train_accuracy: 0.6719, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-12:36:56 Iters: 813400/[18], loss: 2.3191, train_accuracy: 0.6797, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-12:38:59 Iters: 813500/[18], loss: 2.1100, train_accuracy: 0.7188, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-12:41:03 Iters: 813600/[18], loss: 2.0921, train_accuracy: 0.6641, time: 1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-12:43:06 Iters: 813700/[18], loss: 3.1079, train_accuracy: 0.5781, time: 
1.23 s/iter, learning rate: 5.0000000000000016e-05 -20220712-12:45:10 Iters: 813800/[18], loss: 2.9711, train_accuracy: 0.6016, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-12:47:14 Iters: 813900/[18], loss: 3.2989, train_accuracy: 0.6250, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-12:49:17 Iters: 814000/[18], loss: 2.8416, train_accuracy: 0.5938, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-12:51:21 Iters: 814100/[18], loss: 2.6307, train_accuracy: 0.6406, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-12:53:25 Iters: 814200/[18], loss: 2.6848, train_accuracy: 0.6328, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-12:55:29 Iters: 814300/[18], loss: 2.8084, train_accuracy: 0.6328, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-12:57:33 Iters: 814400/[18], loss: 2.4204, train_accuracy: 0.5938, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-12:59:37 Iters: 814500/[18], loss: 2.6864, train_accuracy: 0.5938, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-13:01:41 Iters: 814600/[18], loss: 2.6261, train_accuracy: 0.6172, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-13:03:45 Iters: 814700/[18], loss: 2.7919, train_accuracy: 0.6094, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-13:05:49 Iters: 814800/[18], loss: 2.4948, train_accuracy: 0.6406, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-13:07:53 Iters: 814900/[18], loss: 2.8486, train_accuracy: 0.6641, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-13:09:57 Iters: 815000/[18], loss: 2.2534, train_accuracy: 0.6797, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-13:12:01 Iters: 815100/[18], loss: 2.8924, train_accuracy: 0.6016, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-13:14:05 Iters: 815200/[18], loss: 2.4401, train_accuracy: 
0.6953, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-13:16:09 Iters: 815300/[18], loss: 2.3432, train_accuracy: 0.6797, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-13:18:13 Iters: 815400/[18], loss: 2.6636, train_accuracy: 0.6406, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-13:20:17 Iters: 815500/[18], loss: 2.5878, train_accuracy: 0.6328, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-13:22:21 Iters: 815600/[18], loss: 2.7376, train_accuracy: 0.6250, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-13:24:25 Iters: 815700/[18], loss: 3.5374, train_accuracy: 0.5547, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-13:26:29 Iters: 815800/[18], loss: 2.6592, train_accuracy: 0.6094, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-13:28:33 Iters: 815900/[18], loss: 3.1617, train_accuracy: 0.5859, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-13:30:37 Iters: 816000/[18], loss: 3.3937, train_accuracy: 0.5625, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-13:32:41 Iters: 816100/[18], loss: 2.6399, train_accuracy: 0.6328, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-13:34:45 Iters: 816200/[18], loss: 2.7626, train_accuracy: 0.6016, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-13:36:49 Iters: 816300/[18], loss: 3.3653, train_accuracy: 0.6250, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-13:38:53 Iters: 816400/[18], loss: 2.6692, train_accuracy: 0.6172, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-13:40:57 Iters: 816500/[18], loss: 2.7190, train_accuracy: 0.5938, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-13:43:01 Iters: 816600/[18], loss: 3.0781, train_accuracy: 0.5625, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-13:45:05 Iters: 816700/[18], loss: 2.0519, 
train_accuracy: 0.6719, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-13:47:09 Iters: 816800/[18], loss: 3.0697, train_accuracy: 0.5859, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-13:49:13 Iters: 816900/[18], loss: 2.7007, train_accuracy: 0.6406, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-13:51:17 Iters: 817000/[18], loss: 2.3398, train_accuracy: 0.5781, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-13:53:20 Iters: 817100/[18], loss: 2.6645, train_accuracy: 0.6250, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-13:55:24 Iters: 817200/[18], loss: 3.2126, train_accuracy: 0.5859, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-13:57:28 Iters: 817300/[18], loss: 2.7856, train_accuracy: 0.5859, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-13:59:32 Iters: 817400/[18], loss: 2.7876, train_accuracy: 0.6172, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-14:01:36 Iters: 817500/[18], loss: 2.2583, train_accuracy: 0.6172, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-14:03:40 Iters: 817600/[18], loss: 2.8464, train_accuracy: 0.6406, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-14:05:44 Iters: 817700/[18], loss: 2.5264, train_accuracy: 0.6250, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-14:07:48 Iters: 817800/[18], loss: 2.6201, train_accuracy: 0.6094, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-14:09:52 Iters: 817900/[18], loss: 2.6171, train_accuracy: 0.6562, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-14:11:56 Iters: 818000/[18], loss: 2.3217, train_accuracy: 0.5938, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-14:14:00 Iters: 818100/[18], loss: 2.9086, train_accuracy: 0.5859, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-14:16:04 Iters: 818200/[18], 
loss: 2.5418, train_accuracy: 0.6406, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-14:18:08 Iters: 818300/[18], loss: 2.3113, train_accuracy: 0.6719, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-14:20:12 Iters: 818400/[18], loss: 2.5145, train_accuracy: 0.6406, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-14:22:16 Iters: 818500/[18], loss: 3.0111, train_accuracy: 0.6250, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-14:24:20 Iters: 818600/[18], loss: 2.0768, train_accuracy: 0.7109, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-14:26:23 Iters: 818700/[18], loss: 2.4021, train_accuracy: 0.6641, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-14:28:27 Iters: 818800/[18], loss: 2.2452, train_accuracy: 0.6172, time: 1.24 s/iter, learning rate: 5.0000000000000016e-05 -20220712-14:28:52 Finally Best Accuracy: LFW: 99.7333 in iters: 740000, AgeDB-30: 97.4500 in iters: 740000 and CFP-FP: 95.5429 in iters: 790000 diff --git a/face_recognition1/face_feature/loss/__init__.py b/face_recognition1/face_feature/loss/__init__.py deleted file mode 100644 index 7b53b7d8e6981d9e7e9dbb03cab143237cf9234b..0000000000000000000000000000000000000000 --- a/face_recognition1/face_feature/loss/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -#!/usr/bin/env python -# encoding: utf-8 -''' -@author: MingDong -@file: __init__.py.py -@desc: -''' \ No newline at end of file diff --git a/face_recognition1/face_feature/loss/agentcenterloss.py b/face_recognition1/face_feature/loss/agentcenterloss.py deleted file mode 100644 index f23988b4d0e465fa7aec3a7da6bfb49ece02e2ee..0000000000000000000000000000000000000000 --- a/face_recognition1/face_feature/loss/agentcenterloss.py +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/env python -# encoding: utf-8 -''' -@author: MingDong -@file: agentcenterloss.py -@desc: the variety of center loss, which use the class weight as the class center and 
normalize both the weight and feature, - in this way, the cos distance of weight and feature can be used as the supervised signal. - It's similar with torch.nn.CosineEmbeddingLoss, x_1 means weight_i, x_2 means feature_i. -''' - -import torch -import torch.nn as nn -import torch.nn.functional as F - - -class AgentCenterLoss(nn.Module): - - def __init__(self, num_classes, feat_dim, scale): - super(AgentCenterLoss, self).__init__() - self.num_classes = num_classes - self.feat_dim = feat_dim - self.scale = scale - - self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim)) - - def forward(self, x, labels): - ''' - Parameters: - x: input tensor with shape (batch_size, feat_dim) - labels: ground truth label with shape (batch_size) - Return: - loss of centers - ''' - cos_dis = F.linear(F.normalize(x), F.normalize(self.centers)) * self.scale - - one_hot = torch.zeros_like(cos_dis) - one_hot.scatter_(1, labels.view(-1, 1), 1.0) - - # loss = 1 - cosine(i) - loss = one_hot * self.scale - (one_hot * cos_dis) - - return loss.mean() \ No newline at end of file diff --git a/face_recognition1/face_feature/loss/centerloss.py b/face_recognition1/face_feature/loss/centerloss.py deleted file mode 100644 index 996fe6715e1d099e1f2c1a8744cb944a0c6e5494..0000000000000000000000000000000000000000 --- a/face_recognition1/face_feature/loss/centerloss.py +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/env python -# encoding: utf-8 -""" -@author: MingDong -@file: centerloss.py -@desc: the implementation of center loss -""" - -import torch -import torch.nn as nn - - -class CenterLoss(nn.Module): - - def __init__(self, num_classes, feat_dim): - super(CenterLoss, self).__init__() - self.num_classes = num_classes - self.feat_dim = feat_dim - - self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim)) - - def forward(self, x, labels): - ''' - Parameters: - x: input tensor with shape (batch_size, feat_dim) - labels: ground truth label with shape (batch_size) - Return: - 
loss of centers - ''' - # compute the distance of (x-center)^2 - batch_size = x.size(0) - distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(batch_size, self.num_classes) + \ - torch.pow(self.centers, 2).sum(dim=1, keepdim=True).expand(self.num_classes, batch_size).t() - distmat.addmm_(1, -2, x, self.centers.t()) - - # get one_hot matrix - device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - classes = torch.arange(self.num_classes).long().to(device) - labels = labels.unsqueeze(1).expand(batch_size, self.num_classes) - mask = labels.eq(classes.expand(batch_size, self.num_classes)) - - dist = [] - for i in range(batch_size): - value = distmat[i][mask[i]] - value = value.clamp(min=1e-12, max=1e+12) # for numerical stability - dist.append(value) - dist = torch.cat(dist) - loss = dist.mean() - - return loss diff --git a/face_recognition1/face_feature/loss/tripletloss.py b/face_recognition1/face_feature/loss/tripletloss.py deleted file mode 100644 index 3ab82bc2cc3b61a4a3a31d4151bc6a92deda32b0..0000000000000000000000000000000000000000 --- a/face_recognition1/face_feature/loss/tripletloss.py +++ /dev/null @@ -1,150 +0,0 @@ -import math -import re -import functools -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.nn import Parameter, Module - -import numpy as np - - -class LogCoshLoss(torch.nn.Module): - def __init__(self): - super().__init__() - - def forward(self, y_t, y_prime_t): - ey_t = y_t - y_prime_t - return torch.mean(torch.log(torch.cosh(ey_t + 1e-12))) - - -def smooth_l1_loss(input, target, beta=1. 
def smooth_l1_loss(input, target, beta=1. / 9, size_average=True):
    """Smooth-L1 (Huber-style) loss with a configurable ``beta`` knee.

    Very similar to the smooth_l1_loss from pytorch, but with the extra
    ``beta`` parameter controlling where quadratic turns linear.
    """
    n = torch.abs(input - target)
    cond = n < beta
    # Quadratic below beta, linear above it.
    loss = torch.where(cond, 0.5 * n ** 2 / beta, n - 0.5 * beta)
    if size_average:
        return loss.mean()
    return loss.sum()


def custom_cosine(u, v, ln):
    """Mean row-wise cosine distance between u and v, normalized by ``ln``."""
    uv = torch.sum(u * v, dim=1, keepdim=True)
    uu = torch.sum(u * u, dim=1, keepdim=True)
    vv = torch.sum(v * v, dim=1, keepdim=True)
    uu_vv = uu * vv
    dist = uv / torch.sqrt(uu_vv.clamp(1e-8))
    dist = dist.clamp(max=1)
    # Return absolute value to avoid small negative value due to rounding.
    return torch.abs(1.0 - (dist.sum(dim=1, keepdim=True) / ln).mean())


def average(u, weights, dim=1):
    """(Weighted) average of ``u`` along ``dim``, keeping the reduced dim.

    BUGFIX: the original divided by ``torch.sum(weights, ...)`` even when
    ``weights`` was None, which raised a TypeError; an unweighted mean is
    returned in that case instead.
    """
    if weights is not None:
        uw = u * weights
        return torch.sum(uw, dim=dim, keepdim=True) / torch.sum(weights, dim=dim, keepdim=True)
    return torch.sum(u, dim=dim, keepdim=True) / u.size(dim)


def weighted_cosine(u, v, w, eps=1e-8, centered=False):
    """Weighted cosine distance between u and v (optionally mean-centered).

    BUGFIX: ``eps`` was accepted but never used; it now guards the
    denominator against division by zero / NaN gradients.
    """
    if centered:
        u = u - average(u, weights=w)
        v = v - average(v, weights=w)
    uv = average(u * v, weights=w)
    uu = average(u * u, weights=w)
    vv = average(v * v, weights=w)
    dist = uv / torch.sqrt((uu * vv).clamp(min=eps))
    # Return absolute value to avoid small negative value due to rounding.
    return torch.abs(1.0 - dist.mean())
def sum(self, emb, dim=1):
    """Sum ``emb`` along ``dim``, keeping the reduced dimension."""
    return torch.sum(emb, dim=dim, keepdim=True)

def cosine(self, emb1, emb2):
    """Row-wise cosine similarity between two embedding batches."""
    dot = torch.sum(emb1 * emb2, dim=1)
    norm2_sq = torch.sum(emb2 * emb2, dim=1)
    norm1_sq = torch.sum(emb1 * emb1, dim=1)
    return torch.div(dot, torch.sqrt(norm2_sq * norm1_sq))

def l2(self, emb1, emb2, dim=1):
    """Row-wise squared L2 distance between two embedding batches."""
    squared_diff = torch.sub(emb1, emb2).pow(2)
    return torch.sum(squared_diff, dim=1)

def dis(self, anchor, positive):
    """Row-wise squared L2 distance (same contract as ``l2``)."""
    return torch.sum(torch.sub(anchor, positive).pow(2), 1)
class ArcMarginProduct(nn.Module):
    """Additive angular margin (ArcFace): s * cos(theta + m) on target logits.

    Parameters:
        in_feature: embedding dimension
        out_feature: number of classes/identities
        s: logit re-scale factor
        m: additive angular margin in radians
        easy_margin: if True, apply the margin only where cos(theta) > 0
    """
    def __init__(self, in_feature=128, out_feature=10575, s=32.0, m=0.50, easy_margin=False):
        super(ArcMarginProduct, self).__init__()
        self.in_feature = in_feature
        self.out_feature = out_feature
        self.s = s
        self.m = m
        self.weight = Parameter(torch.Tensor(out_feature, in_feature))
        nn.init.xavier_uniform_(self.weight)

        self.easy_margin = easy_margin
        self.cos_m = math.cos(m)
        self.sin_m = math.sin(m)
        # make the function cos(theta+m) monotonic decreasing while theta in [0°,180°]
        self.th = math.cos(math.pi - m)
        self.mm = math.sin(math.pi - m) * m

    def forward(self, x, label):
        # cos(theta)
        cosine = F.linear(F.normalize(x), F.normalize(self.weight))
        # cos(theta + m); BUGFIX: clamp 1 - cos^2 into [0, 1] before sqrt --
        # floating point can push cos(theta) marginally outside [-1, 1],
        # which made the original sqrt produce NaN.
        sine = torch.sqrt((1.0 - torch.pow(cosine, 2)).clamp(0, 1))
        phi = cosine * self.cos_m - sine * self.sin_m

        if self.easy_margin:
            phi = torch.where(cosine > 0, phi, cosine)
        else:
            phi = torch.where((cosine - self.th) > 0, phi, cosine - self.mm)

        one_hot = torch.zeros_like(cosine)
        one_hot.scatter_(1, label.view(-1, 1), 1)
        # Margin-adjusted logit on the target class, plain cosine elsewhere.
        output = (one_hot * phi) + ((1.0 - one_hot) * cosine)
        output = output * self.s
        return output


class CosineMarginProduct(nn.Module):
    """Additive cosine margin (CosFace): s * (cos(theta) - m) on target logits."""
    def __init__(self, in_feature=128, out_feature=10575, s=30.0, m=0.35):
        super(CosineMarginProduct, self).__init__()
        self.in_feature = in_feature
        self.out_feature = out_feature
        self.s = s
        self.m = m
        self.weight = Parameter(torch.Tensor(out_feature, in_feature))
        nn.init.xavier_uniform_(self.weight)

    def forward(self, x, label):
        cosine = F.linear(F.normalize(x), F.normalize(self.weight))
        one_hot = torch.zeros_like(cosine)
        one_hot.scatter_(1, label.view(-1, 1), 1.0)
        # Subtract the margin only on the ground-truth class, then re-scale.
        output = self.s * (cosine - one_hot * self.m)
        return output
class InnerProduct(nn.Module):
    """Plain inner-product classification head (a fully connected layer).

    ``label`` is accepted for interface parity with the margin heads but is
    not used.
    """
    def __init__(self, in_feature=128, out_feature=10575):
        super(InnerProduct, self).__init__()
        self.in_feature = in_feature
        self.out_feature = out_feature
        self.weight = Parameter(torch.Tensor(out_feature, in_feature))
        nn.init.xavier_uniform_(self.weight)

    def forward(self, x, label):
        # label not used
        return F.linear(x, self.weight)


class MultiMarginProduct(nn.Module):
    """Combined margin head: additive angular margin (m1, ArcFace-style)
    plus additive cosine margin (m2, CosFace-style) on the target logit."""
    def __init__(self, in_feature=128, out_feature=10575, s=32.0, m1=0.20, m2=0.35, easy_margin=False):
        super(MultiMarginProduct, self).__init__()
        self.in_feature = in_feature
        self.out_feature = out_feature
        self.s = s
        self.m1 = m1
        self.m2 = m2
        self.weight = Parameter(torch.Tensor(out_feature, in_feature))
        nn.init.xavier_uniform_(self.weight)

        self.easy_margin = easy_margin
        self.cos_m1 = math.cos(m1)
        self.sin_m1 = math.sin(m1)
        # make the function cos(theta+m1) monotonic decreasing while theta in [0°,180°]
        self.th = math.cos(math.pi - m1)
        self.mm = math.sin(math.pi - m1) * m1

    def forward(self, x, label):
        # cos(theta)
        cosine = F.linear(F.normalize(x), F.normalize(self.weight))
        # cos(theta + m1); BUGFIX: clamp 1 - cos^2 into [0, 1] before sqrt so
        # floating-point cosines marginally outside [-1, 1] cannot yield NaN.
        sine = torch.sqrt((1.0 - torch.pow(cosine, 2)).clamp(0, 1))
        phi = cosine * self.cos_m1 - sine * self.sin_m1

        if self.easy_margin:
            phi = torch.where(cosine > 0, phi, cosine)
        else:
            phi = torch.where((cosine - self.th) > 0, phi, cosine - self.mm)

        one_hot = torch.zeros_like(cosine)
        one_hot.scatter_(1, label.view(-1, 1), 1)
        output = (one_hot * phi) + ((1.0 - one_hot) * cosine)  # additive angular margin
        output = output - one_hot * self.m2                    # additive cosine margin
        output = output * self.s
        return output
class SphereLoss(nn.Module):
    """A-Softmax (SphereFace) loss computed from cosine logits.

    Parameters:
        m: angular margin multiplier
        scale: logit scale applied before cross-entropy
        num_class: number of classes
        use_gpu: move intermediate tensors to CUDA when True
    """
    def __init__(self, m, scale, num_class, use_gpu):
        super(SphereLoss, self).__init__()
        self.m = m
        self.scale = scale
        self.num_class = num_class
        self.use_gpu = use_gpu
        self.loss = nn.CrossEntropyLoss()

    def theta_to_psi(self, theta_yi_i):
        """psi(theta) = (-1)^k * cos(m*theta) - 2k, the monotonic surrogate."""
        # BUGFIX: the original referenced the bare, undefined name ``pi``
        # (NameError at runtime); use math.pi.
        k = torch.floor(theta_yi_i * self.m / math.pi)
        # BUGFIX: fill with -1.0 so the sign tensor is floating point; an
        # integer fill made the subsequent pow dtype-fragile.
        sign = torch.full(k.shape, -1.0)
        if self.use_gpu:
            sign = sign.cuda()
        co = torch.pow(sign, k)
        cos_m_theta_yi_i = torch.cos(self.m * theta_yi_i)
        return co * cos_m_theta_yi_i - 2 * k

    def forward(self, y_hat, y):
        # y_hat: (batch, num_class) cosine similarities; y: (batch,) labels.
        y = torch.unsqueeze(y, 0)
        label = torch.reshape(y, (y.shape[1], 1))
        one_hot = torch.zeros(y.shape[1], self.num_class)
        if self.use_gpu:
            one_hot = one_hot.cuda()
        one_hot = one_hot.scatter_(1, label, 1)
        mask = one_hot.to(torch.bool)
        # theta(yi, i): angle between feature i and its target-class weight.
        cos_theta_yi_i = torch.masked_select(y_hat, mask)
        theta_yi_i = torch.acos(cos_theta_yi_i)
        psi_yi_i = self.theta_to_psi(theta_yi_i)

        # Replace the target-class logits with psi, scale, then cross-entropy.
        fc = y_hat * 1.0
        index = torch.arange(y_hat.shape[0])
        fc[index.long(), y.long()] = psi_yi_i[index.long()]
        fc = fc * self.scale

        y = y.squeeze(0)
        loss = self.loss(fc, y)

        return loss
class Flatten(nn.Module):
    """Collapse every dimension after the batch dimension."""
    def forward(self, x):
        return x.view(x.size(0), -1)


class SEModule(nn.Module):
    """Squeeze-and-Excitation channel gate."""
    def __init__(self, channels, reduction):
        super(SEModule, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1, padding=0, bias=False)
        self.relu = nn.ReLU(inplace=True)
        self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1, padding=0, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Squeeze -> bottleneck MLP -> sigmoid gate, then re-weight channels.
        gate = self.avg_pool(x)
        gate = self.relu(self.fc1(gate))
        gate = self.sigmoid(self.fc2(gate))
        return x * gate


class BottleNeck_IR(nn.Module):
    """Improved-residual bottleneck: BN -> conv3x3 -> BN -> PReLU -> conv3x3 -> BN."""
    def __init__(self, in_channel, out_channel, stride):
        super(BottleNeck_IR, self).__init__()
        # Identity-style shortcut when channels match, else 1x1 projection.
        if in_channel == out_channel:
            self.shortcut_layer = nn.MaxPool2d(1, stride)
        else:
            self.shortcut_layer = nn.Sequential(
                nn.Conv2d(in_channel, out_channel, kernel_size=(1, 1), stride=stride, bias=False),
                nn.BatchNorm2d(out_channel))

        self.res_layer = nn.Sequential(
            nn.BatchNorm2d(in_channel),
            nn.Conv2d(in_channel, out_channel, (3, 3), 1, 1, bias=False),
            nn.BatchNorm2d(out_channel),
            nn.PReLU(out_channel),
            nn.Conv2d(out_channel, out_channel, (3, 3), stride, 1, bias=False),
            nn.BatchNorm2d(out_channel))

    def forward(self, x):
        return self.shortcut_layer(x) + self.res_layer(x)
class Bottleneck(namedtuple('Block', ['in_channel', 'out_channel', 'stride'])):
    '''A named tuple describing a ResNet block.'''


def get_block(in_channel, out_channel, num_units, stride=2):
    """One stage: a strided unit followed by ``num_units - 1`` stride-1 units."""
    return [Bottleneck(in_channel, out_channel, stride)] + \
           [Bottleneck(out_channel, out_channel, 1) for i in range(num_units - 1)]


def get_blocks(num_layers):
    """Stage configuration for ResNet-IR depths 50/100/152.

    Raises:
        ValueError: for an unsupported depth. BUGFIX: the original had no
        else branch, so unsupported depths crashed with UnboundLocalError
        on ``return blocks``.
    """
    if num_layers == 50:
        blocks = [
            get_block(in_channel=64, out_channel=64, num_units=3),
            get_block(in_channel=64, out_channel=128, num_units=4),
            get_block(in_channel=128, out_channel=256, num_units=14),
            get_block(in_channel=256, out_channel=512, num_units=3)
        ]
    elif num_layers == 100:
        blocks = [
            get_block(in_channel=64, out_channel=64, num_units=3),
            get_block(in_channel=64, out_channel=128, num_units=13),
            get_block(in_channel=128, out_channel=256, num_units=30),
            get_block(in_channel=256, out_channel=512, num_units=3)
        ]
    elif num_layers == 152:
        blocks = [
            get_block(in_channel=64, out_channel=64, num_units=3),
            get_block(in_channel=64, out_channel=128, num_units=8),
            get_block(in_channel=128, out_channel=256, num_units=36),
            get_block(in_channel=256, out_channel=512, num_units=3)
        ]
    else:
        raise ValueError('num_layers should be 50, 100 or 152, got %s' % num_layers)
    return blocks
def forward(self, x):
    """Stem -> stacked IR bottlenecks -> embedding head."""
    out = self.input_layer(x)
    out = self.body(out)
    return self.output_layer(out)
class Flatten(nn.Module):
    """Collapse all dimensions after the batch dimension."""
    def forward(self, x):
        return x.view(x.size(0), -1)


class ResidualBlock(nn.Module):
    """Pre-activation bottleneck residual block (BN-ReLU 1x1 / 3x3 / 1x1)."""

    def __init__(self, in_channel, out_channel, stride=1):
        super(ResidualBlock, self).__init__()
        self.in_channel = in_channel
        self.out_channel = out_channel
        self.stride = stride

        self.res_bottleneck = nn.Sequential(
            nn.BatchNorm2d(in_channel),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channel, out_channel // 4, 1, 1, bias=False),
            nn.BatchNorm2d(out_channel // 4),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channel // 4, out_channel // 4, 3, stride, padding=1, bias=False),
            nn.BatchNorm2d(out_channel // 4),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channel // 4, out_channel, 1, 1, bias=False))
        self.shortcut = nn.Conv2d(in_channel, out_channel, 1, stride, bias=False)

    def forward(self, x):
        out = self.res_bottleneck(x)
        # Project the identity only when the shape actually changes.
        if self.in_channel != self.out_channel or self.stride != 1:
            identity = self.shortcut(x)
        else:
            identity = x
        out += identity
        return out
def forward(self, x):
    """Trunk-and-mask pass for 56x56 input; output = (1 + mask) * trunk, refined.

    BUGFIX: the original combined mask and trunk with
    ``(1 + out_block6) + out_trunk``; the residual-attention formulation
    (and AttentionModule_stage2 / AttentionModule_stage3 in this file)
    uses ``(1 + mask) * trunk``, so the ``+`` was a typo for ``*``.
    """
    x = self.share_residual_block(x)
    out_trunk = self.trunk_branches(x)

    # Encoder: three pooling levels, each with a skip connection.
    out_pool1 = self.mpool1(x)
    out_block1 = self.mask_block1(out_pool1)
    out_skip_connect1 = self.skip_connect1(out_block1)

    out_pool2 = self.mpool2(out_block1)
    out_block2 = self.mask_block2(out_pool2)
    out_skip_connect2 = self.skip_connect2(out_block2)

    out_pool3 = self.mpool3(out_block2)
    out_block3 = self.mask_block3(out_pool3)

    # Decoder: upsample and merge with the skip connections.
    out_inter3 = self.interpolation3(out_block3) + out_block2
    out = out_inter3 + out_skip_connect2
    out_block4 = self.mask_block4(out)

    out_inter2 = self.interpolation2(out_block4) + out_block1
    out = out_inter2 + out_skip_connect1
    out_block5 = self.mask_block5(out)

    out_inter1 = self.interpolation1(out_block5) + out_trunk
    out_block6 = self.mask_block6(out_inter1)

    out = (1 + out_block6) * out_trunk
    out_last = self.last_block(out)

    return out_last
def forward(self, x):
    """Trunk-and-mask pass for 28x28 input: (1 + mask) * trunk, refined."""
    x = self.first_residual_blocks(x)
    trunk = self.trunk_branches(x)

    down1 = self.mpool1(x)
    mask1 = self.softmax1_blocks(down1)
    skip1 = self.skip1_connection_residual_block(mask1)

    down2 = self.mpool2(mask1)
    mask2 = self.softmax2_blocks(down2)

    # Upsample and merge with the level-1 mask and its skip connection.
    merged = self.interpolation2(mask2) + mask1 + skip1
    mask3 = self.softmax3_blocks(merged)

    up1 = self.interpolation1(mask3) + trunk
    gate = self.softmax4_blocks(up1)
    out = (1 + gate) * trunk
    return self.last_blocks(out)
def forward(self, x):
    """Trunk-and-mask pass for 14x14 input: (1 + mask) * trunk, refined."""
    x = self.first_residual_blocks(x)
    trunk = self.trunk_branches(x)
    mask = self.softmax1_blocks(self.mpool1(x))
    gate = self.softmax2_blocks(self.interpolation1(mask) + trunk)
    return self.last_blocks((1 + gate) * trunk)
def forward(self, x):
    """Stem, then alternating residual/attention stages, then embedding head."""
    stages = (self.conv1, self.mpool1,
              self.residual_block1, self.attention_module1,
              self.residual_block2, self.attention_module2,
              self.residual_block3, self.attention_module3,
              self.residual_block4, self.residual_block5,
              self.residual_block6, self.output_layer)
    out = x
    for stage in stages:
        out = stage(out)
    return out
print(out.data) - out = self.residual_block1(out) - out = self.attention_module1(out) - out = self.residual_block2(out) - out = self.attention_module2(out) - out = self.attention_module2_2(out) - out = self.residual_block3(out) - # print(out.data) - out = self.attention_module3(out) - out = self.attention_module3_2(out) - out = self.attention_module3_3(out) - out = self.residual_block4(out) - out = self.residual_block5(out) - out = self.residual_block6(out) - out = self.output_layer(out) - - return out - - -if __name__ == '__main__': - x = torch.Tensor(2, 3, 112, 112) - net = ResidualAttentionNet_56() - print(net) - - x = net(x) - print(x.shape) diff --git a/face_recognition1/face_feature/model/cbam.py b/face_recognition1/face_feature/model/cbam.py deleted file mode 100644 index e871cada07d18d340e8d1523f27189339827f4d2..0000000000000000000000000000000000000000 --- a/face_recognition1/face_feature/model/cbam.py +++ /dev/null @@ -1,290 +0,0 @@ -#!/usr/bin/env python -# encoding: utf-8 -''' -@author: MingDong -@file: cbam.py -@desc: Convolutional Block Attention Module in ECCV 2018, including channel attention module and spatial attention module. 
class Flatten(nn.Module):
    """Collapse every dimension after the batch dimension."""
    def forward(self, x):
        return x.view(x.size(0), -1)


class SEModule(nn.Module):
    '''Squeeze and Excitation Module'''
    def __init__(self, channels, reduction):
        super(SEModule, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1, padding=0, bias=False)
        self.relu = nn.ReLU(inplace=True)
        self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1, padding=0, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Squeeze -> bottleneck MLP -> sigmoid gate, then re-weight channels.
        gate = self.sigmoid(self.fc2(self.relu(self.fc1(self.avg_pool(x)))))
        return x * gate


class CAModule(nn.Module):
    '''Channel Attention Module'''
    def __init__(self, channels, reduction):
        super(CAModule, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.max_pool = nn.AdaptiveMaxPool2d(1)
        self.shared_mlp = nn.Sequential(
            nn.Conv2d(channels, channels // reduction, kernel_size=1, padding=0, bias=False),
            nn.ReLU(inplace=True),
            nn.Conv2d(channels // reduction, channels, kernel_size=1, padding=0, bias=False))
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Gate built from both average- and max-pooled channel statistics.
        pooled_sum = self.shared_mlp(self.avg_pool(x)) + self.shared_mlp(self.max_pool(x))
        return x * self.sigmoid(pooled_sum)


class SAModule(nn.Module):
    '''Spatial Attention Module'''
    def __init__(self):
        super(SAModule, self).__init__()
        self.conv = nn.Conv2d(2, 1, kernel_size=3, padding=1, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Per-pixel mean/max across channels feed a 3x3 conv gate.
        mean_map = torch.mean(x, 1, True)
        max_map, _ = torch.max(x, 1, True)
        gate = self.sigmoid(self.conv(torch.cat((mean_map, max_map), 1)))
        return x * gate


class BottleNeck_IR(nn.Module):
    '''Improved Residual Bottlenecks'''
    def __init__(self, in_channel, out_channel, stride, dim_match):
        super(BottleNeck_IR, self).__init__()
        self.res_layer = nn.Sequential(
            nn.BatchNorm2d(in_channel),
            nn.Conv2d(in_channel, out_channel, (3, 3), 1, 1, bias=False),
            nn.BatchNorm2d(out_channel),
            nn.PReLU(out_channel),
            nn.Conv2d(out_channel, out_channel, (3, 3), stride, 1, bias=False),
            nn.BatchNorm2d(out_channel))
        # Identity shortcut when shapes match, 1x1 projection otherwise.
        if dim_match:
            self.shortcut_layer = None
        else:
            self.shortcut_layer = nn.Sequential(
                nn.Conv2d(in_channel, out_channel, kernel_size=(1, 1), stride=stride, bias=False),
                nn.BatchNorm2d(out_channel))

    def forward(self, x):
        residual = self.res_layer(x)
        identity = x if self.shortcut_layer is None else self.shortcut_layer(x)
        return identity + residual


class BottleNeck_IR_SE(nn.Module):
    '''Improved Residual Bottlenecks with Squeeze and Excitation Module'''
    def __init__(self, in_channel, out_channel, stride, dim_match):
        super(BottleNeck_IR_SE, self).__init__()
        self.res_layer = nn.Sequential(
            nn.BatchNorm2d(in_channel),
            nn.Conv2d(in_channel, out_channel, (3, 3), 1, 1, bias=False),
            nn.BatchNorm2d(out_channel),
            nn.PReLU(out_channel),
            nn.Conv2d(out_channel, out_channel, (3, 3), stride, 1, bias=False),
            nn.BatchNorm2d(out_channel),
            SEModule(out_channel, 16))
        # Identity shortcut when shapes match, 1x1 projection otherwise.
        if dim_match:
            self.shortcut_layer = None
        else:
            self.shortcut_layer = nn.Sequential(
                nn.Conv2d(in_channel, out_channel, kernel_size=(1, 1), stride=stride, bias=False),
                nn.BatchNorm2d(out_channel))

    def forward(self, x):
        residual = self.res_layer(x)
        identity = x if self.shortcut_layer is None else self.shortcut_layer(x)
        return identity + residual
nn.Conv2d(out_channel, out_channel, (3, 3), stride, 1, bias=False), - nn.BatchNorm2d(out_channel), - CAModule(out_channel, 16)) - if dim_match: - self.shortcut_layer = None - else: - self.shortcut_layer = nn.Sequential( - nn.Conv2d(in_channel, out_channel, kernel_size=(1, 1), stride=stride, bias=False), - nn.BatchNorm2d(out_channel) - ) - - def forward(self, x): - shortcut = x - res = self.res_layer(x) - - if self.shortcut_layer is not None: - shortcut = self.shortcut_layer(x) - - return shortcut + res - -class BottleNeck_IR_SAM(nn.Module): - '''Improved Residual Bottlenecks with Spatial Attention Module''' - def __init__(self, in_channel, out_channel, stride, dim_match): - super(BottleNeck_IR_SAM, self).__init__() - self.res_layer = nn.Sequential(nn.BatchNorm2d(in_channel), - nn.Conv2d(in_channel, out_channel, (3, 3), 1, 1, bias=False), - nn.BatchNorm2d(out_channel), - nn.PReLU(out_channel), - nn.Conv2d(out_channel, out_channel, (3, 3), stride, 1, bias=False), - nn.BatchNorm2d(out_channel), - SAModule()) - if dim_match: - self.shortcut_layer = None - else: - self.shortcut_layer = nn.Sequential( - nn.Conv2d(in_channel, out_channel, kernel_size=(1, 1), stride=stride, bias=False), - nn.BatchNorm2d(out_channel) - ) - - def forward(self, x): - shortcut = x - res = self.res_layer(x) - - if self.shortcut_layer is not None: - shortcut = self.shortcut_layer(x) - - return shortcut + res - -class BottleNeck_IR_CBAM(nn.Module): - '''Improved Residual Bottleneck with Channel Attention Module and Spatial Attention Module''' - def __init__(self, in_channel, out_channel, stride, dim_match): - super(BottleNeck_IR_CBAM, self).__init__() - self.res_layer = nn.Sequential(nn.BatchNorm2d(in_channel), - nn.Conv2d(in_channel, out_channel, (3, 3), 1, 1, bias=False), - nn.BatchNorm2d(out_channel), - nn.PReLU(out_channel), - nn.Conv2d(out_channel, out_channel, (3, 3), stride, 1, bias=False), - nn.BatchNorm2d(out_channel), - CAModule(out_channel, 16), - SAModule() - ) - if dim_match: - 
self.shortcut_layer = None - else: - self.shortcut_layer = nn.Sequential( - nn.Conv2d(in_channel, out_channel, kernel_size=(1, 1), stride=stride, bias=False), - nn.BatchNorm2d(out_channel) - ) - - def forward(self, x): - shortcut = x - res = self.res_layer(x) - - if self.shortcut_layer is not None: - shortcut = self.shortcut_layer(x) - - return shortcut + res - - -filter_list = [64, 64, 128, 256, 512] -def get_layers(num_layers): - if num_layers == 50: - return [3, 4, 14, 3] - elif num_layers == 100: - return [3, 13, 30, 3] - elif num_layers == 152: - return [3, 8, 36, 3] - return None - - -class CBAMResNet(nn.Module): - def __init__(self, num_layers, feature_dim=512, drop_ratio=0.4, mode='ir', filter_list=filter_list): - super(CBAMResNet, self).__init__() - assert num_layers in [50, 100, 152], 'num_layers should be 50, 100 or 152' - assert mode in ['ir', 'ir_se', 'ir_cam', 'ir_sam', 'ir_cbam'], 'mode should be ir, ir_se, ir_cam, ir_sam or ir_cbam' - layers = get_layers(num_layers) - if mode == 'ir': - block = BottleNeck_IR - elif mode == 'ir_se': - block = BottleNeck_IR_SE - elif mode == 'ir_cam': - block = BottleNeck_IR_CAM - elif mode == 'ir_sam': - block = BottleNeck_IR_SAM - elif mode == 'ir_cbam': - block = BottleNeck_IR_CBAM - - self.input_layer = nn.Sequential(nn.Conv2d(3, 64, (3, 3), stride=1, padding=1, bias=False), - nn.BatchNorm2d(64), - nn.PReLU(64)) - self.layer1 = self._make_layer(block, filter_list[0], filter_list[1], layers[0], stride=2) - self.layer2 = self._make_layer(block, filter_list[1], filter_list[2], layers[1], stride=2) - self.layer3 = self._make_layer(block, filter_list[2], filter_list[3], layers[2], stride=2) - self.layer4 = self._make_layer(block, filter_list[3], filter_list[4], layers[3], stride=2) - - self.output_layer = nn.Sequential(nn.BatchNorm2d(512), - nn.Dropout(drop_ratio), - Flatten(), - nn.Linear(512 * 7 * 7, feature_dim), - nn.BatchNorm1d(feature_dim)) - - # weight initialization - for m in self.modules(): - if isinstance(m, 
nn.Conv2d) or isinstance(m, nn.Linear): - nn.init.xavier_uniform_(m.weight) - if m.bias is not None: - nn.init.constant_(m.bias, 0.0) - elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d): - nn.init.constant_(m.weight, 1) - nn.init.constant_(m.bias, 0) - - def _make_layer(self, block, in_channel, out_channel, blocks, stride): - layers = [] - layers.append(block(in_channel, out_channel, stride, False)) - for _ in range(1, blocks): - layers.append(block(out_channel, out_channel, 1, True)) - - return nn.Sequential(*layers) - - def forward(self, x): - x = self.input_layer(x) - x = self.layer1(x) - x = self.layer2(x) - x = self.layer3(x) - x = self.layer4(x) - x = self.output_layer(x) - - return x - -if __name__ == '__main__': - x = torch.Tensor(2, 3, 112, 112) - net = CBAMResNet(50, mode='ir') - - out = net(x) - print(out.shape) diff --git a/face_recognition1/face_feature/model/mobilefacenet.py b/face_recognition1/face_feature/model/mobilefacenet.py deleted file mode 100644 index 874bfe8ed36fef45ab0c634d74b25a37f3d1595d..0000000000000000000000000000000000000000 --- a/face_recognition1/face_feature/model/mobilefacenet.py +++ /dev/null @@ -1,124 +0,0 @@ -#!/usr/bin/env python -# encoding: utf-8 -''' -@author: -@file: mobilefacenet.py -@desc: mobilefacenet model -''' - -import math -import torch -from torch import nn - -MobileFaceNet_BottleNeck_Setting = [ - # t, c , n ,s - [2, 64, 5, 2], - [4, 128, 1, 2], - [2, 128, 6, 1], - [4, 128, 1, 2], - [2, 128, 2, 1] -] - -class BottleNeck(nn.Module): - def __init__(self, inp, oup, stride, expansion): - super(BottleNeck, self).__init__() - self.connect = stride == 1 and inp == oup - - self.conv = nn.Sequential( - # 1*1 conv - nn.Conv2d(inp, inp * expansion, 1, 1, 0, bias=False), - nn.BatchNorm2d(inp * expansion), - nn.PReLU(inp * expansion), - - # 3*3 depth wise conv - nn.Conv2d(inp * expansion, inp * expansion, 3, stride, 1, groups=inp * expansion, bias=False), - nn.BatchNorm2d(inp * expansion), - nn.PReLU(inp * 
expansion), - - # 1*1 conv - nn.Conv2d(inp * expansion, oup, 1, 1, 0, bias=False), - nn.BatchNorm2d(oup), - ) - - def forward(self, x): - if self.connect: - return x + self.conv(x) - else: - return self.conv(x) - - -class ConvBlock(nn.Module): - def __init__(self, inp, oup, k, s, p, dw=False, linear=False): - super(ConvBlock, self).__init__() - self.linear = linear - if dw: - self.conv = nn.Conv2d(inp, oup, k, s, p, groups=inp, bias=False) - else: - self.conv = nn.Conv2d(inp, oup, k, s, p, bias=False) - - self.bn = nn.BatchNorm2d(oup) - if not linear: - self.prelu = nn.PReLU(oup) - - def forward(self, x): - x = self.conv(x) - x = self.bn(x) - if self.linear: - return x - else: - return self.prelu(x) - - -class MobileFaceNet(nn.Module): - def __init__(self, feature_dim=128, bottleneck_setting=MobileFaceNet_BottleNeck_Setting): - super(MobileFaceNet, self).__init__() - self.conv1 = ConvBlock(3, 64, 3, 2, 1) - self.dw_conv1 = ConvBlock(64, 64, 3, 1, 1, dw=True) - - self.cur_channel = 64 - block = BottleNeck - self.blocks = self._make_layer(block, bottleneck_setting) - - self.conv2 = ConvBlock(128, 512, 1, 1, 0) - self.linear7 = ConvBlock(512, 512, 7, 1, 0, dw=True, linear=True) - self.linear1 = ConvBlock(512, feature_dim, 1, 1, 0, linear=True) - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels - m.weight.data.normal_(0, math.sqrt(2. 
/ n)) - elif isinstance(m, nn.BatchNorm2d): - m.weight.data.fill_(1) - m.bias.data.zero_() - - def _make_layer(self, block, setting): - layers = [] - for t, c, n, s in setting: - for i in range(n): - if i == 0: - layers.append(block(self.cur_channel, c, s, t)) - else: - layers.append(block(self.cur_channel, c, 1, t)) - self.cur_channel = c - - return nn.Sequential(*layers) - - def forward(self, x): - x = self.conv1(x) - x = self.dw_conv1(x) - x = self.blocks(x) - x = self.conv2(x) - x = self.linear7(x) - x = self.linear1(x) - x = x.view(x.size(0), -1) - - return x - - -if __name__ == "__main__": - x = torch.Tensor(2, 3, 112, 112) - net = MobileFaceNet() - print(net) - - x = net(x) - print(x.shape) diff --git a/face_recognition1/face_feature/model/resnet.py b/face_recognition1/face_feature/model/resnet.py deleted file mode 100644 index d668756f68a82f027a073e8ff08f2d18f042ab23..0000000000000000000000000000000000000000 --- a/face_recognition1/face_feature/model/resnet.py +++ /dev/null @@ -1,221 +0,0 @@ -#!/usr/bin/env python -# encoding: utf-8 -''' -@author: MingDong -@file: resnet.py -@desc: Original ResNet model, including ResNet18, ResNet34, ResNet50, ResNet101 and ResNet152, we removed the last global average pooling layer - and replaced it with a fully connected layer with dimension of 512. BN is used for fast convergence. 
-''' -import torch -import torch.nn as nn - -def ResNet18(): - model = ResNet(BasicBlock, [2, 2, 2, 2]) - return model - -def ResNet34(): - model = ResNet(BasicBlock, [3, 4, 6, 3]) - return model - -def ResNet50(): - model = ResNet(Bottleneck, [3, 4, 6, 3]) - return model - -def ResNet101(): - model = ResNet(Bottleneck, [3, 4, 23, 3]) - return model - -def ResNet152(): - model = ResNet(Bottleneck, [3, 8, 36, 3]) - return model - -__all__ = ['ResNet', 'ResNet18', 'ResNet34', 'ResNet50', 'ResNet101', 'ResNet152'] - - -def conv3x3(in_planes, out_planes, stride=1): - """3x3 convolution with padding""" - return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) - - -def conv1x1(in_planes, out_planes, stride=1): - """1x1 convolution""" - return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) - - -class BasicBlock(nn.Module): - expansion = 1 - - def __init__(self, inplanes, planes, stride=1, downsample=None): - super(BasicBlock, self).__init__() - self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False) - self.bn1 = nn.BatchNorm2d(planes) - self.relu1 = nn.ReLU(inplace=True) - self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False) - self.bn2 = nn.BatchNorm2d(planes) - self.downsample = downsample - self.stride = stride - self.skip_add = nn.quantized.FloatFunctional() - # Remember to use two independent ReLU for layer fusion. 
- self.relu2 = nn.ReLU(inplace=True) - - def forward(self, x): - identity = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu1(out) - - out = self.conv2(out) - out = self.bn2(out) - - if self.downsample is not None: - identity = self.downsample(x) - - # Use FloatFunctional for addition for quantization compatibility - # out += identity - # out = torch.add(identity, out) - out = self.skip_add.add(identity, out) - out = self.relu2(out) - - return out - - -class Bottleneck(nn.Module): - expansion = 4 - def __init__(self, inplanes, planes, stride=1, downsample=None): - super(Bottleneck, self).__init__() - self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, bias=False) - self.bn1 = nn.BatchNorm2d(planes) - self.relu1 = nn.ReLU(inplace=True) - self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) - self.bn2 = nn.BatchNorm2d(planes) - self.relu2 = nn.ReLU(inplace=True) - self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, stride=1, bias=False) - self.bn3 = nn.BatchNorm2d(planes * self.expansion) - self.downsample = downsample - self.stride = stride - self.skip_add = nn.quantized.FloatFunctional() - self.relu3 = nn.ReLU(inplace=True) - - def forward(self, x): - identity = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu1(out) - - out = self.conv2(out) - out = self.bn2(out) - out = self.relu2(out) - - out = self.conv3(out) - out = self.bn3(out) - - if self.downsample is not None: - identity = self.downsample(x) - - # out += identity - # out = torch.add(identity, out) - out = self.skip_add.add(identity, out) - out = self.relu3(out) - - return out - - -class Flatten(nn.Module): - def forward(self, x): - # return input.view(input.size(0), -1) - x = x.reshape(x.size(0), -1) - return torch.unsqueeze(torch.unsqueeze(x, 2), 3) - - -class ResNet(nn.Module): - - def __init__(self, block, layers, feature_dim=512, drop_ratio=0.4, zero_init_residual=False): - super(ResNet, 
self).__init__() - self.inplanes = 64 - self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False) - self.bn1 = nn.BatchNorm2d(64) - self.relu = nn.ReLU(inplace=True) - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) - self.layer1 = self._make_layer(block, 64, layers[0]) - self.layer2 = self._make_layer(block, 128, layers[1], stride=2) - self.layer3 = self._make_layer(block, 256, layers[2], stride=2) - self.layer4 = self._make_layer(block, 512, layers[3], stride=2) - - self.output_layer = nn.Sequential(nn.BatchNorm2d(512 * block.expansion), - nn.Dropout(drop_ratio), - Flatten(), - nn.Conv2d(512 * block.expansion * 7 * 7, feature_dim, 1), - nn.BatchNorm2d(feature_dim), - nn.Flatten()) - - # self.output_bn2d = nn.BatchNorm2d(512 * block.expansion) - # self.output_drop = nn.Dropout(drop_ratio) - # self.output_linear = nn.Linear(512 * block.expansion * 7 * 7, feature_dim) - # self.output_bn1d = nn.BatchNorm1d(feature_dim) - # - # self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) - # self.fc = nn.Linear(512 * block.expansion, feature_dim) - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - elif isinstance(m, nn.BatchNorm2d): - nn.init.constant_(m.weight, 1) - nn.init.constant_(m.bias, 0) - - # Zero-initialize the last BN in each residual branch, - # so that the residual branch starts with zeros, and each residual block behaves like an identity. 
- # This improves the checkpoints by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 - if zero_init_residual: - for m in self.modules(): - if isinstance(m, Bottleneck): - nn.init.constant_(m.bn3.weight, 0) - elif isinstance(m, BasicBlock): - nn.init.constant_(m.bn2.weight, 0) - - def _make_layer(self, block, planes, blocks, stride=1): - downsample = None - if stride != 1 or self.inplanes != planes * block.expansion: - downsample = nn.Sequential( - conv1x1(self.inplanes, planes * block.expansion, stride), - nn.BatchNorm2d(planes * block.expansion), - ) - - layers = [] - layers.append(block(self.inplanes, planes, stride, downsample)) - self.inplanes = planes * block.expansion - for _ in range(1, blocks): - layers.append(block(self.inplanes, planes)) - - return nn.Sequential(*layers) - - def forward(self, x): - x = self.conv1(x) - x = self.bn1(x) - x = self.relu(x) - x = self.maxpool(x) - - x = self.layer1(x) - x = self.layer2(x) - x = self.layer3(x) - x = self.layer4(x) - - x = self.output_layer(x) - # x = self.output_bn2d(x) - # x = self.output_drop(x) - # x = torch.flatten(x, 1) - # x = self.output_linear(x) - # x = self.output_bn1d(x) - - return x - - -if __name__ == "__main__": - x = torch.Tensor(2, 3, 112, 112) - net = ResNet50() - print(net) - - x = net(x) - print(x.shape) diff --git a/face_recognition1/face_feature/model/spherenet.py b/face_recognition1/face_feature/model/spherenet.py deleted file mode 100644 index aee0360bba84e6c9c7b44e8664ba911370976516..0000000000000000000000000000000000000000 --- a/face_recognition1/face_feature/model/spherenet.py +++ /dev/null @@ -1,89 +0,0 @@ -""" -@author: MingDong -@file: spherenet.py -@desc: A 64 layer residual checkpoints struture used in sphereface and cosface, for fast convergence, I add BN after every Conv layer. 
-""" - -import torch -import torch.nn as nn - - -class Block(nn.Module): - def __init__(self, channels): - super(Block, self).__init__() - self.conv1 = nn.Conv2d(channels, channels, 3, 1, 1, bias=False) - self.bn1 = nn.BatchNorm2d(channels) - self.prelu1 = nn.PReLU(channels) - self.conv2 = nn.Conv2d(channels, channels, 3, 1, 1, bias=False) - self.bn2 = nn.BatchNorm2d(channels) - self.prelu2 = nn.PReLU(channels) - - def forward(self, x): - short_cut = x - x = self.conv1(x) - x = self.bn1(x) - x = self.prelu1(x) - x = self.conv2(x) - x = self.bn2(x) - x = self.prelu2(x) - - return x + short_cut - - -class SphereNet(nn.Module): - def __init__(self, num_layers = 20, feature_dim=512): - super(SphereNet, self).__init__() - assert num_layers in [20, 64], 'SphereNet num_layers should be 20 or 64' - if num_layers == 20: - layers = [1, 2, 4, 1] - elif num_layers == 64: - layers = [3, 7, 16, 3] - else: - raise ValueError('sphere' + str(num_layers) + " IS NOT SUPPORTED! (sphere20 or sphere64)") - - filter_list = [3, 64, 128, 256, 512] - block = Block - self.layer1 = self._make_layer(block, filter_list[0], filter_list[1], layers[0], stride=2) - self.layer2 = self._make_layer(block, filter_list[1], filter_list[2], layers[1], stride=2) - self.layer3 = self._make_layer(block, filter_list[2], filter_list[3], layers[2], stride=2) - self.layer4 = self._make_layer(block, filter_list[3], filter_list[4], layers[3], stride=2) - self.fc = nn.Linear(512 * 7 * 7, feature_dim) - self.last_bn = nn.BatchNorm1d(feature_dim) - - for m in self.modules(): - if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear): - if m.bias is not None: - nn.init.xavier_uniform_(m.weight) - nn.init.constant_(m.bias, 0) - else: - nn.init.normal_(m.weight, 0, 0.01) - - def _make_layer(self, block, inplanes, planes, num_units, stride): - layers = [] - layers.append(nn.Conv2d(inplanes, planes, 3, stride, 1)) - layers.append(nn.BatchNorm2d(planes)) - layers.append(nn.PReLU(planes)) - for _ in range(num_units): - 
layers.append(block(planes)) - - return nn.Sequential(*layers) - - def forward(self, x): - x = self.layer1(x) - x = self.layer2(x) - x = self.layer3(x) - x = self.layer4(x) - - x = x.view(x.size(0), -1) - x = self.fc(x) - x = self.last_bn(x) - - return x - - -if __name__ == '__main__': - x = torch.Tensor(2, 3, 112, 112) - net = SphereNet(num_layers=64, feature_dim=512) - - out = net(x) - print(out.shape) diff --git a/face_recognition1/face_feature/run_qat_quant.py b/face_recognition1/face_feature/run_qat_quant.py deleted file mode 100644 index 192d5fe3e2cbf4c6575755c0218f519bfe6e94c0..0000000000000000000000000000000000000000 --- a/face_recognition1/face_feature/run_qat_quant.py +++ /dev/null @@ -1,57 +0,0 @@ -""" -@author: MingDong -@file: run_qat_quant.py -@desc: quantize model and convert onnx - -""" - -import os -import torch - - -def load_torchscript_model(model_filepath, device): - - model = torch.jit.load(model_filepath, map_location=device) - - return model - - -def main(): - device = torch.device("cpu:0") - model_dir = "checkpoints" - - quantized_model_filename = "resnet50_quantized_casia.pt" - quantized_model_filepath = os.path.join(model_dir, quantized_model_filename) - - # Load quantized model. 
- quantized_jit_model = load_torchscript_model(model_filepath=quantized_model_filepath, device=device) - print(quantized_jit_model) - - quantized_jit_model.to(device) - quantized_jit_model.eval() - - x = torch.rand(size=(1, 3, 112, 112)).to(device) - - with torch.no_grad(): - quantized_jit_model.quant(x) - print(quantized_jit_model) - - -def convert_fuse_model(): - backbone_path = 'checkpoints/resnet50_Iter_486000_net.ckpt' - margin_path = 'checkpoints/resnet50_Iter_486000_margin.ckpt' - device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - - model = None #FuseResNet50() - model.backbone.load_state_dict(torch.load(backbone_path, map_location=device)['net_state_dict']) - model.margin.load_state_dict(torch.load(margin_path, map_location=device)['net_state_dict']) - model.eval() - - dummy_input = torch.randn(1, 3, 112, 112).to(device) - torch.onnx.export(model, (dummy_input, 0), 'fuse.onnx', verbose=True, input_names=["input", "label"], - output_names=["output"], training=False, opset_version=11) - - -if __name__ == '__main__': - # main() - convert_fuse_model() diff --git a/face_recognition1/face_feature/train.py b/face_recognition1/face_feature/train.py deleted file mode 100644 index 88733cbb52b92264156df1fae4dd5f22292fe937..0000000000000000000000000000000000000000 --- a/face_recognition1/face_feature/train.py +++ /dev/null @@ -1,258 +0,0 @@ -#!/usr/bin/env python -# encoding: utf-8 -""" -@author: MingDong -@file: train.py -@desc: train script for deep face recognition -""" - -import os -import argparse -import time -from datetime import datetime -import numpy as np -import torch.utils.data -import torch.optim as optim -import torchvision.transforms as transforms -from torch.optim import lr_scheduler -from torch.nn import DataParallel -from model.mobilefacenet import MobileFaceNet -from model.resnet import ResNet50 -from model.cbam import CBAMResNet -from model.attention import ResidualAttentionNet_56, ResidualAttentionNet_92 -from 
margin.ArcMarginProduct import ArcMarginProduct -from margin.MultiMarginProduct import MultiMarginProduct -from margin.CosineMarginProduct import CosineMarginProduct -from margin.SphereMarginProduct import SphereMarginProduct -from margin.InnerProduct import InnerProduct -from utils.visualize import Visualizer -from utils.logging import init_log -from dataloader.casia_webface import CASIAWebFace -from dataloader.lfw import LFW -from dataloader.agedb import AgeDB30 -from dataloader.cfp import CFP_FP - -from eval_lfw import evaluation_10_fold, getFeatureFromTorch - - -def train(args): - # gpu init - multi_gpus = False - if len(args.gpus.split(',')) > 1: - multi_gpus = True - os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus - device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - - # log init - save_dir = os.path.join(args.save_dir, args.model_pre + args.backbone.upper() + '_' + datetime.now().strftime('%Y%m%d_%H%M%S')) - if os.path.exists(save_dir): - raise NameError('model dir exists!') - os.makedirs(save_dir) - logging = init_log(save_dir) - _print = logging.info - - # dataloader loader - transform = transforms.Compose([ - transforms.ToTensor(), # range [0, 255] -> [0.0,1.0] - transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) # range [0.0, 1.0] -> [-1.0,1.0] - ]) - # validation dataloader - trainset = CASIAWebFace(args.train_root, args.train_file_list, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, - shuffle=True, num_workers=4, drop_last=False) - # test dataloader - lfwdataset = LFW(args.lfw_test_root, args.lfw_file_list, transform=transform) - lfwloader = torch.utils.data.DataLoader(lfwdataset, batch_size=128, - shuffle=False, num_workers=4, drop_last=False) - agedbdataset = AgeDB30(args.agedb_test_root, args.agedb_file_list, transform=transform) - agedbloader = torch.utils.data.DataLoader(agedbdataset, batch_size=128, - shuffle=False, num_workers=4, drop_last=False) - 
cfpfpdataset = CFP_FP(args.cfpfp_test_root, args.cfpfp_file_list, transform=transform) - cfpfploader = torch.utils.data.DataLoader(cfpfpdataset, batch_size=128, - shuffle=False, num_workers=4, drop_last=False) - - # define backbone and margin layer - if args.backbone == 'MobileFace': - net = MobileFaceNet(feature_dim=args.feature_dim) - elif args.backbone == 'Res50': - net = ResNet50() - elif args.backbone == 'Res50_IR': - net = CBAMResNet(50, feature_dim=args.feature_dim, mode='ir') - elif args.backbone == 'SERes50_IR': - net = CBAMResNet(50, feature_dim=args.feature_dim, mode='ir_se') - elif args.backbone == 'Res100_IR': - net = CBAMResNet(100, feature_dim=args.feature_dim, mode='ir') - elif args.backbone == 'SERes100_IR': - net = CBAMResNet(100, feature_dim=args.feature_dim, mode='ir_se') - elif args.backbone == 'Attention_56': - net = ResidualAttentionNet_56(feature_dim=args.feature_dim) - elif args.backbone == 'Attention_92': - net = ResidualAttentionNet_92(feature_dim=args.feature_dim) - else: - print(args.backbone, ' is not available!') - - if args.margin_type == 'ArcFace': - margin = ArcMarginProduct(args.feature_dim, trainset.class_nums, s=args.scale_size) - elif args.margin_type == 'MultiMargin': - margin = MultiMarginProduct(args.feature_dim, trainset.class_nums, s=args.scale_size) - elif args.margin_type == 'CosFace': - margin = CosineMarginProduct(args.feature_dim, trainset.class_nums, s=args.scale_size) - elif args.margin_type == 'Softmax': - margin = InnerProduct(args.feature_dim, trainset.class_nums) - elif args.margin_type == 'SphereFace': - margin = SphereMarginProduct(args.feature_dim, trainset.class_nums) - else: - print(args.margin_type, 'is not available!') - - if args.resume: - print('resume the model parameters from: ', args.net_path, args.margin_path) - net.load_state_dict(torch.load(args.net_path)['net_state_dict']) - margin.load_state_dict(torch.load(args.margin_path)['net_state_dict']) - - # define optimizers for different layer - 
criterion = torch.nn.CrossEntropyLoss().to(device) - optimizer_ft = optim.SGD([ - {'params': net.parameters(), 'weight_decay': 5e-4}, - {'params': margin.parameters(), 'weight_decay': 5e-4} - ], lr=0.1, momentum=0.9, nesterov=True) - exp_lr_scheduler = lr_scheduler.MultiStepLR(optimizer_ft, milestones=[6, 11, 16], gamma=0.1) - - if multi_gpus: - net = DataParallel(net).to(device) - margin = DataParallel(margin).to(device) - else: - net = net.to(device) - margin = margin.to(device) - - best_lfw_acc = 0.0 - best_lfw_iters = 0 - best_agedb30_acc = 0.0 - best_agedb30_iters = 0 - best_cfp_fp_acc = 0.0 - best_cfp_fp_iters = 0 - total_iters = 0 - vis = Visualizer(env=args.model_pre + args.backbone) - for epoch in range(1, args.total_epoch + 1): - # train model - _print(f"Train Epoch: {epoch}/{args.total_epoch} ...") - net.train() - - since = time.time() - for data in trainloader: - img, label = data[0].to(device), data[1].to(device) - optimizer_ft.zero_grad() - - raw_logits = net(img) - output = margin(raw_logits, label) - total_loss = criterion(output, label) - total_loss.backward() - optimizer_ft.step() - - total_iters += 1 - # print train information - if total_iters % 100 == 0: - # current training accuracy - _, predict = torch.max(output.data, 1) - total = label.size(0) - correct = (np.array(predict.cpu()) == np.array(label.data.cpu())).sum() - time_cur = (time.time() - since) / 100 - since = time.time() - vis.plot_curves({'softmax loss': total_loss.item()}, iters=total_iters, title='train loss', - xlabel='iters', ylabel='train loss') - vis.plot_curves({'train accuracy': correct / total}, iters=total_iters, title='train accuracy', xlabel='iters', - ylabel='train accuracy') - - _print(f"Iters: {total_iters:0>6d}/[{epoch:0>2d}], loss: {total_loss.item():.4f}, train_accuracy: " - f"{correct/total:.4f}, time: {time_cur:.2f} s/iter, learning rate: {exp_lr_scheduler.get_lr()[0]}") - - # save model - if total_iters % args.save_freq == 0: - msg = f'Saving checkpoint: 
{total_iters}' - _print(msg) - if multi_gpus: - net_state_dict = net.module.state_dict() - margin_state_dict = margin.module.state_dict() - else: - net_state_dict = net.state_dict() - margin_state_dict = margin.state_dict() - if not os.path.exists(save_dir): - os.mkdir(save_dir) - torch.save({ - 'iters': total_iters, - 'net_state_dict': net_state_dict}, - os.path.join(save_dir, f'Iter_{total_iters:06}_net.ckpt')) - torch.save({ - 'iters': total_iters, - 'net_state_dict': margin_state_dict}, - os.path.join(save_dir, f'Iter_{total_iters:06}_margin.ckpt')) - - # test accuracy - if total_iters % args.test_freq == 0: - - # test model on lfw - net.eval() - getFeatureFromTorch('result/cur_lfw_result.mat', net, device, lfwdataset, lfwloader) - lfw_accs = evaluation_10_fold('result/cur_lfw_result.mat') - _print(f'LFW Ave Accuracy: {np.mean(lfw_accs) * 100:.4f}') - if best_lfw_acc <= np.mean(lfw_accs) * 100: - best_lfw_acc = np.mean(lfw_accs) * 100 - best_lfw_iters = total_iters - - # test model on AgeDB30 - getFeatureFromTorch('result/cur_agedb30_result.mat', net, device, agedbdataset, agedbloader) - age_accs = evaluation_10_fold('result/cur_agedb30_result.mat') - _print(f'AgeDB-30 Ave Accuracy: {np.mean(age_accs) * 100:.4f}') - if best_agedb30_acc <= np.mean(age_accs) * 100: - best_agedb30_acc = np.mean(age_accs) * 100 - best_agedb30_iters = total_iters - - # test model on CFP-FP - getFeatureFromTorch('result/cur_cfpfp_result.mat', net, device, cfpfpdataset, cfpfploader) - cfp_accs = evaluation_10_fold('result/cur_cfpfp_result.mat') - _print(f'CFP-FP Ave Accuracy: {np.mean(cfp_accs) * 100:.4f}') - if best_cfp_fp_acc <= np.mean(cfp_accs) * 100: - best_cfp_fp_acc = np.mean(cfp_accs) * 100 - best_cfp_fp_iters = total_iters - _print(f'Current Best Accuracy: LFW: {best_lfw_acc:.4f} in iters: {best_lfw_iters}, AgeDB-30: {best_agedb30_acc:.4f} in iters: ' - f'{best_agedb30_iters} and CFP-FP: {best_cfp_fp_acc:.4f} in iters: {best_cfp_fp_iters}') - - vis.plot_curves({'lfw': 
np.mean(lfw_accs), 'agedb-30': np.mean(age_accs), 'cfp-fp': np.mean(cfp_accs)}, iters=total_iters, - title='test accuracy', xlabel='iters', ylabel='test accuracy') - net.train() - - exp_lr_scheduler.step() - _print(f'Finally Best Accuracy: LFW: {best_lfw_acc:.4f} in iters: {best_lfw_iters}, ' - f'AgeDB-30: {best_agedb30_acc:.4f} in iters: {best_agedb30_iters} and CFP-FP: {best_cfp_fp_acc:.4f} in iters: {best_cfp_fp_iters}') - print('finishing training') - - -if __name__ == '__main__': - parser = argparse.ArgumentParser(description='PyTorch for deep face recognition') - parser.add_argument('--train_root', type=str, default='/datasets/public2/upload/faces_emore_images', help='train image root') - parser.add_argument('--train_file_list', type=str, default='/datasets/public2/upload/faces_emore/faces_emore.list', help='train list') - parser.add_argument('--lfw_test_root', type=str, default='/datasets/public1/upload/datasets/lfw', help='lfw image root') - parser.add_argument('--lfw_file_list', type=str, default='/datasets/public1/upload/datasets/lfw_pair.txt', help='lfw pair file list') - parser.add_argument('--agedb_test_root', type=str, default='/datasets/public1/upload/datasets/agedb_30', help='agedb image root') - parser.add_argument('--agedb_file_list', type=str, default='/datasets/public1/upload/datasets/agedb_30_pair.txt', help='agedb pair file list') - parser.add_argument('--cfpfp_test_root', type=str, default='/datasets/public1/upload/datasets/cfp_fp', help='agedb image root') - parser.add_argument('--cfpfp_file_list', type=str, default='/datasets/public1/upload/datasets/cfp_fp_pair.txt', help='agedb pair file list') - - parser.add_argument('--backbone', type=str, default='Res50', help='MobileFace, Res50_IR, SERes50_IR, Res100_IR, SERes100_IR, Attention_56, Attention_92') - parser.add_argument('--margin_type', type=str, default='ArcFace', help='ArcFace, CosFace, SphereFace, MultiMargin, Softmax') - parser.add_argument('--feature_dim', type=int, default=512, 
help='feature dimension, 128 or 512') - parser.add_argument('--scale_size', type=float, default=32.0, help='scale size') - parser.add_argument('--batch_size', type=int, default=128, help='batch size') - parser.add_argument('--total_epoch', type=int, default=18, help='total epochs') - - parser.add_argument('--save_freq', type=int, default=10000, help='save frequency') - parser.add_argument('--test_freq', type=int, default=10000, help='test frequency') - parser.add_argument('--resume', type=int, default=True, help='resume model') - parser.add_argument('--net_path', type=str, default='./checkpoints/resnet50_Iter_486000_net.ckpt', help='resume model') - parser.add_argument('--margin_path', type=str, default='./checkpoints/resnet50_Iter_48600_margin.ckpt', help='resume model') - parser.add_argument('--save_dir', type=str, default='./checkpoints', help='model save dir') - parser.add_argument('--model_pre', type=str, default='Res50_', help='model prefix') - parser.add_argument('--gpus', type=str, default='0', help='model prefix') - - args = parser.parse_args() - - train(args) diff --git a/face_recognition1/face_feature/utils/__init__.py b/face_recognition1/face_feature/utils/__init__.py deleted file mode 100644 index 7b53b7d8e6981d9e7e9dbb03cab143237cf9234b..0000000000000000000000000000000000000000 --- a/face_recognition1/face_feature/utils/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -#!/usr/bin/env python -# encoding: utf-8 -''' -@author: MingDong -@file: __init__.py.py -@desc: -''' \ No newline at end of file diff --git a/face_recognition1/face_feature/utils/load_images_from_bin.py b/face_recognition1/face_feature/utils/load_images_from_bin.py deleted file mode 100644 index cfc07569e68fce2cc3922178f32dc4e20c498e3a..0000000000000000000000000000000000000000 --- a/face_recognition1/face_feature/utils/load_images_from_bin.py +++ /dev/null @@ -1,66 +0,0 @@ -#!/usr/bin/env python -# encoding: utf-8 -''' -@author: MingDong -@file: load_images_from_bin.py -@desc: For AgeDB-30 and 
CFP-FP test dataloader, we use the mxnet binary file provided by insightface, this is the tool to restore - the aligned images from mxnet binary file. - You should install a mxnet-cpu first, just do 'pip install mxnet==1.2.1' is ok. - For train dataloader, insightface provide a mxnet .rec file, just install a mxnet-cpu for extract images -''' - - -import os -import argparse -import pickle -import cv2 -from tqdm import tqdm -import mxnet as mx - - -def load_mx_rec(rec_path): - save_path = os.path.join(rec_path, 'emore_images_2') - if not os.path.exists(save_path): - os.makedirs(save_path) - - imgrec = mx.recordio.MXIndexedRecordIO(os.path.join(rec_path, 'train.idx'), os.path.join(rec_path, 'train.rec'), 'r') - img_info = imgrec.read_idx(0) - header,_ = mx.recordio.unpack(img_info) - max_idx = int(header.label[0]) - for idx in tqdm(range(1,max_idx)): - img_info = imgrec.read_idx(idx) - header, img = mx.recordio.unpack_img(img_info) - label = int(header.label) - #img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) - #img = Image.fromarray(img) - label_path = os.path.join(save_path, str(label).zfill(6)) - if not os.path.exists(label_path): - os.makedirs(label_path) - #img.save(os.path.join(label_path, str(idx).zfill(8) + '.jpg'), quality=95) - cv2.imwrite(os.path.join(label_path, str(idx).zfill(8) + '.jpg'), img) - - -def load_image_from_bin(bin_path, save_dir): - if not os.path.exists(save_dir): - os.makedirs(save_dir) - file = open(os.path.join(save_dir, '../', 'lfw_pair.txt'), 'w') - bins, issame_list = pickle.load(open(bin_path, 'rb'), encoding='bytes') - for idx in tqdm(range(len(bins))): - _bin = bins[idx] - img = mx.image.imdecode(_bin).asnumpy() - img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) - cv2.imwrite(os.path.join(save_dir, str(idx+1).zfill(5)+'.jpg'), img) - if idx % 2 == 0: - label = 1 if issame_list[idx//2] else -1 - file.write(str(idx+1).zfill(5) + '.jpg' + ' ' + str(idx+2).zfill(5) +'.jpg' + ' ' + str(label) + '\n') - - -if __name__ == '__main__': - parser = 
argparse.ArgumentParser(description='Load image from Binary based dataset') - parser.add_argument('--bin_path', type=str, default="D:/face_data_emore/faces_webface_112x112/lfw.bin") - parser.add_argument('--save_dir', type=str, default="D:/face_data_emore/faces_webface_112x112/lfw") - parser.add_argument('--rec_path', type=str, default="D:/face_data_emore/faces_emore") - args = parser.parse_args() - - # load_mx_rec(args) - #load_image_from_bin(bin_path, save_dir) diff --git a/face_recognition1/face_feature/utils/logging.py b/face_recognition1/face_feature/utils/logging.py deleted file mode 100644 index 336cbba090aa44c9139c274d37e6ab90a31e4ece..0000000000000000000000000000000000000000 --- a/face_recognition1/face_feature/utils/logging.py +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env python -# encoding: utf-8 -''' -@author: MingDong -@file: logging.py -@desc: logging tools -''' - -from __future__ import print_function -import os -import logging - - -def init_log(output_dir): - logging.basicConfig(level=logging.DEBUG, - format='%(asctime)s %(message)s', - datefmt='%Y%m%d-%H:%M:%S', - filename=os.path.join(output_dir, 'log.log'), - filemode='w') - console = logging.StreamHandler() - console.setLevel(logging.INFO) - logging.getLogger('').addHandler(console) - return logging - - -if __name__ == '__main__': - pass diff --git a/face_recognition1/face_feature/utils/plot_logit.py b/face_recognition1/face_feature/utils/plot_logit.py deleted file mode 100644 index cd26f256fb881d64fa1aee700c7f9e0e9869106a..0000000000000000000000000000000000000000 --- a/face_recognition1/face_feature/utils/plot_logit.py +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/env python -# encoding: utf-8 -''' -@author: MingDong -@file: plot_logit.py -@desc: plot the logit corresponding to shpereface, cosface, arcface and so on. 
-''' - -import math -import torch -import matplotlib.pyplot as plt -import numpy as np - -def softmax(theta): - return torch.cos(theta) - -def sphereface(theta, m=4): - return (torch.cos(m * theta) + 20 * torch.cos(theta)) / (20 + 1) - -def cosface(theta, m): - return torch.cos(theta) - m - -def arcface(theta, m): - return torch.cos(theta + m) - -def multimargin(theta, m1, m2): - return torch.cos(theta + m1) - m2 - - -theta = torch.arange(0, math.pi, 0.001) -print(theta.type) - -x = theta.numpy() -y_softmax = softmax(theta).numpy() -y_cosface = cosface(theta, 0.35).numpy() -y_arcface = arcface(theta, 0.5).numpy() - -y_multimargin_1 = multimargin(theta, 0.2, 0.3).numpy() -y_multimargin_2 = multimargin(theta, 0.2, 0.4).numpy() -y_multimargin_3 = multimargin(theta, 0.3, 0.2).numpy() -y_multimargin_4 = multimargin(theta, 0.3, 0.3).numpy() -y_multimargin_5 = multimargin(theta, 0.4, 0.2).numpy() -y_multimargin_6 = multimargin(theta, 0.4, 0.3).numpy() - -plt.plot(x, y_softmax, x, y_cosface, x, y_arcface, x, y_multimargin_1, x, y_multimargin_2, x, y_multimargin_3, x, y_multimargin_4, x, y_multimargin_5, x, y_multimargin_6) -plt.legend(['Softmax(0.00, 0.00)', 'CosFace(0.00, 0.35)', 'ArcFace(0.50, 0.00)', 'MultiMargin(0.20, 0.30)', 'MultiMargin(0.20, 0.40)', 'MultiMargin(0.30, 0.20)', 'MultiMargin(0.30, 0.30)', 'MultiMargin(0.40, 0.20)', 'MultiMargin(0.40, 0.30)']) -plt.grid(False) -plt.xlim((0, 3/4*math.pi)) -plt.ylim((-1.2, 1.2)) - -plt.xticks(np.arange(0, 2.4, 0.3)) -plt.yticks(np.arange(-1.2, 1.2, 0.2)) -plt.xlabel('Angular between the Feature and Target Center (Radian: 0 - 3/4 Pi)') -plt.ylabel('Target Logit') - -plt.savefig('target logits') diff --git a/face_recognition1/face_feature/utils/plot_theta.py b/face_recognition1/face_feature/utils/plot_theta.py deleted file mode 100644 index eb28581c76d8128b258ef7aeab1e7e7bdc14aa03..0000000000000000000000000000000000000000 --- a/face_recognition1/face_feature/utils/plot_theta.py +++ /dev/null @@ -1,120 +0,0 @@ 
-#!/usr/bin/env python -# encoding: utf-8 -''' -@author: MingDong -@file: plot_theta.py -@desc: plot theta distribution between weight and feature vector -''' - -import os -import argparse -import numpy as np -import torch -import torch.nn.functional as F -from matplotlib import pyplot as plt -from torchvision import transforms -from backbone.mobilefacenet import MobileFaceNet -from margin.ArcMarginProduct import ArcMarginProduct -from dataset.casia_webface import CASIAWebFace -plt.switch_backend('agg') - - -def get_train_loader(img_folder, filelist): - print('Loading dataloader...') - transform = transforms.Compose([ - transforms.ToTensor(), - transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) - ]) - trainset = CASIAWebFace(img_folder, filelist, transform=transform) - trainloader = torch.utils.data.DataLoader(trainset, batch_size=100, - shuffle=False, num_workers=8, drop_last=False) - return trainloader - -def load_model(backbone_state_dict, margin_state_dict, device): - - # load model - net = MobileFaceNet() - net.load_state_dict(torch.load(backbone_state_dict)['net_state_dict']) - margin = ArcMarginProduct(in_feature=128, out_feature=10575) - margin.load_state_dict(torch.load(margin_state_dict)['net_state_dict']) - - net = net.to(device) - margin = margin.to(device) - - return net.eval(), margin.eval() - - -if __name__ == '__main__': - parser = argparse.ArgumentParser(description='plot theta distribution of trained model') - parser.add_argument('--img_root', type=str, default='/media/ramdisk/webface_align_112', help='train image root') - parser.add_argument('--file_list', type=str, default='/media/ramdisk/webface_align_train.list', help='train list') - parser.add_argument('--backbone_file', type=str, default='../model/Paper_MOBILEFACE_20190103_111830/Iter_088000_net.ckpt', help='backbone state dict file') - parser.add_argument('--margin_file', type=str, default='../model/Paper_MOBILEFACE_20190103_111830/Iter_088000_margin.ckpt', help='backbone state 
dict file') - parser.add_argument('--gpus', type=str, default='0', help='model prefix, single gpu only') - args = parser.parse_args() - - # gpu init - os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus - device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - - # load pretrain model - trained_net, trained_margin = load_model(args.backbone_file, args.margin_file, device) - - # initial model - initial_net = MobileFaceNet() - initial_margin = ArcMarginProduct() - initial_net = initial_net.to(device).eval() - initial_margin = initial_margin.to(device).eval() - - # image dataloader - image_loader = get_train_loader(args.img_root, args.file_list) - theta_trained = [] - theta_initial = [] - for data in image_loader: - img, label = data[0].to(device), data[1].to(device) - # pretrained - embedding = trained_net(img) - cos_theta = F.linear(F.normalize(embedding), F.normalize(trained_margin.weight)) - cos_theta = cos_theta.clamp(-1, 1).detach().cpu().numpy() - for i in range(img.shape[0]): - cos_trget = cos_theta[i][label[i]] - theta_trained.append(np.arccos(cos_trget) / np.pi * 180) - # initial - embedding = initial_net(img) - cos_theta = F.linear(F.normalize(embedding), F.normalize(initial_margin.weight)) - cos_theta = cos_theta.clamp(-1, 1).detach().cpu().numpy() - for i in range(img.shape[0]): - cos_trget = cos_theta[i][label[i]] - theta_initial.append(np.arccos(cos_trget) / np.pi * 180) - - # write theta list to txt file - # trained_theta_file = open('arcface_theta.txt', 'w') - # initial_theta_file = open('initial_theta.txt', 'w') - # for item in theta_trained: - # trained_theta_file.write(str(item)) - # trained_theta_file.write('\n') - # for item in theta_initial: - # initial_theta_file.write(str(item)) - # initial_theta_file.write('\n') - - # plot the theta, read theta from txt first - # theta_trained = [] - # theta_initial = [] - # trained_theta_file = open('arcface_theta.txt', 'r') - # initial_theta_file = open('initial_theta.txt', 'r') - # lines = 
trained_theta_file.readlines() - # for line in lines: - # theta_trained.append(float(line.strip('\n')[0])) - # lines = initial_theta_file.readlines() - # for line in lines: - # theta_initial.append(float(line.split('\n')[0])) - - print(len(theta_trained), len(theta_initial)) - plt.figure() - plt.xlabel('Theta') - plt.ylabel('Numbers') - plt.title('Theta Distribution') - plt.hist(theta_trained, bins=180, normed=0) - plt.hist(theta_initial, bins=180, normed=0) - plt.legend(['trained theta distribution', 'initial theta distribution']) - plt.savefig('theta_distribution_hist.jpg') diff --git a/face_recognition1/face_feature/utils/visualize.py b/face_recognition1/face_feature/utils/visualize.py deleted file mode 100644 index a43659cf3cf85a6c8f4ae0d518c49dc918ba84ad..0000000000000000000000000000000000000000 --- a/face_recognition1/face_feature/utils/visualize.py +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/env python -# encoding: utf-8 -''' -@author: MingDong -@file: visualize.py -@desc: visualize tools -''' - -import time -import visdom -import numpy as np - -class Visualizer(): - def __init__(self, env='default', **kwargs): - self.vis = visdom.Visdom(env=env, **kwargs) - self.index = 1 - - def plot_curves(self, d, iters, title='loss', xlabel='iters', ylabel='accuracy'): - name = list(d.keys()) - val = list(d.values()) - if len(val) == 1: - y = np.array(val) - else: - y = np.array(val).reshape(-1, len(val)) - self.vis.line(Y=y, - X=np.array([self.index]), - win=title, - opts=dict(legend=name, title = title, xlabel=xlabel, ylabel=ylabel), - update=None if self.index == 0 else 'append') - self.index = iters - - def test(self): - pass - - -if __name__ == '__main__': - vis = Visualizer(env='test') - for i in range(10): - x = i - y = 2 * i - z = 4 * i - vis.plot_curves({'train': x, 'test': y}, iters=i, title='train') - vis.plot_curves({'train': z, 'test': y, 'val': i}, iters=i, title='test') - time.sleep(1) diff --git a/face_recognition1/face_landmark/__init__.py 
b/face_recognition1/face_landmark/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/face_recognition1/face_landmark/facelandmark.py b/face_recognition1/face_landmark/facelandmark.py deleted file mode 100644 index 92bbc03ec57e0eef6db816babf88cc9dc3a2c971..0000000000000000000000000000000000000000 --- a/face_recognition1/face_landmark/facelandmark.py +++ /dev/null @@ -1,123 +0,0 @@ -""" -@author: MingDong -@file: facelandmark.py -@desc: define the model for face landmark extraction -""" -import torch -import torch.nn as nn -from torch.nn import Linear, Conv2d, BatchNorm1d, BatchNorm2d, ReLU, Sequential, Module - -################################## Original Arcface Model ############################################################# - -class Flatten(Module): - def forward(self, x): - return x.view(x.size(0), -1) - -################################## FaceLandmark ############################################################# -class Conv_block(Module): - def __init__(self, in_c, out_c, kernel=(1, 1), stride=(1, 1), padding=(0, 0), groups=1): - super(Conv_block, self).__init__() - self.conv = Conv2d(in_c, out_channels=out_c, kernel_size=kernel, groups=groups, stride=stride, padding=padding, bias=False) - self.bn = BatchNorm2d(out_c) - self.relu = ReLU(out_c) - def forward(self, x): - x = self.conv(x) - x = self.bn(x) - x = self.relu(x) - return x - -class Linear_block(Module): - def __init__(self, in_c, out_c, kernel=(1, 1), stride=(1, 1), padding=(0, 0), groups=1): - super(Linear_block, self).__init__() - self.conv = Conv2d(in_c, out_channels=out_c, kernel_size=kernel, groups=groups, stride=stride, padding=padding, bias=False) - self.bn = BatchNorm2d(out_c) - def forward(self, x): - x = self.conv(x) - x = self.bn(x) - return x - -class Depth_Wise(Module): - def __init__(self, in_c, out_c, residual = False, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=1): - super(Depth_Wise, 
self).__init__() - self.conv = Conv_block(in_c, out_c=groups, kernel=(1, 1), padding=(0, 0), stride=(1, 1)) - self.conv_dw = Conv_block(groups, groups, groups=groups, kernel=kernel, padding=padding, stride=stride) - self.project = Linear_block(groups, out_c, kernel=(1, 1), padding=(0, 0), stride=(1, 1)) - self.residual = residual - def forward(self, x): - if self.residual: - short_cut = x - x = self.conv(x) - x = self.conv_dw(x) - x = self.project(x) - if self.residual: - output = short_cut + x - else: - output = x - return output - -class Residual(Module): - def __init__(self, c, num_block, groups, kernel=(3, 3), stride=(1, 1), padding=(1, 1)): - super(Residual, self).__init__() - modules = [] - for _ in range(num_block): - modules.append(Depth_Wise(c, c, residual=True, kernel=kernel, padding=padding, stride=stride, groups=groups)) - self.model = Sequential(*modules) - def forward(self, x): - return self.model(x) - -class GDC(Module): - def __init__(self, embedding_size): - super(GDC, self).__init__() - self.conv_6_dw = Linear_block(512, 512, groups=512, kernel=(4,4), stride=(1, 1), padding=(0, 0)) - self.linear = Linear(512, embedding_size, bias=True) - self.bn = BatchNorm1d(embedding_size) - - def forward(self, x): - x = self.conv_6_dw(x) - x = torch.flatten(x, 1) - x = self.linear(x) - x = self.bn(x) - return x - -class FaceLandmark(Module): - def __init__(self, input_size = 64, embedding_size = 136): - super(FaceLandmark, self).__init__() - self.conv1 = Conv_block(1, 32, kernel=(3, 3), stride=(2, 2), padding=(1, 1)) - self.conv2_dw = Conv_block(32, 32, kernel=(3, 3), stride=(1, 1), padding=(1, 1), groups=32) - self.conv_23 = Depth_Wise(32, 32, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=64) - self.conv_3 = Residual(32, num_block=3, groups=64, kernel=(3, 3), stride=(1, 1), padding=(1, 1)) - self.conv_34 = Depth_Wise(32, 64, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=128) - self.conv_4 = Residual(64, num_block=4, groups=128, kernel=(3, 3), 
stride=(1, 1), padding=(1, 1)) - self.conv_45 = Depth_Wise(64, 64, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=256) - self.conv_5 = Residual(64, num_block=2, groups=128, kernel=(3, 3), stride=(1, 1), padding=(1, 1)) - self.conv_6_sep = Conv_block(64, 512, kernel=(1, 1), stride=(1, 1), padding=(0, 0)) - self.output_layer = GDC(embedding_size) - self._initialize_weights() - - def _initialize_weights(self): - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - if m.bias is not None: - m.bias.data.zero_() - elif isinstance(m, nn.BatchNorm2d): - m.weight.data.fill_(1) - m.bias.data.zero_() - elif isinstance(m, nn.Linear): - nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - if m.bias is not None: - m.bias.data.zero_() - - - def forward(self, x): - out = self.conv1(x) - out = self.conv2_dw(out) - out = self.conv_23(out) - out = self.conv_3(out) - out = self.conv_34(out) - out = self.conv_4(out) - out = self.conv_45(out) - out = self.conv_5(out) - conv_features = self.conv_6_sep(out) - out = self.output_layer(conv_features) - return out diff --git a/face_recognition1/face_landmark/facelandmark.tar b/face_recognition1/face_landmark/facelandmark.tar deleted file mode 100644 index cc540f945a4e926bbc5f23202737a281a2d3084a..0000000000000000000000000000000000000000 --- a/face_recognition1/face_landmark/facelandmark.tar +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:fe0f5cedfa84107502e5a4b0bd55b824813be54f8e443a93dd2b1e6170176cab -size 3723687 diff --git a/face_recognition1/face_landmark/landmark.onnx b/face_recognition1/face_landmark/landmark.onnx deleted file mode 100644 index db44ac83b3dbe1506ef07a2717c121615a6c0a28..0000000000000000000000000000000000000000 --- a/face_recognition1/face_landmark/landmark.onnx +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid 
sha256:f10b0ba1d2a8a099adefede1ebf3733e44fe370715e7303dbeb7f59c02142258 -size 1163742 diff --git a/face_recognition1/face_landmark/landmark.tar b/face_recognition1/face_landmark/landmark.tar deleted file mode 100644 index cc540f945a4e926bbc5f23202737a281a2d3084a..0000000000000000000000000000000000000000 --- a/face_recognition1/face_landmark/landmark.tar +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:fe0f5cedfa84107502e5a4b0bd55b824813be54f8e443a93dd2b1e6170176cab -size 3723687 diff --git a/face_recognition1/face_utils/__init__.py b/face_recognition1/face_utils/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/face_recognition1/face_utils/align_faces.py b/face_recognition1/face_utils/align_faces.py deleted file mode 100644 index bb96d6802a365eac710d37c6ce7f8f87f8e60872..0000000000000000000000000000000000000000 --- a/face_recognition1/face_utils/align_faces.py +++ /dev/null @@ -1,202 +0,0 @@ -""" -@author: MingDong -@file: align_faces.py -@desc: merge the face align images (112x112) -""" -import cv2 -import numpy as np -from skimage import transform as trans - -# reference facial points, a list of coordinates (x,y) -REFERENCE_FACIAL_POINTS = [ - [30.29459953, 51.69630051], - [65.53179932, 51.50139999], - [48.02519989, 71.73660278], - [33.54930115, 92.3655014], - [62.72990036, 92.20410156] -] - -# REFERENCE_FACIAL_POINTS = [ -# [34.29459953, 55.69630051], -# [61.53179932, 55.50139999], -# [48.02519989, 71.73660278], -# [33.54930115, 88.3655014], -# [58.72990036, 88.20410156] -# ] - -DEFAULT_CROP_SIZE = (96, 112) - - -class FaceWarpException(Exception): - def __str__(self): - return f'In File {__file__}:{super.__str__(self)}' - - -def get_reference_facial_points(output_size=None, - inner_padding_factor=0.0, - outer_padding=(0, 0), - default_square=False): - tmp_5pts = np.array(REFERENCE_FACIAL_POINTS) - tmp_crop_size = 
np.array(DEFAULT_CROP_SIZE) - - # 0) make the inner region a square - if default_square: - size_diff = max(tmp_crop_size) - tmp_crop_size - tmp_5pts += size_diff / 2 - tmp_crop_size += size_diff - - # print('---> default:') - # print(' crop_size = ', tmp_crop_size) - # print(' reference_5pts = ', tmp_5pts) - - if (output_size and - output_size[0] == tmp_crop_size[0] and - output_size[1] == tmp_crop_size[1]): - # print(f'output_size == DEFAULT_CROP_SIZE {tmp_crop_size}: return default reference points') - return tmp_5pts - - if (inner_padding_factor == 0 and - outer_padding == (0, 0)): - if output_size is None: - print('No paddings to do: return default reference points') - return tmp_5pts - else: - raise FaceWarpException( - f'No paddings to do, output_size must be None or {tmp_crop_size}') - - # check output size - if not 0 <= inner_padding_factor <= 1.0: - raise FaceWarpException('Not (0 <= inner_padding_factor <= 1.0)') - - if ((inner_padding_factor > 0 or outer_padding[0] > 0 or outer_padding[1] > 0) - and output_size is None): - output_size = tmp_crop_size * \ - (1 + inner_padding_factor * 2).astype(np.int32) - output_size += np.array(outer_padding) - print(' deduced from paddings, output_size = ', output_size) - - if not (outer_padding[0] < output_size[0] - and outer_padding[1] < output_size[1]): - raise FaceWarpException('Not (outer_padding[0] < output_size[0]' - 'and outer_padding[1] < output_size[1])') - - # 1) pad the inner region according inner_padding_factor - # print('---> STEP1: pad the inner region according inner_padding_factor') - if inner_padding_factor > 0: - size_diff = tmp_crop_size * inner_padding_factor * 2 - tmp_5pts += size_diff / 2 - tmp_crop_size += np.round(size_diff).astype(np.int32) - - # print(' crop_size = ', tmp_crop_size) - # print(' reference_5pts = ', tmp_5pts) - - # 2) resize the padded inner region - # print('---> STEP2: resize the padded inner region') - size_bf_outer_pad = np.array(output_size) - np.array(outer_padding) * 2 
- # print(' crop_size = ', tmp_crop_size) - # print(' size_bf_outer_pad = ', size_bf_outer_pad) - - if size_bf_outer_pad[0] * tmp_crop_size[1] != size_bf_outer_pad[1] * tmp_crop_size[0]: - raise FaceWarpException('Must have (output_size - outer_padding)' - '= some_scale * (crop_size * (1.0 + inner_padding_factor)') - - scale_factor = size_bf_outer_pad[0].astype(np.float32) / tmp_crop_size[0] - # print(' resize scale_factor = ', scale_factor) - tmp_5pts = tmp_5pts * scale_factor - # size_diff = tmp_crop_size * (scale_factor - min(scale_factor)) - # tmp_5pts = tmp_5pts + size_diff / 2 - tmp_crop_size = size_bf_outer_pad - # print(' crop_size = ', tmp_crop_size) - # print(' reference_5pts = ', tmp_5pts) - - # 3) add outer_padding to make output_size - reference_5point = tmp_5pts + np.array(outer_padding) - tmp_crop_size = output_size - # print('---> STEP3: add outer_padding to make output_size') - # print(' crop_size = ', tmp_crop_size) - # print(' reference_5pts = ', tmp_5pts) - # - # print('===> end get_reference_facial_points\n') - - return reference_5point - - -def get_affine_transform_matrix(src_pts, dst_pts): - tfm = np.float32([[1, 0, 0], [0, 1, 0]]) - n_pts = src_pts.shape[0] - ones = np.ones((n_pts, 1), src_pts.dtype) - src_pts_ = np.hstack([src_pts, ones]) - dst_pts_ = np.hstack([dst_pts, ones]) - - A, _, rank, _ = np.linalg.lstsq(src_pts_, dst_pts_) - - if rank == 3: - tfm = np.float32([ - [A[0, 0], A[1, 0], A[2, 0]], - [A[0, 1], A[1, 1], A[2, 1]] - ]) - elif rank == 2: - tfm = np.float32([ - [A[0, 0], A[1, 0], 0], - [A[0, 1], A[1, 1], 0] - ]) - - return tfm - - -def warp_and_crop_face(src_img, - facial_pts, - reference_pts=None, - crop_size=(96, 112), - align_type='smilarity'): - if reference_pts is None: - if crop_size[0] == 96 and crop_size[1] == 112: - reference_pts = REFERENCE_FACIAL_POINTS - else: - default_square = False - inner_padding_factor = 0 - outer_padding = (0, 0) - output_size = crop_size - - reference_pts = 
get_reference_facial_points(output_size, - inner_padding_factor, - outer_padding, - default_square) - - ref_pts = np.float32(reference_pts) - ref_pts_shp = ref_pts.shape - if max(ref_pts_shp) < 3 or min(ref_pts_shp) != 2: - raise FaceWarpException( - 'reference_pts.shape must be (K,2) or (2,K) and K>2') - - if ref_pts_shp[0] == 2: - ref_pts = ref_pts.T - - src_pts = np.float32(facial_pts) - src_pts_shp = src_pts.shape - if max(src_pts_shp) < 3 or min(src_pts_shp) != 2: - raise FaceWarpException( - 'facial_pts.shape must be (K,2) or (2,K) and K>2') - - if src_pts_shp[0] == 2: - src_pts = src_pts.T - - if src_pts.shape != ref_pts.shape: - raise FaceWarpException( - 'facial_pts and reference_pts must have the same shape') - - if align_type == 'cv2_affine': - tfm = cv2.getAffineTransform(src_pts[0:3], ref_pts[0:3]) - # print('cv2.getAffineTransform() returns tfm=\n' + str(tfm)) - elif align_type == 'affine': - tfm = get_affine_transform_matrix(src_pts, ref_pts) - # print('get_affine_transform_matrix() returns tfm=\n' + str(tfm)) - else: - # tfm = get_similarity_transform_for_cv2(src_pts, ref_pts) - tform = trans.SimilarityTransform() - tform.estimate(src_pts, ref_pts) - tfm = tform.params[0:2, :] - - face_img = cv2.warpAffine(src_img, tfm, (crop_size[0], crop_size[1])) - - return face_img diff --git a/face_recognition1/face_utils/digit_recognition/__init__.py b/face_recognition1/face_utils/digit_recognition/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/face_recognition1/face_utils/digit_recognition/checkpoints/1_28x28_DRNet.pth b/face_recognition1/face_utils/digit_recognition/checkpoints/1_28x28_DRNet.pth deleted file mode 100644 index 091ec96872449f63ed15688931d64ae398636830..0000000000000000000000000000000000000000 --- a/face_recognition1/face_utils/digit_recognition/checkpoints/1_28x28_DRNet.pth +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid 
sha256:899bb70cbc5022837f093403116b227f865b19b471df8951b06fd2c8c5d9ca3e -size 121903 diff --git a/face_recognition1/face_utils/digit_recognition/model/__init__.py b/face_recognition1/face_utils/digit_recognition/model/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/face_recognition1/face_utils/digit_recognition/model/digitnet.py b/face_recognition1/face_utils/digit_recognition/model/digitnet.py deleted file mode 100644 index 788b7480450f6648899708d8301bf3fd79024477..0000000000000000000000000000000000000000 --- a/face_recognition1/face_utils/digit_recognition/model/digitnet.py +++ /dev/null @@ -1,30 +0,0 @@ -""" -@author: MingDong -@file: digitnet.py -@desc: define model for digit recognition -""" -from torch import nn -import torch.nn.functional as F - - -class DRNet(nn.Module): - def __init__(self, num_classes=10): - super().__init__() - self.layer1 = nn.Sequential( - nn.Conv2d(1, 16, kernel_size=5, stride=1, padding=2), - nn.BatchNorm2d(16), - nn.ReLU(), - nn.MaxPool2d(kernel_size=2, stride=2)) - self.layer2 = nn.Sequential( - nn.Conv2d(16, 32, kernel_size=5, stride=1, padding=2), - nn.BatchNorm2d(32), - nn.ReLU(), - nn.MaxPool2d(kernel_size=2, stride=2)) - self.fc = nn.Linear(7*7*32, num_classes) - - def forward(self, x): - out = self.layer1(x) - out = self.layer2(out) - out = out.reshape(out.size(0), -1) - out = self.fc(out) - return F.log_softmax(out, dim=1) diff --git a/face_recognition1/face_utils/digit_recognition/test.py b/face_recognition1/face_utils/digit_recognition/test.py deleted file mode 100644 index 7ceaf99e0e042e69595b84f94d5bbe2ff68d75c3..0000000000000000000000000000000000000000 --- a/face_recognition1/face_utils/digit_recognition/test.py +++ /dev/null @@ -1,208 +0,0 @@ -""" -@author: MingDong -@file: test.py -@desc: test model for digit recognition -""" - -import argparse -import numpy as np -import cv2 -from PIL import Image, ImageFilter -import torch 
-from torchvision import transforms -from matplotlib import pyplot as plt -from src.bioauth_ml.face_utils.digit_recognition.model import digitnet - - -def parse_input_args(): - """ define arguments """ - parser = argparse.ArgumentParser(description='Predict Digits with MNIST Dataset') - parser.add_argument('--batch-size', type=int, default=64, metavar='N', help='input batch size for training (default: 64)') - parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',help='input batch size for testing (default: 1000)') - parser.add_argument('--epochs', type=int, default=10, metavar='N', help='number of epochs to train (default: 10)') - parser.add_argument('--lr', type=float, default=0.001, metavar='LR', help='learning rate (default: 0.001)') - parser.add_argument('--momentum', type=float, default=0.5, metavar='M', help='SGD momentum (default: 0.5)') - parser.add_argument('--no-cuda', action='store_true', default=False, help='disables CUDA training') - parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)') - parser.add_argument('--log-interval', type=int, default=10, metavar='N', help='how many batches to wait before logging training status') - parser.add_argument('--checkpoint', type=str, default="../face_utils/digit_recognition/checkpoints/1_28x28_DRNet.pth", help='Path to save Check point') - parser.add_argument('--input_image', type=str, default="1.png", help='Path to save Check point') - parser.add_argument('--image_similar_to_mnist', action="store_true", default=False, help='use for images similarm to MNIST Dataset format') - parser.add_argument('--debug', action="store_true", default=False, help='use for to print debug logs') - - return parser.parse_args() - - -def load_checkpoint(filepath): - """ load pretrained model from the static file """ - device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - checkpoint = torch.load(filepath, map_location=device) - model = digitnet.DRNet() - 
model.load_state_dict(checkpoint['state_dict']) - - return model - - -def imshow(image, ax=None, title=None): - """ imshow for Tensor.""" - if ax is None: - _, ax = plt.subplots() - - # PyTorch tensors assume the color channel is the first dimension - # but matplotlib assumes is the third dimension - image = image.numpy().transpose((1, 2, 0)) - - # Undo preprocessing - mean = np.array([0.5, 0.5, 0.5]) - std = np.array([0.5, 0.5, 0.5]) - image = std * image + mean - - # Image needs to be clipped between 0 and 1 or it looks like noise when displayed - image = np.clip(image, 0, 1) - - ax.imshow(image) - - return ax - - -def process_image(pil_image): - """ Scales, crops, and normalizes a PIL image for a PyTorch model, returns an Numpy array""" - preprocess = transforms.Compose([ - transforms.Resize(28), - transforms.CenterCrop(28), - transforms.ToTensor(), - transforms.Normalize(0.5, 0.5) - ]) - - # preprocess the image - img_tensor = preprocess(pil_image) - - # add dimension for batch - img_tensor.unsqueeze_(0) - - return img_tensor - - -def predict(image_path, model, learning_rate, use_cuda, topk=5): - """ Predict the class (or classes) of an image using a trained deep learning model. 
""" - - image = process_image(image_path) - device = torch.device("cuda" if use_cuda else "cpu") - - model.eval() - model.to(device) - # criterion = nn.NLLLoss() - # optimizer = optim.Adam(model.parameters(), lr=learning_rate) - image = image.to(device) - - #to change 2D to 1D - #image = image.view(1, 784) - - with torch.no_grad(): - output = model.forward(image) - ps = torch.exp(output) - - topk_probs_tensor, topk_idx_tensor = ps.topk(topk) - - return topk_probs_tensor, topk_idx_tensor #probs, classes - - -def main(x=None): - """ Main purpose of this function is to convert roi area of input image to MNIST format and apply prediction """ - - args = parse_input_args() - model_classification = {"0": "0", "1": "1", "2": "2", "3": "3", "4": "4", "5": "5", "6": "6", "7": "7", "8": "8", - "9": "9"} - - use_cuda = not args.no_cuda and torch.cuda.is_available() - torch.manual_seed(args.seed) - - model = load_checkpoint(args.checkpoint) - - img = cv2.imread(args.input_image, cv2.IMREAD_COLOR) if x is None else x - #img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - - # To invert colors of images to mak esimilar to MNIST Dataset - if args.image_similar_to_mnist: - img1 = img - gray = img - else: - img1 = cv2.bitwise_not(img) - # Convert Image to gray - gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY) - - #To show inverted colour image - if args.debug: - plt.imshow(img1) - plt.show() - - #Apply threshold. 
- #ret, thresh = cv2.threshold(gray, 75, 255, 0) -# ret, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU) - _, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY) - # find contours in images - contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - - hierarchy = hierarchy[0] - - for c in zip(contours, hierarchy): - - if c[1][2] > 0 or c[1][3] < 0 : - x, y, w, h = cv2.boundingRect(c[0]) - - if args.debug: - print(f"X:{x}, Y:{y}, W:{w}, H:{h}") - - # draw a green rectangle to visualize the bounding rect - # Create region of interest - roi = gray[y:y+h, x:x+w] - #Convert image to PIL image - pil_roi = Image.fromarray(roi) - # creates white canvas of 28x28 pixels - newImage = Image.new('L', (28, 28), (0)) - - #This is resize with maintaining aspect ratio - if w > h: # check which dimension is bigger - nheight = int(round((20.0 / w * h), 0)) # resize height according to ratio width - if nheight == 0: # rare case but minimum is 1 pixel - nheight = 1 - # resize and sharpen - img2 = pil_roi.resize((20, nheight), Image.ANTIALIAS).filter(ImageFilter.SHARPEN) - wtop = int(round(((28 - nheight) / 2), 0)) # calculate horizontal position - newImage.paste(img2, (4, wtop)) # paste resized image on white canvas - else: - # Height is bigger. Heigth becomes 20 pixels. - nwidth = int(round((20.0 / h * w), 0)) # resize width according to ratio height - if nwidth == 0: # rare case but minimum is 1 pixel - nwidth = 1 - # resize and sharpen - img2 = pil_roi.resize((nwidth, 20), Image.ANTIALIAS).filter(ImageFilter.SHARPEN) - wleft = int(round(((28 - nwidth) / 2), 0)) # caculate vertical pozition - newImage.paste(img2, (wleft, 4)) # paste resized image on white canvas - - #Intermediate images per rectangular box can be saved for debugging purpose. - #newImage.save('Sample.jpg') - - #Define number of top predictions to get, usually 1. 
- top_k = 1 - probs_tensor, classes_tensor = predict(newImage, model, args.lr, use_cuda, topk=top_k) - - # Convert the probabilities and classes tensors into lists - probs = probs_tensor.tolist()[0] - - model.class_to_idx = model_classification - classes = [model.class_to_idx[str(sorted(model.class_to_idx)[i])] for i in classes_tensor.tolist()[0]] - - print(f"Number :{classes}, Probability :{probs}") - - cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2) - font = cv2.FONT_HERSHEY_SIMPLEX - - cv2.putText(img, str(classes), (x+w-10, y+h+5), font, 2, (255, 0, 0), 2, cv2.LINE_AA) - plt.imshow(img) - - plt.show() - - -if __name__ == '__main__': - main() diff --git a/face_recognition1/face_utils/digit_recognition/train.py b/face_recognition1/face_utils/digit_recognition/train.py deleted file mode 100644 index 8b26896301ad3ce3354139ee750d3550d689d1a2..0000000000000000000000000000000000000000 --- a/face_recognition1/face_utils/digit_recognition/train.py +++ /dev/null @@ -1,162 +0,0 @@ -""" -@author: MingDong -@file: train.py -@desc: train model for digit recognition -""" -import argparse -import torch -from model.digitnet import DRNet -from torch import nn -from torch import optim -from torchvision import datasets, transforms - - -def load_checkpoint(filepath): - """ load pretrained checkpoint from static file """ - checkpoint = torch.load(filepath) - model = DRNet() - model.load_state_dict(checkpoint['state_dict']) - - return model - - -def validation(model, test_loader, criterion, device): - """ validate the model """ - accuracy = 0 - test_loss = 0 - for images, labels in test_loader: - - # images.resize_(images.size()[0], 784) - images, labels = images.to(device), labels.to(device) - - output = model.forward(images) - test_loss += criterion(output, labels).item() - - ## Calculating the accuracy - # Model's output is log-softmax, take exponential to get the probabilities - ps = torch.exp(output) - # Class with highest probability is our predicted class, compare with 
true label - equality = (labels.data == ps.max(1)[1]) - # Accuracy is number of correct predictions divided by all predictions, just take the mean - accuracy += equality.type_as(torch.FloatTensor()).mean() - - return test_loss, accuracy - - -def train(model, train_loader, test_loader, criterion, optimizer, device, epochs=5, print_every=40): - """ train the model """ - steps = 0 - running_loss = 0 - - model.to(device) - - for e in range(epochs): - # Model in training mode, dropout is on - model.train() - for images, labels in train_loader: - steps += 1 - - # Flatten images into a 784 long vector -# images.resize_(images.size()[0], 784) - images, labels = images.to(device), labels.to(device) - - optimizer.zero_grad() - - output = model.forward(images) - loss = criterion(output, labels) - loss.backward() - optimizer.step() - - running_loss += loss.item() - - if steps % print_every == 0: - # Model in inference mode, dropout is off - model.eval() - - # Turn off gradients for validation, will speed up inference - with torch.no_grad(): - test_loss, accuracy = validation(model, test_loader, criterion, device) - - print(f"Epoch: {e+1}/{epochs}.. ", - f"Training Loss: {running_loss/print_every:.3f}.. 
", - f"Test Loss: {test_loss/len(test_loader):.3f}", - f"Test Accuracy: {accuracy / len(test_loader):.3f}") - - running_loss = 0 - - # Make sure dropout and grads are on for training - model.train() - - -def main(): - """ include main process to train the model """ - parser = argparse.ArgumentParser(description='PyTorch MNIST Example') - parser.add_argument('--batch-size', type=int, default=64, metavar='N', - help='input batch size for training (default: 64)') - parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N', - help='input batch size for testing (default: 1000)') - parser.add_argument('--epochs', type=int, default=10, metavar='N', - help='number of epochs to train (default: 10)') - parser.add_argument('--lr', type=float, default=0.001, metavar='LR', - help='learning rate (default: 0.001)') - parser.add_argument('--momentum', type=float, default=0.5, metavar='M', - help='SGD momentum (default: 0.5)') - parser.add_argument('--no-cuda', action='store_true', default=False, - help='disables CUDA training') - parser.add_argument('--seed', type=int, default=1, metavar='S', - help='random seed (default: 1)') - parser.add_argument('--log-interval', type=int, default=10, metavar='N', - help='how many batches to wait before logging training status') - parser.add_argument('--checkpoint', type=str, default="./checkpoints/checkpoint.pth", - help='Path to save Check point') - parser.add_argument('--verify-model', action="store_true", default=False, - help='use for to verify model file') - parser.add_argument('--debug', action="store_true", default=False, - help='use for to print debug logs') - - args = parser.parse_args() - - use_cuda = not args.no_cuda and torch.cuda.is_available() - - torch.manual_seed(args.seed) - - device = torch.device("cuda" if use_cuda else "cpu") - - kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {} - - transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(0.5, 0.5)]) - train_loader = 
torch.utils.data.DataLoader(datasets.MNIST('./data', train=True, download=True, transform=transform), - batch_size=args.batch_size, shuffle=True, **kwargs) - test_loader = torch.utils.data.DataLoader(datasets.MNIST('./data', train=False, download=True, transform=transform), - batch_size=args.test_batch_size, shuffle=True, **kwargs) - - model = DRNet() - criterion = nn.NLLLoss() - optimizer = optim.Adam(model.parameters(), lr=0.001) - - train(model, train_loader, test_loader, criterion, optimizer, device, args.epochs) - - checkpoint = {'state_dict': model.state_dict()} - torch.save(checkpoint, args.checkpoint) - - if args.verify_model: - model1 = load_checkpoint(args.checkpoint) - model1.to(device) - - model1.eval() - dataiter = iter(test_loader) - images, labels = dataiter.next() - - img, labels = images.to(device), labels.to(device) - with torch.no_grad(): - output = model1.forward(img) - - ps = torch.exp(output) - equality = (labels.data == ps.max(1)[1]) - - print(ps) - print(equality) - - -if __name__ == '__main__': - main() diff --git a/face_recognition1/face_utils/utility.py b/face_recognition1/face_utils/utility.py deleted file mode 100644 index 4eb6a3b7ad6915ecbd9f1211442dfcbaa1c0ed02..0000000000000000000000000000000000000000 --- a/face_recognition1/face_utils/utility.py +++ /dev/null @@ -1,28 +0,0 @@ -""" -@file: utility.py -@desc: defines utility functions -""" -import requests -from tqdm import tqdm - - -def download_checkpoint(remote_url, local_path): - response = requests.get(remote_url, stream=True) - total_size_in_bytes = int(response.headers.get("content-length", 0)) - block_size = 1024 # 1 Kibibyte - - progress_bar = tqdm( - desc=f"Downloading {local_path}..", - total=total_size_in_bytes, - unit="iB", - unit_scale=True, - ) - - with open(local_path, "wb") as ref: - for data in response.iter_content(block_size): - progress_bar.update(len(data)) - ref.write(data) - - progress_bar.close() - if total_size_in_bytes not in (0, progress_bar.n): - 
print("ERROR, something went wrong") diff --git a/face_recognition1/feature_api.py b/face_recognition1/feature_api.py deleted file mode 100644 index efe95af5a372e38d1de88bb2721809f272bfeb12..0000000000000000000000000000000000000000 --- a/face_recognition1/feature_api.py +++ /dev/null @@ -1,205 +0,0 @@ -""" -@author: MingDong -@file: train.py -@desc: test the model for deep face recognition -""" -import os -import sys -sys.path.append(os.path.dirname(__file__)) - -import argparse -from collections import OrderedDict -import cv2 -import numpy as np -import torch -import torchvision.transforms as transforms -#import onnx -#import onnxruntime as ort -from torch.nn import DataParallel -from face_detect.test import get_bbox -from landmark_api import get_face_landmark -from face_feature.model import mobilefacenet, cbam, resnet -from face_utils.align_faces import warp_and_crop_face, get_reference_facial_points -from face_utils.utility import download_checkpoint - - -def convert_68pts_5pts(landmark): - left_eye_x = (landmark[74] + landmark[76] + landmark[80] + landmark[82]) / 4 - left_eye_y = (landmark[75] + landmark[77] + landmark[81] + landmark[83]) / 4 - - right_eye_x = (landmark[86] + landmark[88] + landmark[92] + landmark[94]) / 4 - right_eye_y = (landmark[87] + landmark[89] + landmark[93] + landmark[95]) / 4 - - nose_x, nose_y = landmark[60], landmark[61] - - left_mouse_x = (landmark[96] + landmark[120]) / 2 - left_mouse_y = (landmark[97] + landmark[121]) / 2 - - right_mouse_x = (landmark[108] + landmark[128]) / 2 - right_mouse_y = (landmark[109] + landmark[129]) / 2 - return np.array([left_eye_x, right_eye_x, nose_x, left_mouse_x, right_mouse_x, - left_eye_y, right_eye_y, nose_y, left_mouse_y, right_mouse_y]) - - -def align(img, output_size): - image_bbox = get_bbox(img) - if image_bbox is None: - return None - - gray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) - landmark = get_face_landmark(gray_image, image_bbox) - - facial5points = 
convert_68pts_5pts(landmark.detach().cpu().numpy()) - facial5points = np.reshape(facial5points, (2, 5)) - - default_square = True - inner_padding_factor = 0.25 - outer_padding = (0, 0) - - # get the reference 5 landmarks position in the crop settings - reference_5pts = get_reference_facial_points( - output_size, inner_padding_factor, outer_padding, default_square) - - # dst_img = warp_and_crop_face(raw, facial5points, reference_5pts, crop_size) - dst_img = warp_and_crop_face(img, facial5points, reference_pts=reference_5pts, crop_size=output_size) - # cv2.imwrite(f'{0}_aligned_{output_size[0]}x{output_size[1]}.jpg', dst_img) - # img = cv.resize(raw, (224, 224)) - # cv.imwrite('images/{}_img.jpg'.format(i), img) - return dst_img - - -def load_model(backbone_net, feature_dim, gpus='0'): - """ load the pretrained model """ - if backbone_net == 'MobileFace': - net = mobilefacenet.MobileFaceNet(feature_dim=feature_dim) - elif backbone_net == 'Res50': - net = resnet.ResNet50() - elif backbone_net == 'CBAM_50': - net = cbam.CBAMResNet(50, feature_dim=feature_dim, mode='ir') - elif backbone_net == 'CBAM_50_SE': - net = cbam.CBAMResNet(50, feature_dim=feature_dim, mode='ir_se') - elif backbone_net == 'CBAM_100': - net = cbam.CBAMResNet(100, feature_dim=feature_dim, mode='ir') - elif backbone_net == 'CBAM_100_SE': - net = cbam.CBAMResNet(100, feature_dim=feature_dim, mode='ir_se') - else: - print(backbone_net, ' is not available!') - - # gpu init - multi_gpus = False - if len(gpus.split(',')) > 1: - multi_gpus = True - os.environ['CUDA_VISIBLE_DEVICES'] = gpus - device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - - resume_path = os.path.join(os.path.dirname(__file__), 'face_feature/checkpoints/feat_net.ckpt') - if not os.path.exists(resume_path): - download_checkpoint(remote_url=feature_pytorch_url, local_path=resume_path) - - net.load_state_dict(torch.load(resume_path, map_location=device)['net_state_dict']) - - if multi_gpus: - net = 
DataParallel(net).to(device) - else: - net = net.to(device) - - # torch.save({'iters': 0, - # 'net_state_dict': net.state_dict()}, - # f'feat_net.ckpt') - - # convert model to onnx version - # net.eval() - # onnx_model_path = resume_path.replace('ckpt', 'onnx') - # dummy_input = torch.randn(1, 3, 112, 112).to(device) - # torch.onnx.export(net, dummy_input, onnx_model_path, verbose=True, input_names=["input"], output_names=["output"]) - - return net.eval(), device - - -def get_feature(face_image, net, device): - """ extract the feature vector from the input image """ - transform = transforms.Compose([ - transforms.ToTensor(), # range [0, 255] -> [0.0,1.0] - transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) # range [0.0, 1.0] -> [-1.0,1.0] - ]) - face_img = transform(face_image).to(device) - feature_vec = net(face_img.unsqueeze(0)).data.cpu().numpy() - return feature_vec - - -def get_feature_int8(face_image): - image_mean = np.array([127.5, 127.5, 127.5]) - img = (face_image - image_mean) / 127.5 - img = np.transpose(img.astype(np.float32), [2, 0, 1]) - - img = np.expand_dims(img, axis=0) - - # add all intermediate outputs to onnx net - onnx_path = os.path.join(os.path.dirname(__file__), 'face_feature/checkpoints/feature.onnx') - if not os.path.exists(os.path.dirname(onnx_path)): - os.makedirs(os.path.dirname(onnx_path)) - - if not os.path.exists(onnx_path): - download_checkpoint(remote_url=feature_onnx_url, local_path=onnx_path) - - ort_session = ort.InferenceSession(onnx_path) - org_outputs = [x.name for x in ort_session.get_outputs()] - - model = onnx.load(onnx_path) - for node in model.graph.node: - for output in node.output: - if output not in org_outputs: - model.graph.output.extend([onnx.ValueInfoProto(name=output)]) - - # execute onnx model - ort_session = ort.InferenceSession(model.SerializeToString()) - outputs = [x.name for x in ort_session.get_outputs()] - ort_outs = ort_session.run(outputs, {"input": img}) - ort_outs = 
OrderedDict(zip(outputs, ort_outs)) - # _, feat_int8 = ort_session.run(None, {"input": img}) - # return feat_int8.flatten() - return ort_outs['output'].flatten() - - -def match_feature(feature1, feature2): - - mu = np.mean(np.concatenate((feature1, feature2), axis=0)) - feature1 = feature1 - mu - feature2 = feature2 - mu - - feature1 = feature1 / np.expand_dims(np.sqrt(np.sum(np.power(feature1, 2), 1)), 1) - feature2 = feature2 / np.expand_dims(np.sqrt(np.sum(np.power(feature2, 2), 1)), 1) - - score = np.sum(np.multiply(feature1, feature2), 1) - return score - - -def get_feature_from_origin(image, feature_type): - net, device = load_model('Res50', 512, '0') - sys.exit(1) - face_image = align(image, output_size=(112, 112)) - - if feature_type == 0: - feat_vec = get_feature(face_image, net, device) - elif feature_type == 1: - feat_vec = get_feature_int8(face_image) - else: - feat_vec = None - - return feat_vec - - -if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Infer the model for face recognition') - parser.add_argument('--backbone_net', type=str, default='Res50', help='MobileFace, Res50, CBAM_50, CBAM_50_SE, CBAM_100, CBAM_100_SE') - parser.add_argument('--output_name', type=str, default='output', help='intermediate layer name of onnx model') - parser.add_argument('--feature_dim', type=int, default=512, help='feature dimension') - parser.add_argument('--feature_type', type=int, default=0, help='feature type - {0: float32, 1: int8}') - parser.add_argument('--resume', type=str, default='./face_feature/checkpoints/feature.pth', - help='The path pf save checkpoints') - parser.add_argument('--image', type=str, default='3.jpeg', help='source image file') - parser.add_argument('--gpus', type=str, default='0', help='gpu list') - args = parser.parse_args() - - image = cv2.imread(args.image) - print(get_feature_from_origin(image, args.feature_type)) diff --git a/face_recognition1/landmark_api.py b/face_recognition1/landmark_api.py deleted file 
mode 100644 index 0f50d53eb52d4dce78d1b735cdeca34af6e258a9..0000000000000000000000000000000000000000 --- a/face_recognition1/landmark_api.py +++ /dev/null @@ -1,87 +0,0 @@ -""" -@author: MingDong -@file: landmark_api.py -@desc: add the inference module for face landmark extraction -""" -import os -import sys -sys.path.append(os.path.dirname( __file__)) - -import numpy as np -import torch -import cv2 -from face_landmark.facelandmark import FaceLandmark -from face_utils.utility import download_checkpoint - -# create face landmark extractor and initialize it - -landmark_path = os.path.join(os.path.dirname(__file__), 'face_landmark/facelandmark.tar') - -model_landmark = FaceLandmark() -model_landmark.load_state_dict(torch.load(landmark_path, map_location='cpu')['state_dict']) -model_landmark.eval() - - -def get_face_landmark(gray_image, bounding_box): - """ - Description: - get face landmark in gray image with face rect - - Args: - gray_image:input gray image - bounding_box:face rect - """ - - image = gray_image - box = bounding_box - - nHeight, nWidth = image.shape - - rLeftMargin = 0.05 - rTopMargin = 0.00 - rRightMargin = 0.05 - rBottomMargin = 0.10 - - # rW = box[2] - box[0] - # rH = box[3] - box[1] - rW = box[2] - rH = box[3] - - rX = box[0] - rY = box[1] - - #get image range to get face landmark from face rect - iExFaceX = int(rX - rLeftMargin * rW) - iExFaceY = int(rY - rTopMargin * rH) - iExFaceW = int((1 + (rLeftMargin + rRightMargin)) * rW) - iExFaceH = int((1 + (rTopMargin + rBottomMargin)) * rH) - - iExFaceX = np.clip(iExFaceX, 0, nWidth - 1) - iExFaceY = np.clip(iExFaceY, 0, nHeight - 1) - iExFaceW = np.clip(iExFaceX + iExFaceW, 0, nWidth - 1) - iExFaceX - iExFaceH = np.clip(iExFaceY + iExFaceH, 0, nHeight - 1) - iExFaceY - - #crop face image in range to face landmark - image = image[iExFaceY:iExFaceY+iExFaceH, iExFaceX:iExFaceX+iExFaceW] - #normalize croped face image - image = cv2.resize(image, (64, 64), cv2.INTER_LINEAR) - image = image / 256 - image = 
torch.from_numpy(image.astype(np.float32)) - #convert mask_align_image from type [n,n] to [1,1,n,n] - image = image.unsqueeze(0).unsqueeze(0) - - #get landmark fron croped face image - landmark = model_landmark(image) - #reshape face landmark and convert to image coordinates - landmark = landmark.reshape(68, 2) - landmark[:,0] = landmark[:,0] * iExFaceW + iExFaceX - landmark[:,1] = landmark[:,1] * iExFaceH + iExFaceY - - landmark = landmark.reshape(-1) - - return landmark - - -if __name__ == '__main__': - dummy_input = torch.randn(1, 1, 64, 64) - torch.onnx.export(model_landmark, dummy_input, "landmark.onnx", verbose=True, input_names=['input'], output_names=['output']) diff --git a/face_recognition1/run.py b/face_recognition1/run.py deleted file mode 100644 index 908c76ca7a9f45b5c44df7ae97137c2b2f49ac50..0000000000000000000000000000000000000000 --- a/face_recognition1/run.py +++ /dev/null @@ -1,46 +0,0 @@ -""" -@file: run.py -@desc: expose the rest api using flask -""" - -import sys -import cv2 -import numpy as np -from feature_api import load_model, align, get_feature, match_feature - -feature_extractor, device = load_model('Res50', 512, '0') - -def match_image(src_numpy, dst_numpy): - face_image = align(src_numpy, output_size=(112, 112)) - src_feat = get_feature(face_image, feature_extractor, device) - - face_image = align(dst_numpy, output_size=(112, 112)) - dst_feat = get_feature(face_image, feature_extractor, device) - - output = match_feature(src_feat, dst_feat) - sim = 0 if output[0] < 0 else output[0] - - print("Matching Score: ", sim) - - if sim < 0.61: - result = "Different Person" - else: - result = "Same Person" - - - response = { - "confidence": sim, - "threshold": 0.61, - "result": result - } - return response - - -if __name__ == '__main__': - src_file = '../images/11.jpg' - dst_file = '../images/4.jpg' - - src_img = cv2.imread(src_file) - dst_img = cv2.imread(dst_file) - - print(match_image(src_img, dst_img))