model wo checkpoint
Browse files- nnUNetTrainer__nnUNetResEncUNetMPlans__3d_fullres/dataset_fingerprint.json +0 -0
- nnUNetTrainer__nnUNetResEncUNetMPlans__3d_fullres/fold_all/debug.json +53 -0
- nnUNetTrainer__nnUNetResEncUNetMPlans__3d_fullres/fold_all/logs/progress.png +0 -0
- nnUNetTrainer__nnUNetResEncUNetMPlans__3d_fullres/fold_all/logs/training_log_2024_8_29_15_59_41.txt +0 -0
- nnUNetTrainer__nnUNetResEncUNetMPlans__3d_fullres/fold_all/logs/training_log_2024_8_29_23_59_06.txt +0 -0
- nnUNetTrainer__nnUNetResEncUNetMPlans__3d_fullres/fold_all/logs/training_log_2024_8_30_07_58_08.txt +1432 -0
- nnUNetTrainer__nnUNetResEncUNetMPlans__3d_fullres/plans.json +356 -0
nnUNetTrainer__nnUNetResEncUNetMPlans__3d_fullres/dataset_fingerprint.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|
nnUNetTrainer__nnUNetResEncUNetMPlans__3d_fullres/fold_all/debug.json
ADDED
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"_best_ema": "0.8639167944091766",
|
3 |
+
"batch_size": "2",
|
4 |
+
"configuration_manager": "{'data_identifier': 'nnUNetPlans_3d_fullres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [128, 128, 128], 'median_image_size_in_voxels': [235.0, 240.0, 240.0], 'spacing': [1.5, 1.5, 1.5], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'architecture': {'network_class_name': 'dynamic_network_architectures.architectures.unet.ResidualEncoderUNet', 'arch_kwargs': {'n_stages': 6, 'features_per_stage': [32, 64, 128, 256, 320, 320], 'conv_op': 'torch.nn.modules.conv.Conv3d', 'kernel_sizes': [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]], 'strides': [[1, 1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2]], 'n_blocks_per_stage': [1, 3, 4, 6, 6, 6], 'n_conv_per_stage_decoder': [1, 1, 1, 1, 1], 'conv_bias': True, 'norm_op': 'torch.nn.modules.instancenorm.InstanceNorm3d', 'norm_op_kwargs': {'eps': 1e-05, 'affine': True}, 'dropout_op': None, 'dropout_op_kwargs': None, 'nonlin': 'torch.nn.LeakyReLU', 'nonlin_kwargs': {'inplace': True}, 'deep_supervision': True}, '_kw_requires_import': ['conv_op', 'norm_op', 'dropout_op', 'nonlin']}, 'batch_dice': False}",
|
5 |
+
"configuration_name": "3d_fullres",
|
6 |
+
"cudnn_version": 8902,
|
7 |
+
"current_epoch": "800",
|
8 |
+
"dataloader_train": "<nnunetv2.training.data_augmentation.custom_transforms.limited_length_multithreaded_augmenter.LimitedLenWrapper object at 0x7fe33e837130>",
|
9 |
+
"dataloader_train.generator": "<nnunetv2.training.dataloading.data_loader_3d.nnUNetDataLoader3D object at 0x7fe33e8373d0>",
|
10 |
+
"dataloader_train.num_processes": "16",
|
11 |
+
"dataloader_train.transform": "Compose ( [SpatialTransform( independent_scale_for_each_axis = False, p_rot_per_sample = 0.2, p_scale_per_sample = 0.2, p_el_per_sample = 0, data_key = 'data', label_key = 'seg', patch_size = [128, 128, 128], patch_center_dist_from_border = None, do_elastic_deform = False, alpha = (0, 0), sigma = (0, 0), do_rotation = True, angle_x = (-0.5235987755982988, 0.5235987755982988), angle_y = (-0.5235987755982988, 0.5235987755982988), angle_z = (-0.5235987755982988, 0.5235987755982988), do_scale = True, scale = (0.7, 1.4), border_mode_data = 'constant', border_cval_data = 0, order_data = 3, border_mode_seg = 'constant', border_cval_seg = -1, order_seg = 1, random_crop = False, p_rot_per_axis = 1, p_independent_scale_per_axis = 1 ), GaussianNoiseTransform( p_per_sample = 0.1, data_key = 'data', noise_variance = (0, 0.1), p_per_channel = 1, per_channel = False ), GaussianBlurTransform( p_per_sample = 0.2, different_sigma_per_channel = True, p_per_channel = 0.5, data_key = 'data', blur_sigma = (0.5, 1.0), different_sigma_per_axis = False, p_isotropic = 0 ), BrightnessMultiplicativeTransform( p_per_sample = 0.15, data_key = 'data', multiplier_range = (0.75, 1.25), per_channel = True ), ContrastAugmentationTransform( p_per_sample = 0.15, data_key = 'data', contrast_range = (0.75, 1.25), preserve_range = True, per_channel = True, p_per_channel = 1 ), SimulateLowResolutionTransform( order_upsample = 3, order_downsample = 0, channels = None, per_channel = True, p_per_channel = 0.5, p_per_sample = 0.25, data_key = 'data', zoom_range = (0.5, 1), ignore_axes = None ), GammaTransform( p_per_sample = 0.1, retain_stats = True, per_channel = True, data_key = 'data', gamma_range = (0.7, 1.5), invert_image = True ), GammaTransform( p_per_sample = 0.3, retain_stats = True, per_channel = True, data_key = 'data', gamma_range = (0.7, 1.5), invert_image = False ), MirrorTransform( p_per_sample = 1, data_key = 'data', label_key = 'seg', axes = (0, 1, 2) ), RemoveLabelTransform( output_key = 'seg', input_key = 'seg', replace_with = 0, remove_label = -1 ), RenameTransform( delete_old = True, out_key = 'target', in_key = 'seg' ), DownsampleSegForDSTransform2( axes = None, output_key = 'target', input_key = 'target', order = 0, ds_scales = [[1.0, 1.0, 1.0], [0.5, 0.5, 0.5], [0.25, 0.25, 0.25], [0.125, 0.125, 0.125], [0.0625, 0.0625, 0.0625]] ), NumpyToTensor( keys = ['data', 'target'], cast_to = 'float' )] )",
|
12 |
+
"dataloader_val": "<nnunetv2.training.data_augmentation.custom_transforms.limited_length_multithreaded_augmenter.LimitedLenWrapper object at 0x7fe33e836fb0>",
|
13 |
+
"dataloader_val.generator": "<nnunetv2.training.dataloading.data_loader_3d.nnUNetDataLoader3D object at 0x7fe33e837340>",
|
14 |
+
"dataloader_val.num_processes": "8",
|
15 |
+
"dataloader_val.transform": "Compose ( [RemoveLabelTransform( output_key = 'seg', input_key = 'seg', replace_with = 0, remove_label = -1 ), RenameTransform( delete_old = True, out_key = 'target', in_key = 'seg' ), DownsampleSegForDSTransform2( axes = None, output_key = 'target', input_key = 'target', order = 0, ds_scales = [[1.0, 1.0, 1.0], [0.5, 0.5, 0.5], [0.25, 0.25, 0.25], [0.125, 0.125, 0.125], [0.0625, 0.0625, 0.0625]] ), NumpyToTensor( keys = ['data', 'target'], cast_to = 'float' )] )",
|
16 |
+
"dataset_json": "{'channel_names': {'0': 'CT'}, 'labels': {'background': 0, 'T1': 1, 'T2': 2, 'T3': 3, 'T4': 4, 'T5': 5, 'T6': 6, 'T7': 7, 'T8': 8, 'T9': 9, 'T10': 10, 'T11': 11, 'T12': 12, 'L1': 13, 'L2': 14, 'L3': 15, 'L4': 16, 'L5': 17, 'L6': 18, 'sacrum': 19, 'coccyx': 20, 'T13': 21}, 'numTraining': 1216, 'file_ending': '.nii.gz', 'dataset_name': 'VertebralBodies Thorax/Abdomen/Sacrum', 'reference': 'https://huggingface.co/datasets/fhofmann/VertebralBodiesCT-Labels', 'release': '2024-08-29', 'license': 'CC BY-SA 4.0', 'description': '1216 CT scans, with segmentations of the vertebral bodies in the thorax, abdomen, and sacrum. CT scans and initial labels derive from the VerSe (https://github.com/anjany/verse, license CC BY-SA 4.0) and TotalSegmentator (license CC BY 4.0 DEED) dataset. Files were processed, and labels were adapted as described in the README.md. A validation set is available separately.'}",
|
17 |
+
"device": "cuda:0",
|
18 |
+
"disable_checkpointing": "False",
|
19 |
+
"enable_deep_supervision": "True",
|
20 |
+
"fold": "all",
|
21 |
+
"folder_with_segs_from_previous_stage": "None",
|
22 |
+
"gpu_name": "NVIDIA A100-SXM4-40GB",
|
23 |
+
"grad_scaler": "<torch.cuda.amp.grad_scaler.GradScaler object at 0x7fe27970c0a0>",
|
24 |
+
"hostname": "srvdrai1",
|
25 |
+
"inference_allowed_mirroring_axes": "(0, 1, 2)",
|
26 |
+
"initial_lr": "0.01",
|
27 |
+
"is_cascaded": "False",
|
28 |
+
"is_ddp": "False",
|
29 |
+
"label_manager": "<nnunetv2.utilities.label_handling.label_handling.LabelManager object at 0x7fe27970c250>",
|
30 |
+
"local_rank": "0",
|
31 |
+
"log_file": "/workspace/data/nnUNet_results/Dataset601_VertebralBodies/nnUNetTrainer__nnUNetResEncUNetMPlans__3d_fullres/fold_all/training_log_2024_8_30_07_58_08.txt",
|
32 |
+
"logger": "<nnunetv2.training.logging.nnunet_logger.nnUNetLogger object at 0x7fe27970c100>",
|
33 |
+
"loss": "DeepSupervisionWrapper(\n (loss): DC_and_CE_loss(\n (ce): RobustCrossEntropyLoss()\n (dc): MemoryEfficientSoftDiceLoss()\n )\n)",
|
34 |
+
"lr_scheduler": "<nnunetv2.training.lr_scheduler.polylr.PolyLRScheduler object at 0x7fe27a935090>",
|
35 |
+
"my_init_kwargs": "{'plans': {'dataset_name': 'Dataset601_VertebralBodies', 'plans_name': 'nnUNetResEncUNetMPlans', 'original_median_spacing_after_transp': [1.5, 1.5, 1.5], 'original_median_shape_after_transp': [239, 252, 252], 'image_reader_writer': 'SimpleITKIO', 'transpose_forward': [0, 1, 2], 'transpose_backward': [0, 1, 2], 'configurations': {'2d': {'data_identifier': 'nnUNetPlans_2d', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 48, 'patch_size': [256, 256], 'median_image_size_in_voxels': [240.0, 240.0], 'spacing': [1.5, 1.5], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'architecture': {'network_class_name': 'dynamic_network_architectures.architectures.unet.ResidualEncoderUNet', 'arch_kwargs': {'n_stages': 7, 'features_per_stage': [32, 64, 128, 256, 512, 512, 512], 'conv_op': 'torch.nn.modules.conv.Conv2d', 'kernel_sizes': [[3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3]], 'strides': [[1, 1], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2]], 'n_blocks_per_stage': [1, 3, 4, 6, 6, 6, 6], 'n_conv_per_stage_decoder': [1, 1, 1, 1, 1, 1], 'conv_bias': True, 'norm_op': 'torch.nn.modules.instancenorm.InstanceNorm2d', 'norm_op_kwargs': {'eps': 1e-05, 'affine': True}, 'dropout_op': None, 'dropout_op_kwargs': None, 'nonlin': 'torch.nn.LeakyReLU', 'nonlin_kwargs': {'inplace': True}}, '_kw_requires_import': ['conv_op', 'norm_op', 'dropout_op', 'nonlin']}, 'batch_dice': True}, '3d_fullres': {'data_identifier': 'nnUNetPlans_3d_fullres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [128, 128, 128], 'median_image_size_in_voxels': [235.0, 240.0, 240.0], 'spacing': [1.5, 1.5, 1.5], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'architecture': {'network_class_name': 'dynamic_network_architectures.architectures.unet.ResidualEncoderUNet', 'arch_kwargs': {'n_stages': 6, 'features_per_stage': [32, 64, 128, 256, 320, 320], 'conv_op': 'torch.nn.modules.conv.Conv3d', 'kernel_sizes': [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]], 'strides': [[1, 1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2]], 'n_blocks_per_stage': [1, 3, 4, 6, 6, 6], 'n_conv_per_stage_decoder': [1, 1, 1, 1, 1], 'conv_bias': True, 'norm_op': 'torch.nn.modules.instancenorm.InstanceNorm3d', 'norm_op_kwargs': {'eps': 1e-05, 'affine': True}, 'dropout_op': None, 'dropout_op_kwargs': None, 'nonlin': 'torch.nn.LeakyReLU', 'nonlin_kwargs': {'inplace': True}}, '_kw_requires_import': ['conv_op', 'norm_op', 'dropout_op', 'nonlin']}, 'batch_dice': False}}, 
'experiment_planner_used': 'nnUNetPlannerResEncM', 'label_manager': 'LabelManager', 'foreground_intensity_properties_per_channel': {'0': {'max': 10076.0, 'mean': 251.18613451837237, 'median': 216.0, 'min': -2048.0, 'percentile_00_5': -158.0, 'percentile_99_5': 1100.0, 'std': 205.1041738973246}}}, 'configuration': '3d_fullres', 'fold': 'all', 'dataset_json': {'channel_names': {'0': 'CT'}, 'labels': {'background': 0, 'T1': 1, 'T2': 2, 'T3': 3, 'T4': 4, 'T5': 5, 'T6': 6, 'T7': 7, 'T8': 8, 'T9': 9, 'T10': 10, 'T11': 11, 'T12': 12, 'L1': 13, 'L2': 14, 'L3': 15, 'L4': 16, 'L5': 17, 'L6': 18, 'sacrum': 19, 'coccyx': 20, 'T13': 21}, 'numTraining': 1216, 'file_ending': '.nii.gz', 'dataset_name': 'VertebralBodies Thorax/Abdomen/Sacrum', 'reference': 'https://huggingface.co/datasets/fhofmann/VertebralBodiesCT-Labels', 'release': '2024-08-29', 'license': 'CC BY-SA 4.0', 'description': '1216 CT scans, with segmentations of the vertebral bodies in the thorax, abdomen, and sacrum. CT scans and initial labels derive from the VerSe (https://github.com/anjany/verse, license CC BY-SA 4.0) and TotalSegmentator (license CC BY 4.0 DEED) dataset. Files were processed, and labels were adapted as described in the README.md. A validation set is available separately.'}, 'unpack_dataset': False, 'device': device(type='cuda')}",
|
36 |
+
"network": "OptimizedModule",
|
37 |
+
"num_epochs": "1000",
|
38 |
+
"num_input_channels": "1",
|
39 |
+
"num_iterations_per_epoch": "250",
|
40 |
+
"num_val_iterations_per_epoch": "50",
|
41 |
+
"optimizer": "SGD (\nParameter Group 0\n dampening: 0\n differentiable: False\n foreach: None\n fused: None\n initial_lr: 0.01\n lr: 0.0023598068186038313\n maximize: False\n momentum: 0.99\n nesterov: True\n weight_decay: 3e-05\n)",
|
42 |
+
"output_folder": "/workspace/data/nnUNet_results/Dataset601_VertebralBodies/nnUNetTrainer__nnUNetResEncUNetMPlans__3d_fullres/fold_all",
|
43 |
+
"output_folder_base": "/workspace/data/nnUNet_results/Dataset601_VertebralBodies/nnUNetTrainer__nnUNetResEncUNetMPlans__3d_fullres",
|
44 |
+
"oversample_foreground_percent": "0.33",
|
45 |
+
"plans_manager": "{'dataset_name': 'Dataset601_VertebralBodies', 'plans_name': 'nnUNetResEncUNetMPlans', 'original_median_spacing_after_transp': [1.5, 1.5, 1.5], 'original_median_shape_after_transp': [239, 252, 252], 'image_reader_writer': 'SimpleITKIO', 'transpose_forward': [0, 1, 2], 'transpose_backward': [0, 1, 2], 'configurations': {'2d': {'data_identifier': 'nnUNetPlans_2d', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 48, 'patch_size': [256, 256], 'median_image_size_in_voxels': [240.0, 240.0], 'spacing': [1.5, 1.5], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'architecture': {'network_class_name': 'dynamic_network_architectures.architectures.unet.ResidualEncoderUNet', 'arch_kwargs': {'n_stages': 7, 'features_per_stage': [32, 64, 128, 256, 512, 512, 512], 'conv_op': 'torch.nn.modules.conv.Conv2d', 'kernel_sizes': [[3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3]], 'strides': [[1, 1], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2]], 'n_blocks_per_stage': [1, 3, 4, 6, 6, 6, 6], 'n_conv_per_stage_decoder': [1, 1, 1, 1, 1, 1], 'conv_bias': True, 'norm_op': 'torch.nn.modules.instancenorm.InstanceNorm2d', 'norm_op_kwargs': {'eps': 1e-05, 'affine': True}, 'dropout_op': None, 'dropout_op_kwargs': None, 'nonlin': 'torch.nn.LeakyReLU', 'nonlin_kwargs': {'inplace': True}}, '_kw_requires_import': ['conv_op', 'norm_op', 'dropout_op', 'nonlin']}, 'batch_dice': True}, '3d_fullres': {'data_identifier': 'nnUNetPlans_3d_fullres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [128, 128, 128], 'median_image_size_in_voxels': [235.0, 240.0, 240.0], 'spacing': [1.5, 1.5, 1.5], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'architecture': {'network_class_name': 'dynamic_network_architectures.architectures.unet.ResidualEncoderUNet', 'arch_kwargs': {'n_stages': 6, 'features_per_stage': [32, 64, 128, 256, 320, 320], 'conv_op': 'torch.nn.modules.conv.Conv3d', 'kernel_sizes': [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]], 'strides': [[1, 1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2]], 'n_blocks_per_stage': [1, 3, 4, 6, 6, 6], 'n_conv_per_stage_decoder': [1, 1, 1, 1, 1], 'conv_bias': True, 'norm_op': 'torch.nn.modules.instancenorm.InstanceNorm3d', 'norm_op_kwargs': {'eps': 1e-05, 'affine': True}, 'dropout_op': None, 'dropout_op_kwargs': None, 'nonlin': 'torch.nn.LeakyReLU', 'nonlin_kwargs': {'inplace': True}}, '_kw_requires_import': ['conv_op', 'norm_op', 'dropout_op', 'nonlin']}, 'batch_dice': False}}, 'experiment_planner_used': 
'nnUNetPlannerResEncM', 'label_manager': 'LabelManager', 'foreground_intensity_properties_per_channel': {'0': {'max': 10076.0, 'mean': 251.18613451837237, 'median': 216.0, 'min': -2048.0, 'percentile_00_5': -158.0, 'percentile_99_5': 1100.0, 'std': 205.1041738973246}}}",
|
46 |
+
"preprocessed_dataset_folder": "/workspace/data2/nnUNet_preprocessed/Dataset601_VertebralBodies/nnUNetPlans_3d_fullres",
|
47 |
+
"preprocessed_dataset_folder_base": "/workspace/data2/nnUNet_preprocessed/Dataset601_VertebralBodies",
|
48 |
+
"save_every": "50",
|
49 |
+
"torch_version": "2.3.0+cu121",
|
50 |
+
"unpack_dataset": "False",
|
51 |
+
"was_initialized": "True",
|
52 |
+
"weight_decay": "3e-05"
|
53 |
+
}
|
nnUNetTrainer__nnUNetResEncUNetMPlans__3d_fullres/fold_all/logs/progress.png
ADDED
nnUNetTrainer__nnUNetResEncUNetMPlans__3d_fullres/fold_all/logs/training_log_2024_8_29_15_59_41.txt
ADDED
The diff for this file is too large to render.
See raw diff
|
|
nnUNetTrainer__nnUNetResEncUNetMPlans__3d_fullres/fold_all/logs/training_log_2024_8_29_23_59_06.txt
ADDED
The diff for this file is too large to render.
See raw diff
|
|
nnUNetTrainer__nnUNetResEncUNetMPlans__3d_fullres/fold_all/logs/training_log_2024_8_30_07_58_08.txt
ADDED
@@ -0,0 +1,1432 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
|
2 |
+
#######################################################################
|
3 |
+
Please cite the following paper when using nnU-Net:
|
4 |
+
Isensee, F., Jaeger, P. F., Kohl, S. A., Petersen, J., & Maier-Hein, K. H. (2021). nnU-Net: a self-configuring method for deep learning-based biomedical image segmentation. Nature methods, 18(2), 203-211.
|
5 |
+
#######################################################################
|
6 |
+
|
7 |
+
2024-08-30 07:58:11.563206: Using torch.compile...
|
8 |
+
2024-08-30 07:58:18.089942: do_dummy_2d_data_aug: False
|
9 |
+
|
10 |
+
This is the configuration used by this training:
|
11 |
+
Configuration name: 3d_fullres
|
12 |
+
{'data_identifier': 'nnUNetPlans_3d_fullres', 'preprocessor_name': 'DefaultPreprocessor', 'batch_size': 2, 'patch_size': [128, 128, 128], 'median_image_size_in_voxels': [235.0, 240.0, 240.0], 'spacing': [1.5, 1.5, 1.5], 'normalization_schemes': ['CTNormalization'], 'use_mask_for_norm': [False], 'resampling_fn_data': 'resample_data_or_seg_to_shape', 'resampling_fn_seg': 'resample_data_or_seg_to_shape', 'resampling_fn_data_kwargs': {'is_seg': False, 'order': 3, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_seg_kwargs': {'is_seg': True, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'resampling_fn_probabilities': 'resample_data_or_seg_to_shape', 'resampling_fn_probabilities_kwargs': {'is_seg': False, 'order': 1, 'order_z': 0, 'force_separate_z': None}, 'architecture': {'network_class_name': 'dynamic_network_architectures.architectures.unet.ResidualEncoderUNet', 'arch_kwargs': {'n_stages': 6, 'features_per_stage': [32, 64, 128, 256, 320, 320], 'conv_op': 'torch.nn.modules.conv.Conv3d', 'kernel_sizes': [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]], 'strides': [[1, 1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2]], 'n_blocks_per_stage': [1, 3, 4, 6, 6, 6], 'n_conv_per_stage_decoder': [1, 1, 1, 1, 1], 'conv_bias': True, 'norm_op': 'torch.nn.modules.instancenorm.InstanceNorm3d', 'norm_op_kwargs': {'eps': 1e-05, 'affine': True}, 'dropout_op': None, 'dropout_op_kwargs': None, 'nonlin': 'torch.nn.LeakyReLU', 'nonlin_kwargs': {'inplace': True}, 'deep_supervision': True}, '_kw_requires_import': ['conv_op', 'norm_op', 'dropout_op', 'nonlin']}, 'batch_dice': False}
|
13 |
+
|
14 |
+
These are the global plan.json settings:
|
15 |
+
{'dataset_name': 'Dataset601_VertebralBodies', 'plans_name': 'nnUNetResEncUNetMPlans', 'original_median_spacing_after_transp': [1.5, 1.5, 1.5], 'original_median_shape_after_transp': [239, 252, 252], 'image_reader_writer': 'SimpleITKIO', 'transpose_forward': [0, 1, 2], 'transpose_backward': [0, 1, 2], 'experiment_planner_used': 'nnUNetPlannerResEncM', 'label_manager': 'LabelManager', 'foreground_intensity_properties_per_channel': {'0': {'max': 10076.0, 'mean': 251.18613451837237, 'median': 216.0, 'min': -2048.0, 'percentile_00_5': -158.0, 'percentile_99_5': 1100.0, 'std': 205.1041738973246}}}
|
16 |
+
|
17 |
+
2024-08-30 07:58:21.674600: Unable to plot network architecture: nnUNet_compile is enabled!
|
18 |
+
2024-08-30 07:58:22.131281:
|
19 |
+
2024-08-30 07:58:22.132290: Epoch 800
|
20 |
+
2024-08-30 07:58:22.133175: Current learning rate: 0.00235
|
21 |
+
2024-08-30 08:03:45.428643: train_loss -0.8298
|
22 |
+
2024-08-30 08:03:45.430047: val_loss -0.8349
|
23 |
+
2024-08-30 08:03:45.431003: Pseudo dice [0.9243, 0.9449, 0.9335, 0.8995, 0.8846, 0.9162, 0.9295, 0.9348, 0.9324, 0.913, 0.8889, 0.8969, 0.9247, 0.9191, 0.9173, 0.8506, 0.7589, 0.0, 0.9421, nan, 0.0]
|
24 |
+
2024-08-30 08:03:45.431701: Epoch time: 323.3 s
|
25 |
+
2024-08-30 08:03:46.966835:
|
26 |
+
2024-08-30 08:03:46.967671: Epoch 801
|
27 |
+
2024-08-30 08:03:46.968291: Current learning rate: 0.00234
|
28 |
+
2024-08-30 08:04:38.504457: train_loss -0.8275
|
29 |
+
2024-08-30 08:04:38.505691: val_loss -0.841
|
30 |
+
2024-08-30 08:04:38.506451: Pseudo dice [0.898, 0.9391, 0.937, 0.8957, 0.8836, 0.8873, 0.8928, 0.9155, 0.9086, 0.9039, 0.8952, 0.9038, 0.9531, 0.9499, 0.9427, 0.9347, 0.8904, 0.0, 0.9463, nan, 0.0]
|
31 |
+
2024-08-30 08:04:38.507064: Epoch time: 51.54 s
|
32 |
+
2024-08-30 08:04:39.909204:
|
33 |
+
2024-08-30 08:04:39.909985: Epoch 802
|
34 |
+
2024-08-30 08:04:39.910569: Current learning rate: 0.00233
|
35 |
+
2024-08-30 08:05:32.788320: train_loss -0.8287
|
36 |
+
2024-08-30 08:05:32.791261: val_loss -0.8546
|
37 |
+
2024-08-30 08:05:32.792677: Pseudo dice [0.9208, 0.946, 0.9223, 0.9167, 0.9083, 0.9257, 0.9309, 0.9319, 0.9459, 0.9684, 0.9716, 0.9611, 0.9578, 0.9272, 0.9065, 0.8822, 0.8567, 0.0, 0.9434, nan, nan]
|
38 |
+
2024-08-30 08:05:32.793562: Epoch time: 52.88 s
|
39 |
+
2024-08-30 08:05:34.988766:
|
40 |
+
2024-08-30 08:05:34.989611: Epoch 803
|
41 |
+
2024-08-30 08:05:34.990308: Current learning rate: 0.00232
|
42 |
+
2024-08-30 08:06:26.532500: train_loss -0.8275
|
43 |
+
2024-08-30 08:06:26.533911: val_loss -0.8135
|
44 |
+
2024-08-30 08:06:26.535242: Pseudo dice [0.8438, 0.8301, 0.7534, 0.7937, 0.832, 0.7874, 0.7203, 0.7308, 0.7882, 0.8375, 0.8646, 0.8654, 0.8509, 0.8684, 0.8871, 0.9275, 0.9166, 0.0, 0.9492, nan, nan]
|
45 |
+
2024-08-30 08:06:26.536349: Epoch time: 51.55 s
|
46 |
+
2024-08-30 08:06:28.116220:
|
47 |
+
2024-08-30 08:06:28.116964: Epoch 804
|
48 |
+
2024-08-30 08:06:28.117596: Current learning rate: 0.00231
|
49 |
+
2024-08-30 08:07:22.739134: train_loss -0.826
|
50 |
+
2024-08-30 08:07:22.741840: val_loss -0.8538
|
51 |
+
2024-08-30 08:07:22.742763: Pseudo dice [0.9266, 0.9505, 0.9481, 0.9558, 0.9594, 0.9632, 0.9063, 0.9122, 0.9563, 0.9658, 0.9565, 0.9547, 0.9174, 0.8873, 0.9158, 0.9345, 0.8662, 0.0, 0.9411, nan, nan]
|
52 |
+
2024-08-30 08:07:22.743593: Epoch time: 54.63 s
|
53 |
+
2024-08-30 08:07:24.305591:
|
54 |
+
2024-08-30 08:07:24.306453: Epoch 805
|
55 |
+
2024-08-30 08:07:24.307409: Current learning rate: 0.0023
|
56 |
+
2024-08-30 08:08:16.513894: train_loss -0.8196
|
57 |
+
2024-08-30 08:08:16.515238: val_loss -0.8443
|
58 |
+
2024-08-30 08:08:16.516107: Pseudo dice [0.9205, 0.9388, 0.943, 0.9425, 0.9243, 0.9212, 0.9322, 0.9344, 0.9331, 0.9398, 0.9214, 0.9125, 0.9076, 0.8794, 0.8772, 0.8927, 0.843, 0.0, 0.9436, nan, nan]
|
59 |
+
2024-08-30 08:08:16.516857: Epoch time: 52.21 s
|
60 |
+
2024-08-30 08:08:17.939694:
|
61 |
+
2024-08-30 08:08:17.940573: Epoch 806
|
62 |
+
2024-08-30 08:08:17.941231: Current learning rate: 0.00229
|
63 |
+
2024-08-30 08:09:10.630731: train_loss -0.8309
|
64 |
+
2024-08-30 08:09:10.632896: val_loss -0.8427
|
65 |
+
2024-08-30 08:09:10.634222: Pseudo dice [0.921, 0.9139, 0.8767, 0.8661, 0.8639, 0.8846, 0.8672, 0.8613, 0.8769, 0.8845, 0.9237, 0.95, 0.9552, 0.9689, 0.9725, 0.9647, 0.9313, 0.0, 0.935, nan, nan]
|
66 |
+
2024-08-30 08:09:10.635642: Epoch time: 52.69 s
|
67 |
+
2024-08-30 08:09:12.215455:
|
68 |
+
2024-08-30 08:09:12.216362: Epoch 807
|
69 |
+
2024-08-30 08:09:12.216984: Current learning rate: 0.00228
|
70 |
+
2024-08-30 08:10:03.502030: train_loss -0.8369
|
71 |
+
2024-08-30 08:10:03.503799: val_loss -0.8569
|
72 |
+
2024-08-30 08:10:03.504809: Pseudo dice [0.919, 0.9469, 0.933, 0.9408, 0.9519, 0.9526, 0.9455, 0.9432, 0.9465, 0.9597, 0.9574, 0.9595, 0.963, 0.952, 0.9375, 0.9212, 0.8982, 0.0, 0.9518, nan, nan]
|
73 |
+
2024-08-30 08:10:03.505649: Epoch time: 51.29 s
|
74 |
+
2024-08-30 08:10:04.943223:
|
75 |
+
2024-08-30 08:10:04.944008: Epoch 808
|
76 |
+
2024-08-30 08:10:04.944632: Current learning rate: 0.00226
|
77 |
+
2024-08-30 08:10:55.502361: train_loss -0.8244
|
78 |
+
2024-08-30 08:10:55.504170: val_loss -0.8443
|
79 |
+
2024-08-30 08:10:55.504948: Pseudo dice [0.9041, 0.914, 0.9259, 0.9342, 0.9182, 0.9123, 0.9433, 0.9522, 0.9473, 0.9455, 0.9306, 0.9368, 0.9317, 0.9557, 0.9111, 0.8272, 0.7577, 0.0, 0.945, nan, 0.0]
|
80 |
+
2024-08-30 08:10:55.505560: Epoch time: 50.56 s
|
81 |
+
2024-08-30 08:10:56.940577:
|
82 |
+
2024-08-30 08:10:56.941448: Epoch 809
|
83 |
+
2024-08-30 08:10:56.942101: Current learning rate: 0.00225
|
84 |
+
2024-08-30 08:11:49.197581: train_loss -0.8294
|
85 |
+
2024-08-30 08:11:49.199519: val_loss -0.8529
|
86 |
+
2024-08-30 08:11:49.201134: Pseudo dice [0.9261, 0.9319, 0.933, 0.9313, 0.9415, 0.9552, 0.9307, 0.8648, 0.8206, 0.8718, 0.9199, 0.9415, 0.9534, 0.9488, 0.9468, 0.9375, 0.9104, 0.0, 0.9443, nan, 0.0]
|
87 |
+
2024-08-30 08:11:49.203180: Epoch time: 52.26 s
|
88 |
+
2024-08-30 08:11:50.628483:
|
89 |
+
2024-08-30 08:11:50.629328: Epoch 810
|
90 |
+
2024-08-30 08:11:50.629950: Current learning rate: 0.00224
|
91 |
+
2024-08-30 08:12:44.200685: train_loss -0.8284
|
92 |
+
2024-08-30 08:12:44.202734: val_loss -0.8515
|
93 |
+
2024-08-30 08:12:44.203762: Pseudo dice [0.9285, 0.9179, 0.9271, 0.9394, 0.8995, 0.9022, 0.9364, 0.912, 0.8835, 0.9053, 0.9436, 0.9638, 0.9683, 0.9266, 0.8912, 0.9094, 0.8941, 0.0, 0.9413, nan, nan]
|
94 |
+
2024-08-30 08:12:44.204494: Epoch time: 53.58 s
|
95 |
+
2024-08-30 08:12:45.877465:
|
96 |
+
2024-08-30 08:12:45.878678: Epoch 811
|
97 |
+
2024-08-30 08:12:45.879470: Current learning rate: 0.00223
|
98 |
+
2024-08-30 08:13:35.151080: train_loss -0.818
|
99 |
+
2024-08-30 08:13:35.152261: val_loss -0.8408
|
100 |
+
2024-08-30 08:13:35.153176: Pseudo dice [0.9395, 0.924, 0.8925, 0.8974, 0.8862, 0.8433, 0.8485, 0.8944, 0.8938, 0.8996, 0.9513, 0.9481, 0.9603, 0.9192, 0.8756, 0.8428, 0.8499, nan, 0.945, nan, nan]
|
101 |
+
2024-08-30 08:13:35.153873: Epoch time: 49.28 s
|
102 |
+
2024-08-30 08:13:36.626055:
|
103 |
+
2024-08-30 08:13:36.626868: Epoch 812
|
104 |
+
2024-08-30 08:13:36.627525: Current learning rate: 0.00222
|
105 |
+
2024-08-30 08:14:25.278524: train_loss -0.8262
|
106 |
+
2024-08-30 08:14:25.281066: val_loss -0.841
|
107 |
+
2024-08-30 08:14:25.282288: Pseudo dice [0.9162, 0.9452, 0.956, 0.9518, 0.9345, 0.912, 0.9154, 0.9162, 0.8872, 0.874, 0.8917, 0.8981, 0.9605, 0.9475, 0.9276, 0.877, 0.7966, 0.0, 0.942, nan, 0.0]
|
108 |
+
2024-08-30 08:14:25.283130: Epoch time: 48.66 s
|
109 |
+
2024-08-30 08:14:26.765371:
|
110 |
+
2024-08-30 08:14:26.766247: Epoch 813
|
111 |
+
2024-08-30 08:14:26.766987: Current learning rate: 0.00221
|
112 |
+
2024-08-30 08:15:17.590965: train_loss -0.8314
|
113 |
+
2024-08-30 08:15:17.593155: val_loss -0.8355
|
114 |
+
2024-08-30 08:15:17.594711: Pseudo dice [0.9184, 0.9248, 0.9114, 0.9097, 0.8947, 0.8811, 0.8787, 0.8975, 0.8757, 0.881, 0.8705, 0.9022, 0.8961, 0.8504, 0.8486, 0.8488, 0.8674, 0.0, 0.9501, nan, nan]
|
115 |
+
2024-08-30 08:15:17.596956: Epoch time: 50.83 s
|
116 |
+
2024-08-30 08:15:19.065178:
|
117 |
+
2024-08-30 08:15:19.066002: Epoch 814
|
118 |
+
2024-08-30 08:15:19.066709: Current learning rate: 0.0022
|
119 |
+
2024-08-30 08:16:10.943189: train_loss -0.8306
|
120 |
+
2024-08-30 08:16:10.955420: val_loss -0.8514
|
121 |
+
2024-08-30 08:16:10.956509: Pseudo dice [0.9252, 0.9536, 0.952, 0.9485, 0.9561, 0.9566, 0.9676, 0.9678, 0.9677, 0.9643, 0.95, 0.9457, 0.9073, 0.9278, 0.9319, 0.905, 0.8595, 0.0, 0.9426, nan, 0.0]
|
122 |
+
2024-08-30 08:16:10.957301: Epoch time: 51.88 s
|
123 |
+
2024-08-30 08:16:13.075094:
|
124 |
+
2024-08-30 08:16:13.075996: Epoch 815
|
125 |
+
2024-08-30 08:16:13.076652: Current learning rate: 0.00219
|
126 |
+
2024-08-30 08:17:06.350408: train_loss -0.8157
|
127 |
+
2024-08-30 08:17:06.351511: val_loss -0.842
|
128 |
+
2024-08-30 08:17:06.352343: Pseudo dice [0.9262, 0.9019, 0.8707, 0.8943, 0.9377, 0.9371, 0.9258, 0.9275, 0.9172, 0.9048, 0.9104, 0.9174, 0.9221, 0.9076, 0.8824, 0.8531, 0.824, 0.0, 0.9371, nan, nan]
|
129 |
+
2024-08-30 08:17:06.353109: Epoch time: 53.28 s
|
130 |
+
2024-08-30 08:17:07.755994:
|
131 |
+
2024-08-30 08:17:07.757254: Epoch 816
|
132 |
+
2024-08-30 08:17:07.757989: Current learning rate: 0.00218
|
133 |
+
2024-08-30 08:17:58.611602: train_loss -0.8396
|
134 |
+
2024-08-30 08:17:58.613509: val_loss -0.8431
|
135 |
+
2024-08-30 08:17:58.614527: Pseudo dice [0.9309, 0.9434, 0.924, 0.9131, 0.9369, 0.9263, 0.9033, 0.9149, 0.9345, 0.9132, 0.864, 0.8876, 0.9373, 0.9518, 0.9265, 0.8795, 0.8378, 0.0, 0.9452, nan, nan]
|
136 |
+
2024-08-30 08:17:58.615318: Epoch time: 50.86 s
|
137 |
+
2024-08-30 08:18:00.178677:
|
138 |
+
2024-08-30 08:18:00.179557: Epoch 817
|
139 |
+
2024-08-30 08:18:00.180188: Current learning rate: 0.00217
|
140 |
+
2024-08-30 08:18:54.053931: train_loss -0.8233
|
141 |
+
2024-08-30 08:18:54.055542: val_loss -0.8204
|
142 |
+
2024-08-30 08:18:54.056728: Pseudo dice [0.8693, 0.8951, 0.8895, 0.8801, 0.8989, 0.9228, 0.9474, 0.9307, 0.9131, 0.9114, 0.9073, 0.9106, 0.8527, 0.7768, 0.7465, 0.7157, 0.7106, 0.0, 0.9297, nan, nan]
|
143 |
+
2024-08-30 08:18:54.057687: Epoch time: 53.88 s
|
144 |
+
2024-08-30 08:18:55.619385:
|
145 |
+
2024-08-30 08:18:55.620272: Epoch 818
|
146 |
+
2024-08-30 08:18:55.620930: Current learning rate: 0.00216
|
147 |
+
2024-08-30 08:19:46.511590: train_loss -0.8308
|
148 |
+
2024-08-30 08:19:46.513474: val_loss -0.8539
|
149 |
+
2024-08-30 08:19:46.514338: Pseudo dice [0.9501, 0.9587, 0.9606, 0.9649, 0.9644, 0.9682, 0.958, 0.9286, 0.8767, 0.8543, 0.8755, 0.8759, 0.9143, 0.8994, 0.8829, 0.9086, 0.9096, 0.0, 0.9338, nan, 0.0]
|
150 |
+
2024-08-30 08:19:46.515176: Epoch time: 50.9 s
|
151 |
+
2024-08-30 08:19:48.165145:
|
152 |
+
2024-08-30 08:19:48.166027: Epoch 819
|
153 |
+
2024-08-30 08:19:48.166734: Current learning rate: 0.00215
|
154 |
+
2024-08-30 08:20:38.473231: train_loss -0.8336
|
155 |
+
2024-08-30 08:20:38.475305: val_loss -0.8618
|
156 |
+
2024-08-30 08:20:38.476350: Pseudo dice [0.9202, 0.9409, 0.9411, 0.9425, 0.9466, 0.9563, 0.9615, 0.9633, 0.9232, 0.896, 0.9343, 0.956, 0.9608, 0.969, 0.9541, 0.9523, 0.9448, nan, 0.949, nan, nan]
|
157 |
+
2024-08-30 08:20:38.477642: Epoch time: 50.31 s
|
158 |
+
2024-08-30 08:20:40.186954:
|
159 |
+
2024-08-30 08:20:40.187847: Epoch 820
|
160 |
+
2024-08-30 08:20:40.188530: Current learning rate: 0.00214
|
161 |
+
2024-08-30 08:21:33.444569: train_loss -0.8376
|
162 |
+
2024-08-30 08:21:33.446877: val_loss -0.8386
|
163 |
+
2024-08-30 08:21:33.448053: Pseudo dice [0.9123, 0.9334, 0.9372, 0.9498, 0.9616, 0.9515, 0.9048, 0.8704, 0.901, 0.9199, 0.9388, 0.8919, 0.9033, 0.9588, 0.8961, 0.8513, 0.8759, 0.0, 0.9404, nan, nan]
|
164 |
+
2024-08-30 08:21:33.448708: Epoch time: 53.26 s
|
165 |
+
2024-08-30 08:21:35.020370:
|
166 |
+
2024-08-30 08:21:35.021193: Epoch 821
|
167 |
+
2024-08-30 08:21:35.021801: Current learning rate: 0.00213
|
168 |
+
2024-08-30 08:22:28.024297: train_loss -0.8313
|
169 |
+
2024-08-30 08:22:28.025396: val_loss -0.835
|
170 |
+
2024-08-30 08:22:28.026192: Pseudo dice [0.9202, 0.9456, 0.9307, 0.928, 0.9179, 0.9048, 0.9096, 0.9235, 0.9275, 0.94, 0.9337, 0.9078, 0.8945, 0.8613, 0.8258, 0.7934, 0.7485, 0.0, 0.943, nan, nan]
|
171 |
+
2024-08-30 08:22:28.027001: Epoch time: 53.01 s
|
172 |
+
2024-08-30 08:22:29.338894:
|
173 |
+
2024-08-30 08:22:29.339920: Epoch 822
|
174 |
+
2024-08-30 08:22:29.340533: Current learning rate: 0.00212
|
175 |
+
2024-08-30 08:23:23.282223: train_loss -0.8291
|
176 |
+
2024-08-30 08:23:23.285022: val_loss -0.8318
|
177 |
+
2024-08-30 08:23:23.285893: Pseudo dice [0.9273, 0.9355, 0.9379, 0.931, 0.9015, 0.8874, 0.8629, 0.8341, 0.7981, 0.8074, 0.8294, 0.8404, 0.8596, 0.8865, 0.9295, 0.9218, 0.8733, 0.0, 0.9337, nan, nan]
|
178 |
+
2024-08-30 08:23:23.286742: Epoch time: 53.95 s
|
179 |
+
2024-08-30 08:23:24.822444:
|
180 |
+
2024-08-30 08:23:24.823317: Epoch 823
|
181 |
+
2024-08-30 08:23:24.823978: Current learning rate: 0.0021
|
182 |
+
2024-08-30 08:24:16.636767: train_loss -0.8283
|
183 |
+
2024-08-30 08:24:16.637908: val_loss -0.8405
|
184 |
+
2024-08-30 08:24:16.638814: Pseudo dice [0.8966, 0.869, 0.819, 0.8487, 0.843, 0.8508, 0.8709, 0.8922, 0.9166, 0.9319, 0.9118, 0.8867, 0.9029, 0.9309, 0.9442, 0.9578, 0.9509, nan, 0.9552, nan, nan]
|
185 |
+
2024-08-30 08:24:16.639580: Epoch time: 51.82 s
|
186 |
+
2024-08-30 08:24:17.981040:
|
187 |
+
2024-08-30 08:24:17.981936: Epoch 824
|
188 |
+
2024-08-30 08:24:17.982702: Current learning rate: 0.00209
|
189 |
+
2024-08-30 08:25:09.459312: train_loss -0.8332
|
190 |
+
2024-08-30 08:25:09.461983: val_loss -0.847
|
191 |
+
2024-08-30 08:25:09.463172: Pseudo dice [0.9129, 0.9362, 0.9282, 0.9401, 0.9414, 0.9529, 0.9507, 0.9306, 0.9334, 0.9195, 0.9052, 0.885, 0.9312, 0.9253, 0.9305, 0.9056, 0.8777, 0.0, 0.9459, nan, 0.0]
|
192 |
+
2024-08-30 08:25:09.464164: Epoch time: 51.48 s
|
193 |
+
2024-08-30 08:25:11.123589:
|
194 |
+
2024-08-30 08:25:11.124481: Epoch 825
|
195 |
+
2024-08-30 08:25:11.125131: Current learning rate: 0.00208
|
196 |
+
2024-08-30 08:26:03.669369: train_loss -0.8402
|
197 |
+
2024-08-30 08:26:03.671208: val_loss -0.8566
|
198 |
+
2024-08-30 08:26:03.672660: Pseudo dice [0.9124, 0.9442, 0.942, 0.952, 0.9593, 0.9669, 0.9522, 0.9513, 0.9681, 0.9742, 0.9675, 0.9643, 0.9618, 0.9555, 0.9259, 0.9126, 0.8724, 0.0, 0.9548, nan, nan]
|
199 |
+
2024-08-30 08:26:03.673769: Epoch time: 52.55 s
|
200 |
+
2024-08-30 08:26:05.188530:
|
201 |
+
2024-08-30 08:26:05.189483: Epoch 826
|
202 |
+
2024-08-30 08:26:05.190189: Current learning rate: 0.00207
|
203 |
+
2024-08-30 08:26:56.577958: train_loss -0.8289
|
204 |
+
2024-08-30 08:26:56.580436: val_loss -0.8455
|
205 |
+
2024-08-30 08:26:56.581439: Pseudo dice [0.8892, 0.9066, 0.9185, 0.953, 0.9587, 0.9408, 0.914, 0.8965, 0.9056, 0.9023, 0.8851, 0.9108, 0.9464, 0.908, 0.9074, 0.8988, 0.8317, 0.0, 0.9439, nan, nan]
|
206 |
+
2024-08-30 08:26:56.582162: Epoch time: 51.39 s
|
207 |
+
2024-08-30 08:26:57.894692:
|
208 |
+
2024-08-30 08:26:57.895434: Epoch 827
|
209 |
+
2024-08-30 08:26:57.896052: Current learning rate: 0.00206
|
210 |
+
2024-08-30 08:27:47.155619: train_loss -0.8429
|
211 |
+
2024-08-30 08:27:47.157499: val_loss -0.8626
|
212 |
+
2024-08-30 08:27:47.158466: Pseudo dice [0.9454, 0.9514, 0.9409, 0.9206, 0.913, 0.9173, 0.9452, 0.9572, 0.9547, 0.9567, 0.9601, 0.9514, 0.9524, 0.9657, 0.967, 0.9685, 0.9502, nan, 0.947, nan, nan]
|
213 |
+
2024-08-30 08:27:47.159390: Epoch time: 49.26 s
|
214 |
+
2024-08-30 08:27:47.160301: Yayy! New best EMA pseudo Dice: 0.8698
|
215 |
+
2024-08-30 08:27:52.292960:
|
216 |
+
2024-08-30 08:27:52.293909: Epoch 828
|
217 |
+
2024-08-30 08:27:52.294642: Current learning rate: 0.00205
|
218 |
+
2024-08-30 08:28:43.239846: train_loss -0.8371
|
219 |
+
2024-08-30 08:28:43.241592: val_loss -0.8352
|
220 |
+
2024-08-30 08:28:43.243298: Pseudo dice [0.9016, 0.9249, 0.9414, 0.9493, 0.9637, 0.9653, 0.963, 0.9665, 0.9624, 0.9481, 0.9269, 0.9263, 0.8967, 0.8463, 0.7995, 0.7497, 0.7564, 0.0, 0.9564, nan, 0.0]
|
221 |
+
2024-08-30 08:28:43.244061: Epoch time: 50.95 s
|
222 |
+
2024-08-30 08:28:44.624213:
|
223 |
+
2024-08-30 08:28:44.625808: Epoch 829
|
224 |
+
2024-08-30 08:28:44.626519: Current learning rate: 0.00204
|
225 |
+
2024-08-30 08:29:37.756772: train_loss -0.8299
|
226 |
+
2024-08-30 08:29:37.757998: val_loss -0.8568
|
227 |
+
2024-08-30 08:29:37.758874: Pseudo dice [0.9323, 0.9466, 0.942, 0.9373, 0.9468, 0.9594, 0.966, 0.9621, 0.9544, 0.9604, 0.9596, 0.9597, 0.9711, 0.9682, 0.9462, 0.8969, 0.8514, 0.0, 0.9407, nan, nan]
|
228 |
+
2024-08-30 08:29:37.759763: Epoch time: 53.14 s
|
229 |
+
2024-08-30 08:29:39.056917:
|
230 |
+
2024-08-30 08:29:39.057681: Epoch 830
|
231 |
+
2024-08-30 08:29:39.058367: Current learning rate: 0.00203
|
232 |
+
2024-08-30 08:30:35.667782: train_loss -0.8141
|
233 |
+
2024-08-30 08:30:35.669878: val_loss -0.8573
|
234 |
+
2024-08-30 08:30:35.670662: Pseudo dice [0.9336, 0.9498, 0.9528, 0.9497, 0.9628, 0.9605, 0.9664, 0.9729, 0.9727, 0.9722, 0.9661, 0.9445, 0.9273, 0.9296, 0.9067, 0.8506, 0.8598, 0.0, 0.9463, nan, nan]
|
235 |
+
2024-08-30 08:30:35.671288: Epoch time: 56.61 s
|
236 |
+
2024-08-30 08:30:35.671895: Yayy! New best EMA pseudo Dice: 0.8699
|
237 |
+
2024-08-30 08:30:40.406667:
|
238 |
+
2024-08-30 08:30:40.407565: Epoch 831
|
239 |
+
2024-08-30 08:30:40.408188: Current learning rate: 0.00202
|
240 |
+
2024-08-30 08:31:33.459675: train_loss -0.8337
|
241 |
+
2024-08-30 08:31:33.460820: val_loss -0.8456
|
242 |
+
2024-08-30 08:31:33.461634: Pseudo dice [0.9263, 0.9389, 0.9259, 0.9389, 0.9516, 0.9527, 0.9041, 0.8772, 0.9082, 0.9303, 0.9191, 0.9362, 0.9529, 0.9466, 0.9279, 0.8727, 0.8546, 0.0, 0.9462, nan, nan]
|
243 |
+
2024-08-30 08:31:33.462340: Epoch time: 53.06 s
|
244 |
+
2024-08-30 08:31:33.463093: Yayy! New best EMA pseudo Dice: 0.8703
|
245 |
+
2024-08-30 08:31:37.837598:
|
246 |
+
2024-08-30 08:31:37.838884: Epoch 832
|
247 |
+
2024-08-30 08:31:37.839515: Current learning rate: 0.00201
|
248 |
+
2024-08-30 08:32:29.919479: train_loss -0.8371
|
249 |
+
2024-08-30 08:32:29.921590: val_loss -0.8504
|
250 |
+
2024-08-30 08:32:29.922781: Pseudo dice [0.9288, 0.9495, 0.9564, 0.958, 0.9473, 0.9519, 0.9621, 0.9535, 0.9327, 0.9095, 0.8838, 0.8829, 0.9102, 0.8988, 0.8911, 0.9062, 0.9134, nan, 0.945, nan, nan]
|
251 |
+
2024-08-30 08:32:29.923557: Epoch time: 52.08 s
|
252 |
+
2024-08-30 08:32:29.924380: Yayy! New best EMA pseudo Dice: 0.876
|
253 |
+
2024-08-30 08:32:34.433934:
|
254 |
+
2024-08-30 08:32:34.434817: Epoch 833
|
255 |
+
2024-08-30 08:32:34.435482: Current learning rate: 0.002
|
256 |
+
2024-08-30 08:33:25.896383: train_loss -0.8359
|
257 |
+
2024-08-30 08:33:25.898402: val_loss -0.8484
|
258 |
+
2024-08-30 08:33:25.899250: Pseudo dice [0.9355, 0.9488, 0.9456, 0.9063, 0.908, 0.9442, 0.9221, 0.9163, 0.9263, 0.9353, 0.9369, 0.9373, 0.9159, 0.9038, 0.9198, 0.8978, 0.9145, 0.0, 0.9548, nan, nan]
|
259 |
+
2024-08-30 08:33:25.900124: Epoch time: 51.47 s
|
260 |
+
2024-08-30 08:33:25.900837: Yayy! New best EMA pseudo Dice: 0.8761
|
261 |
+
2024-08-30 08:33:30.282899:
|
262 |
+
2024-08-30 08:33:30.283942: Epoch 834
|
263 |
+
2024-08-30 08:33:30.284630: Current learning rate: 0.00199
|
264 |
+
2024-08-30 08:34:22.623142: train_loss -0.8318
|
265 |
+
2024-08-30 08:34:22.624439: val_loss -0.85
|
266 |
+
2024-08-30 08:34:22.625240: Pseudo dice [0.9323, 0.9508, 0.9449, 0.9214, 0.8995, 0.8596, 0.8281, 0.8324, 0.8634, 0.909, 0.9332, 0.9516, 0.9706, 0.9711, 0.9661, 0.94, 0.9221, 0.0, 0.9507, nan, nan]
|
267 |
+
2024-08-30 08:34:22.626162: Epoch time: 52.34 s
|
268 |
+
2024-08-30 08:34:23.944872:
|
269 |
+
2024-08-30 08:34:23.945734: Epoch 835
|
270 |
+
2024-08-30 08:34:23.946381: Current learning rate: 0.00198
|
271 |
+
2024-08-30 08:35:17.867495: train_loss -0.8387
|
272 |
+
2024-08-30 08:35:17.870377: val_loss -0.8609
|
273 |
+
2024-08-30 08:35:17.871281: Pseudo dice [0.9046, 0.9497, 0.9472, 0.9573, 0.9583, 0.9423, 0.9298, 0.9285, 0.953, 0.9676, 0.9662, 0.9654, 0.9695, 0.9583, 0.961, 0.9583, 0.902, 0.0, 0.9462, nan, nan]
|
274 |
+
2024-08-30 08:35:17.872017: Epoch time: 53.93 s
|
275 |
+
2024-08-30 08:35:17.872632: Yayy! New best EMA pseudo Dice: 0.8778
|
276 |
+
2024-08-30 08:35:22.239060:
|
277 |
+
2024-08-30 08:35:22.239956: Epoch 836
|
278 |
+
2024-08-30 08:35:22.240781: Current learning rate: 0.00196
|
279 |
+
2024-08-30 08:36:15.614010: train_loss -0.8287
|
280 |
+
2024-08-30 08:36:15.616992: val_loss -0.8443
|
281 |
+
2024-08-30 08:36:15.617964: Pseudo dice [0.9094, 0.9418, 0.9388, 0.9396, 0.9294, 0.9082, 0.9189, 0.9234, 0.877, 0.8867, 0.8986, 0.8993, 0.9003, 0.8602, 0.8569, 0.9135, 0.8982, 0.0, 0.9551, nan, nan]
|
282 |
+
2024-08-30 08:36:15.619199: Epoch time: 53.38 s
|
283 |
+
2024-08-30 08:36:16.960143:
|
284 |
+
2024-08-30 08:36:16.961052: Epoch 837
|
285 |
+
2024-08-30 08:36:16.961776: Current learning rate: 0.00195
|
286 |
+
2024-08-30 08:37:07.388469: train_loss -0.8272
|
287 |
+
2024-08-30 08:37:07.390649: val_loss -0.8489
|
288 |
+
2024-08-30 08:37:07.391516: Pseudo dice [0.9166, 0.9045, 0.8891, 0.8701, 0.8837, 0.8848, 0.9035, 0.905, 0.9083, 0.915, 0.93, 0.9457, 0.9486, 0.9387, 0.9536, 0.9627, 0.9111, 0.0, 0.9435, nan, nan]
|
289 |
+
2024-08-30 08:37:07.392443: Epoch time: 50.43 s
|
290 |
+
2024-08-30 08:37:08.861180:
|
291 |
+
2024-08-30 08:37:08.862068: Epoch 838
|
292 |
+
2024-08-30 08:37:08.862830: Current learning rate: 0.00194
|
293 |
+
2024-08-30 08:38:05.148122: train_loss -0.8258
|
294 |
+
2024-08-30 08:38:05.150485: val_loss -0.8404
|
295 |
+
2024-08-30 08:38:05.151386: Pseudo dice [0.8883, 0.8898, 0.9005, 0.8151, 0.8185, 0.8683, 0.8654, 0.8936, 0.9216, 0.9188, 0.9192, 0.9373, 0.958, 0.9664, 0.9683, 0.9513, 0.9398, 0.0, 0.9475, nan, nan]
|
296 |
+
2024-08-30 08:38:05.152148: Epoch time: 56.29 s
|
297 |
+
2024-08-30 08:38:06.483434:
|
298 |
+
2024-08-30 08:38:06.484345: Epoch 839
|
299 |
+
2024-08-30 08:38:06.485116: Current learning rate: 0.00193
|
300 |
+
2024-08-30 08:38:57.670839: train_loss -0.8387
|
301 |
+
2024-08-30 08:38:57.673256: val_loss -0.8605
|
302 |
+
2024-08-30 08:38:57.674401: Pseudo dice [0.9394, 0.9507, 0.9524, 0.9487, 0.954, 0.9456, 0.9253, 0.9307, 0.9418, 0.9422, 0.9399, 0.9486, 0.9598, 0.9701, 0.9737, 0.9696, 0.933, nan, 0.9466, nan, 0.0]
|
303 |
+
2024-08-30 08:38:57.675169: Epoch time: 51.19 s
|
304 |
+
2024-08-30 08:38:59.269151:
|
305 |
+
2024-08-30 08:38:59.270121: Epoch 840
|
306 |
+
2024-08-30 08:38:59.270879: Current learning rate: 0.00192
|
307 |
+
2024-08-30 08:39:53.401448: train_loss -0.841
|
308 |
+
2024-08-30 08:39:53.402682: val_loss -0.8495
|
309 |
+
2024-08-30 08:39:53.403647: Pseudo dice [0.9174, 0.9366, 0.9184, 0.9149, 0.9108, 0.9191, 0.9439, 0.9464, 0.9191, 0.9199, 0.9207, 0.9107, 0.9452, 0.9343, 0.9256, 0.9306, 0.924, 0.0, 0.9462, nan, 0.0]
|
310 |
+
2024-08-30 08:39:53.404493: Epoch time: 54.14 s
|
311 |
+
2024-08-30 08:39:54.918965:
|
312 |
+
2024-08-30 08:39:54.919803: Epoch 841
|
313 |
+
2024-08-30 08:39:54.920514: Current learning rate: 0.00191
|
314 |
+
2024-08-30 08:40:47.035358: train_loss -0.8342
|
315 |
+
2024-08-30 08:40:47.037726: val_loss -0.8393
|
316 |
+
2024-08-30 08:40:47.038593: Pseudo dice [0.9156, 0.9198, 0.9055, 0.9006, 0.8871, 0.8714, 0.8915, 0.8892, 0.8902, 0.8827, 0.8617, 0.8779, 0.9057, 0.8994, 0.8778, 0.8781, 0.8957, 0.0, 0.9567, nan, nan]
|
317 |
+
2024-08-30 08:40:47.039367: Epoch time: 52.12 s
|
318 |
+
2024-08-30 08:40:48.478051:
|
319 |
+
2024-08-30 08:40:48.478997: Epoch 842
|
320 |
+
2024-08-30 08:40:48.479679: Current learning rate: 0.0019
|
321 |
+
2024-08-30 08:41:39.741175: train_loss -0.8354
|
322 |
+
2024-08-30 08:41:39.742464: val_loss -0.8438
|
323 |
+
2024-08-30 08:41:39.743416: Pseudo dice [0.9415, 0.9181, 0.883, 0.8168, 0.8267, 0.9008, 0.9337, 0.9525, 0.9629, 0.9413, 0.925, 0.9168, 0.886, 0.8656, 0.8794, 0.9152, 0.9164, 0.0, 0.9501, nan, 0.0]
|
324 |
+
2024-08-30 08:41:39.744118: Epoch time: 51.27 s
|
325 |
+
2024-08-30 08:41:41.238216:
|
326 |
+
2024-08-30 08:41:41.239219: Epoch 843
|
327 |
+
2024-08-30 08:41:41.239991: Current learning rate: 0.00189
|
328 |
+
2024-08-30 08:42:34.509820: train_loss -0.8206
|
329 |
+
2024-08-30 08:42:34.511800: val_loss -0.8529
|
330 |
+
2024-08-30 08:42:34.512738: Pseudo dice [0.9251, 0.9411, 0.947, 0.9551, 0.9312, 0.9193, 0.9308, 0.9373, 0.9253, 0.9274, 0.9375, 0.9481, 0.9566, 0.9615, 0.9748, 0.9434, 0.826, 0.0, 0.9267, nan, nan]
|
331 |
+
2024-08-30 08:42:34.513470: Epoch time: 53.27 s
|
332 |
+
2024-08-30 08:42:35.918376:
|
333 |
+
2024-08-30 08:42:35.919334: Epoch 844
|
334 |
+
2024-08-30 08:42:35.920022: Current learning rate: 0.00188
|
335 |
+
2024-08-30 08:43:29.541205: train_loss -0.8317
|
336 |
+
2024-08-30 08:43:29.542472: val_loss -0.8121
|
337 |
+
2024-08-30 08:43:29.543409: Pseudo dice [0.8903, 0.9135, 0.8928, 0.8693, 0.854, 0.8603, 0.8552, 0.8442, 0.8344, 0.8207, 0.8267, 0.8474, 0.9029, 0.9041, 0.8716, 0.8192, 0.7569, 0.0, 0.9575, nan, 0.0]
|
338 |
+
2024-08-30 08:43:29.544298: Epoch time: 53.63 s
|
339 |
+
2024-08-30 08:43:30.901514:
|
340 |
+
2024-08-30 08:43:30.902413: Epoch 845
|
341 |
+
2024-08-30 08:43:30.903183: Current learning rate: 0.00187
|
342 |
+
2024-08-30 08:44:23.643970: train_loss -0.8304
|
343 |
+
2024-08-30 08:44:23.645729: val_loss -0.8566
|
344 |
+
2024-08-30 08:44:23.646530: Pseudo dice [0.9207, 0.9391, 0.8981, 0.8785, 0.902, 0.893, 0.891, 0.9151, 0.939, 0.9532, 0.9421, 0.921, 0.9327, 0.9647, 0.9618, 0.9327, 0.8771, 0.0, 0.936, nan, nan]
|
345 |
+
2024-08-30 08:44:23.647211: Epoch time: 52.75 s
|
346 |
+
2024-08-30 08:44:24.951501:
|
347 |
+
2024-08-30 08:44:24.952410: Epoch 846
|
348 |
+
2024-08-30 08:44:24.953138: Current learning rate: 0.00186
|
349 |
+
2024-08-30 08:45:15.575314: train_loss -0.8288
|
350 |
+
2024-08-30 08:45:15.577284: val_loss -0.8433
|
351 |
+
2024-08-30 08:45:15.578287: Pseudo dice [0.9362, 0.9328, 0.9073, 0.8303, 0.8414, 0.8708, 0.8822, 0.9292, 0.9562, 0.9539, 0.9443, 0.9305, 0.9291, 0.8906, 0.8432, 0.8253, 0.8535, 0.0, 0.9521, nan, nan]
|
352 |
+
2024-08-30 08:45:15.579273: Epoch time: 50.63 s
|
353 |
+
2024-08-30 08:45:17.240562:
|
354 |
+
2024-08-30 08:45:17.241265: Epoch 847
|
355 |
+
2024-08-30 08:45:17.241756: Current learning rate: 0.00185
|
356 |
+
2024-08-30 08:46:09.978026: train_loss -0.8326
|
357 |
+
2024-08-30 08:46:09.991204: val_loss -0.8519
|
358 |
+
2024-08-30 08:46:09.992387: Pseudo dice [0.926, 0.8973, 0.8754, 0.911, 0.9288, 0.9329, 0.9321, 0.926, 0.9219, 0.921, 0.948, 0.963, 0.9617, 0.9694, 0.963, 0.9507, 0.9403, 0.0, 0.9505, nan, nan]
|
359 |
+
2024-08-30 08:46:09.993431: Epoch time: 52.74 s
|
360 |
+
2024-08-30 08:46:11.367785:
|
361 |
+
2024-08-30 08:46:11.368587: Epoch 848
|
362 |
+
2024-08-30 08:46:11.369292: Current learning rate: 0.00184
|
363 |
+
2024-08-30 08:47:03.955757: train_loss -0.838
|
364 |
+
2024-08-30 08:47:03.956916: val_loss -0.8315
|
365 |
+
2024-08-30 08:47:03.957751: Pseudo dice [0.9125, 0.9179, 0.8678, 0.8423, 0.8697, 0.8744, 0.8542, 0.8571, 0.8618, 0.8344, 0.8276, 0.8869, 0.9626, 0.9507, 0.936, 0.9326, 0.9405, 0.0, 0.9413, nan, 0.0]
|
366 |
+
2024-08-30 08:47:03.958437: Epoch time: 52.59 s
|
367 |
+
2024-08-30 08:47:05.276408:
|
368 |
+
2024-08-30 08:47:05.277321: Epoch 849
|
369 |
+
2024-08-30 08:47:05.278058: Current learning rate: 0.00182
|
370 |
+
2024-08-30 08:47:59.892816: train_loss -0.8387
|
371 |
+
2024-08-30 08:47:59.895495: val_loss -0.8549
|
372 |
+
2024-08-30 08:47:59.896668: Pseudo dice [0.9251, 0.9427, 0.9384, 0.9354, 0.9379, 0.9505, 0.959, 0.9614, 0.9629, 0.958, 0.9555, 0.9562, 0.9562, 0.936, 0.8962, 0.8824, 0.8624, 0.0, 0.9507, nan, nan]
|
373 |
+
2024-08-30 08:47:59.897888: Epoch time: 54.62 s
|
374 |
+
2024-08-30 08:48:03.877543:
|
375 |
+
2024-08-30 08:48:03.878408: Epoch 850
|
376 |
+
2024-08-30 08:48:03.879123: Current learning rate: 0.00181
|
377 |
+
2024-08-30 08:48:55.590902: train_loss -0.8367
|
378 |
+
2024-08-30 08:48:55.592170: val_loss -0.8651
|
379 |
+
2024-08-30 08:48:55.593011: Pseudo dice [0.9056, 0.927, 0.9329, 0.9429, 0.9531, 0.9613, 0.9652, 0.9692, 0.9625, 0.9509, 0.9524, 0.9603, 0.9667, 0.9757, 0.9683, 0.957, 0.9337, nan, 0.9366, nan, nan]
|
380 |
+
2024-08-30 08:48:55.593708: Epoch time: 51.72 s
|
381 |
+
2024-08-30 08:48:57.592999:
|
382 |
+
2024-08-30 08:48:57.593977: Epoch 851
|
383 |
+
2024-08-30 08:48:57.594691: Current learning rate: 0.0018
|
384 |
+
2024-08-30 08:49:48.381647: train_loss -0.8374
|
385 |
+
2024-08-30 08:49:48.383764: val_loss -0.8453
|
386 |
+
2024-08-30 08:49:48.384715: Pseudo dice [0.895, 0.8924, 0.8888, 0.8955, 0.8853, 0.8824, 0.9294, 0.9467, 0.9421, 0.9434, 0.9375, 0.9262, 0.9169, 0.9239, 0.9239, 0.929, 0.9112, 0.0, 0.9424, nan, nan]
|
387 |
+
2024-08-30 08:49:48.385490: Epoch time: 50.79 s
|
388 |
+
2024-08-30 08:49:49.953361:
|
389 |
+
2024-08-30 08:49:49.954305: Epoch 852
|
390 |
+
2024-08-30 08:49:49.955045: Current learning rate: 0.00179
|
391 |
+
2024-08-30 08:50:41.454666: train_loss -0.8347
|
392 |
+
2024-08-30 08:50:41.456775: val_loss -0.8491
|
393 |
+
2024-08-30 08:50:41.458367: Pseudo dice [0.9167, 0.9272, 0.8905, 0.9042, 0.9136, 0.9085, 0.9005, 0.9117, 0.9186, 0.9246, 0.9365, 0.9549, 0.9551, 0.9315, 0.9235, 0.904, 0.885, 0.0, 0.9512, nan, 0.0]
|
394 |
+
2024-08-30 08:50:41.459208: Epoch time: 51.5 s
|
395 |
+
2024-08-30 08:50:42.787482:
|
396 |
+
2024-08-30 08:50:42.788740: Epoch 853
|
397 |
+
2024-08-30 08:50:42.789418: Current learning rate: 0.00178
|
398 |
+
2024-08-30 08:51:35.049179: train_loss -0.8385
|
399 |
+
2024-08-30 08:51:35.051316: val_loss -0.8338
|
400 |
+
2024-08-30 08:51:35.052195: Pseudo dice [0.9174, 0.9189, 0.9197, 0.9246, 0.9394, 0.941, 0.9031, 0.8926, 0.8781, 0.8344, 0.7994, 0.8644, 0.9192, 0.944, 0.9667, 0.9473, 0.8746, 0.0, 0.9445, nan, nan]
|
401 |
+
2024-08-30 08:51:35.053322: Epoch time: 52.26 s
|
402 |
+
2024-08-30 08:51:36.377242:
|
403 |
+
2024-08-30 08:51:36.378135: Epoch 854
|
404 |
+
2024-08-30 08:51:36.378981: Current learning rate: 0.00177
|
405 |
+
2024-08-30 08:52:28.232911: train_loss -0.8387
|
406 |
+
2024-08-30 08:52:28.234886: val_loss -0.8301
|
407 |
+
2024-08-30 08:52:28.235937: Pseudo dice [0.9305, 0.9367, 0.9232, 0.9403, 0.9553, 0.9441, 0.9246, 0.855, 0.8164, 0.826, 0.8148, 0.8342, 0.9149, 0.91, 0.9103, 0.9004, 0.9058, 0.0, 0.9433, nan, 0.0]
|
408 |
+
2024-08-30 08:52:28.236702: Epoch time: 51.86 s
|
409 |
+
2024-08-30 08:52:29.565022:
|
410 |
+
2024-08-30 08:52:29.565928: Epoch 855
|
411 |
+
2024-08-30 08:52:29.566649: Current learning rate: 0.00176
|
412 |
+
2024-08-30 08:53:22.500609: train_loss -0.8342
|
413 |
+
2024-08-30 08:53:22.502524: val_loss -0.856
|
414 |
+
2024-08-30 08:53:22.503296: Pseudo dice [0.8973, 0.9256, 0.9276, 0.9283, 0.9204, 0.9272, 0.9373, 0.9462, 0.9605, 0.9675, 0.9693, 0.9664, 0.9648, 0.9482, 0.9414, 0.9262, 0.8635, 0.0, 0.9429, nan, nan]
|
415 |
+
2024-08-30 08:53:22.504044: Epoch time: 52.94 s
|
416 |
+
2024-08-30 08:53:23.820922:
|
417 |
+
2024-08-30 08:53:23.821906: Epoch 856
|
418 |
+
2024-08-30 08:53:23.822630: Current learning rate: 0.00175
|
419 |
+
2024-08-30 08:54:16.268810: train_loss -0.8411
|
420 |
+
2024-08-30 08:54:16.270410: val_loss -0.8504
|
421 |
+
2024-08-30 08:54:16.271315: Pseudo dice [0.9122, 0.9143, 0.9029, 0.8923, 0.8937, 0.9052, 0.9128, 0.9075, 0.9167, 0.9291, 0.9404, 0.9495, 0.9684, 0.9705, 0.9591, 0.9309, 0.9091, 0.0, 0.9387, nan, 0.0]
|
422 |
+
2024-08-30 08:54:16.272061: Epoch time: 52.45 s
|
423 |
+
2024-08-30 08:54:17.691141:
|
424 |
+
2024-08-30 08:54:17.692030: Epoch 857
|
425 |
+
2024-08-30 08:54:17.692727: Current learning rate: 0.00174
|
426 |
+
2024-08-30 08:55:09.571531: train_loss -0.832
|
427 |
+
2024-08-30 08:55:09.573950: val_loss -0.849
|
428 |
+
2024-08-30 08:55:09.575038: Pseudo dice [0.9242, 0.9466, 0.9406, 0.9254, 0.9234, 0.9233, 0.9262, 0.9339, 0.9336, 0.9337, 0.9398, 0.9177, 0.9436, 0.9119, 0.8793, 0.833, 0.7875, nan, 0.9264, nan, 0.0]
|
429 |
+
2024-08-30 08:55:09.575829: Epoch time: 51.88 s
|
430 |
+
2024-08-30 08:55:10.855058:
|
431 |
+
2024-08-30 08:55:10.855939: Epoch 858
|
432 |
+
2024-08-30 08:55:10.856600: Current learning rate: 0.00173
|
433 |
+
2024-08-30 08:55:59.863614: train_loss -0.831
|
434 |
+
2024-08-30 08:55:59.865005: val_loss -0.8535
|
435 |
+
2024-08-30 08:55:59.867006: Pseudo dice [0.9404, 0.9531, 0.9496, 0.9549, 0.9598, 0.9671, 0.9671, 0.9523, 0.9575, 0.958, 0.9564, 0.933, 0.9251, 0.9185, 0.92, 0.9268, 0.8361, 0.0, 0.9322, nan, nan]
|
436 |
+
2024-08-30 08:55:59.867766: Epoch time: 49.01 s
|
437 |
+
2024-08-30 08:56:01.408063:
|
438 |
+
2024-08-30 08:56:01.409021: Epoch 859
|
439 |
+
2024-08-30 08:56:01.409722: Current learning rate: 0.00172
|
440 |
+
2024-08-30 08:56:51.798097: train_loss -0.843
|
441 |
+
2024-08-30 08:56:51.800311: val_loss -0.8369
|
442 |
+
2024-08-30 08:56:51.801406: Pseudo dice [0.9064, 0.9308, 0.9259, 0.9113, 0.888, 0.8785, 0.8644, 0.8599, 0.8873, 0.9033, 0.918, 0.9246, 0.9489, 0.9225, 0.9154, 0.9183, 0.8533, 0.0, 0.9459, nan, 0.0]
|
443 |
+
2024-08-30 08:56:51.802177: Epoch time: 50.39 s
|
444 |
+
2024-08-30 08:56:53.236082:
|
445 |
+
2024-08-30 08:56:53.237377: Epoch 860
|
446 |
+
2024-08-30 08:56:53.238142: Current learning rate: 0.0017
|
447 |
+
2024-08-30 08:57:48.362655: train_loss -0.8373
|
448 |
+
2024-08-30 08:57:48.363925: val_loss -0.8358
|
449 |
+
2024-08-30 08:57:48.364877: Pseudo dice [0.9434, 0.9557, 0.9561, 0.9474, 0.9004, 0.8748, 0.8762, 0.8653, 0.846, 0.8354, 0.8613, 0.91, 0.9531, 0.9548, 0.9597, 0.9397, 0.8697, 0.0, 0.9262, nan, nan]
|
450 |
+
2024-08-30 08:57:48.365652: Epoch time: 55.13 s
|
451 |
+
2024-08-30 08:57:49.672308:
|
452 |
+
2024-08-30 08:57:49.673516: Epoch 861
|
453 |
+
2024-08-30 08:57:49.674527: Current learning rate: 0.00169
|
454 |
+
2024-08-30 08:58:45.218183: train_loss -0.8346
|
455 |
+
2024-08-30 08:58:45.220060: val_loss -0.8465
|
456 |
+
2024-08-30 08:58:45.220916: Pseudo dice [0.9053, 0.9293, 0.9208, 0.9106, 0.9059, 0.9117, 0.9204, 0.9207, 0.9181, 0.9415, 0.9398, 0.938, 0.9492, 0.9557, 0.9598, 0.9258, 0.8812, 0.0, 0.9454, nan, nan]
|
457 |
+
2024-08-30 08:58:45.221845: Epoch time: 55.55 s
|
458 |
+
2024-08-30 08:58:46.646538:
|
459 |
+
2024-08-30 08:58:46.647485: Epoch 862
|
460 |
+
2024-08-30 08:58:46.648240: Current learning rate: 0.00168
|
461 |
+
2024-08-30 08:59:40.994913: train_loss -0.8318
|
462 |
+
2024-08-30 08:59:40.996721: val_loss -0.851
|
463 |
+
2024-08-30 08:59:40.997763: Pseudo dice [0.9387, 0.9371, 0.9391, 0.9518, 0.9575, 0.9651, 0.9665, 0.9642, 0.9657, 0.9506, 0.946, 0.9581, 0.9337, 0.8996, 0.8819, 0.8758, 0.8834, 0.0, 0.9446, nan, nan]
|
464 |
+
2024-08-30 08:59:40.999017: Epoch time: 54.35 s
|
465 |
+
2024-08-30 08:59:42.270347:
|
466 |
+
2024-08-30 08:59:42.270894: Epoch 863
|
467 |
+
2024-08-30 08:59:42.271382: Current learning rate: 0.00167
|
468 |
+
2024-08-30 09:00:33.523203: train_loss -0.8386
|
469 |
+
2024-08-30 09:00:33.525758: val_loss -0.8467
|
470 |
+
2024-08-30 09:00:33.527037: Pseudo dice [0.9169, 0.9109, 0.8887, 0.9015, 0.9131, 0.9238, 0.9044, 0.9005, 0.8851, 0.8783, 0.9046, 0.9107, 0.9397, 0.9291, 0.9289, 0.9144, 0.8798, 0.0, 0.9498, nan, 0.0]
|
471 |
+
2024-08-30 09:00:33.527949: Epoch time: 51.26 s
|
472 |
+
2024-08-30 09:00:35.809411:
|
473 |
+
2024-08-30 09:00:35.810287: Epoch 864
|
474 |
+
2024-08-30 09:00:35.811031: Current learning rate: 0.00166
|
475 |
+
2024-08-30 09:01:28.219671: train_loss -0.838
|
476 |
+
2024-08-30 09:01:28.221019: val_loss -0.8554
|
477 |
+
2024-08-30 09:01:28.221902: Pseudo dice [0.9124, 0.934, 0.9384, 0.9115, 0.9199, 0.9542, 0.9577, 0.9646, 0.9678, 0.9709, 0.9679, 0.9698, 0.9702, 0.9411, 0.9, 0.8693, 0.8403, 0.0, 0.9338, nan, nan]
|
478 |
+
2024-08-30 09:01:28.222677: Epoch time: 52.41 s
|
479 |
+
2024-08-30 09:01:29.836327:
|
480 |
+
2024-08-30 09:01:29.837203: Epoch 865
|
481 |
+
2024-08-30 09:01:29.837966: Current learning rate: 0.00165
|
482 |
+
2024-08-30 09:02:22.851339: train_loss -0.8418
|
483 |
+
2024-08-30 09:02:22.853962: val_loss -0.8571
|
484 |
+
2024-08-30 09:02:22.854879: Pseudo dice [0.942, 0.9536, 0.952, 0.9326, 0.9033, 0.8973, 0.9031, 0.9102, 0.9125, 0.9062, 0.8857, 0.9213, 0.9625, 0.9712, 0.9754, 0.9669, 0.9257, 0.0, 0.9539, nan, nan]
|
485 |
+
2024-08-30 09:02:22.855863: Epoch time: 53.02 s
|
486 |
+
2024-08-30 09:02:24.152373:
|
487 |
+
2024-08-30 09:02:24.153325: Epoch 866
|
488 |
+
2024-08-30 09:02:24.154001: Current learning rate: 0.00164
|
489 |
+
2024-08-30 09:03:16.440200: train_loss -0.8464
|
490 |
+
2024-08-30 09:03:16.441539: val_loss -0.8543
|
491 |
+
2024-08-30 09:03:16.442665: Pseudo dice [0.9283, 0.9498, 0.9386, 0.9328, 0.953, 0.9616, 0.9685, 0.9518, 0.9429, 0.9621, 0.9631, 0.9694, 0.9678, 0.9614, 0.9301, 0.8632, 0.8204, 0.0, 0.9587, nan, nan]
|
492 |
+
2024-08-30 09:03:16.443727: Epoch time: 52.29 s
|
493 |
+
2024-08-30 09:03:17.824345:
|
494 |
+
2024-08-30 09:03:17.825303: Epoch 867
|
495 |
+
2024-08-30 09:03:17.825968: Current learning rate: 0.00163
|
496 |
+
2024-08-30 09:04:08.592646: train_loss -0.8416
|
497 |
+
2024-08-30 09:04:08.595639: val_loss -0.8524
|
498 |
+
2024-08-30 09:04:08.596529: Pseudo dice [0.9298, 0.9471, 0.9465, 0.9608, 0.9608, 0.9581, 0.9576, 0.9526, 0.9453, 0.9294, 0.9132, 0.9362, 0.9493, 0.9362, 0.9075, 0.8671, 0.7967, 0.0, 0.9418, nan, nan]
|
499 |
+
2024-08-30 09:04:08.597581: Epoch time: 50.77 s
|
500 |
+
2024-08-30 09:04:09.935222:
|
501 |
+
2024-08-30 09:04:09.936138: Epoch 868
|
502 |
+
2024-08-30 09:04:09.936894: Current learning rate: 0.00162
|
503 |
+
2024-08-30 09:05:02.175758: train_loss -0.8382
|
504 |
+
2024-08-30 09:05:02.180358: val_loss -0.8533
|
505 |
+
2024-08-30 09:05:02.181579: Pseudo dice [0.9343, 0.9478, 0.9531, 0.9568, 0.9592, 0.9551, 0.9494, 0.933, 0.9293, 0.9281, 0.9349, 0.9247, 0.9568, 0.9299, 0.9029, 0.9175, 0.8949, 0.0, 0.9369, nan, 0.0]
|
506 |
+
2024-08-30 09:05:02.182430: Epoch time: 52.24 s
|
507 |
+
2024-08-30 09:05:03.815528:
|
508 |
+
2024-08-30 09:05:03.817090: Epoch 869
|
509 |
+
2024-08-30 09:05:03.817828: Current learning rate: 0.00161
|
510 |
+
2024-08-30 09:05:57.417204: train_loss -0.8328
|
511 |
+
2024-08-30 09:05:57.419668: val_loss -0.8604
|
512 |
+
2024-08-30 09:05:57.420626: Pseudo dice [0.9082, 0.949, 0.9422, 0.9247, 0.9396, 0.9548, 0.9273, 0.9226, 0.9415, 0.9536, 0.9577, 0.9531, 0.966, 0.9682, 0.9672, 0.9542, 0.9222, 0.0, 0.9538, nan, nan]
|
513 |
+
2024-08-30 09:05:57.421386: Epoch time: 53.6 s
|
514 |
+
2024-08-30 09:05:58.851858:
|
515 |
+
2024-08-30 09:05:58.852747: Epoch 870
|
516 |
+
2024-08-30 09:05:58.853485: Current learning rate: 0.00159
|
517 |
+
2024-08-30 09:06:52.801990: train_loss -0.8372
|
518 |
+
2024-08-30 09:06:52.803658: val_loss -0.8536
|
519 |
+
2024-08-30 09:06:52.804487: Pseudo dice [0.9369, 0.9397, 0.9356, 0.9546, 0.9619, 0.9644, 0.9407, 0.9462, 0.9642, 0.9676, 0.9406, 0.9255, 0.8748, 0.8859, 0.9038, 0.8989, 0.903, 0.0, 0.9541, nan, 0.0]
|
520 |
+
2024-08-30 09:06:52.805259: Epoch time: 53.95 s
|
521 |
+
2024-08-30 09:06:54.142854:
|
522 |
+
2024-08-30 09:06:54.143848: Epoch 871
|
523 |
+
2024-08-30 09:06:54.144514: Current learning rate: 0.00158
|
524 |
+
2024-08-30 09:07:48.394995: train_loss -0.8409
|
525 |
+
2024-08-30 09:07:48.397168: val_loss -0.8576
|
526 |
+
2024-08-30 09:07:48.398114: Pseudo dice [0.9372, 0.9585, 0.9488, 0.9537, 0.9626, 0.9471, 0.9049, 0.9016, 0.9215, 0.9314, 0.9359, 0.9476, 0.9646, 0.9705, 0.9729, 0.962, 0.9385, 0.0, 0.9599, nan, nan]
|
527 |
+
2024-08-30 09:07:48.398971: Epoch time: 54.26 s
|
528 |
+
2024-08-30 09:07:49.755149:
|
529 |
+
2024-08-30 09:07:49.756004: Epoch 872
|
530 |
+
2024-08-30 09:07:49.756696: Current learning rate: 0.00157
|
531 |
+
2024-08-30 09:08:43.106714: train_loss -0.8371
|
532 |
+
2024-08-30 09:08:43.107915: val_loss -0.8562
|
533 |
+
2024-08-30 09:08:43.108848: Pseudo dice [0.9368, 0.9445, 0.9531, 0.9557, 0.9561, 0.962, 0.9412, 0.9402, 0.953, 0.9583, 0.9591, 0.9518, 0.9686, 0.9571, 0.9379, 0.8977, 0.8283, 0.0, 0.9554, nan, 0.0]
|
534 |
+
2024-08-30 09:08:43.109610: Epoch time: 53.35 s
|
535 |
+
2024-08-30 09:08:44.639570:
|
536 |
+
2024-08-30 09:08:44.640445: Epoch 873
|
537 |
+
2024-08-30 09:08:44.641129: Current learning rate: 0.00156
|
538 |
+
2024-08-30 09:09:35.810926: train_loss -0.843
|
539 |
+
2024-08-30 09:09:35.813389: val_loss -0.8493
|
540 |
+
2024-08-30 09:09:35.814319: Pseudo dice [0.9132, 0.9213, 0.9236, 0.9295, 0.9249, 0.9122, 0.8885, 0.8997, 0.8974, 0.9023, 0.9263, 0.9364, 0.9549, 0.9696, 0.9662, 0.9468, 0.8956, 0.0, 0.9346, nan, nan]
|
541 |
+
2024-08-30 09:09:35.815161: Epoch time: 51.17 s
|
542 |
+
2024-08-30 09:09:37.314640:
|
543 |
+
2024-08-30 09:09:37.315544: Epoch 874
|
544 |
+
2024-08-30 09:09:37.316256: Current learning rate: 0.00155
|
545 |
+
2024-08-30 09:10:31.548849: train_loss -0.8447
|
546 |
+
2024-08-30 09:10:31.549944: val_loss -0.8468
|
547 |
+
2024-08-30 09:10:31.550888: Pseudo dice [0.9228, 0.9285, 0.9166, 0.931, 0.9581, 0.9519, 0.9376, 0.9329, 0.9187, 0.9142, 0.9283, 0.9442, 0.923, 0.8854, 0.851, 0.8311, 0.7589, 0.0, 0.9417, nan, nan]
|
548 |
+
2024-08-30 09:10:31.551782: Epoch time: 54.24 s
|
549 |
+
2024-08-30 09:10:32.872042:
|
550 |
+
2024-08-30 09:10:32.872933: Epoch 875
|
551 |
+
2024-08-30 09:10:32.873559: Current learning rate: 0.00154
|
552 |
+
2024-08-30 09:11:25.913647: train_loss -0.8416
|
553 |
+
2024-08-30 09:11:25.916418: val_loss -0.8512
|
554 |
+
2024-08-30 09:11:25.917590: Pseudo dice [0.9287, 0.9457, 0.9462, 0.923, 0.8986, 0.9299, 0.9323, 0.9177, 0.9126, 0.9143, 0.9129, 0.9478, 0.9649, 0.9579, 0.9596, 0.947, 0.865, 0.0, 0.9397, nan, nan]
|
555 |
+
2024-08-30 09:11:25.918908: Epoch time: 53.04 s
|
556 |
+
2024-08-30 09:11:27.242563:
|
557 |
+
2024-08-30 09:11:27.243446: Epoch 876
|
558 |
+
2024-08-30 09:11:27.244117: Current learning rate: 0.00153
|
559 |
+
2024-08-30 09:12:16.963555: train_loss -0.8312
|
560 |
+
2024-08-30 09:12:16.965032: val_loss -0.8509
|
561 |
+
2024-08-30 09:12:16.966198: Pseudo dice [0.9027, 0.9104, 0.8725, 0.8879, 0.9106, 0.9152, 0.9027, 0.883, 0.8969, 0.9002, 0.9077, 0.9196, 0.9159, 0.9291, 0.9282, 0.947, 0.8979, nan, 0.9356, nan, nan]
|
562 |
+
2024-08-30 09:12:16.967159: Epoch time: 49.72 s
|
563 |
+
2024-08-30 09:12:18.284918:
|
564 |
+
2024-08-30 09:12:18.285751: Epoch 877
|
565 |
+
2024-08-30 09:12:18.286430: Current learning rate: 0.00152
|
566 |
+
2024-08-30 09:13:09.038544: train_loss -0.8394
|
567 |
+
2024-08-30 09:13:09.040658: val_loss -0.8522
|
568 |
+
2024-08-30 09:13:09.041566: Pseudo dice [0.9443, 0.9627, 0.9609, 0.9649, 0.9532, 0.9516, 0.9521, 0.937, 0.9204, 0.9069, 0.9125, 0.923, 0.9625, 0.9268, 0.939, 0.9383, 0.8919, 0.0, 0.9295, nan, 0.0]
|
569 |
+
2024-08-30 09:13:09.042206: Epoch time: 50.76 s
|
570 |
+
2024-08-30 09:13:11.051424:
|
571 |
+
2024-08-30 09:13:11.052295: Epoch 878
|
572 |
+
2024-08-30 09:13:11.052982: Current learning rate: 0.00151
|
573 |
+
2024-08-30 09:14:01.156067: train_loss -0.8369
|
574 |
+
2024-08-30 09:14:01.157804: val_loss -0.8611
|
575 |
+
2024-08-30 09:14:01.158995: Pseudo dice [0.9301, 0.9615, 0.9623, 0.9619, 0.9621, 0.9645, 0.965, 0.9523, 0.9359, 0.9281, 0.9196, 0.9195, 0.9647, 0.967, 0.9233, 0.8982, 0.9088, nan, 0.9421, nan, nan]
|
576 |
+
2024-08-30 09:14:01.159832: Epoch time: 50.11 s
|
577 |
+
2024-08-30 09:14:02.494009:
|
578 |
+
2024-08-30 09:14:02.494841: Epoch 879
|
579 |
+
2024-08-30 09:14:02.495541: Current learning rate: 0.00149
|
580 |
+
2024-08-30 09:14:53.631948: train_loss -0.836
|
581 |
+
2024-08-30 09:14:53.633824: val_loss -0.8332
|
582 |
+
2024-08-30 09:14:53.634913: Pseudo dice [0.9079, 0.876, 0.8622, 0.9014, 0.9182, 0.9402, 0.9176, 0.8935, 0.8962, 0.9322, 0.9457, 0.9458, 0.9375, 0.8962, 0.8779, 0.8574, 0.8324, 0.0, 0.9115, nan, nan]
|
583 |
+
2024-08-30 09:14:53.636594: Epoch time: 51.14 s
|
584 |
+
2024-08-30 09:14:55.305416:
|
585 |
+
2024-08-30 09:14:55.306445: Epoch 880
|
586 |
+
2024-08-30 09:14:55.307124: Current learning rate: 0.00148
|
587 |
+
2024-08-30 09:15:48.906780: train_loss -0.8415
|
588 |
+
2024-08-30 09:15:48.908741: val_loss -0.8419
|
589 |
+
2024-08-30 09:15:48.909892: Pseudo dice [0.9333, 0.9413, 0.946, 0.9542, 0.9622, 0.9293, 0.8882, 0.8673, 0.8643, 0.867, 0.8732, 0.8859, 0.9404, 0.9425, 0.8989, 0.8451, 0.7916, 0.0, 0.9407, nan, 0.0]
|
590 |
+
2024-08-30 09:15:48.910892: Epoch time: 53.6 s
|
591 |
+
2024-08-30 09:15:50.297559:
|
592 |
+
2024-08-30 09:15:50.298381: Epoch 881
|
593 |
+
2024-08-30 09:15:50.299062: Current learning rate: 0.00147
|
594 |
+
2024-08-30 09:16:42.509722: train_loss -0.8383
|
595 |
+
2024-08-30 09:16:42.511638: val_loss -0.8445
|
596 |
+
2024-08-30 09:16:42.512475: Pseudo dice [0.906, 0.94, 0.9296, 0.9233, 0.9014, 0.8822, 0.889, 0.8779, 0.8718, 0.8774, 0.8619, 0.8644, 0.949, 0.9536, 0.9521, 0.9286, 0.8987, 0.0, 0.9398, nan, 0.0]
|
597 |
+
2024-08-30 09:16:42.513572: Epoch time: 52.22 s
|
598 |
+
2024-08-30 09:16:43.979096:
|
599 |
+
2024-08-30 09:16:43.979967: Epoch 882
|
600 |
+
2024-08-30 09:16:43.980589: Current learning rate: 0.00146
|
601 |
+
2024-08-30 09:17:36.578169: train_loss -0.8375
|
602 |
+
2024-08-30 09:17:36.579384: val_loss -0.8352
|
603 |
+
2024-08-30 09:17:36.580246: Pseudo dice [0.9135, 0.9247, 0.9195, 0.9266, 0.933, 0.9166, 0.9008, 0.9009, 0.9012, 0.8523, 0.8407, 0.8942, 0.8531, 0.7992, 0.819, 0.8354, 0.8685, 0.0, 0.9542, nan, nan]
|
604 |
+
2024-08-30 09:17:36.581056: Epoch time: 52.6 s
|
605 |
+
2024-08-30 09:17:38.009704:
|
606 |
+
2024-08-30 09:17:38.010733: Epoch 883
|
607 |
+
2024-08-30 09:17:38.011462: Current learning rate: 0.00145
|
608 |
+
2024-08-30 09:18:28.543527: train_loss -0.8386
|
609 |
+
2024-08-30 09:18:28.545922: val_loss -0.8393
|
610 |
+
2024-08-30 09:18:28.546789: Pseudo dice [0.9315, 0.9584, 0.9628, 0.9606, 0.9533, 0.9611, 0.9706, 0.9649, 0.9607, 0.966, 0.9659, 0.9589, 0.9582, 0.9026, 0.8605, 0.7682, 0.7065, 0.0, 0.9238, nan, 0.0]
|
611 |
+
2024-08-30 09:18:28.547521: Epoch time: 50.54 s
|
612 |
+
2024-08-30 09:18:29.982323:
|
613 |
+
2024-08-30 09:18:29.983205: Epoch 884
|
614 |
+
2024-08-30 09:18:29.983942: Current learning rate: 0.00144
|
615 |
+
2024-08-30 09:19:22.844262: train_loss -0.8407
|
616 |
+
2024-08-30 09:19:22.845461: val_loss -0.8468
|
617 |
+
2024-08-30 09:19:22.846434: Pseudo dice [0.8979, 0.9238, 0.9197, 0.932, 0.9348, 0.9316, 0.9168, 0.9015, 0.9097, 0.9157, 0.8999, 0.8794, 0.9356, 0.9102, 0.8713, 0.8698, 0.9059, nan, 0.95, nan, 0.0]
|
618 |
+
2024-08-30 09:19:22.847541: Epoch time: 52.86 s
|
619 |
+
2024-08-30 09:19:24.125946:
|
620 |
+
2024-08-30 09:19:24.126772: Epoch 885
|
621 |
+
2024-08-30 09:19:24.127533: Current learning rate: 0.00143
|
622 |
+
2024-08-30 09:20:17.535192: train_loss -0.8435
|
623 |
+
2024-08-30 09:20:17.538976: val_loss -0.8716
|
624 |
+
2024-08-30 09:20:17.539990: Pseudo dice [0.9421, 0.9584, 0.9499, 0.9534, 0.9657, 0.9699, 0.9641, 0.9614, 0.9628, 0.9674, 0.9511, 0.9531, 0.9645, 0.961, 0.9554, 0.9447, 0.8955, nan, 0.9404, nan, nan]
|
625 |
+
2024-08-30 09:20:17.540793: Epoch time: 53.41 s
|
626 |
+
2024-08-30 09:20:19.032748:
|
627 |
+
2024-08-30 09:20:19.033603: Epoch 886
|
628 |
+
2024-08-30 09:20:19.034249: Current learning rate: 0.00142
|
629 |
+
2024-08-30 09:21:10.380071: train_loss -0.8451
|
630 |
+
2024-08-30 09:21:10.391248: val_loss -0.8491
|
631 |
+
2024-08-30 09:21:10.392237: Pseudo dice [0.923, 0.9434, 0.9261, 0.9186, 0.9428, 0.9434, 0.9265, 0.89, 0.8506, 0.8471, 0.899, 0.9226, 0.9481, 0.9505, 0.9462, 0.9108, 0.8979, 0.0, 0.9532, nan, 0.0]
|
632 |
+
2024-08-30 09:21:10.392935: Epoch time: 51.35 s
|
633 |
+
2024-08-30 09:21:11.701781:
|
634 |
+
2024-08-30 09:21:11.703192: Epoch 887
|
635 |
+
2024-08-30 09:21:11.703910: Current learning rate: 0.00141
|
636 |
+
2024-08-30 09:22:04.421584: train_loss -0.8447
|
637 |
+
2024-08-30 09:22:04.423617: val_loss -0.8645
|
638 |
+
2024-08-30 09:22:04.424931: Pseudo dice [0.9246, 0.9471, 0.9482, 0.954, 0.9557, 0.9558, 0.9515, 0.9305, 0.9371, 0.9582, 0.9573, 0.961, 0.9651, 0.9669, 0.9637, 0.9546, 0.9001, 0.0, 0.9515, nan, nan]
|
639 |
+
2024-08-30 09:22:04.425783: Epoch time: 52.72 s
|
640 |
+
2024-08-30 09:22:05.792814:
|
641 |
+
2024-08-30 09:22:05.793650: Epoch 888
|
642 |
+
2024-08-30 09:22:05.794380: Current learning rate: 0.00139
|
643 |
+
2024-08-30 09:22:54.214429: train_loss -0.8439
|
644 |
+
2024-08-30 09:22:54.215655: val_loss -0.8541
|
645 |
+
2024-08-30 09:22:54.216516: Pseudo dice [0.9066, 0.9146, 0.9315, 0.9335, 0.9391, 0.9587, 0.9621, 0.9562, 0.9455, 0.9305, 0.929, 0.9336, 0.9524, 0.9605, 0.9197, 0.9003, 0.8764, 0.0, 0.9465, nan, 0.0]
|
646 |
+
2024-08-30 09:22:54.217762: Epoch time: 48.42 s
|
647 |
+
2024-08-30 09:22:55.815449:
|
648 |
+
2024-08-30 09:22:55.816295: Epoch 889
|
649 |
+
2024-08-30 09:22:55.816990: Current learning rate: 0.00138
|
650 |
+
2024-08-30 09:23:47.761132: train_loss -0.8433
|
651 |
+
2024-08-30 09:23:47.763311: val_loss -0.8429
|
652 |
+
2024-08-30 09:23:47.764520: Pseudo dice [0.9395, 0.9501, 0.9441, 0.955, 0.9434, 0.9245, 0.8983, 0.8933, 0.9066, 0.9214, 0.9116, 0.8908, 0.9325, 0.9154, 0.9253, 0.9247, 0.8588, 0.0, 0.9472, nan, 0.0]
|
653 |
+
2024-08-30 09:23:47.765725: Epoch time: 51.95 s
|
654 |
+
2024-08-30 09:23:49.115229:
|
655 |
+
2024-08-30 09:23:49.116137: Epoch 890
|
656 |
+
2024-08-30 09:23:49.116842: Current learning rate: 0.00137
|
657 |
+
2024-08-30 09:24:41.991803: train_loss -0.8406
|
658 |
+
2024-08-30 09:24:41.993486: val_loss -0.8592
|
659 |
+
2024-08-30 09:24:41.994666: Pseudo dice [0.9254, 0.9517, 0.9499, 0.9592, 0.9642, 0.9661, 0.9649, 0.9717, 0.9627, 0.9479, 0.9179, 0.9195, 0.9294, 0.9121, 0.9151, 0.9349, 0.9356, 0.0, 0.9414, nan, 0.0]
|
660 |
+
2024-08-30 09:24:41.995376: Epoch time: 52.88 s
|
661 |
+
2024-08-30 09:24:44.173131:
|
662 |
+
2024-08-30 09:24:44.174046: Epoch 891
|
663 |
+
2024-08-30 09:24:44.174711: Current learning rate: 0.00136
|
664 |
+
2024-08-30 09:25:37.189787: train_loss -0.8482
|
665 |
+
2024-08-30 09:25:37.191763: val_loss -0.8563
|
666 |
+
2024-08-30 09:25:37.192794: Pseudo dice [0.9182, 0.9365, 0.9428, 0.9554, 0.9561, 0.9469, 0.8903, 0.8663, 0.9116, 0.9309, 0.9478, 0.9225, 0.9344, 0.961, 0.9478, 0.9201, 0.8967, 0.0, 0.9472, nan, 0.0]
|
667 |
+
2024-08-30 09:25:37.193596: Epoch time: 53.02 s
|
668 |
+
2024-08-30 09:25:38.676145:
|
669 |
+
2024-08-30 09:25:38.677552: Epoch 892
|
670 |
+
2024-08-30 09:25:38.678295: Current learning rate: 0.00135
|
671 |
+
2024-08-30 09:26:31.698332: train_loss -0.8475
|
672 |
+
2024-08-30 09:26:31.699583: val_loss -0.8362
|
673 |
+
2024-08-30 09:26:31.700533: Pseudo dice [0.8954, 0.8561, 0.8009, 0.8196, 0.8477, 0.8508, 0.8414, 0.8371, 0.8449, 0.8873, 0.924, 0.9297, 0.9266, 0.8997, 0.9062, 0.9172, 0.9063, 0.0, 0.935, nan, 0.0]
|
674 |
+
2024-08-30 09:26:31.701498: Epoch time: 53.03 s
|
675 |
+
2024-08-30 09:26:33.110759:
|
676 |
+
2024-08-30 09:26:33.111785: Epoch 893
|
677 |
+
2024-08-30 09:26:33.112508: Current learning rate: 0.00134
|
678 |
+
2024-08-30 09:27:26.704813: train_loss -0.8476
|
679 |
+
2024-08-30 09:27:26.706740: val_loss -0.8651
|
680 |
+
2024-08-30 09:27:26.707685: Pseudo dice [0.9512, 0.9608, 0.9606, 0.9644, 0.9625, 0.9649, 0.9582, 0.9636, 0.9649, 0.9654, 0.9666, 0.9673, 0.9705, 0.9697, 0.9489, 0.9124, 0.8684, nan, 0.9334, nan, nan]
|
681 |
+
2024-08-30 09:27:26.708473: Epoch time: 53.6 s
|
682 |
+
2024-08-30 09:27:28.148740:
|
683 |
+
2024-08-30 09:27:28.149570: Epoch 894
|
684 |
+
2024-08-30 09:27:28.150343: Current learning rate: 0.00133
|
685 |
+
2024-08-30 09:28:20.282127: train_loss -0.8424
|
686 |
+
2024-08-30 09:28:20.283353: val_loss -0.8536
|
687 |
+
2024-08-30 09:28:20.284453: Pseudo dice [0.9245, 0.9397, 0.9433, 0.9622, 0.9543, 0.9532, 0.9441, 0.9087, 0.8798, 0.901, 0.9302, 0.9616, 0.9584, 0.9219, 0.8654, 0.8241, 0.8262, 0.0, 0.9555, nan, 0.0]
|
688 |
+
2024-08-30 09:28:20.285292: Epoch time: 52.14 s
|
689 |
+
2024-08-30 09:28:21.591115:
|
690 |
+
2024-08-30 09:28:21.592039: Epoch 895
|
691 |
+
2024-08-30 09:28:21.592731: Current learning rate: 0.00132
|
692 |
+
2024-08-30 09:29:13.610711: train_loss -0.8371
|
693 |
+
2024-08-30 09:29:13.613045: val_loss -0.8447
|
694 |
+
2024-08-30 09:29:13.613831: Pseudo dice [0.9361, 0.9286, 0.913, 0.9208, 0.9298, 0.9371, 0.926, 0.9355, 0.9389, 0.9503, 0.9576, 0.9415, 0.9358, 0.9062, 0.8308, 0.8366, 0.8701, 0.0, 0.9601, nan, 0.0]
|
695 |
+
2024-08-30 09:29:13.614555: Epoch time: 52.02 s
|
696 |
+
2024-08-30 09:29:15.072567:
|
697 |
+
2024-08-30 09:29:15.073473: Epoch 896
|
698 |
+
2024-08-30 09:29:15.074170: Current learning rate: 0.0013
|
699 |
+
2024-08-30 09:30:05.941520: train_loss -0.8454
|
700 |
+
2024-08-30 09:30:05.942658: val_loss -0.8582
|
701 |
+
2024-08-30 09:30:05.943528: Pseudo dice [0.9268, 0.9585, 0.964, 0.9661, 0.9691, 0.9724, 0.9673, 0.9629, 0.9707, 0.9727, 0.9701, 0.9685, 0.9605, 0.942, 0.9377, 0.9236, 0.8866, 0.0, 0.9449, nan, nan]
|
702 |
+
2024-08-30 09:30:05.944242: Epoch time: 50.87 s
|
703 |
+
2024-08-30 09:30:07.385942:
|
704 |
+
2024-08-30 09:30:07.386874: Epoch 897
|
705 |
+
2024-08-30 09:30:07.387588: Current learning rate: 0.00129
|
706 |
+
2024-08-30 09:31:00.393564: train_loss -0.8407
|
707 |
+
2024-08-30 09:31:00.395521: val_loss -0.865
|
708 |
+
2024-08-30 09:31:00.396342: Pseudo dice [0.9355, 0.9507, 0.9489, 0.9609, 0.9657, 0.9578, 0.9545, 0.9639, 0.9379, 0.9462, 0.9622, 0.9518, 0.9627, 0.9566, 0.9622, 0.9657, 0.9541, 0.0, 0.9479, nan, nan]
|
709 |
+
2024-08-30 09:31:00.397204: Epoch time: 53.01 s
|
710 |
+
2024-08-30 09:31:01.742066:
|
711 |
+
2024-08-30 09:31:01.742928: Epoch 898
|
712 |
+
2024-08-30 09:31:01.743632: Current learning rate: 0.00128
|
713 |
+
2024-08-30 09:31:53.428476: train_loss -0.8454
|
714 |
+
2024-08-30 09:31:53.429614: val_loss -0.8472
|
715 |
+
2024-08-30 09:31:53.430410: Pseudo dice [0.9033, 0.9079, 0.896, 0.902, 0.9158, 0.9104, 0.915, 0.9356, 0.9496, 0.961, 0.9447, 0.933, 0.957, 0.9114, 0.8735, 0.8528, 0.8598, 0.0, 0.9625, nan, 0.0]
|
716 |
+
2024-08-30 09:31:53.431237: Epoch time: 51.69 s
|
717 |
+
2024-08-30 09:31:54.702623:
|
718 |
+
2024-08-30 09:31:54.703510: Epoch 899
|
719 |
+
2024-08-30 09:31:54.704171: Current learning rate: 0.00127
|
720 |
+
2024-08-30 09:32:46.171492: train_loss -0.8466
|
721 |
+
2024-08-30 09:32:46.173654: val_loss -0.8601
|
722 |
+
2024-08-30 09:32:46.174922: Pseudo dice [0.9211, 0.9391, 0.9364, 0.9198, 0.9132, 0.9096, 0.9293, 0.9308, 0.9449, 0.9456, 0.9465, 0.9689, 0.9747, 0.9552, 0.9235, 0.9176, 0.9041, 0.0, 0.9545, nan, nan]
|
723 |
+
2024-08-30 09:32:46.175740: Epoch time: 51.47 s
|
724 |
+
2024-08-30 09:32:50.069995:
|
725 |
+
2024-08-30 09:32:50.070864: Epoch 900
|
726 |
+
2024-08-30 09:32:50.071557: Current learning rate: 0.00126
|
727 |
+
2024-08-30 09:33:43.996930: train_loss -0.8401
|
728 |
+
2024-08-30 09:33:43.998721: val_loss -0.8438
|
729 |
+
2024-08-30 09:33:43.999696: Pseudo dice [0.9336, 0.9499, 0.9461, 0.9209, 0.8947, 0.9152, 0.9317, 0.9441, 0.9511, 0.9566, 0.9508, 0.9434, 0.9473, 0.9065, 0.8691, 0.8217, 0.7558, 0.0, 0.9578, nan, nan]
|
730 |
+
2024-08-30 09:33:44.000448: Epoch time: 53.93 s
|
731 |
+
2024-08-30 09:33:45.373267:
|
732 |
+
2024-08-30 09:33:45.374262: Epoch 901
|
733 |
+
2024-08-30 09:33:45.375159: Current learning rate: 0.00125
|
734 |
+
2024-08-30 09:34:37.210720: train_loss -0.8382
|
735 |
+
2024-08-30 09:34:37.212937: val_loss -0.8283
|
736 |
+
2024-08-30 09:34:37.213904: Pseudo dice [0.9264, 0.9371, 0.9033, 0.8907, 0.889, 0.8903, 0.8915, 0.8855, 0.9019, 0.8993, 0.8945, 0.8742, 0.8479, 0.7991, 0.7934, 0.8049, 0.7821, 0.0, 0.9344, nan, 0.0]
|
737 |
+
2024-08-30 09:34:37.215112: Epoch time: 51.84 s
|
738 |
+
2024-08-30 09:34:38.528410:
|
739 |
+
2024-08-30 09:34:38.529321: Epoch 902
|
740 |
+
2024-08-30 09:34:38.530048: Current learning rate: 0.00124
|
741 |
+
2024-08-30 09:35:29.587760: train_loss -0.8428
|
742 |
+
2024-08-30 09:35:29.588944: val_loss -0.8584
|
743 |
+
2024-08-30 09:35:29.589844: Pseudo dice [0.9253, 0.9443, 0.9539, 0.9341, 0.9249, 0.9271, 0.9274, 0.9276, 0.9304, 0.9191, 0.9291, 0.9536, 0.9517, 0.9346, 0.9329, 0.9325, 0.9259, nan, 0.9478, nan, nan]
|
744 |
+
2024-08-30 09:35:29.590628: Epoch time: 51.06 s
|
745 |
+
2024-08-30 09:35:31.090321:
|
746 |
+
2024-08-30 09:35:31.091160: Epoch 903
|
747 |
+
2024-08-30 09:35:31.091836: Current learning rate: 0.00122
|
748 |
+
2024-08-30 09:36:24.885172: train_loss -0.8496
|
749 |
+
2024-08-30 09:36:24.887835: val_loss -0.8514
|
750 |
+
2024-08-30 09:36:24.888869: Pseudo dice [0.9331, 0.9464, 0.956, 0.964, 0.9385, 0.923, 0.9325, 0.9447, 0.9396, 0.9333, 0.9441, 0.9419, 0.9493, 0.954, 0.9463, 0.9194, 0.8672, 0.0, 0.9469, nan, 0.0]
|
751 |
+
2024-08-30 09:36:24.889669: Epoch time: 53.8 s
|
752 |
+
2024-08-30 09:36:26.996505:
|
753 |
+
2024-08-30 09:36:26.997940: Epoch 904
|
754 |
+
2024-08-30 09:36:26.998565: Current learning rate: 0.00121
|
755 |
+
2024-08-30 09:37:17.406534: train_loss -0.8477
|
756 |
+
2024-08-30 09:37:17.407805: val_loss -0.8589
|
757 |
+
2024-08-30 09:37:17.408911: Pseudo dice [0.9487, 0.956, 0.948, 0.9506, 0.9625, 0.968, 0.9658, 0.9577, 0.9534, 0.9472, 0.9472, 0.9547, 0.9637, 0.9661, 0.9411, 0.906, 0.8633, 0.0, 0.9433, nan, nan]
|
758 |
+
2024-08-30 09:37:17.409878: Epoch time: 50.41 s
|
759 |
+
2024-08-30 09:37:18.711897:
|
760 |
+
2024-08-30 09:37:18.712965: Epoch 905
|
761 |
+
2024-08-30 09:37:18.713766: Current learning rate: 0.0012
|
762 |
+
2024-08-30 09:38:09.880386: train_loss -0.8408
|
763 |
+
2024-08-30 09:38:09.882540: val_loss -0.8431
|
764 |
+
2024-08-30 09:38:09.883490: Pseudo dice [0.9321, 0.9468, 0.93, 0.9346, 0.9374, 0.9305, 0.8557, 0.8492, 0.9061, 0.9543, 0.9579, 0.9045, 0.8719, 0.8568, 0.8335, 0.862, 0.8597, 0.0, 0.9552, nan, nan]
|
765 |
+
2024-08-30 09:38:09.884215: Epoch time: 51.17 s
|
766 |
+
2024-08-30 09:38:11.401536:
|
767 |
+
2024-08-30 09:38:11.402362: Epoch 906
|
768 |
+
2024-08-30 09:38:11.403127: Current learning rate: 0.00119
|
769 |
+
2024-08-30 09:39:07.168600: train_loss -0.8398
|
770 |
+
2024-08-30 09:39:07.169694: val_loss -0.8607
|
771 |
+
2024-08-30 09:39:07.170516: Pseudo dice [0.946, 0.9336, 0.9198, 0.9418, 0.9606, 0.9684, 0.9698, 0.9646, 0.9532, 0.9202, 0.8896, 0.9159, 0.9412, 0.9115, 0.9136, 0.9261, 0.9213, nan, 0.9496, nan, nan]
|
772 |
+
2024-08-30 09:39:07.171190: Epoch time: 55.77 s
|
773 |
+
2024-08-30 09:39:08.707077:
|
774 |
+
2024-08-30 09:39:08.708077: Epoch 907
|
775 |
+
2024-08-30 09:39:08.708838: Current learning rate: 0.00118
|
776 |
+
2024-08-30 09:40:01.850905: train_loss -0.8436
|
777 |
+
2024-08-30 09:40:01.852901: val_loss -0.8652
|
778 |
+
2024-08-30 09:40:01.853704: Pseudo dice [0.9414, 0.9518, 0.9357, 0.9309, 0.9494, 0.9599, 0.9594, 0.9684, 0.972, 0.9718, 0.9647, 0.9489, 0.9495, 0.9579, 0.9734, 0.9602, 0.9444, nan, 0.9474, nan, nan]
|
779 |
+
2024-08-30 09:40:01.854403: Epoch time: 53.15 s
|
780 |
+
2024-08-30 09:40:01.855169: Yayy! New best EMA pseudo Dice: 0.8798
|
781 |
+
2024-08-30 09:40:05.744240:
|
782 |
+
2024-08-30 09:40:05.745144: Epoch 908
|
783 |
+
2024-08-30 09:40:05.745888: Current learning rate: 0.00117
|
784 |
+
2024-08-30 09:40:56.679379: train_loss -0.8408
|
785 |
+
2024-08-30 09:40:56.681270: val_loss -0.8655
|
786 |
+
2024-08-30 09:40:56.682125: Pseudo dice [0.9359, 0.9569, 0.952, 0.9585, 0.9448, 0.9418, 0.9537, 0.9588, 0.9417, 0.9218, 0.9166, 0.9346, 0.9431, 0.9404, 0.9281, 0.9238, 0.913, 0.0, 0.9597, nan, nan]
|
787 |
+
2024-08-30 09:40:56.682843: Epoch time: 50.94 s
|
788 |
+
2024-08-30 09:40:56.683533: Yayy! New best EMA pseudo Dice: 0.8809
|
789 |
+
2024-08-30 09:41:00.972686:
|
790 |
+
2024-08-30 09:41:00.973612: Epoch 909
|
791 |
+
2024-08-30 09:41:00.974352: Current learning rate: 0.00116
|
792 |
+
2024-08-30 09:41:52.229133: train_loss -0.8412
|
793 |
+
2024-08-30 09:41:52.231776: val_loss -0.8727
|
794 |
+
2024-08-30 09:41:52.232930: Pseudo dice [0.918, 0.9451, 0.9422, 0.9508, 0.9556, 0.964, 0.9644, 0.9671, 0.9645, 0.9655, 0.9627, 0.9657, 0.973, 0.97, 0.9637, 0.9673, 0.9297, nan, 0.9402, nan, nan]
|
795 |
+
2024-08-30 09:41:52.234144: Epoch time: 51.26 s
|
796 |
+
2024-08-30 09:41:52.235236: Yayy! New best EMA pseudo Dice: 0.8884
|
797 |
+
2024-08-30 09:41:56.418429:
|
798 |
+
2024-08-30 09:41:56.419302: Epoch 910
|
799 |
+
2024-08-30 09:41:56.420158: Current learning rate: 0.00115
|
800 |
+
2024-08-30 09:42:49.296465: train_loss -0.8396
|
801 |
+
2024-08-30 09:42:49.297770: val_loss -0.8364
|
802 |
+
2024-08-30 09:42:49.298769: Pseudo dice [0.9266, 0.943, 0.9451, 0.9304, 0.9353, 0.9334, 0.9443, 0.9358, 0.9083, 0.9158, 0.8974, 0.8976, 0.8958, 0.8884, 0.8805, 0.8676, 0.8506, 0.0, 0.9446, nan, nan]
|
803 |
+
2024-08-30 09:42:49.299514: Epoch time: 52.88 s
|
804 |
+
2024-08-30 09:42:50.844342:
|
805 |
+
2024-08-30 09:42:50.845278: Epoch 911
|
806 |
+
2024-08-30 09:42:50.845987: Current learning rate: 0.00113
|
807 |
+
2024-08-30 09:43:43.097344: train_loss -0.8408
|
808 |
+
2024-08-30 09:43:43.099696: val_loss -0.8599
|
809 |
+
2024-08-30 09:43:43.100677: Pseudo dice [0.9409, 0.9516, 0.9496, 0.9518, 0.9491, 0.9517, 0.9611, 0.9717, 0.9486, 0.9486, 0.9627, 0.9627, 0.9714, 0.9641, 0.9298, 0.8623, 0.8033, 0.0, 0.9354, nan, nan]
|
810 |
+
2024-08-30 09:43:43.101291: Epoch time: 52.26 s
|
811 |
+
2024-08-30 09:43:44.533529:
|
812 |
+
2024-08-30 09:43:44.534385: Epoch 912
|
813 |
+
2024-08-30 09:43:44.535038: Current learning rate: 0.00112
|
814 |
+
2024-08-30 09:44:34.996822: train_loss -0.8517
|
815 |
+
2024-08-30 09:44:34.998015: val_loss -0.8617
|
816 |
+
2024-08-30 09:44:34.998905: Pseudo dice [0.943, 0.9519, 0.9501, 0.9613, 0.9655, 0.9678, 0.9643, 0.9611, 0.9606, 0.9492, 0.942, 0.9565, 0.9475, 0.9256, 0.9272, 0.9134, 0.8621, 0.0, 0.9564, nan, nan]
|
817 |
+
2024-08-30 09:44:34.999667: Epoch time: 50.47 s
|
818 |
+
2024-08-30 09:44:36.408237:
|
819 |
+
2024-08-30 09:44:36.409230: Epoch 913
|
820 |
+
2024-08-30 09:44:36.409955: Current learning rate: 0.00111
|
821 |
+
2024-08-30 09:45:28.878945: train_loss -0.8486
|
822 |
+
2024-08-30 09:45:28.881683: val_loss -0.8403
|
823 |
+
2024-08-30 09:45:28.882527: Pseudo dice [0.9299, 0.9509, 0.9427, 0.9301, 0.9125, 0.8868, 0.8903, 0.8965, 0.8822, 0.8931, 0.9089, 0.9366, 0.9198, 0.9021, 0.9141, 0.9048, 0.8939, 0.0, 0.9509, nan, 0.0]
|
824 |
+
2024-08-30 09:45:28.883183: Epoch time: 52.47 s
|
825 |
+
2024-08-30 09:45:30.225083:
|
826 |
+
2024-08-30 09:45:30.225914: Epoch 914
|
827 |
+
2024-08-30 09:45:30.226596: Current learning rate: 0.0011
|
828 |
+
2024-08-30 09:46:21.714068: train_loss -0.8507
|
829 |
+
2024-08-30 09:46:21.715301: val_loss -0.8477
|
830 |
+
2024-08-30 09:46:21.716247: Pseudo dice [0.93, 0.9427, 0.9337, 0.9243, 0.9162, 0.9224, 0.924, 0.8984, 0.8946, 0.9114, 0.8878, 0.8954, 0.9646, 0.9548, 0.9262, 0.9193, 0.9156, 0.0, 0.9416, nan, 0.0]
|
831 |
+
2024-08-30 09:46:21.717107: Epoch time: 51.49 s
|
832 |
+
2024-08-30 09:46:23.155884:
|
833 |
+
2024-08-30 09:46:23.156791: Epoch 915
|
834 |
+
2024-08-30 09:46:23.157521: Current learning rate: 0.00109
|
835 |
+
2024-08-30 09:47:17.947239: train_loss -0.8474
|
836 |
+
2024-08-30 09:47:17.949703: val_loss -0.8591
|
837 |
+
2024-08-30 09:47:17.950529: Pseudo dice [0.9407, 0.9571, 0.9483, 0.947, 0.9568, 0.9628, 0.9556, 0.9411, 0.9132, 0.9306, 0.9651, 0.969, 0.9726, 0.9683, 0.9356, 0.8999, 0.8389, 0.0, 0.9438, nan, nan]
|
838 |
+
2024-08-30 09:47:17.951321: Epoch time: 54.79 s
|
839 |
+
2024-08-30 09:47:20.035912:
|
840 |
+
2024-08-30 09:47:20.064198: Epoch 916
|
841 |
+
2024-08-30 09:47:20.108490: Current learning rate: 0.00108
|
842 |
+
2024-08-30 09:48:12.034337: train_loss -0.8468
|
843 |
+
2024-08-30 09:48:12.035556: val_loss -0.8684
|
844 |
+
2024-08-30 09:48:12.036769: Pseudo dice [0.9457, 0.9523, 0.9467, 0.9505, 0.9529, 0.9644, 0.9497, 0.9333, 0.934, 0.9413, 0.9452, 0.9597, 0.9665, 0.9701, 0.9652, 0.9567, 0.9102, 0.0, 0.9573, nan, 0.0]
|
845 |
+
2024-08-30 09:48:12.037589: Epoch time: 52.0 s
|
846 |
+
2024-08-30 09:48:13.400879:
|
847 |
+
2024-08-30 09:48:13.401742: Epoch 917
|
848 |
+
2024-08-30 09:48:13.402465: Current learning rate: 0.00106
|
849 |
+
2024-08-30 09:49:05.424017: train_loss -0.842
|
850 |
+
2024-08-30 09:49:05.426039: val_loss -0.8653
|
851 |
+
2024-08-30 09:49:05.426991: Pseudo dice [0.923, 0.9549, 0.9529, 0.9535, 0.9501, 0.9481, 0.9586, 0.9605, 0.9647, 0.9662, 0.9659, 0.9611, 0.9658, 0.9694, 0.959, 0.9398, 0.9007, 0.0, 0.9568, nan, nan]
|
852 |
+
2024-08-30 09:49:05.427838: Epoch time: 52.03 s
|
853 |
+
2024-08-30 09:49:06.859180:
|
854 |
+
2024-08-30 09:49:06.860131: Epoch 918
|
855 |
+
2024-08-30 09:49:06.860854: Current learning rate: 0.00105
|
856 |
+
2024-08-30 09:49:59.689979: train_loss -0.8431
|
857 |
+
2024-08-30 09:49:59.692385: val_loss -0.8596
|
858 |
+
2024-08-30 09:49:59.693380: Pseudo dice [0.9444, 0.9573, 0.9579, 0.9557, 0.9212, 0.9125, 0.9291, 0.945, 0.961, 0.9641, 0.955, 0.9588, 0.9684, 0.9568, 0.9043, 0.889, 0.8703, 0.0, 0.9561, nan, nan]
|
859 |
+
2024-08-30 09:49:59.694192: Epoch time: 52.83 s
|
860 |
+
2024-08-30 09:50:01.139813:
|
861 |
+
2024-08-30 09:50:01.140732: Epoch 919
|
862 |
+
2024-08-30 09:50:01.141433: Current learning rate: 0.00104
|
863 |
+
2024-08-30 09:50:52.872537: train_loss -0.8485
|
864 |
+
2024-08-30 09:50:52.874662: val_loss -0.8641
|
865 |
+
2024-08-30 09:50:52.875826: Pseudo dice [0.9312, 0.9414, 0.9545, 0.9463, 0.9217, 0.9152, 0.9335, 0.9404, 0.9481, 0.9544, 0.9634, 0.9661, 0.9726, 0.9641, 0.9549, 0.9482, 0.9081, 0.0, 0.9539, nan, nan]
|
866 |
+
2024-08-30 09:50:52.876757: Epoch time: 51.74 s
|
867 |
+
2024-08-30 09:50:54.344548:
|
868 |
+
2024-08-30 09:50:54.345468: Epoch 920
|
869 |
+
2024-08-30 09:50:54.346212: Current learning rate: 0.00103
|
870 |
+
2024-08-30 09:51:46.919124: train_loss -0.8516
|
871 |
+
2024-08-30 09:51:47.050050: val_loss -0.8514
|
872 |
+
2024-08-30 09:51:47.141948: Pseudo dice [0.9304, 0.9619, 0.9617, 0.9574, 0.953, 0.9654, 0.9728, 0.9746, 0.9669, 0.9523, 0.9483, 0.9447, 0.9217, 0.8759, 0.8593, 0.8695, 0.857, 0.0, 0.946, nan, nan]
|
873 |
+
2024-08-30 09:51:47.169266: Epoch time: 52.58 s
|
874 |
+
2024-08-30 09:51:48.569705:
|
875 |
+
2024-08-30 09:51:48.589292: Epoch 921
|
876 |
+
2024-08-30 09:51:48.589994: Current learning rate: 0.00102
|
877 |
+
2024-08-30 09:52:41.788446: train_loss -0.8457
|
878 |
+
2024-08-30 09:52:41.790469: val_loss -0.8683
|
879 |
+
2024-08-30 09:52:41.791498: Pseudo dice [0.9368, 0.955, 0.9573, 0.9629, 0.9651, 0.9514, 0.9302, 0.9205, 0.9281, 0.9265, 0.9274, 0.9183, 0.9399, 0.946, 0.9521, 0.9707, 0.9617, nan, 0.9598, nan, nan]
|
880 |
+
2024-08-30 09:52:41.792266: Epoch time: 53.22 s
|
881 |
+
2024-08-30 09:52:43.170865:
|
882 |
+
2024-08-30 09:52:43.171814: Epoch 922
|
883 |
+
2024-08-30 09:52:43.172486: Current learning rate: 0.00101
|
884 |
+
2024-08-30 09:53:36.126686: train_loss -0.8445
|
885 |
+
2024-08-30 09:53:36.127753: val_loss -0.8666
|
886 |
+
2024-08-30 09:53:36.128754: Pseudo dice [0.9288, 0.9428, 0.9474, 0.9483, 0.9434, 0.9349, 0.9364, 0.9442, 0.9444, 0.939, 0.939, 0.9428, 0.955, 0.967, 0.9688, 0.9616, 0.9327, 0.0, 0.951, nan, nan]
|
887 |
+
2024-08-30 09:53:36.129586: Epoch time: 52.96 s
|
888 |
+
2024-08-30 09:53:36.130816: Yayy! New best EMA pseudo Dice: 0.8885
|
889 |
+
2024-08-30 09:53:39.801655:
|
890 |
+
2024-08-30 09:53:39.802550: Epoch 923
|
891 |
+
2024-08-30 09:53:39.803259: Current learning rate: 0.001
|
892 |
+
2024-08-30 09:54:33.398716: train_loss -0.8484
|
893 |
+
2024-08-30 09:54:33.401361: val_loss -0.8515
|
894 |
+
2024-08-30 09:54:33.402522: Pseudo dice [0.8997, 0.9143, 0.9175, 0.9186, 0.9427, 0.9346, 0.9287, 0.9462, 0.9505, 0.9428, 0.9277, 0.9285, 0.8983, 0.8973, 0.9088, 0.9193, 0.9334, nan, 0.9575, nan, 0.0]
|
895 |
+
2024-08-30 09:54:33.403441: Epoch time: 53.6 s
|
896 |
+
2024-08-30 09:54:34.808954:
|
897 |
+
2024-08-30 09:54:34.809764: Epoch 924
|
898 |
+
2024-08-30 09:54:34.810424: Current learning rate: 0.00098
|
899 |
+
2024-08-30 09:55:28.088682: train_loss -0.8511
|
900 |
+
2024-08-30 09:55:28.090126: val_loss -0.8449
|
901 |
+
2024-08-30 09:55:28.091139: Pseudo dice [0.9267, 0.9525, 0.954, 0.9556, 0.967, 0.9433, 0.923, 0.9156, 0.909, 0.916, 0.9098, 0.9217, 0.9491, 0.9631, 0.9363, 0.8794, 0.8242, 0.0, 0.9476, nan, nan]
|
902 |
+
2024-08-30 09:55:28.091930: Epoch time: 53.28 s
|
903 |
+
2024-08-30 09:55:29.546050:
|
904 |
+
2024-08-30 09:55:29.546935: Epoch 925
|
905 |
+
2024-08-30 09:55:29.547622: Current learning rate: 0.00097
|
906 |
+
2024-08-30 09:56:20.578186: train_loss -0.8467
|
907 |
+
2024-08-30 09:56:20.580002: val_loss -0.8509
|
908 |
+
2024-08-30 09:56:20.580908: Pseudo dice [0.897, 0.9127, 0.9111, 0.9206, 0.9238, 0.9235, 0.9344, 0.9569, 0.9414, 0.9223, 0.9145, 0.9139, 0.9645, 0.9502, 0.9306, 0.9167, 0.875, 0.0, 0.9509, nan, 0.0]
|
909 |
+
2024-08-30 09:56:20.581635: Epoch time: 51.04 s
|
910 |
+
2024-08-30 09:56:22.278639:
|
911 |
+
2024-08-30 09:56:22.279523: Epoch 926
|
912 |
+
2024-08-30 09:56:22.280306: Current learning rate: 0.00096
|
913 |
+
2024-08-30 09:57:17.439591: train_loss -0.8471
|
914 |
+
2024-08-30 09:57:17.441475: val_loss -0.858
|
915 |
+
2024-08-30 09:57:17.442336: Pseudo dice [0.9342, 0.9531, 0.955, 0.9623, 0.9609, 0.9706, 0.9524, 0.9177, 0.9257, 0.9353, 0.9205, 0.9225, 0.9538, 0.947, 0.9409, 0.9246, 0.9254, 0.0, 0.9634, nan, 0.0]
|
916 |
+
2024-08-30 09:57:17.443338: Epoch time: 55.16 s
|
917 |
+
2024-08-30 09:57:18.847193:
|
918 |
+
2024-08-30 09:57:18.848045: Epoch 927
|
919 |
+
2024-08-30 09:57:18.848760: Current learning rate: 0.00095
|
920 |
+
2024-08-30 09:58:12.705588: train_loss -0.835
|
921 |
+
2024-08-30 09:58:12.707983: val_loss -0.8488
|
922 |
+
2024-08-30 09:58:12.709411: Pseudo dice [0.9369, 0.9562, 0.9525, 0.9439, 0.9239, 0.9238, 0.933, 0.9301, 0.9272, 0.936, 0.9399, 0.9489, 0.9658, 0.937, 0.8944, 0.8545, 0.8314, 0.0, 0.937, nan, nan]
|
923 |
+
2024-08-30 09:58:12.710254: Epoch time: 53.86 s
|
924 |
+
2024-08-30 09:58:14.085509:
|
925 |
+
2024-08-30 09:58:14.086400: Epoch 928
|
926 |
+
2024-08-30 09:58:14.087110: Current learning rate: 0.00094
|
927 |
+
2024-08-30 09:59:08.547267: train_loss -0.8446
|
928 |
+
2024-08-30 09:59:08.548403: val_loss -0.8622
|
929 |
+
2024-08-30 09:59:08.549146: Pseudo dice [0.9348, 0.9537, 0.9508, 0.9588, 0.9615, 0.9586, 0.957, 0.9564, 0.9631, 0.9582, 0.9505, 0.9516, 0.9662, 0.941, 0.9272, 0.9393, 0.9324, 0.0, 0.9551, nan, nan]
|
930 |
+
2024-08-30 09:59:08.549765: Epoch time: 54.46 s
|
931 |
+
2024-08-30 09:59:10.562996:
|
932 |
+
2024-08-30 09:59:10.563841: Epoch 929
|
933 |
+
2024-08-30 09:59:10.564563: Current learning rate: 0.00092
|
934 |
+
2024-08-30 10:00:07.417935: train_loss -0.8513
|
935 |
+
2024-08-30 10:00:07.420214: val_loss -0.847
|
936 |
+
2024-08-30 10:00:07.421000: Pseudo dice [0.9407, 0.9197, 0.9079, 0.9191, 0.9232, 0.9127, 0.9074, 0.9006, 0.8912, 0.9228, 0.9414, 0.9477, 0.9045, 0.8925, 0.8886, 0.859, 0.8429, 0.0, 0.952, nan, 0.0]
|
937 |
+
2024-08-30 10:00:07.421667: Epoch time: 56.86 s
|
938 |
+
2024-08-30 10:00:08.787585:
|
939 |
+
2024-08-30 10:00:08.788558: Epoch 930
|
940 |
+
2024-08-30 10:00:08.789189: Current learning rate: 0.00091
|
941 |
+
2024-08-30 10:01:01.196343: train_loss -0.8495
|
942 |
+
2024-08-30 10:01:01.197545: val_loss -0.8592
|
943 |
+
2024-08-30 10:01:01.198351: Pseudo dice [0.9444, 0.9518, 0.9453, 0.9306, 0.9506, 0.9457, 0.9509, 0.9376, 0.9328, 0.9368, 0.9286, 0.9335, 0.9487, 0.9432, 0.9172, 0.9221, 0.905, 0.0, 0.9579, nan, nan]
|
944 |
+
2024-08-30 10:01:01.199045: Epoch time: 52.41 s
|
945 |
+
2024-08-30 10:01:02.768086:
|
946 |
+
2024-08-30 10:01:02.768924: Epoch 931
|
947 |
+
2024-08-30 10:01:02.769610: Current learning rate: 0.0009
|
948 |
+
2024-08-30 10:01:55.521782: train_loss -0.8537
|
949 |
+
2024-08-30 10:01:55.524310: val_loss -0.8603
|
950 |
+
2024-08-30 10:01:55.525145: Pseudo dice [0.9412, 0.9493, 0.9577, 0.9637, 0.9387, 0.9095, 0.9091, 0.9166, 0.9083, 0.9041, 0.9196, 0.9138, 0.9356, 0.9538, 0.944, 0.9366, 0.9278, nan, 0.9592, nan, nan]
|
951 |
+
2024-08-30 10:01:55.526204: Epoch time: 52.76 s
|
952 |
+
2024-08-30 10:01:56.917265:
|
953 |
+
2024-08-30 10:01:56.918360: Epoch 932
|
954 |
+
2024-08-30 10:01:56.919020: Current learning rate: 0.00089
|
955 |
+
2024-08-30 10:02:50.055418: train_loss -0.8415
|
956 |
+
2024-08-30 10:02:50.056636: val_loss -0.8556
|
957 |
+
2024-08-30 10:02:50.057668: Pseudo dice [0.9323, 0.9378, 0.9381, 0.9547, 0.957, 0.9645, 0.963, 0.9492, 0.9467, 0.9584, 0.9704, 0.9722, 0.9571, 0.9301, 0.8894, 0.8112, 0.7112, 0.0, 0.9598, nan, nan]
|
958 |
+
2024-08-30 10:02:50.058403: Epoch time: 53.14 s
|
959 |
+
2024-08-30 10:02:51.538301:
|
960 |
+
2024-08-30 10:02:51.539323: Epoch 933
|
961 |
+
2024-08-30 10:02:51.540074: Current learning rate: 0.00088
|
962 |
+
2024-08-30 10:03:42.849202: train_loss -0.8494
|
963 |
+
2024-08-30 10:03:42.851120: val_loss -0.8589
|
964 |
+
2024-08-30 10:03:42.851995: Pseudo dice [0.9421, 0.9578, 0.952, 0.9497, 0.9338, 0.9144, 0.9034, 0.9141, 0.9011, 0.8926, 0.8806, 0.8981, 0.9358, 0.9459, 0.95, 0.9464, 0.9092, 0.0, 0.9503, nan, 0.0]
|
965 |
+
2024-08-30 10:03:42.852779: Epoch time: 51.31 s
|
966 |
+
2024-08-30 10:03:44.188414:
|
967 |
+
2024-08-30 10:03:44.189330: Epoch 934
|
968 |
+
2024-08-30 10:03:44.190004: Current learning rate: 0.00087
|
969 |
+
2024-08-30 10:04:36.412654: train_loss -0.8525
|
970 |
+
2024-08-30 10:04:36.414294: val_loss -0.8557
|
971 |
+
2024-08-30 10:04:36.415700: Pseudo dice [0.898, 0.9104, 0.9188, 0.9263, 0.9027, 0.9003, 0.9326, 0.9562, 0.9608, 0.9633, 0.9462, 0.9462, 0.9568, 0.9351, 0.9318, 0.925, 0.8876, 0.0, 0.9538, nan, nan]
|
972 |
+
2024-08-30 10:04:36.416319: Epoch time: 52.23 s
|
973 |
+
2024-08-30 10:04:38.010796:
|
974 |
+
2024-08-30 10:04:38.011621: Epoch 935
|
975 |
+
2024-08-30 10:04:38.012287: Current learning rate: 0.00085
|
976 |
+
2024-08-30 10:05:31.319292: train_loss -0.845
|
977 |
+
2024-08-30 10:05:31.321297: val_loss -0.8617
|
978 |
+
2024-08-30 10:05:31.322178: Pseudo dice [0.9201, 0.9441, 0.9469, 0.9533, 0.9553, 0.9651, 0.9658, 0.9542, 0.9534, 0.953, 0.9597, 0.9705, 0.9708, 0.9728, 0.9694, 0.9424, 0.8878, 0.0, 0.9479, nan, nan]
|
979 |
+
2024-08-30 10:05:31.322860: Epoch time: 53.31 s
|
980 |
+
2024-08-30 10:05:32.736392:
|
981 |
+
2024-08-30 10:05:32.737269: Epoch 936
|
982 |
+
2024-08-30 10:05:32.737977: Current learning rate: 0.00084
|
983 |
+
2024-08-30 10:06:25.349432: train_loss -0.8485
|
984 |
+
2024-08-30 10:06:25.350676: val_loss -0.8688
|
985 |
+
2024-08-30 10:06:25.351754: Pseudo dice [0.9399, 0.9506, 0.9487, 0.9533, 0.9548, 0.9631, 0.9571, 0.9414, 0.9548, 0.9739, 0.9745, 0.9736, 0.9703, 0.9209, 0.9098, 0.9307, 0.9211, 0.0, 0.9639, nan, nan]
|
986 |
+
2024-08-30 10:06:25.352515: Epoch time: 52.62 s
|
987 |
+
2024-08-30 10:06:27.018242:
|
988 |
+
2024-08-30 10:06:27.019112: Epoch 937
|
989 |
+
2024-08-30 10:06:27.019696: Current learning rate: 0.00083
|
990 |
+
2024-08-30 10:07:17.556547: train_loss -0.8495
|
991 |
+
2024-08-30 10:07:17.560977: val_loss -0.8565
|
992 |
+
2024-08-30 10:07:17.561835: Pseudo dice [0.9357, 0.9529, 0.9492, 0.9554, 0.9551, 0.9555, 0.9568, 0.9557, 0.954, 0.9616, 0.9713, 0.9707, 0.9661, 0.9526, 0.9377, 0.9244, 0.875, 0.0, 0.9574, nan, nan]
|
993 |
+
2024-08-30 10:07:17.562643: Epoch time: 50.54 s
|
994 |
+
2024-08-30 10:07:19.018479:
|
995 |
+
2024-08-30 10:07:19.019373: Epoch 938
|
996 |
+
2024-08-30 10:07:19.020112: Current learning rate: 0.00082
|
997 |
+
2024-08-30 10:08:10.795385: train_loss -0.8497
|
998 |
+
2024-08-30 10:08:10.796620: val_loss -0.8659
|
999 |
+
2024-08-30 10:08:10.797538: Pseudo dice [0.947, 0.9601, 0.9543, 0.9229, 0.8806, 0.8887, 0.902, 0.9312, 0.9388, 0.9307, 0.9311, 0.9564, 0.975, 0.9585, 0.9433, 0.9293, 0.9085, 0.0, 0.962, nan, nan]
|
1000 |
+
2024-08-30 10:08:10.798293: Epoch time: 51.78 s
|
1001 |
+
2024-08-30 10:08:12.109109:
|
1002 |
+
2024-08-30 10:08:12.110064: Epoch 939
|
1003 |
+
2024-08-30 10:08:12.111285: Current learning rate: 0.00081
|
1004 |
+
2024-08-30 10:09:05.459191: train_loss -0.8513
|
1005 |
+
2024-08-30 10:09:05.461498: val_loss -0.8631
|
1006 |
+
2024-08-30 10:09:05.462358: Pseudo dice [0.9396, 0.9531, 0.9499, 0.9481, 0.9592, 0.9571, 0.9458, 0.9453, 0.9477, 0.9393, 0.9074, 0.9308, 0.9717, 0.9697, 0.9644, 0.9672, 0.9457, 0.0, 0.9591, nan, nan]
|
1007 |
+
2024-08-30 10:09:05.463133: Epoch time: 53.35 s
|
1008 |
+
2024-08-30 10:09:06.766939:
|
1009 |
+
2024-08-30 10:09:06.767855: Epoch 940
|
1010 |
+
2024-08-30 10:09:06.768591: Current learning rate: 0.00079
|
1011 |
+
2024-08-30 10:10:00.599563: train_loss -0.8483
|
1012 |
+
2024-08-30 10:10:00.600801: val_loss -0.8512
|
1013 |
+
2024-08-30 10:10:00.601717: Pseudo dice [0.9324, 0.9517, 0.9488, 0.9508, 0.9469, 0.9442, 0.9477, 0.9416, 0.9403, 0.9358, 0.9401, 0.9398, 0.9471, 0.9298, 0.9133, 0.9053, 0.8635, 0.0, 0.9542, nan, nan]
|
1014 |
+
2024-08-30 10:10:00.602605: Epoch time: 53.84 s
|
1015 |
+
2024-08-30 10:10:01.960508:
|
1016 |
+
2024-08-30 10:10:01.961484: Epoch 941
|
1017 |
+
2024-08-30 10:10:01.963042: Current learning rate: 0.00078
|
1018 |
+
2024-08-30 10:10:55.619714: train_loss -0.8418
|
1019 |
+
2024-08-30 10:10:55.621739: val_loss -0.8536
|
1020 |
+
2024-08-30 10:10:55.623024: Pseudo dice [0.9313, 0.9371, 0.9095, 0.9275, 0.9525, 0.9471, 0.9326, 0.9257, 0.9455, 0.9546, 0.9468, 0.9524, 0.9538, 0.9208, 0.8967, 0.9093, 0.9048, 0.0, 0.9623, nan, nan]
|
1021 |
+
2024-08-30 10:10:55.623796: Epoch time: 53.66 s
|
1022 |
+
2024-08-30 10:10:57.169628:
|
1023 |
+
2024-08-30 10:10:57.170497: Epoch 942
|
1024 |
+
2024-08-30 10:10:57.171117: Current learning rate: 0.00077
|
1025 |
+
2024-08-30 10:11:50.687285: train_loss -0.8446
|
1026 |
+
2024-08-30 10:11:50.689352: val_loss -0.8614
|
1027 |
+
2024-08-30 10:11:50.690510: Pseudo dice [0.9353, 0.9151, 0.8989, 0.9208, 0.9384, 0.9407, 0.9502, 0.9572, 0.9437, 0.9392, 0.9362, 0.9302, 0.9763, 0.9756, 0.9776, 0.9761, 0.9484, nan, 0.9563, nan, 0.0]
|
1028 |
+
2024-08-30 10:11:50.691401: Epoch time: 53.52 s
|
1029 |
+
2024-08-30 10:11:52.028788:
|
1030 |
+
2024-08-30 10:11:52.029699: Epoch 943
|
1031 |
+
2024-08-30 10:11:52.030868: Current learning rate: 0.00076
|
1032 |
+
2024-08-30 10:12:44.592405: train_loss -0.8482
|
1033 |
+
2024-08-30 10:12:44.594781: val_loss -0.8415
|
1034 |
+
2024-08-30 10:12:44.595926: Pseudo dice [0.936, 0.9144, 0.8553, 0.8094, 0.8132, 0.8317, 0.8481, 0.867, 0.8431, 0.8296, 0.8696, 0.9108, 0.9678, 0.9529, 0.9487, 0.9601, 0.9371, 0.0, 0.9445, nan, 0.0]
|
1035 |
+
2024-08-30 10:12:44.597034: Epoch time: 52.57 s
|
1036 |
+
2024-08-30 10:12:45.926375:
|
1037 |
+
2024-08-30 10:12:45.927188: Epoch 944
|
1038 |
+
2024-08-30 10:12:45.927808: Current learning rate: 0.00075
|
1039 |
+
2024-08-30 10:13:38.911172: train_loss -0.8484
|
1040 |
+
2024-08-30 10:13:38.912382: val_loss -0.8621
|
1041 |
+
2024-08-30 10:13:38.913141: Pseudo dice [0.942, 0.9591, 0.9562, 0.9496, 0.9505, 0.9673, 0.9708, 0.9698, 0.9483, 0.9332, 0.9397, 0.9332, 0.9444, 0.9698, 0.9592, 0.9467, 0.9292, 0.0, 0.9606, nan, nan]
|
1042 |
+
2024-08-30 10:13:38.913756: Epoch time: 52.99 s
|
1043 |
+
2024-08-30 10:13:40.294965:
|
1044 |
+
2024-08-30 10:13:40.295834: Epoch 945
|
1045 |
+
2024-08-30 10:13:40.296935: Current learning rate: 0.00074
|
1046 |
+
2024-08-30 10:14:31.573598: train_loss -0.8465
|
1047 |
+
2024-08-30 10:14:31.575586: val_loss -0.8651
|
1048 |
+
2024-08-30 10:14:31.576441: Pseudo dice [0.9389, 0.9581, 0.9415, 0.9293, 0.9294, 0.9374, 0.9554, 0.9534, 0.9519, 0.9529, 0.9394, 0.937, 0.9609, 0.9627, 0.9668, 0.9657, 0.9276, 0.0, 0.9481, nan, nan]
|
1049 |
+
2024-08-30 10:14:31.577214: Epoch time: 51.28 s
|
1050 |
+
2024-08-30 10:14:32.872183:
|
1051 |
+
2024-08-30 10:14:32.873069: Epoch 946
|
1052 |
+
2024-08-30 10:14:32.873809: Current learning rate: 0.00072
|
1053 |
+
2024-08-30 10:15:23.738815: train_loss -0.8518
|
1054 |
+
2024-08-30 10:15:23.740160: val_loss -0.8489
|
1055 |
+
2024-08-30 10:15:23.741539: Pseudo dice [0.9433, 0.9538, 0.9368, 0.9441, 0.9668, 0.9477, 0.9269, 0.9266, 0.9073, 0.8988, 0.9023, 0.8886, 0.9207, 0.9466, 0.9653, 0.9378, 0.8964, 0.0, 0.9341, nan, nan]
|
1056 |
+
2024-08-30 10:15:23.742865: Epoch time: 50.87 s
|
1057 |
+
2024-08-30 10:15:25.223688:
|
1058 |
+
2024-08-30 10:15:25.225006: Epoch 947
|
1059 |
+
2024-08-30 10:15:25.225703: Current learning rate: 0.00071
|
1060 |
+
2024-08-30 10:16:17.235266: train_loss -0.8482
|
1061 |
+
2024-08-30 10:16:17.237830: val_loss -0.8527
|
1062 |
+
2024-08-30 10:16:17.238741: Pseudo dice [0.9489, 0.9583, 0.9581, 0.9618, 0.9507, 0.9305, 0.9245, 0.9277, 0.9346, 0.9362, 0.9359, 0.9237, 0.95, 0.9412, 0.9226, 0.9141, 0.8771, 0.0, 0.9424, nan, 0.0]
|
1063 |
+
2024-08-30 10:16:17.239523: Epoch time: 52.01 s
|
1064 |
+
2024-08-30 10:16:18.688260:
|
1065 |
+
2024-08-30 10:16:18.689226: Epoch 948
|
1066 |
+
2024-08-30 10:16:18.689977: Current learning rate: 0.0007
|
1067 |
+
2024-08-30 10:17:10.591592: train_loss -0.8459
|
1068 |
+
2024-08-30 10:17:10.592764: val_loss -0.8723
|
1069 |
+
2024-08-30 10:17:10.593572: Pseudo dice [0.9477, 0.9599, 0.9539, 0.961, 0.9535, 0.9472, 0.9611, 0.9546, 0.9446, 0.9533, 0.9679, 0.9669, 0.972, 0.9611, 0.9489, 0.9493, 0.9316, nan, 0.9558, nan, nan]
|
1070 |
+
2024-08-30 10:17:10.595065: Epoch time: 51.91 s
|
1071 |
+
2024-08-30 10:17:12.050292:
|
1072 |
+
2024-08-30 10:17:12.051152: Epoch 949
|
1073 |
+
2024-08-30 10:17:12.051842: Current learning rate: 0.00069
|
1074 |
+
2024-08-30 10:18:04.503434: train_loss -0.8511
|
1075 |
+
2024-08-30 10:18:04.505441: val_loss -0.8623
|
1076 |
+
2024-08-30 10:18:04.506627: Pseudo dice [0.9348, 0.9528, 0.9361, 0.9381, 0.94, 0.9376, 0.953, 0.9522, 0.9423, 0.9401, 0.9352, 0.9533, 0.9705, 0.9692, 0.9558, 0.9346, 0.8951, 0.0, 0.9522, nan, nan]
|
1077 |
+
2024-08-30 10:18:04.507421: Epoch time: 52.46 s
|
1078 |
+
2024-08-30 10:18:08.332660:
|
1079 |
+
2024-08-30 10:18:08.333610: Epoch 950
|
1080 |
+
2024-08-30 10:18:08.334377: Current learning rate: 0.00067
|
1081 |
+
2024-08-30 10:18:58.365385: train_loss -0.8504
|
1082 |
+
2024-08-30 10:18:58.366978: val_loss -0.8706
|
1083 |
+
2024-08-30 10:18:58.367857: Pseudo dice [0.9368, 0.9528, 0.955, 0.9555, 0.957, 0.9679, 0.9657, 0.9605, 0.9534, 0.9609, 0.972, 0.971, 0.9625, 0.9421, 0.9312, 0.943, 0.9552, nan, 0.9642, nan, nan]
|
1084 |
+
2024-08-30 10:18:58.368901: Epoch time: 50.04 s
|
1085 |
+
2024-08-30 10:18:58.370017: Yayy! New best EMA pseudo Dice: 0.8934
|
1086 |
+
2024-08-30 10:19:02.179308:
|
1087 |
+
2024-08-30 10:19:02.180123: Epoch 951
|
1088 |
+
2024-08-30 10:19:02.180810: Current learning rate: 0.00066
|
1089 |
+
2024-08-30 10:19:54.884330: train_loss -0.8495
|
1090 |
+
2024-08-30 10:19:54.886884: val_loss -0.8577
|
1091 |
+
2024-08-30 10:19:54.888390: Pseudo dice [0.9374, 0.9279, 0.9047, 0.9259, 0.9199, 0.9153, 0.914, 0.9093, 0.8888, 0.896, 0.9074, 0.9266, 0.9699, 0.9726, 0.9751, 0.9716, 0.9423, 0.0, 0.9379, nan, 0.0]
|
1092 |
+
2024-08-30 10:19:54.889423: Epoch time: 52.71 s
|
1093 |
+
2024-08-30 10:19:56.320306:
|
1094 |
+
2024-08-30 10:19:56.321118: Epoch 952
|
1095 |
+
2024-08-30 10:19:56.321761: Current learning rate: 0.00065
|
1096 |
+
2024-08-30 10:20:50.129279: train_loss -0.8535
|
1097 |
+
2024-08-30 10:20:50.132541: val_loss -0.8661
|
1098 |
+
2024-08-30 10:20:50.133902: Pseudo dice [0.9408, 0.9576, 0.9585, 0.9653, 0.9425, 0.9266, 0.93, 0.9352, 0.9346, 0.9343, 0.9364, 0.9454, 0.9632, 0.9652, 0.9092, 0.865, 0.8257, 0.0, 0.9382, nan, nan]
|
1099 |
+
2024-08-30 10:20:50.135259: Epoch time: 53.81 s
|
1100 |
+
2024-08-30 10:20:51.537604:
|
1101 |
+
2024-08-30 10:20:51.538487: Epoch 953
|
1102 |
+
2024-08-30 10:20:51.539279: Current learning rate: 0.00064
|
1103 |
+
2024-08-30 10:21:45.772618: train_loss -0.8468
|
1104 |
+
2024-08-30 10:21:45.774719: val_loss -0.8624
|
1105 |
+
2024-08-30 10:21:45.775752: Pseudo dice [0.9431, 0.9498, 0.9357, 0.9299, 0.9372, 0.9245, 0.928, 0.9424, 0.9433, 0.946, 0.9426, 0.9343, 0.9528, 0.9536, 0.9578, 0.9525, 0.9319, 0.0, 0.9445, nan, 0.0]
|
1106 |
+
2024-08-30 10:21:45.776482: Epoch time: 54.24 s
|
1107 |
+
2024-08-30 10:21:47.152239:
|
1108 |
+
2024-08-30 10:21:47.153165: Epoch 954
|
1109 |
+
2024-08-30 10:21:47.153802: Current learning rate: 0.00063
|
1110 |
+
2024-08-30 10:22:37.036758: train_loss -0.8478
|
1111 |
+
2024-08-30 10:22:37.038927: val_loss -0.8647
|
1112 |
+
2024-08-30 10:22:37.040422: Pseudo dice [0.9471, 0.9597, 0.9566, 0.961, 0.9667, 0.9687, 0.9667, 0.9666, 0.972, 0.9698, 0.9631, 0.9627, 0.9761, 0.9688, 0.9384, 0.8997, 0.8372, 0.0, 0.9544, nan, nan]
|
1113 |
+
2024-08-30 10:22:37.042530: Epoch time: 49.89 s
|
1114 |
+
2024-08-30 10:22:39.197179:
|
1115 |
+
2024-08-30 10:22:39.198064: Epoch 955
|
1116 |
+
2024-08-30 10:22:39.198757: Current learning rate: 0.00061
|
1117 |
+
2024-08-30 10:23:32.983045: train_loss -0.8514
|
1118 |
+
2024-08-30 10:23:32.984947: val_loss -0.8497
|
1119 |
+
2024-08-30 10:23:32.985809: Pseudo dice [0.9417, 0.9458, 0.9277, 0.9272, 0.9095, 0.8993, 0.8906, 0.8902, 0.8748, 0.8802, 0.8889, 0.9122, 0.9483, 0.9236, 0.9233, 0.9228, 0.8949, 0.0, 0.9582, nan, nan]
|
1120 |
+
2024-08-30 10:23:32.986533: Epoch time: 53.79 s
|
1121 |
+
2024-08-30 10:23:34.411091:
|
1122 |
+
2024-08-30 10:23:34.412478: Epoch 956
|
1123 |
+
2024-08-30 10:23:34.413252: Current learning rate: 0.0006
|
1124 |
+
2024-08-30 10:24:25.120140: train_loss -0.8509
|
1125 |
+
2024-08-30 10:24:25.122234: val_loss -0.8607
|
1126 |
+
2024-08-30 10:24:25.123515: Pseudo dice [0.9355, 0.9569, 0.9581, 0.956, 0.9607, 0.9507, 0.9372, 0.9236, 0.9172, 0.9216, 0.92, 0.9142, 0.9196, 0.9479, 0.9709, 0.9716, 0.9498, nan, 0.9579, nan, nan]
|
1127 |
+
2024-08-30 10:24:25.124424: Epoch time: 50.71 s
|
1128 |
+
2024-08-30 10:24:26.560558:
|
1129 |
+
2024-08-30 10:24:26.561507: Epoch 957
|
1130 |
+
2024-08-30 10:24:26.562325: Current learning rate: 0.00059
|
1131 |
+
2024-08-30 10:25:18.697950: train_loss -0.8523
|
1132 |
+
2024-08-30 10:25:18.699968: val_loss -0.8638
|
1133 |
+
2024-08-30 10:25:18.701277: Pseudo dice [0.9488, 0.9585, 0.9532, 0.9477, 0.9519, 0.9542, 0.9468, 0.9408, 0.9509, 0.9569, 0.952, 0.9572, 0.9749, 0.9708, 0.9532, 0.9316, 0.8887, 0.0, 0.9573, nan, nan]
|
1134 |
+
2024-08-30 10:25:18.702055: Epoch time: 52.14 s
|
1135 |
+
2024-08-30 10:25:20.114854:
|
1136 |
+
2024-08-30 10:25:20.115787: Epoch 958
|
1137 |
+
2024-08-30 10:25:20.116835: Current learning rate: 0.00058
|
1138 |
+
2024-08-30 10:26:16.232098: train_loss -0.8528
|
1139 |
+
2024-08-30 10:26:16.234014: val_loss -0.8627
|
1140 |
+
2024-08-30 10:26:16.235000: Pseudo dice [0.9405, 0.951, 0.9565, 0.9631, 0.9693, 0.9697, 0.9625, 0.9524, 0.9215, 0.9027, 0.9065, 0.9178, 0.9741, 0.958, 0.9463, 0.9399, 0.9012, 0.0, 0.961, nan, 0.0]
|
1141 |
+
2024-08-30 10:26:16.235769: Epoch time: 56.12 s
|
1142 |
+
2024-08-30 10:26:17.819944:
|
1143 |
+
2024-08-30 10:26:17.820786: Epoch 959
|
1144 |
+
2024-08-30 10:26:17.821509: Current learning rate: 0.00056
|
1145 |
+
2024-08-30 10:27:11.785599: train_loss -0.8528
|
1146 |
+
2024-08-30 10:27:11.787754: val_loss -0.8548
|
1147 |
+
2024-08-30 10:27:11.788694: Pseudo dice [0.9354, 0.9556, 0.9546, 0.9597, 0.9667, 0.9674, 0.9668, 0.9666, 0.9609, 0.9453, 0.9371, 0.9278, 0.9338, 0.9075, 0.9006, 0.9045, 0.9078, 0.0, 0.9586, nan, 0.0]
|
1148 |
+
2024-08-30 10:27:11.789490: Epoch time: 53.97 s
|
1149 |
+
2024-08-30 10:27:13.205766:
|
1150 |
+
2024-08-30 10:27:13.206791: Epoch 960
|
1151 |
+
2024-08-30 10:27:13.207520: Current learning rate: 0.00055
|
1152 |
+
2024-08-30 10:28:03.886426: train_loss -0.8516
|
1153 |
+
2024-08-30 10:28:03.888169: val_loss -0.8626
|
1154 |
+
2024-08-30 10:28:03.889139: Pseudo dice [0.9472, 0.9578, 0.9634, 0.9636, 0.963, 0.9673, 0.9643, 0.9611, 0.9578, 0.9668, 0.9577, 0.9583, 0.9769, 0.9593, 0.9384, 0.9226, 0.8688, 0.0, 0.9635, nan, 0.0]
|
1155 |
+
2024-08-30 10:28:03.889918: Epoch time: 50.68 s
|
1156 |
+
2024-08-30 10:28:05.554668:
|
1157 |
+
2024-08-30 10:28:05.555649: Epoch 961
|
1158 |
+
2024-08-30 10:28:05.556334: Current learning rate: 0.00054
|
1159 |
+
2024-08-30 10:28:56.406420: train_loss -0.8509
|
1160 |
+
2024-08-30 10:28:56.408435: val_loss -0.865
|
1161 |
+
2024-08-30 10:28:56.409498: Pseudo dice [0.9393, 0.9537, 0.9568, 0.9603, 0.9462, 0.9466, 0.9639, 0.9547, 0.9497, 0.9518, 0.9599, 0.9745, 0.9762, 0.9508, 0.9305, 0.9266, 0.9018, 0.0, 0.9513, nan, nan]
|
1162 |
+
2024-08-30 10:28:56.410274: Epoch time: 50.85 s
|
1163 |
+
2024-08-30 10:28:57.787627:
|
1164 |
+
2024-08-30 10:28:57.788509: Epoch 962
|
1165 |
+
2024-08-30 10:28:57.789255: Current learning rate: 0.00053
|
1166 |
+
2024-08-30 10:29:50.047581: train_loss -0.8475
|
1167 |
+
2024-08-30 10:29:50.069606: val_loss -0.8617
|
1168 |
+
2024-08-30 10:29:50.070552: Pseudo dice [0.9431, 0.9537, 0.9611, 0.9603, 0.9618, 0.9615, 0.9668, 0.9719, 0.9749, 0.9735, 0.951, 0.9393, 0.9506, 0.9258, 0.8976, 0.886, 0.8648, 0.0, 0.9555, nan, nan]
|
1169 |
+
2024-08-30 10:29:50.071630: Epoch time: 52.26 s
|
1170 |
+
2024-08-30 10:29:51.671604:
|
1171 |
+
2024-08-30 10:29:51.672522: Epoch 963
|
1172 |
+
2024-08-30 10:29:51.673182: Current learning rate: 0.00051
|
1173 |
+
2024-08-30 10:30:47.847667: train_loss -0.8439
|
1174 |
+
2024-08-30 10:30:47.849686: val_loss -0.8609
|
1175 |
+
2024-08-30 10:30:47.850615: Pseudo dice [0.9255, 0.9272, 0.9185, 0.9198, 0.9233, 0.9346, 0.9349, 0.9366, 0.9476, 0.9637, 0.9615, 0.9644, 0.963, 0.9656, 0.9522, 0.9402, 0.8983, 0.0, 0.9443, nan, 0.0]
|
1176 |
+
2024-08-30 10:30:47.851458: Epoch time: 56.18 s
|
1177 |
+
2024-08-30 10:30:49.205772:
|
1178 |
+
2024-08-30 10:30:49.206768: Epoch 964
|
1179 |
+
2024-08-30 10:30:49.207551: Current learning rate: 0.0005
|
1180 |
+
2024-08-30 10:31:39.172107: train_loss -0.8554
|
1181 |
+
2024-08-30 10:31:39.174134: val_loss -0.8646
|
1182 |
+
2024-08-30 10:31:39.175307: Pseudo dice [0.9501, 0.9612, 0.9612, 0.9636, 0.9571, 0.9479, 0.9443, 0.9442, 0.9453, 0.941, 0.9372, 0.9385, 0.9616, 0.9487, 0.951, 0.9487, 0.9177, 0.0, 0.9416, nan, 0.0]
|
1183 |
+
2024-08-30 10:31:39.177831: Epoch time: 49.97 s
|
1184 |
+
2024-08-30 10:31:40.590867:
|
1185 |
+
2024-08-30 10:31:40.591808: Epoch 965
|
1186 |
+
2024-08-30 10:31:40.592421: Current learning rate: 0.00049
|
1187 |
+
2024-08-30 10:32:33.608516: train_loss -0.8513
|
1188 |
+
2024-08-30 10:32:33.610365: val_loss -0.8555
|
1189 |
+
2024-08-30 10:32:33.611141: Pseudo dice [0.9339, 0.9336, 0.9374, 0.9543, 0.9635, 0.9545, 0.9334, 0.9245, 0.9256, 0.9326, 0.9357, 0.9326, 0.9589, 0.9568, 0.9721, 0.9653, 0.9163, 0.0, 0.9506, nan, 0.0]
|
1190 |
+
2024-08-30 10:32:33.611813: Epoch time: 53.02 s
|
1191 |
+
2024-08-30 10:32:34.942311:
|
1192 |
+
2024-08-30 10:32:34.943221: Epoch 966
|
1193 |
+
2024-08-30 10:32:34.944027: Current learning rate: 0.00048
|
1194 |
+
2024-08-30 10:33:28.128527: train_loss -0.8563
|
1195 |
+
2024-08-30 10:33:28.130756: val_loss -0.8634
|
1196 |
+
2024-08-30 10:33:28.131817: Pseudo dice [0.9453, 0.9552, 0.9565, 0.9553, 0.965, 0.9665, 0.9683, 0.9665, 0.9685, 0.9705, 0.9719, 0.9666, 0.9477, 0.931, 0.9283, 0.9128, 0.8518, 0.0, 0.9563, nan, nan]
|
1197 |
+
2024-08-30 10:33:28.132555: Epoch time: 53.19 s
|
1198 |
+
2024-08-30 10:33:29.519173:
|
1199 |
+
2024-08-30 10:33:29.519992: Epoch 967
|
1200 |
+
2024-08-30 10:33:29.520659: Current learning rate: 0.00046
|
1201 |
+
2024-08-30 10:34:18.766661: train_loss -0.8448
|
1202 |
+
2024-08-30 10:34:18.769221: val_loss -0.8632
|
1203 |
+
2024-08-30 10:34:18.770272: Pseudo dice [0.9457, 0.9606, 0.9631, 0.9632, 0.9679, 0.9677, 0.9682, 0.965, 0.9589, 0.9661, 0.9746, 0.974, 0.9591, 0.9485, 0.9088, 0.9065, 0.8714, 0.0, 0.9564, nan, nan]
|
1204 |
+
2024-08-30 10:34:18.772259: Epoch time: 49.25 s
|
1205 |
+
2024-08-30 10:34:20.964338:
|
1206 |
+
2024-08-30 10:34:20.965245: Epoch 968
|
1207 |
+
2024-08-30 10:34:20.965929: Current learning rate: 0.00045
|
1208 |
+
2024-08-30 10:35:11.805357: train_loss -0.8609
|
1209 |
+
2024-08-30 10:35:11.807266: val_loss -0.8651
|
1210 |
+
2024-08-30 10:35:11.808123: Pseudo dice [0.9342, 0.9529, 0.9568, 0.9437, 0.9374, 0.9392, 0.9332, 0.9365, 0.9466, 0.9474, 0.9442, 0.9262, 0.9469, 0.962, 0.9653, 0.9653, 0.9407, 0.0, 0.9573, nan, 0.0]
|
1211 |
+
2024-08-30 10:35:11.808921: Epoch time: 50.84 s
|
1212 |
+
2024-08-30 10:35:13.364706:
|
1213 |
+
2024-08-30 10:35:13.365916: Epoch 969
|
1214 |
+
2024-08-30 10:35:13.366628: Current learning rate: 0.00044
|
1215 |
+
2024-08-30 10:36:03.890323: train_loss -0.8518
|
1216 |
+
2024-08-30 10:36:03.892499: val_loss -0.8605
|
1217 |
+
2024-08-30 10:36:03.893434: Pseudo dice [0.9404, 0.9564, 0.9572, 0.9579, 0.9614, 0.9661, 0.9651, 0.9691, 0.9508, 0.9418, 0.9412, 0.9487, 0.9676, 0.9618, 0.9378, 0.9173, 0.896, 0.0, 0.9498, nan, nan]
|
1218 |
+
2024-08-30 10:36:03.894784: Epoch time: 50.53 s
|
1219 |
+
2024-08-30 10:36:05.254843:
|
1220 |
+
2024-08-30 10:36:05.255668: Epoch 970
|
1221 |
+
2024-08-30 10:36:05.256326: Current learning rate: 0.00043
|
1222 |
+
2024-08-30 10:36:56.767352: train_loss -0.8553
|
1223 |
+
2024-08-30 10:36:56.769070: val_loss -0.8669
|
1224 |
+
2024-08-30 10:36:56.770013: Pseudo dice [0.9409, 0.9599, 0.9579, 0.9625, 0.9647, 0.9703, 0.9704, 0.9722, 0.9749, 0.9766, 0.9753, 0.9784, 0.973, 0.9625, 0.958, 0.9415, 0.9008, 0.0, 0.9396, nan, nan]
|
1225 |
+
2024-08-30 10:36:56.770851: Epoch time: 51.52 s
|
1226 |
+
2024-08-30 10:36:58.303058:
|
1227 |
+
2024-08-30 10:36:58.303992: Epoch 971
|
1228 |
+
2024-08-30 10:36:58.304650: Current learning rate: 0.00041
|
1229 |
+
2024-08-30 10:37:51.052966: train_loss -0.8511
|
1230 |
+
2024-08-30 10:37:51.057210: val_loss -0.8695
|
1231 |
+
2024-08-30 10:37:51.058389: Pseudo dice [0.9424, 0.9567, 0.9528, 0.9567, 0.9566, 0.9596, 0.9671, 0.9708, 0.9727, 0.974, 0.9613, 0.9613, 0.9678, 0.9598, 0.9492, 0.9396, 0.9303, 0.0, 0.9603, nan, nan]
|
1232 |
+
2024-08-30 10:37:51.059462: Epoch time: 52.75 s
|
1233 |
+
2024-08-30 10:37:52.660736:
|
1234 |
+
2024-08-30 10:37:52.661870: Epoch 972
|
1235 |
+
2024-08-30 10:37:52.662707: Current learning rate: 0.0004
|
1236 |
+
2024-08-30 10:38:46.115569: train_loss -0.8473
|
1237 |
+
2024-08-30 10:38:46.117456: val_loss -0.8688
|
1238 |
+
2024-08-30 10:38:46.118410: Pseudo dice [0.9467, 0.9542, 0.9552, 0.963, 0.9655, 0.9685, 0.9669, 0.9641, 0.9567, 0.9394, 0.9496, 0.9705, 0.9757, 0.9614, 0.9687, 0.9639, 0.9063, 0.0, 0.9488, nan, nan]
|
1239 |
+
2024-08-30 10:38:46.119312: Epoch time: 53.46 s
|
1240 |
+
2024-08-30 10:38:47.479060:
|
1241 |
+
2024-08-30 10:38:47.480008: Epoch 973
|
1242 |
+
2024-08-30 10:38:47.480727: Current learning rate: 0.00039
|
1243 |
+
2024-08-30 10:39:39.407035: train_loss -0.8547
|
1244 |
+
2024-08-30 10:39:39.410274: val_loss -0.8459
|
1245 |
+
2024-08-30 10:39:39.411667: Pseudo dice [0.9431, 0.9664, 0.9625, 0.9673, 0.9484, 0.9083, 0.8803, 0.8826, 0.9039, 0.897, 0.8781, 0.8948, 0.9488, 0.933, 0.9362, 0.9335, 0.8887, 0.0, 0.9493, nan, 0.0]
|
1246 |
+
2024-08-30 10:39:39.412500: Epoch time: 51.93 s
|
1247 |
+
2024-08-30 10:39:40.981354:
|
1248 |
+
2024-08-30 10:39:40.982215: Epoch 974
|
1249 |
+
2024-08-30 10:39:40.982883: Current learning rate: 0.00037
|
1250 |
+
2024-08-30 10:40:33.646824: train_loss -0.8498
|
1251 |
+
2024-08-30 10:40:33.648756: val_loss -0.8655
|
1252 |
+
2024-08-30 10:40:33.649667: Pseudo dice [0.948, 0.9587, 0.9602, 0.9651, 0.9637, 0.957, 0.9566, 0.9663, 0.9671, 0.9683, 0.9659, 0.9546, 0.9351, 0.9272, 0.9344, 0.9261, 0.9274, nan, 0.9631, nan, nan]
|
1253 |
+
2024-08-30 10:40:33.650858: Epoch time: 52.67 s
|
1254 |
+
2024-08-30 10:40:34.929865:
|
1255 |
+
2024-08-30 10:40:34.930692: Epoch 975
|
1256 |
+
2024-08-30 10:40:34.931337: Current learning rate: 0.00036
|
1257 |
+
2024-08-30 10:41:27.566087: train_loss -0.8482
|
1258 |
+
2024-08-30 10:41:27.568153: val_loss -0.8608
|
1259 |
+
2024-08-30 10:41:27.569205: Pseudo dice [0.9477, 0.9617, 0.9573, 0.9613, 0.9665, 0.9704, 0.9684, 0.967, 0.9444, 0.9346, 0.9434, 0.9492, 0.9617, 0.967, 0.9451, 0.9407, 0.9227, 0.0, 0.9552, nan, nan]
|
1260 |
+
2024-08-30 10:41:27.570002: Epoch time: 52.64 s
|
1261 |
+
2024-08-30 10:41:28.975760:
|
1262 |
+
2024-08-30 10:41:28.976546: Epoch 976
|
1263 |
+
2024-08-30 10:41:28.977172: Current learning rate: 0.00035
|
1264 |
+
2024-08-30 10:42:21.734678: train_loss -0.8545
|
1265 |
+
2024-08-30 10:42:21.736466: val_loss -0.8763
|
1266 |
+
2024-08-30 10:42:21.737272: Pseudo dice [0.9477, 0.9601, 0.9572, 0.9584, 0.9648, 0.969, 0.9733, 0.9764, 0.9763, 0.9755, 0.9749, 0.9739, 0.9763, 0.9746, 0.976, 0.9676, 0.9392, 0.0, 0.9537, nan, nan]
|
1267 |
+
2024-08-30 10:42:21.738056: Epoch time: 52.76 s
|
1268 |
+
2024-08-30 10:42:23.365764:
|
1269 |
+
2024-08-30 10:42:23.366827: Epoch 977
|
1270 |
+
2024-08-30 10:42:23.367524: Current learning rate: 0.00034
|
1271 |
+
2024-08-30 10:43:16.225462: train_loss -0.8596
|
1272 |
+
2024-08-30 10:43:16.227605: val_loss -0.8631
|
1273 |
+
2024-08-30 10:43:16.228473: Pseudo dice [0.9426, 0.9546, 0.9606, 0.9654, 0.9691, 0.9702, 0.9716, 0.9671, 0.9671, 0.9696, 0.9652, 0.9478, 0.9705, 0.9558, 0.9122, 0.8836, 0.8467, 0.0, 0.9624, nan, 0.0]
|
1274 |
+
2024-08-30 10:43:16.229274: Epoch time: 52.86 s
|
1275 |
+
2024-08-30 10:43:17.593902:
|
1276 |
+
2024-08-30 10:43:17.594803: Epoch 978
|
1277 |
+
2024-08-30 10:43:17.595508: Current learning rate: 0.00032
|
1278 |
+
2024-08-30 10:44:08.411565: train_loss -0.8548
|
1279 |
+
2024-08-30 10:44:08.414885: val_loss -0.8666
|
1280 |
+
2024-08-30 10:44:08.415771: Pseudo dice [0.9365, 0.9421, 0.9448, 0.9496, 0.9406, 0.9489, 0.9591, 0.9543, 0.9571, 0.9643, 0.9624, 0.9663, 0.96, 0.9627, 0.9621, 0.9482, 0.9437, nan, 0.9587, nan, nan]
|
1281 |
+
2024-08-30 10:44:08.416511: Epoch time: 50.82 s
|
1282 |
+
2024-08-30 10:44:08.417217: Yayy! New best EMA pseudo Dice: 0.8949
|
1283 |
+
2024-08-30 10:44:12.315161:
|
1284 |
+
2024-08-30 10:44:12.315899: Epoch 979
|
1285 |
+
2024-08-30 10:44:12.316523: Current learning rate: 0.00031
|
1286 |
+
2024-08-30 10:45:03.216096: train_loss -0.8622
|
1287 |
+
2024-08-30 10:45:03.218657: val_loss -0.8621
|
1288 |
+
2024-08-30 10:45:03.219755: Pseudo dice [0.9129, 0.9241, 0.9367, 0.933, 0.9333, 0.93, 0.9326, 0.9444, 0.9535, 0.9536, 0.9569, 0.9657, 0.9696, 0.9659, 0.9547, 0.9481, 0.8986, 0.0, 0.9508, nan, nan]
|
1289 |
+
2024-08-30 10:45:03.220763: Epoch time: 50.9 s
|
1290 |
+
2024-08-30 10:45:04.848525:
|
1291 |
+
2024-08-30 10:45:04.849304: Epoch 980
|
1292 |
+
2024-08-30 10:45:04.850012: Current learning rate: 0.0003
|
1293 |
+
2024-08-30 10:45:55.391572: train_loss -0.8482
|
1294 |
+
2024-08-30 10:45:55.393471: val_loss -0.8648
|
1295 |
+
2024-08-30 10:45:55.394352: Pseudo dice [0.9452, 0.9577, 0.9545, 0.9547, 0.9622, 0.9648, 0.9702, 0.9624, 0.9351, 0.9312, 0.9371, 0.9542, 0.973, 0.9738, 0.9742, 0.9427, 0.9357, 0.0, 0.9481, nan, nan]
|
1296 |
+
2024-08-30 10:45:55.395075: Epoch time: 50.55 s
|
1297 |
+
2024-08-30 10:45:55.395663: Yayy! New best EMA pseudo Dice: 0.8956
|
1298 |
+
2024-08-30 10:45:59.639190:
|
1299 |
+
2024-08-30 10:45:59.640233: Epoch 981
|
1300 |
+
2024-08-30 10:45:59.640931: Current learning rate: 0.00028
|
1301 |
+
2024-08-30 10:46:51.220230: train_loss -0.8533
|
1302 |
+
2024-08-30 10:46:51.221961: val_loss -0.8645
|
1303 |
+
2024-08-30 10:46:51.222793: Pseudo dice [0.9084, 0.9289, 0.9261, 0.9328, 0.937, 0.9523, 0.9694, 0.9728, 0.9708, 0.9676, 0.9626, 0.9544, 0.965, 0.9726, 0.9763, 0.9739, 0.9374, 0.0, 0.9653, nan, 0.0]
|
1304 |
+
2024-08-30 10:46:51.223505: Epoch time: 51.58 s
|
1305 |
+
2024-08-30 10:46:52.604110:
|
1306 |
+
2024-08-30 10:46:52.604980: Epoch 982
|
1307 |
+
2024-08-30 10:46:52.605722: Current learning rate: 0.00027
|
1308 |
+
2024-08-30 10:47:45.156091: train_loss -0.8554
|
1309 |
+
2024-08-30 10:47:45.157852: val_loss -0.8633
|
1310 |
+
2024-08-30 10:47:45.158680: Pseudo dice [0.9449, 0.9627, 0.9621, 0.9571, 0.9606, 0.9661, 0.9689, 0.9756, 0.9716, 0.967, 0.9673, 0.9673, 0.9699, 0.9623, 0.9641, 0.9725, 0.9054, 0.0, 0.9439, nan, nan]
|
1311 |
+
2024-08-30 10:47:45.159342: Epoch time: 52.55 s
|
1312 |
+
2024-08-30 10:47:46.722095:
|
1313 |
+
2024-08-30 10:47:46.723049: Epoch 983
|
1314 |
+
2024-08-30 10:47:46.723816: Current learning rate: 0.00026
|
1315 |
+
2024-08-30 10:48:40.935488: train_loss -0.8473
|
1316 |
+
2024-08-30 10:48:40.937503: val_loss -0.8671
|
1317 |
+
2024-08-30 10:48:40.938333: Pseudo dice [0.9467, 0.9546, 0.9525, 0.9532, 0.957, 0.961, 0.9544, 0.9614, 0.9701, 0.9724, 0.9739, 0.9706, 0.9625, 0.9511, 0.9437, 0.9373, 0.9353, 0.0, 0.9564, nan, nan]
|
1318 |
+
2024-08-30 10:48:40.939109: Epoch time: 54.22 s
|
1319 |
+
2024-08-30 10:48:42.372318:
|
1320 |
+
2024-08-30 10:48:42.373050: Epoch 984
|
1321 |
+
2024-08-30 10:48:42.373538: Current learning rate: 0.00024
|
1322 |
+
2024-08-30 10:49:31.493529: train_loss -0.8557
|
1323 |
+
2024-08-30 10:49:31.495258: val_loss -0.8646
|
1324 |
+
2024-08-30 10:49:31.496255: Pseudo dice [0.9378, 0.9574, 0.9616, 0.9586, 0.966, 0.9679, 0.9707, 0.9704, 0.9639, 0.9673, 0.9545, 0.9563, 0.9653, 0.953, 0.9537, 0.9435, 0.9318, nan, 0.9561, nan, nan]
|
1325 |
+
2024-08-30 10:49:31.496979: Epoch time: 49.12 s
|
1326 |
+
2024-08-30 10:49:31.497591: Yayy! New best EMA pseudo Dice: 0.9012
|
1327 |
+
2024-08-30 10:49:36.175643:
|
1328 |
+
2024-08-30 10:49:36.176605: Epoch 985
|
1329 |
+
2024-08-30 10:49:36.177341: Current learning rate: 0.00023
|
1330 |
+
2024-08-30 10:50:23.749144: train_loss -0.8586
|
1331 |
+
2024-08-30 10:50:23.751307: val_loss -0.8555
|
1332 |
+
2024-08-30 10:50:23.752171: Pseudo dice [0.8816, 0.9021, 0.9041, 0.9093, 0.9088, 0.9426, 0.9738, 0.9772, 0.9757, 0.9724, 0.9509, 0.9501, 0.9708, 0.9572, 0.9328, 0.9296, 0.8655, 0.0, 0.9413, nan, nan]
|
1333 |
+
2024-08-30 10:50:23.752908: Epoch time: 47.58 s
|
1334 |
+
2024-08-30 10:50:25.481323:
|
1335 |
+
2024-08-30 10:50:25.482186: Epoch 986
|
1336 |
+
2024-08-30 10:50:25.482784: Current learning rate: 0.00021
|
1337 |
+
2024-08-30 10:51:18.112314: train_loss -0.8506
|
1338 |
+
2024-08-30 10:51:18.114195: val_loss -0.8693
|
1339 |
+
2024-08-30 10:51:18.115175: Pseudo dice [0.9465, 0.9583, 0.96, 0.9615, 0.9657, 0.9728, 0.9767, 0.9735, 0.9725, 0.9713, 0.9715, 0.9567, 0.9247, 0.9496, 0.9447, 0.9297, 0.9349, nan, 0.9532, nan, 0.0]
|
1340 |
+
2024-08-30 10:51:18.116014: Epoch time: 52.63 s
|
1341 |
+
2024-08-30 10:51:19.735790:
|
1342 |
+
2024-08-30 10:51:19.736687: Epoch 987
|
1343 |
+
2024-08-30 10:51:19.737489: Current learning rate: 0.0002
|
1344 |
+
2024-08-30 10:52:10.007353: train_loss -0.8558
|
1345 |
+
2024-08-30 10:52:10.010361: val_loss -0.8589
|
1346 |
+
2024-08-30 10:52:10.011275: Pseudo dice [0.9473, 0.962, 0.9418, 0.9268, 0.9331, 0.9368, 0.9345, 0.9339, 0.9359, 0.9444, 0.9562, 0.9696, 0.9589, 0.9211, 0.9083, 0.8948, 0.8692, 0.0, 0.9597, nan, nan]
|
1347 |
+
2024-08-30 10:52:10.011992: Epoch time: 50.27 s
|
1348 |
+
2024-08-30 10:52:11.381682:
|
1349 |
+
2024-08-30 10:52:11.383344: Epoch 988
|
1350 |
+
2024-08-30 10:52:11.384220: Current learning rate: 0.00019
|
1351 |
+
2024-08-30 10:53:01.295525: train_loss -0.8603
|
1352 |
+
2024-08-30 10:53:01.297323: val_loss -0.8651
|
1353 |
+
2024-08-30 10:53:01.298688: Pseudo dice [0.9434, 0.9505, 0.9562, 0.9635, 0.9697, 0.9582, 0.9502, 0.9632, 0.9739, 0.975, 0.9757, 0.9708, 0.9752, 0.9574, 0.9289, 0.9098, 0.8922, 0.0, 0.94, nan, nan]
|
1354 |
+
2024-08-30 10:53:01.299486: Epoch time: 49.92 s
|
1355 |
+
2024-08-30 10:53:02.663652:
|
1356 |
+
2024-08-30 10:53:02.664519: Epoch 989
|
1357 |
+
2024-08-30 10:53:02.665169: Current learning rate: 0.00017
|
1358 |
+
2024-08-30 10:53:57.241668: train_loss -0.8637
|
1359 |
+
2024-08-30 10:53:57.243889: val_loss -0.8555
|
1360 |
+
2024-08-30 10:53:57.245089: Pseudo dice [0.9463, 0.9575, 0.9632, 0.9629, 0.9615, 0.9643, 0.9715, 0.9745, 0.9738, 0.9749, 0.9727, 0.9755, 0.9715, 0.94, 0.885, 0.8641, 0.8483, 0.0, 0.9524, nan, nan]
|
1361 |
+
2024-08-30 10:53:57.246208: Epoch time: 54.58 s
|
1362 |
+
2024-08-30 10:53:58.841140:
|
1363 |
+
2024-08-30 10:53:58.841934: Epoch 990
|
1364 |
+
2024-08-30 10:53:58.845400: Current learning rate: 0.00016
|
1365 |
+
2024-08-30 10:54:48.863036: train_loss -0.8539
|
1366 |
+
2024-08-30 10:54:48.865016: val_loss -0.8523
|
1367 |
+
2024-08-30 10:54:48.866725: Pseudo dice [0.9465, 0.9529, 0.9595, 0.9615, 0.9678, 0.969, 0.9484, 0.9364, 0.9373, 0.9403, 0.9349, 0.9211, 0.9456, 0.9531, 0.9398, 0.9109, 0.8844, 0.0, 0.96, nan, 0.0]
|
1368 |
+
2024-08-30 10:54:48.867565: Epoch time: 50.02 s
|
1369 |
+
2024-08-30 10:54:50.234685:
|
1370 |
+
2024-08-30 10:54:50.235577: Epoch 991
|
1371 |
+
2024-08-30 10:54:50.236278: Current learning rate: 0.00014
|
1372 |
+
2024-08-30 10:55:42.558640: train_loss -0.8595
|
1373 |
+
2024-08-30 10:55:42.563770: val_loss -0.873
|
1374 |
+
2024-08-30 10:55:42.565078: Pseudo dice [0.9482, 0.9565, 0.945, 0.9437, 0.9534, 0.9617, 0.972, 0.9719, 0.9712, 0.9729, 0.9746, 0.9741, 0.9722, 0.9761, 0.9763, 0.9711, 0.9205, 0.0, 0.95, nan, nan]
|
1375 |
+
2024-08-30 10:55:42.568004: Epoch time: 52.33 s
|
1376 |
+
2024-08-30 10:55:43.959184:
|
1377 |
+
2024-08-30 10:55:43.960096: Epoch 992
|
1378 |
+
2024-08-30 10:55:43.960778: Current learning rate: 0.00013
|
1379 |
+
2024-08-30 10:56:34.464707: train_loss -0.8517
|
1380 |
+
2024-08-30 10:56:34.466847: val_loss -0.865
|
1381 |
+
2024-08-30 10:56:34.468580: Pseudo dice [0.934, 0.9282, 0.9167, 0.9142, 0.9235, 0.9438, 0.9612, 0.9664, 0.9682, 0.9635, 0.9569, 0.9621, 0.9717, 0.9743, 0.9725, 0.9734, 0.9392, 0.0, 0.9582, nan, nan]
|
1382 |
+
2024-08-30 10:56:34.469472: Epoch time: 50.51 s
|
1383 |
+
2024-08-30 10:56:35.844016:
|
1384 |
+
2024-08-30 10:56:35.844839: Epoch 993
|
1385 |
+
2024-08-30 10:56:35.845395: Current learning rate: 0.00011
|
1386 |
+
2024-08-30 10:57:27.177797: train_loss -0.8557
|
1387 |
+
2024-08-30 10:57:27.179776: val_loss -0.8512
|
1388 |
+
2024-08-30 10:57:27.180766: Pseudo dice [0.9189, 0.9003, 0.8902, 0.8881, 0.8969, 0.9066, 0.9233, 0.9348, 0.9333, 0.9261, 0.9133, 0.9102, 0.9065, 0.9142, 0.9102, 0.8891, 0.9107, nan, 0.9603, nan, nan]
|
1389 |
+
2024-08-30 10:57:27.181525: Epoch time: 51.34 s
|
1390 |
+
2024-08-30 10:57:28.516779:
|
1391 |
+
2024-08-30 10:57:28.517621: Epoch 994
|
1392 |
+
2024-08-30 10:57:28.518277: Current learning rate: 0.0001
|
1393 |
+
2024-08-30 10:58:20.734412: train_loss -0.8588
|
1394 |
+
2024-08-30 10:58:20.736292: val_loss -0.8618
|
1395 |
+
2024-08-30 10:58:20.737302: Pseudo dice [0.9448, 0.9628, 0.9664, 0.9663, 0.9707, 0.9715, 0.9521, 0.933, 0.9258, 0.9169, 0.9189, 0.9543, 0.9757, 0.9759, 0.9676, 0.9625, 0.9047, 0.0, 0.9469, nan, nan]
|
1396 |
+
2024-08-30 10:58:20.738145: Epoch time: 52.22 s
|
1397 |
+
2024-08-30 10:58:22.078560:
|
1398 |
+
2024-08-30 10:58:22.079548: Epoch 995
|
1399 |
+
2024-08-30 10:58:22.080193: Current learning rate: 8e-05
|
1400 |
+
2024-08-30 10:59:15.489184: train_loss -0.8588
|
1401 |
+
2024-08-30 10:59:15.491495: val_loss -0.8563
|
1402 |
+
2024-08-30 10:59:15.492667: Pseudo dice [0.9419, 0.9519, 0.9545, 0.9647, 0.9683, 0.9687, 0.9691, 0.967, 0.9676, 0.9707, 0.9644, 0.9673, 0.9713, 0.9637, 0.9456, 0.9309, 0.889, 0.0, 0.9452, nan, nan]
|
1403 |
+
2024-08-30 10:59:15.496259: Epoch time: 53.41 s
|
1404 |
+
2024-08-30 10:59:16.895959:
|
1405 |
+
2024-08-30 10:59:16.896968: Epoch 996
|
1406 |
+
2024-08-30 10:59:16.897671: Current learning rate: 7e-05
|
1407 |
+
2024-08-30 11:00:08.924720: train_loss -0.8567
|
1408 |
+
2024-08-30 11:00:08.926616: val_loss -0.8535
|
1409 |
+
2024-08-30 11:00:08.927627: Pseudo dice [0.9473, 0.9595, 0.9598, 0.9639, 0.96, 0.963, 0.968, 0.9631, 0.9458, 0.9417, 0.943, 0.9533, 0.8951, 0.88, 0.8663, 0.8353, 0.7941, 0.0, 0.9559, nan, 0.0]
|
1410 |
+
2024-08-30 11:00:08.928442: Epoch time: 52.03 s
|
1411 |
+
2024-08-30 11:00:10.462140:
|
1412 |
+
2024-08-30 11:00:10.463281: Epoch 997
|
1413 |
+
2024-08-30 11:00:10.464050: Current learning rate: 5e-05
|
1414 |
+
2024-08-30 11:01:02.995422: train_loss -0.8582
|
1415 |
+
2024-08-30 11:01:02.997510: val_loss -0.8605
|
1416 |
+
2024-08-30 11:01:02.998402: Pseudo dice [0.9504, 0.9636, 0.9663, 0.9672, 0.9691, 0.9594, 0.9465, 0.9246, 0.8972, 0.898, 0.9106, 0.9234, 0.9464, 0.9293, 0.914, 0.9057, 0.906, 0.0, 0.9568, nan, nan]
|
1417 |
+
2024-08-30 11:01:02.999275: Epoch time: 52.54 s
|
1418 |
+
2024-08-30 11:01:04.470241:
|
1419 |
+
2024-08-30 11:01:04.471272: Epoch 998
|
1420 |
+
2024-08-30 11:01:04.471981: Current learning rate: 4e-05
|
1421 |
+
2024-08-30 11:01:56.748690: train_loss -0.8576
|
1422 |
+
2024-08-30 11:01:56.750474: val_loss -0.8595
|
1423 |
+
2024-08-30 11:01:56.751365: Pseudo dice [0.942, 0.9497, 0.9356, 0.9318, 0.944, 0.9526, 0.9614, 0.9519, 0.9392, 0.9468, 0.9599, 0.9453, 0.9154, 0.9521, 0.956, 0.9309, 0.8729, 0.0, 0.954, nan, 0.0]
|
1424 |
+
2024-08-30 11:01:56.752067: Epoch time: 52.28 s
|
1425 |
+
2024-08-30 11:01:58.278047:
|
1426 |
+
2024-08-30 11:01:58.278900: Epoch 999
|
1427 |
+
2024-08-30 11:01:58.279588: Current learning rate: 2e-05
|
1428 |
+
2024-08-30 11:02:48.987234: train_loss -0.8578
|
1429 |
+
2024-08-30 11:02:48.990110: val_loss -0.8588
|
1430 |
+
2024-08-30 11:02:48.991422: Pseudo dice [0.9047, 0.8957, 0.8757, 0.8963, 0.9056, 0.9127, 0.9236, 0.9078, 0.9045, 0.9242, 0.9359, 0.9477, 0.9653, 0.9733, 0.9759, 0.9586, 0.9448, 0.0, 0.9615, nan, nan]
|
1431 |
+
2024-08-30 11:02:48.992284: Epoch time: 50.71 s
|
1432 |
+
2024-08-30 11:02:53.416574: Training done.
|
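A quick note on reading the log tail above: each epoch block reports the current learning rate, the train/val loss (negative because the compound loss includes a soft Dice term), the per-label "pseudo dice" measured on the online validation patches, and the epoch time; "nan" entries typically mean the corresponding label did not occur in the sampled patches, and the "Yayy! New best EMA pseudo Dice" lines flag a new best exponential moving average of that score. The sketch below is one way to pull these values into Python for plotting. It is a minimal sketch only: it assumes the standard log-line format shown above, and the file path is a placeholder.

import re

def parse_nnunet_log(path):
    """Collect per-epoch metrics from an nnU-Net training log (format as above)."""
    epochs, current = [], None
    timestamp = re.compile(r"^\d{4}-\d{2}-\d{2} [\d:.]+:\s*")
    with open(path) as f:
        for line in f:
            msg = timestamp.sub("", line).strip()
            if re.fullmatch(r"Epoch \d+", msg):
                current = {"epoch": int(msg.split()[1])}
                epochs.append(current)
            elif current is None:
                continue
            elif msg.startswith("Current learning rate:"):
                current["lr"] = float(msg.split(":")[1])
            elif msg.startswith("train_loss"):
                current["train_loss"] = float(msg.split()[-1])
            elif msg.startswith("val_loss"):
                current["val_loss"] = float(msg.split()[-1])
            elif msg.startswith("Epoch time:"):
                current["epoch_time_s"] = float(msg.split()[-2])
            elif msg.startswith("Pseudo dice"):
                values = msg[msg.index("[") + 1 : msg.rindex("]")].split(",")
                # "nan" entries (label absent from the sampled patches) become None
                current["pseudo_dice"] = [
                    None if v.strip() == "nan" else float(v) for v in values
                ]
    return epochs

metrics = parse_nnunet_log("fold_all/logs/training_log.txt")  # placeholder path
print(len(metrics), "epochs parsed; last entry:", metrics[-1])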
nnUNetTrainer__nnUNetResEncUNetMPlans__3d_fullres/plans.json
ADDED
@@ -0,0 +1,356 @@
1 |
+
{
|
2 |
+
"dataset_name": "Dataset601_VertebralBodies",
|
3 |
+
"plans_name": "nnUNetResEncUNetMPlans",
|
4 |
+
"original_median_spacing_after_transp": [
|
5 |
+
1.5,
|
6 |
+
1.5,
|
7 |
+
1.5
|
8 |
+
],
|
9 |
+
"original_median_shape_after_transp": [
|
10 |
+
239,
|
11 |
+
252,
|
12 |
+
252
|
13 |
+
],
|
14 |
+
"image_reader_writer": "SimpleITKIO",
|
15 |
+
"transpose_forward": [
|
16 |
+
0,
|
17 |
+
1,
|
18 |
+
2
|
19 |
+
],
|
20 |
+
"transpose_backward": [
|
21 |
+
0,
|
22 |
+
1,
|
23 |
+
2
|
24 |
+
],
|
25 |
+
"configurations": {
|
26 |
+
"2d": {
|
27 |
+
"data_identifier": "nnUNetPlans_2d",
|
28 |
+
"preprocessor_name": "DefaultPreprocessor",
|
29 |
+
"batch_size": 48,
|
30 |
+
"patch_size": [
|
31 |
+
256,
|
32 |
+
256
|
33 |
+
],
|
34 |
+
"median_image_size_in_voxels": [
|
35 |
+
240.0,
|
36 |
+
240.0
|
37 |
+
],
|
38 |
+
"spacing": [
|
39 |
+
1.5,
|
40 |
+
1.5
|
41 |
+
],
|
42 |
+
"normalization_schemes": [
|
43 |
+
"CTNormalization"
|
44 |
+
],
|
45 |
+
"use_mask_for_norm": [
|
46 |
+
false
|
47 |
+
],
|
48 |
+
"resampling_fn_data": "resample_data_or_seg_to_shape",
|
49 |
+
"resampling_fn_seg": "resample_data_or_seg_to_shape",
|
50 |
+
"resampling_fn_data_kwargs": {
|
51 |
+
"is_seg": false,
|
52 |
+
"order": 3,
|
53 |
+
"order_z": 0,
|
54 |
+
"force_separate_z": null
|
55 |
+
},
|
56 |
+
"resampling_fn_seg_kwargs": {
|
57 |
+
"is_seg": true,
|
58 |
+
"order": 1,
|
59 |
+
"order_z": 0,
|
60 |
+
"force_separate_z": null
|
61 |
+
},
|
62 |
+
"resampling_fn_probabilities": "resample_data_or_seg_to_shape",
|
63 |
+
"resampling_fn_probabilities_kwargs": {
|
64 |
+
"is_seg": false,
|
65 |
+
"order": 1,
|
66 |
+
"order_z": 0,
|
67 |
+
"force_separate_z": null
|
68 |
+
},
|
69 |
+
"architecture": {
|
70 |
+
"network_class_name": "dynamic_network_architectures.architectures.unet.ResidualEncoderUNet",
|
71 |
+
"arch_kwargs": {
|
72 |
+
"n_stages": 7,
|
73 |
+
"features_per_stage": [
|
74 |
+
32,
|
75 |
+
64,
|
76 |
+
128,
|
77 |
+
256,
|
78 |
+
512,
|
79 |
+
512,
|
80 |
+
512
|
81 |
+
],
|
82 |
+
"conv_op": "torch.nn.modules.conv.Conv2d",
|
83 |
+
"kernel_sizes": [
|
84 |
+
[
|
85 |
+
3,
|
86 |
+
3
|
87 |
+
],
|
88 |
+
[
|
89 |
+
3,
|
90 |
+
3
|
91 |
+
],
|
92 |
+
[
|
93 |
+
3,
|
94 |
+
3
|
95 |
+
],
|
96 |
+
[
|
97 |
+
3,
|
98 |
+
3
|
99 |
+
],
|
100 |
+
[
|
101 |
+
3,
|
102 |
+
3
|
103 |
+
],
|
104 |
+
[
|
105 |
+
3,
|
106 |
+
3
|
107 |
+
],
|
108 |
+
[
|
109 |
+
3,
|
110 |
+
3
|
111 |
+
]
|
112 |
+
],
|
113 |
+
"strides": [
|
114 |
+
[
|
115 |
+
1,
|
116 |
+
1
|
117 |
+
],
|
118 |
+
[
|
119 |
+
2,
|
120 |
+
2
|
121 |
+
],
|
122 |
+
[
|
123 |
+
2,
|
124 |
+
2
|
125 |
+
],
|
126 |
+
[
|
127 |
+
2,
|
128 |
+
2
|
129 |
+
],
|
130 |
+
[
|
131 |
+
2,
|
132 |
+
2
|
133 |
+
],
|
134 |
+
[
|
135 |
+
2,
|
136 |
+
2
|
137 |
+
],
|
138 |
+
[
|
139 |
+
2,
|
140 |
+
2
|
141 |
+
]
|
142 |
+
],
|
143 |
+
"n_blocks_per_stage": [
|
144 |
+
1,
|
145 |
+
3,
|
146 |
+
4,
|
147 |
+
6,
|
148 |
+
6,
|
149 |
+
6,
|
150 |
+
6
|
151 |
+
],
|
152 |
+
"n_conv_per_stage_decoder": [
|
153 |
+
1,
|
154 |
+
1,
|
155 |
+
1,
|
156 |
+
1,
|
157 |
+
1,
|
158 |
+
1
|
159 |
+
],
|
160 |
+
"conv_bias": true,
|
161 |
+
"norm_op": "torch.nn.modules.instancenorm.InstanceNorm2d",
|
162 |
+
"norm_op_kwargs": {
|
163 |
+
"eps": 1e-05,
|
164 |
+
"affine": true
|
165 |
+
},
|
166 |
+
"dropout_op": null,
|
167 |
+
"dropout_op_kwargs": null,
|
168 |
+
"nonlin": "torch.nn.LeakyReLU",
|
169 |
+
"nonlin_kwargs": {
|
170 |
+
"inplace": true
|
171 |
+
}
|
172 |
+
},
|
173 |
+
"_kw_requires_import": [
|
174 |
+
"conv_op",
|
175 |
+
"norm_op",
|
176 |
+
"dropout_op",
|
177 |
+
"nonlin"
|
178 |
+
]
|
179 |
+
},
|
180 |
+
"batch_dice": true
|
181 |
+
},
|
182 |
+
"3d_fullres": {
|
183 |
+
"data_identifier": "nnUNetPlans_3d_fullres",
|
184 |
+
"preprocessor_name": "DefaultPreprocessor",
|
185 |
+
"batch_size": 2,
|
186 |
+
"patch_size": [
|
187 |
+
128,
|
188 |
+
128,
|
189 |
+
128
|
190 |
+
],
|
191 |
+
"median_image_size_in_voxels": [
|
192 |
+
235.0,
|
193 |
+
240.0,
|
194 |
+
240.0
|
195 |
+
],
|
196 |
+
"spacing": [
|
197 |
+
1.5,
|
198 |
+
1.5,
|
199 |
+
1.5
|
200 |
+
],
|
201 |
+
"normalization_schemes": [
|
202 |
+
"CTNormalization"
|
203 |
+
],
|
204 |
+
"use_mask_for_norm": [
|
205 |
+
false
|
206 |
+
],
|
207 |
+
"resampling_fn_data": "resample_data_or_seg_to_shape",
|
208 |
+
"resampling_fn_seg": "resample_data_or_seg_to_shape",
|
209 |
+
"resampling_fn_data_kwargs": {
|
210 |
+
"is_seg": false,
|
211 |
+
"order": 3,
|
212 |
+
"order_z": 0,
|
213 |
+
"force_separate_z": null
|
214 |
+
},
|
215 |
+
"resampling_fn_seg_kwargs": {
|
216 |
+
"is_seg": true,
|
217 |
+
"order": 1,
|
218 |
+
"order_z": 0,
|
219 |
+
"force_separate_z": null
|
220 |
+
},
|
221 |
+
"resampling_fn_probabilities": "resample_data_or_seg_to_shape",
|
222 |
+
"resampling_fn_probabilities_kwargs": {
|
223 |
+
"is_seg": false,
|
224 |
+
"order": 1,
|
225 |
+
"order_z": 0,
|
226 |
+
"force_separate_z": null
|
227 |
+
},
|
228 |
+
"architecture": {
|
229 |
+
"network_class_name": "dynamic_network_architectures.architectures.unet.ResidualEncoderUNet",
|
230 |
+
"arch_kwargs": {
|
231 |
+
"n_stages": 6,
|
232 |
+
"features_per_stage": [
|
233 |
+
32,
|
234 |
+
64,
|
235 |
+
128,
|
236 |
+
256,
|
237 |
+
320,
|
238 |
+
320
|
239 |
+
],
|
240 |
+
"conv_op": "torch.nn.modules.conv.Conv3d",
|
241 |
+
"kernel_sizes": [
|
242 |
+
[
|
243 |
+
3,
|
244 |
+
3,
|
245 |
+
3
|
246 |
+
],
|
247 |
+
[
|
248 |
+
3,
|
249 |
+
3,
|
250 |
+
3
|
251 |
+
],
|
252 |
+
[
|
253 |
+
3,
|
254 |
+
3,
|
255 |
+
3
|
256 |
+
],
|
257 |
+
[
|
258 |
+
3,
|
259 |
+
3,
|
260 |
+
3
|
261 |
+
],
|
262 |
+
[
|
263 |
+
3,
|
264 |
+
3,
|
265 |
+
3
|
266 |
+
],
|
267 |
+
[
|
268 |
+
3,
|
269 |
+
3,
|
270 |
+
3
|
271 |
+
]
|
272 |
+
],
|
273 |
+
"strides": [
|
274 |
+
[
|
275 |
+
1,
|
276 |
+
1,
|
277 |
+
1
|
278 |
+
],
|
279 |
+
[
|
280 |
+
2,
|
281 |
+
2,
|
282 |
+
2
|
283 |
+
],
|
284 |
+
[
|
285 |
+
2,
|
286 |
+
2,
|
287 |
+
2
|
288 |
+
],
|
289 |
+
[
|
290 |
+
2,
|
291 |
+
2,
|
292 |
+
2
|
293 |
+
],
|
294 |
+
[
|
295 |
+
2,
|
296 |
+
2,
|
297 |
+
2
|
298 |
+
],
|
299 |
+
[
|
300 |
+
2,
|
301 |
+
2,
|
302 |
+
2
|
303 |
+
]
|
304 |
+
],
|
305 |
+
"n_blocks_per_stage": [
|
306 |
+
1,
|
307 |
+
3,
|
308 |
+
4,
|
309 |
+
6,
|
310 |
+
6,
|
311 |
+
6
|
312 |
+
],
|
313 |
+
"n_conv_per_stage_decoder": [
|
314 |
+
1,
|
315 |
+
1,
|
316 |
+
1,
|
317 |
+
1,
|
318 |
+
1
|
319 |
+
],
|
320 |
+
"conv_bias": true,
|
321 |
+
"norm_op": "torch.nn.modules.instancenorm.InstanceNorm3d",
|
322 |
+
"norm_op_kwargs": {
|
323 |
+
"eps": 1e-05,
|
324 |
+
"affine": true
|
325 |
+
},
|
326 |
+
"dropout_op": null,
|
327 |
+
"dropout_op_kwargs": null,
|
328 |
+
"nonlin": "torch.nn.LeakyReLU",
|
329 |
+
"nonlin_kwargs": {
|
330 |
+
"inplace": true
|
331 |
+
}
|
332 |
+
},
|
333 |
+
"_kw_requires_import": [
|
334 |
+
"conv_op",
|
335 |
+
"norm_op",
|
336 |
+
"dropout_op",
|
337 |
+
"nonlin"
|
338 |
+
]
|
339 |
+
},
|
340 |
+
"batch_dice": false
|
341 |
+
}
|
342 |
+
},
|
343 |
+
"experiment_planner_used": "nnUNetPlannerResEncM",
|
344 |
+
"label_manager": "LabelManager",
|
345 |
+
"foreground_intensity_properties_per_channel": {
|
346 |
+
"0": {
|
347 |
+
"max": 10076.0,
|
348 |
+
"mean": 251.18613451837237,
|
349 |
+
"median": 216.0,
|
350 |
+
"min": -2048.0,
|
351 |
+
"percentile_00_5": -158.0,
|
352 |
+
"percentile_99_5": 1100.0,
|
353 |
+
"std": 205.1041738973246
|
354 |
+
}
|
355 |
+
}
|
356 |
+
}
|
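plans.json bundles both a 2d and the 3d_fullres configuration; the fold uploaded here was trained with 3d_fullres (ResidualEncoderUNet, 6 encoder stages, 128-voxel cubic patches at 1.5 mm isotropic spacing, CT normalization). A minimal sketch, stdlib only and with the path assumed relative to the repository root, for pulling out the values that matter when preprocessing new CT volumes consistently:

import json

# Placeholder path: plans.json as uploaded in this commit.
with open("nnUNetTrainer__nnUNetResEncUNetMPlans__3d_fullres/plans.json") as f:
    plans = json.load(f)

cfg = plans["configurations"]["3d_fullres"]
print("patch size:", cfg["patch_size"])                        # [128, 128, 128]
print("target spacing (mm):", cfg["spacing"])                  # [1.5, 1.5, 1.5]
print("normalization:", cfg["normalization_schemes"])          # ['CTNormalization']
print("encoder stages:", cfg["architecture"]["arch_kwargs"]["n_stages"])  # 6

# As far as the nnU-Net defaults go, CTNormalization clips intensities to the
# recorded foreground 0.5/99.5 percentiles and then z-scores with the foreground
# mean/std, so these are the values a re-implementation would need:
props = plans["foreground_intensity_properties_per_channel"]["0"]
print("clip HU to:", props["percentile_00_5"], "to", props["percentile_99_5"])
print("then (x - {:.1f}) / {:.1f}".format(props["mean"], props["std"]))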