Started at: 09:20:17
nb-bert-base, 0.001, 512 ({'_name_or_path': '/disk4/folder1/working/checkpoints/huggingface/native_pytorch/step4_8/', 'attention_probs_dropout_prob': 0.1, 'directionality': 'bidi', 'gradient_checkpointing': False, 'hidden_act': 'gelu', 'hidden_dropout_prob': 0.1, 'hidden_size': 768, 'initializer_range': 0.02, 'intermediate_size': 3072, 'layer_norm_eps': 1e-12, 'max_position_embeddings': 512, 'model_type': 'bert', 'num_attention_heads': 12, 'num_hidden_layers': 12, 'pad_token_id': 0, 'pooler_fc_size': 768, 'pooler_num_attention_heads': 12, 'pooler_num_fc_layers': 3, 'pooler_size_per_head': 128, 'pooler_type': 'first_token_transform', 'position_embedding_type': 'absolute', 'type_vocab_size': 2, 'vocab_size': 119547, '_commit_hash': '82b194c0b3ea1fcad65f1eceee04adb26f9f71ac'}, {})
Epoch: 0
Training loss: 0.6738183131584754 - MAE: 0.671559791949798
Validation loss : 0.3496964991092682 - MAE: 0.481417017739273
Epoch: 1
Training loss: 0.2604064620458163 - MAE: 0.3969383760031138
Validation loss : 0.22093867957592012 - MAE: 0.35313969813939866
Epoch: 2
Training loss: 0.20208562452059525 - MAE: 0.3434821083990275
Validation loss : 0.1883617550134659 - MAE: 0.3379014120649767
Epoch: 3
Training loss: 0.1782165696987739 - MAE: 0.31999390375888026
Validation loss : 0.16966865658760072 - MAE: 0.31801389648176803
Epoch: 4
Training loss: 0.16990482119413522 - MAE: 0.31338787739566665
Validation loss : 0.1638330489397049 - MAE: 0.3106934884794657
Epoch: 5
Training loss: 0.1681439051261315 - MAE: 0.31001855013012786
Validation loss : 0.16361375153064728 - MAE: 0.3107548141570412
Epoch: 6
Training loss: 0.16585055337502405 - MAE: 0.3068083279056544
Validation loss : 0.15926897823810576 - MAE: 0.30532344804199296
Epoch: 7
Training loss: 0.1640594372382531 - MAE: 0.30565590681538174
Validation loss : 0.15682196021080017 - MAE: 0.3021364879085161
Epoch: 8
Training loss: 0.15877233560268694 - MAE: 0.30019124717441487
Validation loss : 0.15556727051734925 - MAE: 0.3009043710813451
Epoch: 9
Training loss: 0.157441843014497 - MAE: 0.29782973353451203
Validation loss : 0.15489959716796875 - MAE: 0.3003483373637247
Epoch: 10
Training loss: 0.1569079951598094 - MAE: 0.29850384416372977
Validation loss : 0.15268584191799164 - MAE: 0.2973265470132604
Epoch: 11
Training loss: 0.15540967537806585 - MAE: 0.29631087713396775
Validation loss : 0.1516599327325821 - MAE: 0.29605310392714956
Epoch: 12
Training loss: 0.15601101173804358 - MAE: 0.29642744149228534
Validation loss : 0.1511812061071396 - MAE: 0.2954877034441389
Epoch: 13
Training loss: 0.15504613060217637 - MAE: 0.295577142279247
Validation loss : 0.15018589794635773 - MAE: 0.29435485554038643
Epoch: 14
Training loss: 0.15459523980434126 - MAE: 0.2962148725041275
Validation loss : 0.15019148588180542 - MAE: 0.2946234458034584
Epoch: 15
Training loss: 0.15409607612169707 - MAE: 0.294694945039677
Validation loss : 0.1487586498260498 - MAE: 0.2924272191814044
Epoch: 16
Training loss: 0.1530148948614414 - MAE: 0.29393082466301157
Validation loss : 0.14888427257537842 - MAE: 0.29260277990049094
Epoch: 17
Training loss: 0.15252518138060203 - MAE: 0.294490830454799
Validation loss : 0.14751896858215333 - MAE: 0.2906014123844735
Epoch: 18
Training loss: 0.1529261859563681 - MAE: 0.2943059218531379
Validation loss : 0.14786641597747802 - MAE: 0.291260706520114
Epoch: 19
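
For context, a log in the format above (per-epoch training/validation loss and MAE for a BERT regression fine-tune) could be produced by a loop along the following lines. This is only a minimal sketch, not the original training script: the public model id NbAiLab/nb-bert-base, the linear regression head, and the train_loader / val_loader objects are assumptions; only the learning rate (0.001) and the print format are taken from the log header.

import torch
from torch import nn
from transformers import AutoModel

# Assumption: the "nb-bert-base" in the header refers to the public checkpoint;
# the log itself loads a local path instead.
encoder = AutoModel.from_pretrained("NbAiLab/nb-bert-base")
head = nn.Linear(encoder.config.hidden_size, 1)  # hypothetical regression head

optimizer = torch.optim.AdamW(
    list(encoder.parameters()) + list(head.parameters()), lr=0.001
)
loss_fn = nn.MSELoss()   # training objective (assumption)
mae_fn = nn.L1Loss()     # the "MAE" reported in the log

def run_epoch(loader, train):
    """Run one pass over `loader`, return mean loss and mean MAE."""
    encoder.train(train)
    head.train(train)
    total_loss, total_mae, n_batches = 0.0, 0.0, 0
    for batch in loader:  # batches with input_ids, attention_mask, target
        with torch.set_grad_enabled(train):
            out = encoder(input_ids=batch["input_ids"],
                          attention_mask=batch["attention_mask"])
            pred = head(out.last_hidden_state[:, 0]).squeeze(-1)  # [CLS] vector
            loss = loss_fn(pred, batch["target"])
            if train:
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
        total_loss += loss.item()
        total_mae += mae_fn(pred, batch["target"]).item()
        n_batches += 1
    return total_loss / n_batches, total_mae / n_batches

for epoch in range(20):
    print(f"Epoch: {epoch}")
    tr_loss, tr_mae = run_epoch(train_loader, train=True)    # train_loader assumed
    print(f"Training loss: {tr_loss} - MAE: {tr_mae}")
    va_loss, va_mae = run_epoch(val_loader, train=False)     # val_loader assumed
    print(f"Validation loss : {va_loss} - MAE: {va_mae}")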