{
  "best_metric": 0.5360824742268041,
  "best_model_checkpoint": "videomae-base-finetuned-lift-data/checkpoint-140",
  "epoch": 7.102564102564102,
  "eval_steps": 500,
  "global_step": 156,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0641025641025641,
      "grad_norm": 5.224398136138916,
      "learning_rate": 3.125e-05,
      "loss": 1.6104,
      "step": 10
    },
    {
      "epoch": 0.1282051282051282,
      "grad_norm": 6.130595684051514,
      "learning_rate": 4.8571428571428576e-05,
      "loss": 1.6135,
      "step": 20
    },
    {
      "epoch": 0.1282051282051282,
      "eval_accuracy": 0.22974963181148747,
      "eval_loss": 1.653665542602539,
      "eval_runtime": 2113.0859,
      "eval_samples_per_second": 0.321,
      "eval_steps_per_second": 0.04,
      "step": 20
    },
    {
      "epoch": 1.064102564102564,
      "grad_norm": 4.675447940826416,
      "learning_rate": 4.5e-05,
      "loss": 1.4479,
      "step": 30
    },
    {
      "epoch": 1.1282051282051282,
      "grad_norm": 9.729595184326172,
      "learning_rate": 4.1428571428571437e-05,
      "loss": 1.3333,
      "step": 40
    },
    {
      "epoch": 1.1282051282051282,
      "eval_accuracy": 0.28718703976435933,
      "eval_loss": 1.5571914911270142,
      "eval_runtime": 2153.3219,
      "eval_samples_per_second": 0.315,
      "eval_steps_per_second": 0.039,
      "step": 40
    },
    {
      "epoch": 2.064102564102564,
      "grad_norm": 9.443714141845703,
      "learning_rate": 3.785714285714286e-05,
      "loss": 1.2919,
      "step": 50
    },
    {
      "epoch": 2.128205128205128,
      "grad_norm": 6.667464733123779,
      "learning_rate": 3.428571428571429e-05,
      "loss": 1.4219,
      "step": 60
    },
    {
      "epoch": 2.128205128205128,
      "eval_accuracy": 0.30338733431516934,
      "eval_loss": 1.446468472480774,
      "eval_runtime": 2046.258,
      "eval_samples_per_second": 0.332,
      "eval_steps_per_second": 0.042,
      "step": 60
    },
    {
      "epoch": 3.064102564102564,
      "grad_norm": 8.536487579345703,
      "learning_rate": 3.071428571428572e-05,
      "loss": 1.2711,
      "step": 70
    },
    {
      "epoch": 3.128205128205128,
      "grad_norm": 9.417764663696289,
      "learning_rate": 2.714285714285714e-05,
      "loss": 1.1874,
      "step": 80
    },
    {
      "epoch": 3.128205128205128,
      "eval_accuracy": 0.30633284241531666,
      "eval_loss": 1.407287836074829,
      "eval_runtime": 2182.1497,
      "eval_samples_per_second": 0.311,
      "eval_steps_per_second": 0.039,
      "step": 80
    },
    {
      "epoch": 4.064102564102564,
      "grad_norm": 3.6362509727478027,
      "learning_rate": 2.357142857142857e-05,
      "loss": 1.1012,
      "step": 90
    },
    {
      "epoch": 4.128205128205128,
      "grad_norm": 5.921740531921387,
      "learning_rate": 2e-05,
      "loss": 1.1121,
      "step": 100
    },
    {
      "epoch": 4.128205128205128,
      "eval_accuracy": 0.29013254786450665,
      "eval_loss": 1.324741244316101,
      "eval_runtime": 2052.0114,
      "eval_samples_per_second": 0.331,
      "eval_steps_per_second": 0.041,
      "step": 100
    },
    {
      "epoch": 5.064102564102564,
      "grad_norm": 9.157364845275879,
      "learning_rate": 1.642857142857143e-05,
      "loss": 1.1462,
      "step": 110
    },
    {
      "epoch": 5.128205128205128,
      "grad_norm": 5.3379645347595215,
      "learning_rate": 1.2857142857142857e-05,
      "loss": 0.938,
      "step": 120
    },
    {
      "epoch": 5.128205128205128,
      "eval_accuracy": 0.32547864506627394,
      "eval_loss": 1.2014753818511963,
      "eval_runtime": 2141.2727,
      "eval_samples_per_second": 0.317,
      "eval_steps_per_second": 0.04,
      "step": 120
    },
    {
      "epoch": 6.064102564102564,
      "grad_norm": 7.765174388885498,
      "learning_rate": 9.285714285714286e-06,
      "loss": 0.9989,
      "step": 130
    },
    {
      "epoch": 6.128205128205128,
      "grad_norm": 15.063241958618164,
      "learning_rate": 5.7142857142857145e-06,
      "loss": 1.1317,
      "step": 140
    },
    {
      "epoch": 6.128205128205128,
      "eval_accuracy": 0.5360824742268041,
      "eval_loss": 1.2458850145339966,
      "eval_runtime": 2040.1861,
      "eval_samples_per_second": 0.333,
      "eval_steps_per_second": 0.042,
      "step": 140
    },
    {
      "epoch": 7.064102564102564,
      "grad_norm": 6.829884052276611,
      "learning_rate": 2.142857142857143e-06,
      "loss": 1.0411,
      "step": 150
    },
    {
      "epoch": 7.102564102564102,
      "eval_accuracy": 0.4683357879234168,
      "eval_loss": 1.1582751274108887,
      "eval_runtime": 2040.2319,
      "eval_samples_per_second": 0.333,
      "eval_steps_per_second": 0.042,
      "step": 156
    },
    {
      "epoch": 7.102564102564102,
      "step": 156,
      "total_flos": 1.528962156685394e+18,
      "train_loss": 1.2294957026457176,
      "train_runtime": 20749.4564,
      "train_samples_per_second": 0.06,
      "train_steps_per_second": 0.008
    },
    {
      "epoch": 7.102564102564102,
      "eval_accuracy": 0.6066790352504638,
      "eval_loss": 1.2405734062194824,
      "eval_runtime": 1591.1567,
      "eval_samples_per_second": 0.339,
      "eval_steps_per_second": 0.043,
      "step": 156
    },
    {
      "epoch": 7.102564102564102,
      "eval_accuracy": 0.6066790352504638,
      "eval_loss": 1.2410632371902466,
      "eval_runtime": 1594.8747,
      "eval_samples_per_second": 0.338,
      "eval_steps_per_second": 0.043,
      "step": 156
    }
  ],
  "logging_steps": 10,
  "max_steps": 156,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 9223372036854775807,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.528962156685394e+18,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}