{
  "best_metric": 0.8571428571428571,
  "best_model_checkpoint": "timesformer-base-finetuned-k400-finetuned-ucf101-subset/checkpoint-19",
  "epoch": 3.2083333333333335,
  "eval_steps": 500,
  "global_step": 72,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.1388888888888889,
      "grad_norm": 11.173284530639648,
      "learning_rate": 4.8437500000000005e-05,
      "loss": 0.8729,
      "step": 10
    },
    {
      "epoch": 0.2638888888888889,
      "eval_accuracy": 0.8571428571428571,
      "eval_loss": 0.41932326555252075,
      "eval_runtime": 147.7861,
      "eval_samples_per_second": 2.558,
      "eval_steps_per_second": 0.325,
      "step": 19
    },
    {
      "epoch": 1.0138888888888888,
      "grad_norm": 4.447381019592285,
      "learning_rate": 4.0625000000000005e-05,
      "loss": 0.4879,
      "step": 20
    },
    {
      "epoch": 1.1527777777777777,
      "grad_norm": 3.373873233795166,
      "learning_rate": 3.2812500000000005e-05,
      "loss": 0.5106,
      "step": 30
    },
    {
      "epoch": 1.2638888888888888,
      "eval_accuracy": 0.8571428571428571,
      "eval_loss": 0.45790210366249084,
      "eval_runtime": 146.1696,
      "eval_samples_per_second": 2.586,
      "eval_steps_per_second": 0.328,
      "step": 38
    },
    {
      "epoch": 2.0277777777777777,
      "grad_norm": 0.6665381193161011,
      "learning_rate": 2.5e-05,
      "loss": 0.2482,
      "step": 40
    },
    {
      "epoch": 2.1666666666666665,
      "grad_norm": 8.423311233520508,
      "learning_rate": 1.71875e-05,
      "loss": 0.4699,
      "step": 50
    },
    {
      "epoch": 2.263888888888889,
      "eval_accuracy": 0.8571428571428571,
      "eval_loss": 0.4250260889530182,
      "eval_runtime": 146.1132,
      "eval_samples_per_second": 2.587,
      "eval_steps_per_second": 0.329,
      "step": 57
    },
    {
      "epoch": 3.0416666666666665,
      "grad_norm": 0.42366838455200195,
      "learning_rate": 9.375000000000001e-06,
      "loss": 0.0982,
      "step": 60
    },
    {
      "epoch": 3.1805555555555554,
      "grad_norm": 4.436389446258545,
      "learning_rate": 1.5625e-06,
      "loss": 0.109,
      "step": 70
    },
    {
      "epoch": 3.2083333333333335,
      "eval_accuracy": 0.8571428571428571,
      "eval_loss": 0.5306517481803894,
      "eval_runtime": 145.944,
      "eval_samples_per_second": 2.59,
      "eval_steps_per_second": 0.329,
      "step": 72
    },
    {
      "epoch": 3.2083333333333335,
      "step": 72,
      "total_flos": 4.967755240837939e+17,
      "train_loss": 0.3919994781414668,
      "train_runtime": 877.0447,
      "train_samples_per_second": 0.657,
      "train_steps_per_second": 0.082
    },
    {
      "epoch": 3.2083333333333335,
      "eval_accuracy": 0.8125,
      "eval_loss": 0.4621736705303192,
      "eval_runtime": 114.6382,
      "eval_samples_per_second": 2.512,
      "eval_steps_per_second": 0.314,
      "step": 72
    },
    {
      "epoch": 3.2083333333333335,
      "eval_accuracy": 0.8125,
      "eval_loss": 0.462173730134964,
      "eval_runtime": 113.0732,
      "eval_samples_per_second": 2.547,
      "eval_steps_per_second": 0.318,
      "step": 72
    }
  ],
  "logging_steps": 10,
  "max_steps": 72,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 9223372036854775807,
  "save_steps": 500,
  "total_flos": 4.967755240837939e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}