{
  "best_metric": 0.6156604453386092,
  "best_model_checkpoint": "./runtime-ar/checkpoint-152",
  "epoch": 3.9950900163666123,
  "global_step": 152,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05,
      "learning_rate": 5e-05,
      "loss": 3.3108,
      "step": 2
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.0001,
      "loss": 4.8935,
      "step": 4
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00015000000000000001,
      "loss": 3.6355,
      "step": 6
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.0002,
      "loss": 3.4513,
      "step": 8
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.0001999048221581858,
      "loss": 3.2178,
      "step": 10
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00019961946980917456,
      "loss": 3.0584,
      "step": 12
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.00019914448613738106,
      "loss": 2.908,
      "step": 14
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00019848077530122083,
      "loss": 2.7828,
      "step": 16
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00019762960071199333,
      "loss": 2.6914,
      "step": 18
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.00019659258262890683,
      "loss": 2.6232,
      "step": 20
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.0001953716950748227,
      "loss": 2.5839,
      "step": 22
    },
    {
      "epoch": 0.63,
      "learning_rate": 0.00019396926207859084,
      "loss": 2.5405,
      "step": 24
    },
    {
      "epoch": 0.68,
      "learning_rate": 0.0001923879532511287,
      "loss": 2.4946,
      "step": 26
    },
    {
      "epoch": 0.73,
      "learning_rate": 0.000190630778703665,
      "loss": 2.4245,
      "step": 28
    },
    {
      "epoch": 0.79,
      "learning_rate": 0.00018870108331782217,
      "loss": 2.4099,
      "step": 30
    },
    {
      "epoch": 0.84,
      "learning_rate": 0.00018660254037844388,
      "loss": 2.3972,
      "step": 32
    },
    {
      "epoch": 0.89,
      "learning_rate": 0.0001843391445812886,
      "loss": 2.3371,
      "step": 34
    },
    {
      "epoch": 0.94,
      "learning_rate": 0.0001819152044288992,
      "loss": 2.276,
      "step": 36
    },
    {
      "epoch": 1.0,
      "learning_rate": 0.00017933533402912354,
      "loss": 2.3067,
      "step": 38
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.5594373263429553,
      "eval_loss": 2.3559441566467285,
      "eval_runtime": 87.3704,
      "eval_samples_per_second": 6.192,
      "eval_steps_per_second": 3.102,
      "step": 38
    },
    {
      "epoch": 1.05,
      "learning_rate": 0.0001766044443118978,
      "loss": 2.3629,
      "step": 40
    },
    {
      "epoch": 1.1,
      "learning_rate": 0.0001737277336810124,
      "loss": 2.0942,
      "step": 42
    },
    {
      "epoch": 1.16,
      "learning_rate": 0.00017071067811865476,
      "loss": 2.0975,
      "step": 44
    },
    {
      "epoch": 1.21,
      "learning_rate": 0.00016755902076156604,
      "loss": 2.0554,
      "step": 46
    },
    {
      "epoch": 1.26,
      "learning_rate": 0.00016427876096865394,
      "loss": 2.0336,
      "step": 48
    },
    {
      "epoch": 1.31,
      "learning_rate": 0.00016087614290087208,
      "loss": 2.0584,
      "step": 50
    },
    {
      "epoch": 1.37,
      "learning_rate": 0.0001573576436351046,
      "loss": 1.9959,
      "step": 52
    },
    {
      "epoch": 1.42,
      "learning_rate": 0.0001537299608346824,
      "loss": 1.9855,
      "step": 54
    },
    {
      "epoch": 1.47,
      "learning_rate": 0.00015000000000000001,
      "loss": 1.9941,
      "step": 56
    },
    {
      "epoch": 1.52,
      "learning_rate": 0.00014617486132350343,
      "loss": 1.9589,
      "step": 58
    },
    {
      "epoch": 1.58,
      "learning_rate": 0.00014226182617406996,
      "loss": 1.9918,
      "step": 60
    },
    {
      "epoch": 1.63,
      "learning_rate": 0.000138268343236509,
      "loss": 1.9673,
      "step": 62
    },
    {
      "epoch": 1.68,
      "learning_rate": 0.00013420201433256689,
      "loss": 1.9626,
      "step": 64
    },
    {
      "epoch": 1.73,
      "learning_rate": 0.00013007057995042732,
      "loss": 1.9327,
      "step": 66
    },
    {
      "epoch": 1.79,
      "learning_rate": 0.00012588190451025207,
      "loss": 1.9334,
      "step": 68
    },
    {
      "epoch": 1.84,
      "learning_rate": 0.00012164396139381029,
      "loss": 1.9405,
      "step": 70
    },
    {
      "epoch": 1.89,
      "learning_rate": 0.00011736481776669306,
      "loss": 1.8394,
      "step": 72
    },
    {
      "epoch": 1.94,
      "learning_rate": 0.00011305261922200519,
      "loss": 1.9284,
      "step": 74
    },
    {
      "epoch": 2.0,
      "learning_rate": 0.00010871557427476583,
      "loss": 1.9243,
      "step": 76
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.5974633090939628,
      "eval_loss": 2.1282806396484375,
      "eval_runtime": 88.3059,
      "eval_samples_per_second": 6.126,
      "eval_steps_per_second": 3.069,
      "step": 76
    },
    {
      "epoch": 2.05,
      "learning_rate": 0.00010436193873653361,
      "loss": 1.8745,
      "step": 78
    },
    {
      "epoch": 2.1,
      "learning_rate": 0.0001,
      "loss": 1.6456,
      "step": 80
    },
    {
      "epoch": 2.16,
      "learning_rate": 9.563806126346642e-05,
      "loss": 1.6496,
      "step": 82
    },
    {
      "epoch": 2.21,
      "learning_rate": 9.128442572523417e-05,
      "loss": 1.6571,
      "step": 84
    },
    {
      "epoch": 2.26,
      "learning_rate": 8.694738077799488e-05,
      "loss": 1.6366,
      "step": 86
    },
    {
      "epoch": 2.31,
      "learning_rate": 8.263518223330697e-05,
      "loss": 1.6033,
      "step": 88
    },
    {
      "epoch": 2.37,
      "learning_rate": 7.835603860618972e-05,
      "loss": 1.6042,
      "step": 90
    },
    {
      "epoch": 2.42,
      "learning_rate": 7.411809548974792e-05,
      "loss": 1.6111,
      "step": 92
    },
    {
      "epoch": 2.47,
      "learning_rate": 6.992942004957271e-05,
      "loss": 1.6058,
      "step": 94
    },
    {
      "epoch": 2.52,
      "learning_rate": 6.579798566743314e-05,
      "loss": 1.6079,
      "step": 96
    },
    {
      "epoch": 2.58,
      "learning_rate": 6.173165676349103e-05,
      "loss": 1.5789,
      "step": 98
    },
    {
      "epoch": 2.63,
      "learning_rate": 5.773817382593008e-05,
      "loss": 1.6267,
      "step": 100
    },
    {
      "epoch": 2.68,
      "learning_rate": 5.382513867649663e-05,
      "loss": 1.5811,
      "step": 102
    },
    {
      "epoch": 2.73,
      "learning_rate": 5.000000000000002e-05,
      "loss": 1.5605,
      "step": 104
    },
    {
      "epoch": 2.79,
      "learning_rate": 4.6270039165317605e-05,
      "loss": 1.5828,
      "step": 106
    },
    {
      "epoch": 2.84,
      "learning_rate": 4.264235636489542e-05,
      "loss": 1.5579,
      "step": 108
    },
    {
      "epoch": 2.89,
      "learning_rate": 3.9123857099127936e-05,
      "loss": 1.5991,
      "step": 110
    },
    {
      "epoch": 2.94,
      "learning_rate": 3.5721239031346066e-05,
      "loss": 1.5641,
      "step": 112
    },
    {
      "epoch": 3.0,
      "learning_rate": 3.244097923843398e-05,
      "loss": 1.5971,
      "step": 114
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.6139908093264839,
      "eval_loss": 2.075925827026367,
      "eval_runtime": 88.4185,
      "eval_samples_per_second": 6.119,
      "eval_steps_per_second": 3.065,
      "step": 114
    },
    {
      "epoch": 3.05,
      "learning_rate": 2.9289321881345254e-05,
      "loss": 1.5486,
      "step": 116
    },
    {
      "epoch": 3.1,
      "learning_rate": 2.6272266318987603e-05,
      "loss": 1.4341,
      "step": 118
    },
    {
      "epoch": 3.16,
      "learning_rate": 2.339555568810221e-05,
      "loss": 1.4085,
      "step": 120
    },
    {
      "epoch": 3.21,
      "learning_rate": 2.0664665970876496e-05,
      "loss": 1.3819,
      "step": 122
    },
    {
      "epoch": 3.26,
      "learning_rate": 1.808479557110081e-05,
      "loss": 1.3935,
      "step": 124
    },
    {
      "epoch": 3.31,
      "learning_rate": 1.566085541871145e-05,
      "loss": 1.4073,
      "step": 126
    },
    {
      "epoch": 3.37,
      "learning_rate": 1.339745962155613e-05,
      "loss": 1.419,
      "step": 128
    },
    {
      "epoch": 3.42,
      "learning_rate": 1.129891668217783e-05,
      "loss": 1.3807,
      "step": 130
    },
    {
      "epoch": 3.47,
      "learning_rate": 9.369221296335006e-06,
      "loss": 1.3881,
      "step": 132
    },
    {
      "epoch": 3.52,
      "learning_rate": 7.612046748871327e-06,
      "loss": 1.3674,
      "step": 134
    },
    {
      "epoch": 3.58,
      "learning_rate": 6.030737921409169e-06,
      "loss": 1.3847,
      "step": 136
    },
    {
      "epoch": 3.63,
      "learning_rate": 4.628304925177318e-06,
      "loss": 1.3734,
      "step": 138
    },
    {
      "epoch": 3.68,
      "learning_rate": 3.40741737109318e-06,
      "loss": 1.3969,
      "step": 140
    },
    {
      "epoch": 3.73,
      "learning_rate": 2.3703992880066638e-06,
      "loss": 1.3544,
      "step": 142
    },
    {
      "epoch": 3.79,
      "learning_rate": 1.5192246987791981e-06,
      "loss": 1.4021,
      "step": 144
    },
    {
      "epoch": 3.84,
      "learning_rate": 8.555138626189618e-07,
      "loss": 1.4061,
      "step": 146
    },
    {
      "epoch": 3.89,
      "learning_rate": 3.805301908254455e-07,
      "loss": 1.3621,
      "step": 148
    },
    {
      "epoch": 3.94,
      "learning_rate": 9.517784181422019e-08,
      "loss": 1.3759,
      "step": 150
    },
    {
      "epoch": 4.0,
      "learning_rate": 0.0,
      "loss": 1.4139,
      "step": 152
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.6156604453386092,
      "eval_loss": 2.101821184158325,
      "eval_runtime": 88.2947,
      "eval_samples_per_second": 6.127,
      "eval_steps_per_second": 3.069,
      "step": 152
    },
    {
      "epoch": 4.0,
      "step": 152,
      "total_flos": 8.481201929060352e+16,
      "train_loss": 1.970301656346572,
      "train_runtime": 9467.2073,
      "train_samples_per_second": 2.062,
      "train_steps_per_second": 0.016
    }
  ],
  "max_steps": 152,
  "num_train_epochs": 4,
  "total_flos": 8.481201929060352e+16,
  "trial_name": null,
  "trial_params": null
}