{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.7910447761194028,
  "eval_steps": 35,
  "global_step": 315,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.028429282160625444,
      "grad_norm": 6.4533820152282715,
      "learning_rate": 1.4285714285714285e-05,
      "loss": 0.9693,
      "step": 5
    },
    {
      "epoch": 0.05685856432125089,
      "grad_norm": 4.657992839813232,
      "learning_rate": 2.857142857142857e-05,
      "loss": 0.7795,
      "step": 10
    },
    {
      "epoch": 0.08528784648187633,
      "grad_norm": 2.398066997528076,
      "learning_rate": 4.2857142857142856e-05,
      "loss": 0.4535,
      "step": 15
    },
    {
      "epoch": 0.11371712864250177,
      "grad_norm": 1.7500923871994019,
      "learning_rate": 5.714285714285714e-05,
      "loss": 0.4519,
      "step": 20
    },
    {
      "epoch": 0.14214641080312723,
      "grad_norm": 1.989443063735962,
      "learning_rate": 7.142857142857143e-05,
      "loss": 0.4128,
      "step": 25
    },
    {
      "epoch": 0.17057569296375266,
      "grad_norm": 1.1362711191177368,
      "learning_rate": 8.571428571428571e-05,
      "loss": 0.3446,
      "step": 30
    },
    {
      "epoch": 0.19900497512437812,
      "grad_norm": 1.1770528554916382,
      "learning_rate": 0.0001,
      "loss": 0.3253,
      "step": 35
    },
    {
      "epoch": 0.19900497512437812,
      "eval_loss": 0.28751906752586365,
      "eval_runtime": 239.7962,
      "eval_samples_per_second": 10.426,
      "eval_steps_per_second": 10.426,
      "step": 35
    },
    {
      "epoch": 0.22743425728500355,
      "grad_norm": 1.1288448572158813,
      "learning_rate": 9.993784606094612e-05,
      "loss": 0.3065,
      "step": 40
    },
    {
      "epoch": 0.255863539445629,
      "grad_norm": 1.0276695489883423,
      "learning_rate": 9.975153876827008e-05,
      "loss": 0.302,
      "step": 45
    },
    {
      "epoch": 0.28429282160625446,
      "grad_norm": 1.2982385158538818,
      "learning_rate": 9.944154131125642e-05,
      "loss": 0.307,
      "step": 50
    },
    {
      "epoch": 0.31272210376687987,
      "grad_norm": 1.4258345365524292,
      "learning_rate": 9.900862439242719e-05,
      "loss": 0.2908,
      "step": 55
    },
    {
      "epoch": 0.3411513859275053,
      "grad_norm": 0.7898867726325989,
      "learning_rate": 9.84538643114539e-05,
      "loss": 0.3112,
      "step": 60
    },
    {
      "epoch": 0.3695806680881308,
      "grad_norm": 0.9717791676521301,
      "learning_rate": 9.777864028930705e-05,
      "loss": 0.2596,
      "step": 65
    },
    {
      "epoch": 0.39800995024875624,
      "grad_norm": 1.0619926452636719,
      "learning_rate": 9.698463103929542e-05,
      "loss": 0.2868,
      "step": 70
    },
    {
      "epoch": 0.39800995024875624,
      "eval_loss": 0.2600123882293701,
      "eval_runtime": 240.5378,
      "eval_samples_per_second": 10.393,
      "eval_steps_per_second": 10.393,
      "step": 70
    },
    {
      "epoch": 0.42643923240938164,
      "grad_norm": 0.8634427189826965,
      "learning_rate": 9.607381059352038e-05,
      "loss": 0.2763,
      "step": 75
    },
    {
      "epoch": 0.4548685145700071,
      "grad_norm": 1.2894893884658813,
      "learning_rate": 9.504844339512095e-05,
      "loss": 0.254,
      "step": 80
    },
    {
      "epoch": 0.48329779673063256,
      "grad_norm": 1.4364824295043945,
      "learning_rate": 9.391107866851143e-05,
      "loss": 0.2834,
      "step": 85
    },
    {
      "epoch": 0.511727078891258,
      "grad_norm": 0.9127787947654724,
      "learning_rate": 9.266454408160779e-05,
      "loss": 0.2509,
      "step": 90
    },
    {
      "epoch": 0.5401563610518835,
      "grad_norm": 0.9939984083175659,
      "learning_rate": 9.131193871579975e-05,
      "loss": 0.266,
      "step": 95
    },
    {
      "epoch": 0.5685856432125089,
      "grad_norm": 1.184008002281189,
      "learning_rate": 8.985662536114613e-05,
      "loss": 0.2562,
      "step": 100
    },
    {
      "epoch": 0.5970149253731343,
      "grad_norm": 0.6923903226852417,
      "learning_rate": 8.83022221559489e-05,
      "loss": 0.2583,
      "step": 105
    },
    {
      "epoch": 0.5970149253731343,
      "eval_loss": 0.25079870223999023,
      "eval_runtime": 240.544,
      "eval_samples_per_second": 10.393,
      "eval_steps_per_second": 10.393,
      "step": 105
    },
    {
      "epoch": 0.6254442075337597,
      "grad_norm": 0.8162419199943542,
      "learning_rate": 8.665259359149132e-05,
      "loss": 0.2529,
      "step": 110
    },
    {
      "epoch": 0.6538734896943852,
      "grad_norm": 0.7928385734558105,
      "learning_rate": 8.491184090430364e-05,
      "loss": 0.2482,
      "step": 115
    },
    {
      "epoch": 0.6823027718550106,
      "grad_norm": 0.8485226035118103,
      "learning_rate": 8.308429187984297e-05,
      "loss": 0.244,
      "step": 120
    },
    {
      "epoch": 0.7107320540156361,
      "grad_norm": 0.8197255730628967,
      "learning_rate": 8.117449009293668e-05,
      "loss": 0.2453,
      "step": 125
    },
    {
      "epoch": 0.7391613361762616,
      "grad_norm": 0.9954875707626343,
      "learning_rate": 7.91871836117395e-05,
      "loss": 0.2451,
      "step": 130
    },
    {
      "epoch": 0.767590618336887,
      "grad_norm": 1.4009358882904053,
      "learning_rate": 7.712731319328798e-05,
      "loss": 0.2749,
      "step": 135
    },
    {
      "epoch": 0.7960199004975125,
      "grad_norm": 0.8014578819274902,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.2559,
      "step": 140
    },
    {
      "epoch": 0.7960199004975125,
      "eval_loss": 0.2278721183538437,
      "eval_runtime": 239.2855,
      "eval_samples_per_second": 10.448,
      "eval_steps_per_second": 10.448,
      "step": 140
    },
    {
      "epoch": 0.8244491826581379,
      "grad_norm": 1.1141350269317627,
      "learning_rate": 7.281053286765815e-05,
      "loss": 0.2483,
      "step": 145
    },
    {
      "epoch": 0.8528784648187633,
      "grad_norm": 0.9138433933258057,
      "learning_rate": 7.056435515653059e-05,
      "loss": 0.2666,
      "step": 150
    },
    {
      "epoch": 0.8813077469793887,
      "grad_norm": 0.5915958285331726,
      "learning_rate": 6.826705121831976e-05,
      "loss": 0.2576,
      "step": 155
    },
    {
      "epoch": 0.9097370291400142,
      "grad_norm": 0.724046528339386,
      "learning_rate": 6.592433251258423e-05,
      "loss": 0.2552,
      "step": 160
    },
    {
      "epoch": 0.9381663113006397,
      "grad_norm": 1.355237364768982,
      "learning_rate": 6.354202340715026e-05,
      "loss": 0.2287,
      "step": 165
    },
    {
      "epoch": 0.9665955934612651,
      "grad_norm": 1.1470948457717896,
      "learning_rate": 6.112604669781572e-05,
      "loss": 0.2469,
      "step": 170
    },
    {
      "epoch": 0.9950248756218906,
      "grad_norm": 0.7694109678268433,
      "learning_rate": 5.868240888334653e-05,
      "loss": 0.2516,
      "step": 175
    },
    {
      "epoch": 0.9950248756218906,
      "eval_loss": 0.22209325432777405,
      "eval_runtime": 239.4751,
      "eval_samples_per_second": 10.439,
      "eval_steps_per_second": 10.439,
      "step": 175
    },
    {
      "epoch": 1.023454157782516,
      "grad_norm": 0.8034222722053528,
      "learning_rate": 5.621718523237427e-05,
      "loss": 0.2339,
      "step": 180
    },
    {
      "epoch": 1.0518834399431414,
      "grad_norm": 0.7792671322822571,
      "learning_rate": 5.373650467932122e-05,
      "loss": 0.2192,
      "step": 185
    },
    {
      "epoch": 1.080312722103767,
      "grad_norm": 1.2179112434387207,
      "learning_rate": 5.124653458690365e-05,
      "loss": 0.2306,
      "step": 190
    },
    {
      "epoch": 1.1087420042643923,
      "grad_norm": 0.7679367065429688,
      "learning_rate": 4.875346541309637e-05,
      "loss": 0.213,
      "step": 195
    },
    {
      "epoch": 1.1371712864250179,
      "grad_norm": 0.7573937773704529,
      "learning_rate": 4.626349532067879e-05,
      "loss": 0.2306,
      "step": 200
    },
    {
      "epoch": 1.1656005685856432,
      "grad_norm": 0.8733441233634949,
      "learning_rate": 4.378281476762576e-05,
      "loss": 0.2235,
      "step": 205
    },
    {
      "epoch": 1.1940298507462686,
      "grad_norm": 0.7307979464530945,
      "learning_rate": 4.131759111665349e-05,
      "loss": 0.2086,
      "step": 210
    },
    {
      "epoch": 1.1940298507462686,
      "eval_loss": 0.22708775103092194,
      "eval_runtime": 239.5157,
      "eval_samples_per_second": 10.438,
      "eval_steps_per_second": 10.438,
      "step": 210
    },
    {
      "epoch": 1.2224591329068941,
      "grad_norm": 0.7165330052375793,
      "learning_rate": 3.887395330218429e-05,
      "loss": 0.2027,
      "step": 215
    },
    {
      "epoch": 1.2508884150675195,
      "grad_norm": 0.9405038952827454,
      "learning_rate": 3.6457976592849754e-05,
      "loss": 0.1961,
      "step": 220
    },
    {
      "epoch": 1.279317697228145,
      "grad_norm": 1.5330524444580078,
      "learning_rate": 3.4075667487415785e-05,
      "loss": 0.2274,
      "step": 225
    },
    {
      "epoch": 1.3077469793887704,
      "grad_norm": 0.8158367872238159,
      "learning_rate": 3.173294878168025e-05,
      "loss": 0.215,
      "step": 230
    },
    {
      "epoch": 1.336176261549396,
      "grad_norm": 0.6741064786911011,
      "learning_rate": 2.9435644843469436e-05,
      "loss": 0.2164,
      "step": 235
    },
    {
      "epoch": 1.3646055437100213,
      "grad_norm": 0.841311514377594,
      "learning_rate": 2.718946713234185e-05,
      "loss": 0.2213,
      "step": 240
    },
    {
      "epoch": 1.3930348258706466,
      "grad_norm": 0.8467437624931335,
      "learning_rate": 2.500000000000001e-05,
      "loss": 0.238,
      "step": 245
    },
    {
      "epoch": 1.3930348258706466,
      "eval_loss": 0.2183476835489273,
      "eval_runtime": 239.5588,
      "eval_samples_per_second": 10.436,
      "eval_steps_per_second": 10.436,
      "step": 245
    },
    {
      "epoch": 1.4214641080312722,
      "grad_norm": 0.8148757219314575,
      "learning_rate": 2.2872686806712035e-05,
      "loss": 0.2179,
      "step": 250
    },
    {
      "epoch": 1.4498933901918978,
      "grad_norm": 0.7469142079353333,
      "learning_rate": 2.0812816388260518e-05,
      "loss": 0.2057,
      "step": 255
    },
    {
      "epoch": 1.4783226723525231,
      "grad_norm": 0.8624520897865295,
      "learning_rate": 1.8825509907063327e-05,
      "loss": 0.1965,
      "step": 260
    },
    {
      "epoch": 1.5067519545131485,
      "grad_norm": 0.6433588862419128,
      "learning_rate": 1.691570812015704e-05,
      "loss": 0.2171,
      "step": 265
    },
    {
      "epoch": 1.535181236673774,
      "grad_norm": 0.8499258756637573,
      "learning_rate": 1.5088159095696363e-05,
      "loss": 0.2217,
      "step": 270
    },
    {
      "epoch": 1.5636105188343994,
      "grad_norm": 1.1365426778793335,
      "learning_rate": 1.3347406408508695e-05,
      "loss": 0.2035,
      "step": 275
    },
    {
      "epoch": 1.5920398009950247,
      "grad_norm": 0.8611994981765747,
      "learning_rate": 1.1697777844051105e-05,
      "loss": 0.2176,
      "step": 280
    },
    {
      "epoch": 1.5920398009950247,
      "eval_loss": 0.22061596810817719,
      "eval_runtime": 242.1258,
      "eval_samples_per_second": 10.325,
      "eval_steps_per_second": 10.325,
      "step": 280
    },
    {
      "epoch": 1.6204690831556503,
      "grad_norm": 1.0038255453109741,
      "learning_rate": 1.0143374638853891e-05,
      "loss": 0.2203,
      "step": 285
    },
    {
      "epoch": 1.6488983653162759,
      "grad_norm": 0.7821207642555237,
      "learning_rate": 8.688061284200266e-06,
      "loss": 0.2171,
      "step": 290
    },
    {
      "epoch": 1.6773276474769012,
      "grad_norm": 0.8067914247512817,
      "learning_rate": 7.33545591839222e-06,
      "loss": 0.2009,
      "step": 295
    },
    {
      "epoch": 1.7057569296375266,
      "grad_norm": 0.7486416697502136,
      "learning_rate": 6.088921331488568e-06,
      "loss": 0.2224,
      "step": 300
    },
    {
      "epoch": 1.7341862117981521,
      "grad_norm": 0.8486853241920471,
      "learning_rate": 4.951556604879048e-06,
      "loss": 0.224,
      "step": 305
    },
    {
      "epoch": 1.7626154939587777,
      "grad_norm": 0.7262605428695679,
      "learning_rate": 3.9261894064796135e-06,
      "loss": 0.1882,
      "step": 310
    },
    {
      "epoch": 1.7910447761194028,
      "grad_norm": 0.8881399631500244,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 0.2022,
      "step": 315
    },
    {
      "epoch": 1.7910447761194028,
      "eval_loss": 0.22089815139770508,
      "eval_runtime": 240.8241,
      "eval_samples_per_second": 10.381,
      "eval_steps_per_second": 10.381,
      "step": 315
    }
  ],
  "logging_steps": 5,
  "max_steps": 350,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 35,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.1272530553342525e+18,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}