{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.1940298507462686,
  "eval_steps": 35,
  "global_step": 210,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.028429282160625444,
      "grad_norm": 14.865596771240234,
      "learning_rate": 1.4285714285714285e-05,
      "loss": 1.887,
      "step": 5
    },
    {
      "epoch": 0.05685856432125089,
      "grad_norm": 8.584558486938477,
      "learning_rate": 2.857142857142857e-05,
      "loss": 1.5936,
      "step": 10
    },
    {
      "epoch": 0.08528784648187633,
      "grad_norm": 4.329889297485352,
      "learning_rate": 4.2857142857142856e-05,
      "loss": 0.5385,
      "step": 15
    },
    {
      "epoch": 0.11371712864250177,
      "grad_norm": 1.5808465480804443,
      "learning_rate": 5.714285714285714e-05,
      "loss": 0.4072,
      "step": 20
    },
    {
      "epoch": 0.14214641080312723,
      "grad_norm": 1.9611109495162964,
      "learning_rate": 7.142857142857143e-05,
      "loss": 0.3629,
      "step": 25
    },
    {
      "epoch": 0.17057569296375266,
      "grad_norm": 0.6985721588134766,
      "learning_rate": 8.571428571428571e-05,
      "loss": 0.3385,
      "step": 30
    },
    {
      "epoch": 0.19900497512437812,
      "grad_norm": 0.5211701989173889,
      "learning_rate": 0.0001,
      "loss": 0.3051,
      "step": 35
    },
    {
      "epoch": 0.19900497512437812,
      "eval_loss": 0.25876832008361816,
      "eval_runtime": 1601.7861,
      "eval_samples_per_second": 1.561,
      "eval_steps_per_second": 1.561,
      "step": 35
    },
    {
      "epoch": 0.22743425728500355,
      "grad_norm": 0.7382380962371826,
      "learning_rate": 9.993784606094612e-05,
      "loss": 0.2997,
      "step": 40
    },
    {
      "epoch": 0.255863539445629,
      "grad_norm": 0.7649598121643066,
      "learning_rate": 9.975153876827008e-05,
      "loss": 0.2794,
      "step": 45
    },
    {
      "epoch": 0.28429282160625446,
      "grad_norm": 0.8737666606903076,
      "learning_rate": 9.944154131125642e-05,
      "loss": 0.2777,
      "step": 50
    },
    {
      "epoch": 0.31272210376687987,
      "grad_norm": 18.472232818603516,
      "learning_rate": 9.900862439242719e-05,
      "loss": 0.2866,
      "step": 55
    },
    {
      "epoch": 0.3411513859275053,
      "grad_norm": 0.9156910181045532,
      "learning_rate": 9.84538643114539e-05,
      "loss": 0.2993,
      "step": 60
    },
    {
      "epoch": 0.3695806680881308,
      "grad_norm": 0.5505309700965881,
      "learning_rate": 9.777864028930705e-05,
      "loss": 0.2459,
      "step": 65
    },
    {
      "epoch": 0.39800995024875624,
      "grad_norm": 0.6314955949783325,
      "learning_rate": 9.698463103929542e-05,
      "loss": 0.2726,
      "step": 70
    },
    {
      "epoch": 0.39800995024875624,
      "eval_loss": 0.24190224707126617,
      "eval_runtime": 1601.1971,
      "eval_samples_per_second": 1.561,
      "eval_steps_per_second": 1.561,
      "step": 70
    },
    {
      "epoch": 0.42643923240938164,
      "grad_norm": 0.37724295258522034,
      "learning_rate": 9.607381059352038e-05,
      "loss": 0.2722,
      "step": 75
    },
    {
      "epoch": 0.4548685145700071,
      "grad_norm": 0.5261896848678589,
      "learning_rate": 9.504844339512095e-05,
      "loss": 0.2427,
      "step": 80
    },
    {
      "epoch": 0.48329779673063256,
      "grad_norm": 0.5982173681259155,
      "learning_rate": 9.391107866851143e-05,
      "loss": 0.2672,
      "step": 85
    },
    {
      "epoch": 0.511727078891258,
      "grad_norm": 0.5829192399978638,
      "learning_rate": 9.266454408160779e-05,
      "loss": 0.2195,
      "step": 90
    },
    {
      "epoch": 0.5401563610518835,
      "grad_norm": 0.5800922513008118,
      "learning_rate": 9.131193871579975e-05,
      "loss": 0.2454,
      "step": 95
    },
    {
      "epoch": 0.5685856432125089,
      "grad_norm": 0.6000517010688782,
      "learning_rate": 8.985662536114613e-05,
      "loss": 0.3725,
      "step": 100
    },
    {
      "epoch": 0.5970149253731343,
      "grad_norm": 0.5690158605575562,
      "learning_rate": 8.83022221559489e-05,
      "loss": 0.2387,
      "step": 105
    },
    {
      "epoch": 0.5970149253731343,
      "eval_loss": 0.23953676223754883,
      "eval_runtime": 1600.7721,
      "eval_samples_per_second": 1.562,
      "eval_steps_per_second": 1.562,
      "step": 105
    },
    {
      "epoch": 0.6254442075337597,
      "grad_norm": 0.38492974638938904,
      "learning_rate": 8.665259359149132e-05,
      "loss": 0.2324,
      "step": 110
    },
    {
      "epoch": 0.6538734896943852,
      "grad_norm": 0.9776778221130371,
      "learning_rate": 8.491184090430364e-05,
      "loss": 0.2409,
      "step": 115
    },
    {
      "epoch": 0.6823027718550106,
      "grad_norm": 0.6985746622085571,
      "learning_rate": 8.308429187984297e-05,
      "loss": 0.2512,
      "step": 120
    },
    {
      "epoch": 0.7107320540156361,
      "grad_norm": 0.38226836919784546,
      "learning_rate": 8.117449009293668e-05,
      "loss": 0.2347,
      "step": 125
    },
    {
      "epoch": 0.7391613361762616,
      "grad_norm": 0.5587658882141113,
      "learning_rate": 7.91871836117395e-05,
      "loss": 0.2357,
      "step": 130
    },
    {
      "epoch": 0.767590618336887,
      "grad_norm": 0.6417293548583984,
      "learning_rate": 7.712731319328798e-05,
      "loss": 0.2607,
      "step": 135
    },
    {
      "epoch": 0.7960199004975125,
      "grad_norm": 0.3534771502017975,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.2508,
      "step": 140
    },
    {
      "epoch": 0.7960199004975125,
      "eval_loss": 0.22614409029483795,
      "eval_runtime": 1601.6762,
      "eval_samples_per_second": 1.561,
      "eval_steps_per_second": 1.561,
      "step": 140
    },
    {
      "epoch": 0.8244491826581379,
      "grad_norm": 0.4052274525165558,
      "learning_rate": 7.281053286765815e-05,
      "loss": 0.2338,
      "step": 145
    },
    {
      "epoch": 0.8528784648187633,
      "grad_norm": 0.544508695602417,
      "learning_rate": 7.056435515653059e-05,
      "loss": 0.2547,
      "step": 150
    },
    {
      "epoch": 0.8813077469793887,
      "grad_norm": 0.4221990406513214,
      "learning_rate": 6.826705121831976e-05,
      "loss": 0.2481,
      "step": 155
    },
    {
      "epoch": 0.9097370291400142,
      "grad_norm": 0.5856378078460693,
      "learning_rate": 6.592433251258423e-05,
      "loss": 0.2431,
      "step": 160
    },
    {
      "epoch": 0.9381663113006397,
      "grad_norm": 0.8348550796508789,
      "learning_rate": 6.354202340715026e-05,
      "loss": 0.228,
      "step": 165
    },
    {
      "epoch": 0.9665955934612651,
      "grad_norm": 0.45988309383392334,
      "learning_rate": 6.112604669781572e-05,
      "loss": 0.2229,
      "step": 170
    },
    {
      "epoch": 0.9950248756218906,
      "grad_norm": 0.3123863637447357,
      "learning_rate": 5.868240888334653e-05,
      "loss": 0.2356,
      "step": 175
    },
    {
      "epoch": 0.9950248756218906,
      "eval_loss": 0.21728534996509552,
      "eval_runtime": 1603.0018,
      "eval_samples_per_second": 1.56,
      "eval_steps_per_second": 1.56,
      "step": 175
    },
    {
      "epoch": 1.023454157782516,
      "grad_norm": 0.38809412717819214,
      "learning_rate": 5.621718523237427e-05,
      "loss": 0.2172,
      "step": 180
    },
    {
      "epoch": 1.0518834399431414,
      "grad_norm": 0.45351433753967285,
      "learning_rate": 5.373650467932122e-05,
      "loss": 0.2047,
      "step": 185
    },
    {
      "epoch": 1.080312722103767,
      "grad_norm": 0.6264550685882568,
      "learning_rate": 5.124653458690365e-05,
      "loss": 0.2212,
      "step": 190
    },
    {
      "epoch": 1.1087420042643923,
      "grad_norm": 0.40359142422676086,
      "learning_rate": 4.875346541309637e-05,
      "loss": 0.211,
      "step": 195
    },
    {
      "epoch": 1.1371712864250179,
      "grad_norm": 0.39740586280822754,
      "learning_rate": 4.626349532067879e-05,
      "loss": 0.2332,
      "step": 200
    },
    {
      "epoch": 1.1656005685856432,
      "grad_norm": 0.406650573015213,
      "learning_rate": 4.378281476762576e-05,
      "loss": 0.2216,
      "step": 205
    },
    {
      "epoch": 1.1940298507462686,
      "grad_norm": 0.32201454043388367,
      "learning_rate": 4.131759111665349e-05,
      "loss": 0.2079,
      "step": 210
    },
    {
      "epoch": 1.1940298507462686,
      "eval_loss": 0.22092726826667786,
      "eval_runtime": 1600.8467,
      "eval_samples_per_second": 1.562,
      "eval_steps_per_second": 1.562,
      "step": 210
    }
  ],
  "logging_steps": 5,
  "max_steps": 350,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 35,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 6.950336201831743e+18,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}