text-seg-lm-qwen2-0.5b / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 8.227067050596462,
"eval_steps": 500,
"global_step": 5000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.16454134101192924,
"grad_norm": 0.8724656105041504,
"learning_rate": 1e-05,
"loss": 0.9845,
"step": 100
},
{
"epoch": 0.3290826820238585,
"grad_norm": 0.6713359951972961,
"learning_rate": 2e-05,
"loss": 0.5833,
"step": 200
},
{
"epoch": 0.49362402303578773,
"grad_norm": 1.1457316875457764,
"learning_rate": 3e-05,
"loss": 0.4109,
"step": 300
},
{
"epoch": 0.658165364047717,
"grad_norm": 1.878250241279602,
"learning_rate": 4e-05,
"loss": 0.3576,
"step": 400
},
{
"epoch": 0.8227067050596463,
"grad_norm": 1.0137649774551392,
"learning_rate": 5e-05,
"loss": 0.3388,
"step": 500
},
{
"epoch": 0.8227067050596463,
"eval_loss": 0.29997578263282776,
"eval_runtime": 2.1518,
"eval_samples_per_second": 4.647,
"eval_steps_per_second": 2.324,
"step": 500
},
{
"epoch": 0.9872480460715755,
"grad_norm": 1.1368335485458374,
"learning_rate": 4.888888888888889e-05,
"loss": 0.3053,
"step": 600
},
{
"epoch": 1.1517893870835048,
"grad_norm": 0.9668510556221008,
"learning_rate": 4.7777777777777784e-05,
"loss": 0.2896,
"step": 700
},
{
"epoch": 1.316330728095434,
"grad_norm": 1.2195199728012085,
"learning_rate": 4.666666666666667e-05,
"loss": 0.2852,
"step": 800
},
{
"epoch": 1.4808720691073631,
"grad_norm": 1.9592208862304688,
"learning_rate": 4.555555555555556e-05,
"loss": 0.2706,
"step": 900
},
{
"epoch": 1.6454134101192923,
"grad_norm": 2.3532769680023193,
"learning_rate": 4.4444444444444447e-05,
"loss": 0.2645,
"step": 1000
},
{
"epoch": 1.6454134101192923,
"eval_loss": 0.25353601574897766,
"eval_runtime": 2.1517,
"eval_samples_per_second": 4.647,
"eval_steps_per_second": 2.324,
"step": 1000
},
{
"epoch": 1.8099547511312217,
"grad_norm": 0.9459941983222961,
"learning_rate": 4.3333333333333334e-05,
"loss": 0.2723,
"step": 1100
},
{
"epoch": 1.974496092143151,
"grad_norm": 4.532841682434082,
"learning_rate": 4.222222222222222e-05,
"loss": 0.2589,
"step": 1200
},
{
"epoch": 2.1390374331550803,
"grad_norm": 1.5837737321853638,
"learning_rate": 4.111111111111111e-05,
"loss": 0.2544,
"step": 1300
},
{
"epoch": 2.3035787741670095,
"grad_norm": 2.3430395126342773,
"learning_rate": 4e-05,
"loss": 0.2377,
"step": 1400
},
{
"epoch": 2.4681201151789387,
"grad_norm": 0.8787763118743896,
"learning_rate": 3.888888888888889e-05,
"loss": 0.2407,
"step": 1500
},
{
"epoch": 2.4681201151789387,
"eval_loss": 0.2262120544910431,
"eval_runtime": 2.1241,
"eval_samples_per_second": 4.708,
"eval_steps_per_second": 2.354,
"step": 1500
},
{
"epoch": 2.632661456190868,
"grad_norm": 2.3523459434509277,
"learning_rate": 3.777777777777778e-05,
"loss": 0.2321,
"step": 1600
},
{
"epoch": 2.797202797202797,
"grad_norm": 1.0582653284072876,
"learning_rate": 3.6666666666666666e-05,
"loss": 0.2333,
"step": 1700
},
{
"epoch": 2.9617441382147263,
"grad_norm": 2.3663830757141113,
"learning_rate": 3.555555555555556e-05,
"loss": 0.2292,
"step": 1800
},
{
"epoch": 3.126285479226656,
"grad_norm": 1.1538413763046265,
"learning_rate": 3.444444444444445e-05,
"loss": 0.2162,
"step": 1900
},
{
"epoch": 3.290826820238585,
"grad_norm": 3.5856103897094727,
"learning_rate": 3.3333333333333335e-05,
"loss": 0.2151,
"step": 2000
},
{
"epoch": 3.290826820238585,
"eval_loss": 0.1963958442211151,
"eval_runtime": 2.1323,
"eval_samples_per_second": 4.69,
"eval_steps_per_second": 2.345,
"step": 2000
},
{
"epoch": 3.4553681612505143,
"grad_norm": 2.666325092315674,
"learning_rate": 3.222222222222223e-05,
"loss": 0.212,
"step": 2100
},
{
"epoch": 3.6199095022624435,
"grad_norm": 1.245647668838501,
"learning_rate": 3.111111111111111e-05,
"loss": 0.2157,
"step": 2200
},
{
"epoch": 3.7844508432743726,
"grad_norm": 2.961127281188965,
"learning_rate": 3e-05,
"loss": 0.2123,
"step": 2300
},
{
"epoch": 3.948992184286302,
"grad_norm": 1.1635862588882446,
"learning_rate": 2.8888888888888888e-05,
"loss": 0.2044,
"step": 2400
},
{
"epoch": 4.113533525298231,
"grad_norm": 5.296552658081055,
"learning_rate": 2.777777777777778e-05,
"loss": 0.2003,
"step": 2500
},
{
"epoch": 4.113533525298231,
"eval_loss": 0.20144304633140564,
"eval_runtime": 2.1228,
"eval_samples_per_second": 4.711,
"eval_steps_per_second": 2.355,
"step": 2500
},
{
"epoch": 4.278074866310161,
"grad_norm": 3.240604877471924,
"learning_rate": 2.6666666666666667e-05,
"loss": 0.1918,
"step": 2600
},
{
"epoch": 4.442616207322089,
"grad_norm": 4.324797630310059,
"learning_rate": 2.5555555555555554e-05,
"loss": 0.1954,
"step": 2700
},
{
"epoch": 4.607157548334019,
"grad_norm": 2.8329825401306152,
"learning_rate": 2.4444444444444445e-05,
"loss": 0.1942,
"step": 2800
},
{
"epoch": 4.771698889345949,
"grad_norm": 1.8872101306915283,
"learning_rate": 2.3333333333333336e-05,
"loss": 0.188,
"step": 2900
},
{
"epoch": 4.936240230357877,
"grad_norm": 6.444405555725098,
"learning_rate": 2.2222222222222223e-05,
"loss": 0.1918,
"step": 3000
},
{
"epoch": 4.936240230357877,
"eval_loss": 0.2167702168226242,
"eval_runtime": 2.1187,
"eval_samples_per_second": 4.72,
"eval_steps_per_second": 2.36,
"step": 3000
},
{
"epoch": 5.100781571369807,
"grad_norm": 3.5297648906707764,
"learning_rate": 2.111111111111111e-05,
"loss": 0.1844,
"step": 3100
},
{
"epoch": 5.265322912381736,
"grad_norm": 1.4769492149353027,
"learning_rate": 2e-05,
"loss": 0.174,
"step": 3200
},
{
"epoch": 5.429864253393665,
"grad_norm": 5.627166748046875,
"learning_rate": 1.888888888888889e-05,
"loss": 0.1773,
"step": 3300
},
{
"epoch": 5.594405594405594,
"grad_norm": 1.1272560358047485,
"learning_rate": 1.777777777777778e-05,
"loss": 0.1836,
"step": 3400
},
{
"epoch": 5.758946935417524,
"grad_norm": 0.9033811092376709,
"learning_rate": 1.6666666666666667e-05,
"loss": 0.1823,
"step": 3500
},
{
"epoch": 5.758946935417524,
"eval_loss": 0.2007795125246048,
"eval_runtime": 2.1257,
"eval_samples_per_second": 4.704,
"eval_steps_per_second": 2.352,
"step": 3500
},
{
"epoch": 5.9234882764294525,
"grad_norm": 1.0827577114105225,
"learning_rate": 1.5555555555555555e-05,
"loss": 0.1784,
"step": 3600
},
{
"epoch": 6.088029617441382,
"grad_norm": 3.107731819152832,
"learning_rate": 1.4444444444444444e-05,
"loss": 0.1762,
"step": 3700
},
{
"epoch": 6.252570958453312,
"grad_norm": 1.4182140827178955,
"learning_rate": 1.3333333333333333e-05,
"loss": 0.1711,
"step": 3800
},
{
"epoch": 6.4171122994652405,
"grad_norm": 1.3971117734909058,
"learning_rate": 1.2222222222222222e-05,
"loss": 0.1679,
"step": 3900
},
{
"epoch": 6.58165364047717,
"grad_norm": 3.196338415145874,
"learning_rate": 1.1111111111111112e-05,
"loss": 0.1663,
"step": 4000
},
{
"epoch": 6.58165364047717,
"eval_loss": 0.19463543593883514,
"eval_runtime": 2.135,
"eval_samples_per_second": 4.684,
"eval_steps_per_second": 2.342,
"step": 4000
},
{
"epoch": 6.746194981489099,
"grad_norm": 1.7863503694534302,
"learning_rate": 1e-05,
"loss": 0.1672,
"step": 4100
},
{
"epoch": 6.9107363225010285,
"grad_norm": 2.310948610305786,
"learning_rate": 8.88888888888889e-06,
"loss": 0.1704,
"step": 4200
},
{
"epoch": 7.075277663512957,
"grad_norm": 2.464998722076416,
"learning_rate": 7.777777777777777e-06,
"loss": 0.1625,
"step": 4300
},
{
"epoch": 7.239819004524887,
"grad_norm": 2.8195559978485107,
"learning_rate": 6.666666666666667e-06,
"loss": 0.1569,
"step": 4400
},
{
"epoch": 7.404360345536816,
"grad_norm": 1.7469302415847778,
"learning_rate": 5.555555555555556e-06,
"loss": 0.1568,
"step": 4500
},
{
"epoch": 7.404360345536816,
"eval_loss": 0.18890294432640076,
"eval_runtime": 2.1046,
"eval_samples_per_second": 4.752,
"eval_steps_per_second": 2.376,
"step": 4500
},
{
"epoch": 7.568901686548745,
"grad_norm": 1.7718433141708374,
"learning_rate": 4.444444444444445e-06,
"loss": 0.1559,
"step": 4600
},
{
"epoch": 7.733443027560675,
"grad_norm": 0.978532075881958,
"learning_rate": 3.3333333333333333e-06,
"loss": 0.1604,
"step": 4700
},
{
"epoch": 7.897984368572604,
"grad_norm": 1.3090927600860596,
"learning_rate": 2.2222222222222225e-06,
"loss": 0.1649,
"step": 4800
},
{
"epoch": 8.062525709584532,
"grad_norm": 1.2589505910873413,
"learning_rate": 1.1111111111111112e-06,
"loss": 0.1581,
"step": 4900
},
{
"epoch": 8.227067050596462,
"grad_norm": 1.417422890663147,
"learning_rate": 0.0,
"loss": 0.159,
"step": 5000
},
{
"epoch": 8.227067050596462,
"eval_loss": 0.19616171717643738,
"eval_runtime": 2.1191,
"eval_samples_per_second": 4.719,
"eval_steps_per_second": 2.359,
"step": 5000
}
],
"logging_steps": 100,
"max_steps": 5000,
"num_input_tokens_seen": 0,
"num_train_epochs": 9,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 8.653061211756956e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
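
A minimal sketch (not part of the original file) of how the log_history above could be summarized: it assumes the JSON is saved locally as "trainer_state.json" and relies only on the standard Hugging Face Trainer fields visible in the file ("log_history", "loss", "eval_loss", "step"); the file name and variable names are illustrative.

import json

# Load the trainer state written by the Hugging Face Trainer.
with open("trainer_state.json") as f:
    state = json.load(f)

# Training entries carry "loss" (every 100 steps here);
# evaluation entries carry "eval_loss" (every 500 steps here).
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

# Report the final training loss and the best (lowest) eval loss.
best = min(eval_logs, key=lambda e: e["eval_loss"])
print(f"final train loss: {train_logs[-1]['loss']} at step {train_logs[-1]['step']}")
print(f"best eval loss:   {best['eval_loss']} at step {best['step']}")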