{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3000.0,
  "eval_steps": 500,
  "global_step": 3000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 100.0,
      "grad_norm": 0.06228038668632507,
      "learning_rate": 0.00019946308119914323,
      "loss": 0.5217,
      "step": 100
    },
    {
      "epoch": 200.0,
      "grad_norm": 0.02468216046690941,
      "learning_rate": 0.00019783647889781136,
      "loss": 0.0023,
      "step": 200
    },
    {
      "epoch": 300.0,
      "grad_norm": 0.0070268092676997185,
      "learning_rate": 0.00019513795966007562,
      "loss": 0.0006,
      "step": 300
    },
    {
      "epoch": 400.0,
      "grad_norm": 0.00667573232203722,
      "learning_rate": 0.00019139708902740613,
      "loss": 0.0004,
      "step": 400
    },
    {
      "epoch": 500.0,
      "grad_norm": 0.005153825972229242,
      "learning_rate": 0.00018665485276128188,
      "loss": 0.0002,
      "step": 500
    },
    {
      "epoch": 600.0,
      "grad_norm": 0.004352587275207043,
      "learning_rate": 0.00018096320779461132,
      "loss": 0.0002,
      "step": 600
    },
    {
      "epoch": 700.0,
      "grad_norm": 0.0017930130707100034,
      "learning_rate": 0.00017438451298070252,
      "loss": 0.0001,
      "step": 700
    },
    {
      "epoch": 800.0,
      "grad_norm": 0.0013685841113328934,
      "learning_rate": 0.0001669908458766171,
      "loss": 0.0001,
      "step": 800
    },
    {
      "epoch": 900.0,
      "grad_norm": 0.003938326612114906,
      "learning_rate": 0.0001588632130463717,
      "loss": 0.0002,
      "step": 900
    },
    {
      "epoch": 1000.0,
      "grad_norm": 0.008353661745786667,
      "learning_rate": 0.000150090662536071,
      "loss": 0.0083,
      "step": 1000
    },
    {
      "epoch": 1100.0,
      "grad_norm": 0.0031552647706121206,
      "learning_rate": 0.00014086490747363493,
      "loss": 0.0008,
      "step": 1100
    },
    {
      "epoch": 1200.0,
      "grad_norm": 0.0014439212391152978,
      "learning_rate": 0.0001312003296688415,
      "loss": 0.0032,
      "step": 1200
    },
    {
      "epoch": 1300.0,
      "grad_norm": 0.0022927618119865656,
      "learning_rate": 0.00012109836010773424,
      "loss": 0.0073,
      "step": 1300
    },
    {
      "epoch": 1400.0,
      "grad_norm": 0.0016441670013591647,
      "learning_rate": 0.00011076523249817094,
      "loss": 0.0001,
      "step": 1400
    },
    {
      "epoch": 1500.0,
      "grad_norm": 0.0006704287370666862,
      "learning_rate": 0.00010031415874858797,
      "loss": 0.0001,
      "step": 1500
    },
    {
      "epoch": 1600.0,
      "grad_norm": 0.0009293225011788309,
      "learning_rate": 8.985964301001353e-05,
      "loss": 0.0001,
      "step": 1600
    },
    {
      "epoch": 1700.0,
      "grad_norm": 0.0012109966482967138,
      "learning_rate": 7.951622714462746e-05,
      "loss": 0.0,
      "step": 1700
    },
    {
      "epoch": 1800.0,
      "grad_norm": 0.0008299258770421147,
      "learning_rate": 6.939723578114993e-05,
      "loss": 0.0,
      "step": 1800
    },
    {
      "epoch": 1900.0,
      "grad_norm": 0.0008420582162216306,
      "learning_rate": 5.961353470649426e-05,
      "loss": 0.0,
      "step": 1900
    },
    {
      "epoch": 2000.0,
      "grad_norm": 0.0005832781316712499,
      "learning_rate": 5.0272316197005396e-05,
      "loss": 0.0,
      "step": 2000
    },
    {
      "epoch": 2100.0,
      "grad_norm": 0.00045646895887330174,
      "learning_rate": 4.1475924597449024e-05,
      "loss": 0.0,
      "step": 2100
    },
    {
      "epoch": 2200.0,
      "grad_norm": 0.0006278394139371812,
      "learning_rate": 3.3320735014953076e-05,
      "loss": 0.0,
      "step": 2200
    },
    {
      "epoch": 2300.0,
      "grad_norm": 0.0005234285490587354,
      "learning_rate": 2.5896097413166564e-05,
      "loss": 0.0,
      "step": 2300
    },
    {
      "epoch": 2400.0,
      "grad_norm": 0.0005604827310889959,
      "learning_rate": 1.928335767535997e-05,
      "loss": 0.0,
      "step": 2400
    },
    {
      "epoch": 2500.0,
      "grad_norm": 0.0004753954126499593,
      "learning_rate": 1.3554966361905464e-05,
      "loss": 0.0,
      "step": 2500
    },
    {
      "epoch": 2600.0,
      "grad_norm": 0.00042428780579939485,
      "learning_rate": 8.77368492677616e-06,
      "loss": 0.0,
      "step": 2600
    },
    {
      "epoch": 2700.0,
      "grad_norm": 0.00040628627175465226,
      "learning_rate": 4.991898089922819e-06,
      "loss": 0.0,
      "step": 2700
    },
    {
      "epoch": 2800.0,
      "grad_norm": 0.0004407366504892707,
      "learning_rate": 2.2510398993198067e-06,
      "loss": 0.0,
      "step": 2800
    },
    {
      "epoch": 2900.0,
      "grad_norm": 0.0009461056906729937,
      "learning_rate": 5.811397708647803e-07,
      "loss": 0.0,
      "step": 2900
    },
    {
      "epoch": 3000.0,
      "grad_norm": 0.0005067897727712989,
      "learning_rate": 4.934798141786879e-10,
      "loss": 0.0,
      "step": 3000
    }
  ],
  "logging_steps": 100,
  "max_steps": 3000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3000,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 589966765056000.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}