|
{
  "best_metric": 0.3867419362068176,
  "best_model_checkpoint": "finetuned-bangladeshi-traditional-food/checkpoint-200",
  "epoch": 4.0,
  "eval_steps": 100,
  "global_step": 256,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.15625,
      "grad_norm": 104808.0078125,
      "learning_rate": 0.0001921875,
      "loss": 2.5683,
      "step": 10
    },
    {
      "epoch": 0.3125,
      "grad_norm": 121324.2109375,
      "learning_rate": 0.000184375,
      "loss": 2.0721,
      "step": 20
    },
    {
      "epoch": 0.46875,
      "grad_norm": 112550.9140625,
      "learning_rate": 0.00017656250000000002,
      "loss": 1.5956,
      "step": 30
    },
    {
      "epoch": 0.625,
      "grad_norm": 90598.2265625,
      "learning_rate": 0.00016875,
      "loss": 1.3772,
      "step": 40
    },
    {
      "epoch": 0.78125,
      "grad_norm": 100583.53125,
      "learning_rate": 0.0001609375,
      "loss": 1.1265,
      "step": 50
    },
    {
      "epoch": 0.9375,
      "grad_norm": 92339.28125,
      "learning_rate": 0.000153125,
      "loss": 1.0868,
      "step": 60
    },
    {
      "epoch": 1.09375,
      "grad_norm": 94877.703125,
      "learning_rate": 0.00014531250000000002,
      "loss": 0.8778,
      "step": 70
    },
    {
      "epoch": 1.25,
      "grad_norm": 129495.6640625,
      "learning_rate": 0.0001375,
      "loss": 0.6839,
      "step": 80
    },
    {
      "epoch": 1.40625,
      "grad_norm": 150202.59375,
      "learning_rate": 0.0001296875,
      "loss": 0.6589,
      "step": 90
    },
    {
      "epoch": 1.5625,
      "grad_norm": 128498.6953125,
      "learning_rate": 0.00012187500000000001,
      "loss": 0.703,
      "step": 100
    },
    {
      "epoch": 1.5625,
      "eval_accuracy": 0.9022346368715084,
      "eval_loss": 0.612750232219696,
      "eval_runtime": 6.1858,
      "eval_samples_per_second": 57.874,
      "eval_steps_per_second": 3.718,
      "step": 100
    },
    {
      "epoch": 1.71875,
      "grad_norm": 172968.234375,
      "learning_rate": 0.0001140625,
      "loss": 0.6179,
      "step": 110
    },
    {
      "epoch": 1.875,
      "grad_norm": 193931.03125,
      "learning_rate": 0.00010625000000000001,
      "loss": 0.5416,
      "step": 120
    },
    {
      "epoch": 2.03125,
      "grad_norm": 100722.890625,
      "learning_rate": 9.84375e-05,
      "loss": 0.4705,
      "step": 130
    },
    {
      "epoch": 2.1875,
      "grad_norm": 111848.0234375,
      "learning_rate": 9.062500000000001e-05,
      "loss": 0.4895,
      "step": 140
    },
    {
      "epoch": 2.34375,
      "grad_norm": 102607.46875,
      "learning_rate": 8.28125e-05,
      "loss": 0.4002,
      "step": 150
    },
    {
      "epoch": 2.5,
      "grad_norm": 228628.328125,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.3947,
      "step": 160
    },
    {
      "epoch": 2.65625,
      "grad_norm": 144436.234375,
      "learning_rate": 6.71875e-05,
      "loss": 0.3387,
      "step": 170
    },
    {
      "epoch": 2.8125,
      "grad_norm": 218134.15625,
      "learning_rate": 5.9375e-05,
      "loss": 0.3805,
      "step": 180
    },
    {
      "epoch": 2.96875,
      "grad_norm": 132064.421875,
      "learning_rate": 5.15625e-05,
      "loss": 0.3356,
      "step": 190
    },
    {
      "epoch": 3.125,
      "grad_norm": 24524.32421875,
      "learning_rate": 4.375e-05,
      "loss": 0.252,
      "step": 200
    },
    {
      "epoch": 3.125,
      "eval_accuracy": 0.9189944134078212,
      "eval_loss": 0.3867419362068176,
      "eval_runtime": 6.1019,
      "eval_samples_per_second": 58.67,
      "eval_steps_per_second": 3.769,
      "step": 200
    },
    {
      "epoch": 3.28125,
      "grad_norm": 37676.6171875,
      "learning_rate": 3.59375e-05,
      "loss": 0.2721,
      "step": 210
    },
    {
      "epoch": 3.4375,
      "grad_norm": 105781.78125,
      "learning_rate": 2.8125000000000003e-05,
      "loss": 0.2583,
      "step": 220
    },
    {
      "epoch": 3.59375,
      "grad_norm": 42383.23828125,
      "learning_rate": 2.0312500000000002e-05,
      "loss": 0.2163,
      "step": 230
    },
    {
      "epoch": 3.75,
      "grad_norm": 24589.4375,
      "learning_rate": 1.25e-05,
      "loss": 0.2903,
      "step": 240
    },
    {
      "epoch": 3.90625,
      "grad_norm": 235633.53125,
      "learning_rate": 4.6875000000000004e-06,
      "loss": 0.2813,
      "step": 250
    },
    {
      "epoch": 4.0,
      "step": 256,
      "total_flos": 6.287051649950024e+17,
      "train_loss": 0.7197832907550037,
      "train_runtime": 268.6443,
      "train_samples_per_second": 30.196,
      "train_steps_per_second": 0.953
    }
  ],
  "logging_steps": 10,
  "max_steps": 256,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 6.287051649950024e+17,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}