{
"best_metric": 1.2936323881149292,
"best_model_checkpoint": "vicuna-sentiment-fintuned/checkpoint-200",
"epoch": 1.032258064516129,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05,
"learning_rate": 2.9999999999999997e-05,
"loss": 2.9781,
"step": 10
},
{
"epoch": 0.1,
"learning_rate": 5.9999999999999995e-05,
"loss": 2.8488,
"step": 20
},
{
"epoch": 0.15,
"learning_rate": 8.999999999999999e-05,
"loss": 2.5361,
"step": 30
},
{
"epoch": 0.21,
"learning_rate": 0.00011999999999999999,
"loss": 2.0695,
"step": 40
},
{
"epoch": 0.26,
"learning_rate": 0.00015,
"loss": 1.6545,
"step": 50
},
{
"epoch": 0.26,
"eval_loss": 1.5430564880371094,
"eval_runtime": 40.717,
"eval_samples_per_second": 4.912,
"eval_steps_per_second": 0.614,
"step": 50
},
{
"epoch": 0.31,
"learning_rate": 0.00017999999999999998,
"loss": 1.4716,
"step": 60
},
{
"epoch": 0.36,
"learning_rate": 0.00020999999999999998,
"loss": 1.4078,
"step": 70
},
{
"epoch": 0.41,
"learning_rate": 0.00023999999999999998,
"loss": 1.3884,
"step": 80
},
{
"epoch": 0.46,
"learning_rate": 0.00027,
"loss": 1.4108,
"step": 90
},
{
"epoch": 0.52,
"learning_rate": 0.0003,
"loss": 1.3022,
"step": 100
},
{
"epoch": 0.52,
"eval_loss": 1.3164671659469604,
"eval_runtime": 40.8127,
"eval_samples_per_second": 4.9,
"eval_steps_per_second": 0.613,
"step": 100
},
{
"epoch": 0.57,
"learning_rate": 0.000285,
"loss": 1.2699,
"step": 110
},
{
"epoch": 0.62,
"learning_rate": 0.00027,
"loss": 1.2852,
"step": 120
},
{
"epoch": 0.67,
"learning_rate": 0.00025499999999999996,
"loss": 1.278,
"step": 130
},
{
"epoch": 0.72,
"learning_rate": 0.00023999999999999998,
"loss": 1.2947,
"step": 140
},
{
"epoch": 0.77,
"learning_rate": 0.000225,
"loss": 1.2836,
"step": 150
},
{
"epoch": 0.77,
"eval_loss": 1.2997195720672607,
"eval_runtime": 40.7239,
"eval_samples_per_second": 4.911,
"eval_steps_per_second": 0.614,
"step": 150
},
{
"epoch": 0.83,
"learning_rate": 0.00020999999999999998,
"loss": 1.2874,
"step": 160
},
{
"epoch": 0.88,
"learning_rate": 0.000195,
"loss": 1.3118,
"step": 170
},
{
"epoch": 0.93,
"learning_rate": 0.00017999999999999998,
"loss": 1.2895,
"step": 180
},
{
"epoch": 0.98,
"learning_rate": 0.000165,
"loss": 1.2736,
"step": 190
},
{
"epoch": 1.03,
"learning_rate": 0.00015,
"loss": 1.2634,
"step": 200
},
{
"epoch": 1.03,
"eval_loss": 1.2936323881149292,
"eval_runtime": 40.7736,
"eval_samples_per_second": 4.905,
"eval_steps_per_second": 0.613,
"step": 200
}
],
"logging_steps": 10,
"max_steps": 300,
"num_train_epochs": 2,
"save_steps": 50,
"total_flos": 2.1403198486904832e+17,
"trial_name": null,
"trial_params": null
}