{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.9748953974895398,
  "eval_steps": 500,
  "global_step": 118,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.016736401673640166,
      "grad_norm": 7.772315443006173,
      "learning_rate": 4.166666666666666e-08,
      "logits/chosen": -2.807276487350464,
      "logits/rejected": -2.7759768962860107,
      "logps/chosen": -315.42626953125,
      "logps/rejected": -227.5915985107422,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.16736401673640167,
      "grad_norm": 7.471216567487246,
      "learning_rate": 4.1666666666666667e-07,
      "logits/chosen": -2.757866382598877,
      "logits/rejected": -2.7485244274139404,
      "logps/chosen": -272.32708740234375,
      "logps/rejected": -260.8048095703125,
      "loss": 0.6928,
      "rewards/accuracies": 0.4895833432674408,
      "rewards/chosen": 0.0014287387020885944,
      "rewards/margins": 0.000867457827553153,
      "rewards/rejected": 0.0005612808163277805,
      "step": 10
    },
    {
      "epoch": 0.33472803347280333,
      "grad_norm": 6.635008900115668,
      "learning_rate": 4.930057285201027e-07,
      "logits/chosen": -2.796506404876709,
      "logits/rejected": -2.7773609161376953,
      "logps/chosen": -261.6402893066406,
      "logps/rejected": -249.42807006835938,
      "loss": 0.6816,
      "rewards/accuracies": 0.690625011920929,
      "rewards/chosen": 0.03041391633450985,
      "rewards/margins": 0.024269890040159225,
      "rewards/rejected": 0.006144027225673199,
      "step": 20
    },
    {
      "epoch": 0.502092050209205,
      "grad_norm": 7.298705785979101,
      "learning_rate": 4.652609029418388e-07,
      "logits/chosen": -2.790365219116211,
      "logits/rejected": -2.771557331085205,
      "logps/chosen": -291.3450012207031,
      "logps/rejected": -249.59716796875,
      "loss": 0.6546,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": 0.0011108840117231011,
      "rewards/margins": 0.10550198704004288,
      "rewards/rejected": -0.10439109802246094,
      "step": 30
    },
    {
      "epoch": 0.6694560669456067,
      "grad_norm": 8.142120480110528,
      "learning_rate": 4.187457503795526e-07,
      "logits/chosen": -2.7561163902282715,
      "logits/rejected": -2.7333474159240723,
      "logps/chosen": -268.2800598144531,
      "logps/rejected": -261.1468200683594,
      "loss": 0.6342,
      "rewards/accuracies": 0.6968749761581421,
      "rewards/chosen": -0.1536058485507965,
      "rewards/margins": 0.17475590109825134,
      "rewards/rejected": -0.32836171984672546,
      "step": 40
    },
    {
      "epoch": 0.8368200836820083,
      "grad_norm": 10.537722600272415,
      "learning_rate": 3.5751630056913013e-07,
      "logits/chosen": -2.7716526985168457,
      "logits/rejected": -2.751047134399414,
      "logps/chosen": -292.7337646484375,
      "logps/rejected": -306.22894287109375,
      "loss": 0.6074,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.3572509288787842,
      "rewards/margins": 0.1899140328168869,
      "rewards/rejected": -0.5471649765968323,
      "step": 50
    },
    {
      "epoch": 1.00418410041841,
      "grad_norm": 10.000348369076317,
      "learning_rate": 2.8691164100062034e-07,
      "logits/chosen": -2.7915871143341064,
      "logits/rejected": -2.757317304611206,
      "logps/chosen": -319.2095947265625,
      "logps/rejected": -317.7526550292969,
      "loss": 0.5933,
      "rewards/accuracies": 0.734375,
      "rewards/chosen": -0.38744381070137024,
      "rewards/margins": 0.33469024300575256,
      "rewards/rejected": -0.722133994102478,
      "step": 60
    },
    {
      "epoch": 1.1715481171548117,
      "grad_norm": 12.875178033360463,
      "learning_rate": 2.1308835899937972e-07,
      "logits/chosen": -2.8087921142578125,
      "logits/rejected": -2.763150691986084,
      "logps/chosen": -327.20989990234375,
      "logps/rejected": -348.8834228515625,
      "loss": 0.4977,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -0.3436049818992615,
      "rewards/margins": 0.6280093789100647,
      "rewards/rejected": -0.9716142416000366,
      "step": 70
    },
    {
      "epoch": 1.3389121338912133,
      "grad_norm": 14.484605319084237,
      "learning_rate": 1.4248369943086995e-07,
      "logits/chosen": -2.686325788497925,
      "logits/rejected": -2.6680502891540527,
      "logps/chosen": -312.7579650878906,
      "logps/rejected": -380.6637268066406,
      "loss": 0.4745,
      "rewards/accuracies": 0.840624988079071,
      "rewards/chosen": -0.3891797661781311,
      "rewards/margins": 0.8134768605232239,
      "rewards/rejected": -1.2026567459106445,
      "step": 80
    },
    {
      "epoch": 1.506276150627615,
      "grad_norm": 13.391182563638015,
      "learning_rate": 8.125424962044741e-08,
      "logits/chosen": -2.7235772609710693,
      "logits/rejected": -2.7114028930664062,
      "logps/chosen": -313.1542053222656,
      "logps/rejected": -370.19891357421875,
      "loss": 0.4657,
      "rewards/accuracies": 0.8062499761581421,
      "rewards/chosen": -0.5553286671638489,
      "rewards/margins": 0.7646803259849548,
      "rewards/rejected": -1.3200088739395142,
      "step": 90
    },
    {
      "epoch": 1.6736401673640167,
      "grad_norm": 16.395163898429864,
      "learning_rate": 3.473909705816111e-08,
      "logits/chosen": -2.637983798980713,
      "logits/rejected": -2.605607748031616,
      "logps/chosen": -308.8186340332031,
      "logps/rejected": -373.8332824707031,
      "loss": 0.4672,
      "rewards/accuracies": 0.8343750238418579,
      "rewards/chosen": -0.5417979955673218,
      "rewards/margins": 0.7996915578842163,
      "rewards/rejected": -1.341489553451538,
      "step": 100
    },
    {
      "epoch": 1.8410041841004183,
      "grad_norm": 15.975673050956397,
      "learning_rate": 6.994271479897313e-09,
      "logits/chosen": -2.689378261566162,
      "logits/rejected": -2.638415813446045,
      "logps/chosen": -333.02862548828125,
      "logps/rejected": -368.8213195800781,
      "loss": 0.4608,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -0.6281369924545288,
      "rewards/margins": 0.7409355044364929,
      "rewards/rejected": -1.3690725564956665,
      "step": 110
    },
    {
      "epoch": 1.9748953974895398,
      "step": 118,
      "total_flos": 0.0,
      "train_loss": 0.5588717278787645,
      "train_runtime": 3312.4835,
      "train_samples_per_second": 9.228,
      "train_steps_per_second": 0.036
    }
  ],
  "logging_steps": 10,
  "max_steps": 118,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}