{ "best_metric": 0.9963201471941122, "best_model_checkpoint": "data/train-test/roberta-base-output//model/checkpoint-1676", "epoch": 2.0, "eval_steps": 500, "global_step": 1676, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.6, "grad_norm": 1.825121283531189, "learning_rate": 1.4033412887828164e-05, "loss": 0.0322, "step": 500 }, { "epoch": 1.0, "eval_accuracy": null, "eval_f1": 0.9926538108356291, "eval_loss": 0.00715277437120676, "eval_precision": 0.9908340971585701, "eval_recall": 0.9944802207911684, "eval_runtime": 1.6458, "eval_samples_per_second": 589.386, "eval_steps_per_second": 18.836, "step": 838 }, { "epoch": 1.19, "grad_norm": 0.7001124620437622, "learning_rate": 8.066825775656326e-06, "loss": 0.0046, "step": 1000 }, { "epoch": 1.79, "grad_norm": 0.1312304437160492, "learning_rate": 2.100238663484487e-06, "loss": 0.0027, "step": 1500 }, { "epoch": 2.0, "eval_accuracy": null, "eval_f1": 0.9963201471941122, "eval_loss": 0.004037069622427225, "eval_precision": 0.9963201471941122, "eval_recall": 0.9963201471941122, "eval_runtime": 1.6277, "eval_samples_per_second": 595.928, "eval_steps_per_second": 19.045, "step": 1676 } ], "logging_steps": 500, "max_steps": 1676, "num_input_tokens_seen": 0, "num_train_epochs": 2, "save_steps": 500, "total_flos": 2013781275950328.0, "train_batch_size": 32, "trial_name": null, "trial_params": null }