xlmr-large-toxicity-classifier-v2 / trainer_state.json
{
"best_metric": 0.2617274224758148,
"best_model_checkpoint": "xlm-roberta-large-all-full-finetuned-toxicity-classification/checkpoint-4016",
"epoch": 2.9987546699875467,
"eval_steps": 500,
"global_step": 6021,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.24906600249066002,
"grad_norm": 13.982502937316895,
"learning_rate": 1.83391463212091e-05,
"loss": 0.4913,
"step": 500
},
{
"epoch": 0.49813200498132004,
"grad_norm": 9.81984806060791,
"learning_rate": 1.6678292642418204e-05,
"loss": 0.3674,
"step": 1000
},
{
"epoch": 0.7471980074719801,
"grad_norm": 9.316424369812012,
"learning_rate": 1.5017438963627307e-05,
"loss": 0.3317,
"step": 1500
},
{
"epoch": 0.9962640099626401,
"grad_norm": 13.302652359008789,
"learning_rate": 1.3356585284836408e-05,
"loss": 0.3147,
"step": 2000
},
{
"epoch": 1.0,
"eval_loss": 0.2968192994594574,
"eval_runtime": 142.521,
"eval_samples_per_second": 50.091,
"eval_steps_per_second": 3.136,
"step": 2008
},
{
"epoch": 1.2450809464508095,
"grad_norm": 16.878368377685547,
"learning_rate": 1.1695731606045508e-05,
"loss": 0.2468,
"step": 2500
},
{
"epoch": 1.4941469489414696,
"grad_norm": 10.831304550170898,
"learning_rate": 1.0034877927254609e-05,
"loss": 0.237,
"step": 3000
},
{
"epoch": 1.7432129514321295,
"grad_norm": 21.390304565429688,
"learning_rate": 8.374024248463712e-06,
"loss": 0.2271,
"step": 3500
},
{
"epoch": 1.9922789539227894,
"grad_norm": 10.786124229431152,
"learning_rate": 6.7131705696728125e-06,
"loss": 0.2278,
"step": 4000
},
{
"epoch": 2.0,
"eval_loss": 0.2617274224758148,
"eval_runtime": 142.583,
"eval_samples_per_second": 50.069,
"eval_steps_per_second": 3.135,
"step": 4016
},
{
"epoch": 2.241095890410959,
"grad_norm": 10.759682655334473,
"learning_rate": 5.0523168908819146e-06,
"loss": 0.1553,
"step": 4500
},
{
"epoch": 2.490161892901619,
"grad_norm": 7.604818344116211,
"learning_rate": 3.391463212091015e-06,
"loss": 0.1573,
"step": 5000
},
{
"epoch": 2.739227895392279,
"grad_norm": 5.243312358856201,
"learning_rate": 1.7306095333001162e-06,
"loss": 0.1422,
"step": 5500
},
{
"epoch": 2.988293897882939,
"grad_norm": 11.100104331970215,
"learning_rate": 6.975585450921775e-08,
"loss": 0.1439,
"step": 6000
},
{
"epoch": 2.9987546699875467,
"eval_loss": 0.31470784544944763,
"eval_runtime": 142.6105,
"eval_samples_per_second": 50.059,
"eval_steps_per_second": 3.134,
"step": 6021
}
],
"logging_steps": 500,
"max_steps": 6021,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.7951793851787264e+17,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}
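
For convenience, the state above can be summarised programmatically. The following is a minimal sketch, assuming the file has been downloaded locally as trainer_state.json; the path and the initial learning rate of 2e-5 are assumptions, since neither is stored in this file. It splits log_history into training and evaluation entries, prints the per-epoch eval loss next to the best checkpoint, and compares the logged learning rates against a linear decay over max_steps.

import json

STATE_PATH = "trainer_state.json"  # assumed local path to this file

with open(STATE_PATH, encoding="utf-8") as f:
    state = json.load(f)

# Training logs carry "loss"; evaluation logs carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"best eval_loss:  {state['best_metric']:.4f}")
print(f"best checkpoint: {state['best_model_checkpoint']}")
for e in eval_logs:
    print(f"epoch {e['epoch']:.2f}  step {e['step']:>5}  eval_loss {e['eval_loss']:.4f}")

# The logged learning rates are consistent with a linear decay from an assumed
# initial 2e-5 over max_steps, e.g. 2e-5 * (1 - 500/6021) ~= 1.834e-05 at step 500.
lr0 = 2e-5
max_steps = state["max_steps"]
for e in train_logs:
    expected = lr0 * (1 - e["step"] / max_steps)
    print(f"step {e['step']:>5}  loss {e['loss']:.4f}  "
          f"lr {e['learning_rate']:.4e} (linear schedule: {expected:.4e})")

Based on the values above, this prints a best eval_loss of about 0.2617 at checkpoint-4016 (end of epoch 2), while the final evaluation at epoch 3 rises to about 0.3147, consistent with eval_loss being used as the checkpoint-selection metric.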