{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.9759036144578315,
  "eval_steps": 150,
  "global_step": 41,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05,
      "learning_rate": 2.9999999999999997e-05,
      "loss": 1.2314,
      "step": 1
    },
    {
      "epoch": 0.1,
      "learning_rate": 5.9999999999999995e-05,
      "loss": 1.4026,
      "step": 2
    },
    {
      "epoch": 0.14,
      "learning_rate": 8.999999999999999e-05,
      "loss": 1.8531,
      "step": 3
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00011999999999999999,
      "loss": 1.3763,
      "step": 4
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00015,
      "loss": 0.961,
      "step": 5
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00017999999999999998,
      "loss": 1.2005,
      "step": 6
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00020999999999999998,
      "loss": 1.1054,
      "step": 7
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.00023999999999999998,
      "loss": 1.0114,
      "step": 8
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.00027,
      "loss": 1.1386,
      "step": 9
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.0003,
      "loss": 0.9856,
      "step": 10
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.00029990862405286433,
      "loss": 0.7562,
      "step": 11
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.0002996346075389736,
      "loss": 1.0498,
      "step": 12
    },
    {
      "epoch": 0.63,
      "learning_rate": 0.00029917828430524096,
      "loss": 1.2559,
      "step": 13
    },
    {
      "epoch": 0.67,
      "learning_rate": 0.0002985402103112355,
      "loss": 1.0074,
      "step": 14
    },
    {
      "epoch": 0.72,
      "learning_rate": 0.0002977211629518312,
      "loss": 1.0239,
      "step": 15
    },
    {
      "epoch": 0.77,
      "learning_rate": 0.0002967221401100708,
      "loss": 1.3441,
      "step": 16
    },
    {
      "epoch": 0.82,
      "learning_rate": 0.0002955443589413994,
      "loss": 1.1692,
      "step": 17
    },
    {
      "epoch": 0.87,
      "learning_rate": 0.0002941892543907478,
      "loss": 1.3764,
      "step": 18
    },
    {
      "epoch": 0.92,
      "learning_rate": 0.00029265847744427303,
      "loss": 0.9064,
      "step": 19
    },
    {
      "epoch": 0.96,
      "learning_rate": 0.0002909538931178862,
      "loss": 1.4608,
      "step": 20
    },
    {
      "epoch": 1.01,
      "learning_rate": 0.0002890775781850181,
      "loss": 1.0947,
      "step": 21
    },
    {
      "epoch": 1.06,
      "learning_rate": 0.0002870318186463901,
      "loss": 1.113,
      "step": 22
    },
    {
      "epoch": 1.11,
      "learning_rate": 0.000284819106944875,
      "loss": 0.9546,
      "step": 23
    },
    {
      "epoch": 1.16,
      "learning_rate": 0.000282442138928839,
      "loss": 1.1469,
      "step": 24
    },
    {
      "epoch": 1.2,
      "learning_rate": 0.0002799038105676658,
      "loss": 1.3102,
      "step": 25
    },
    {
      "epoch": 1.25,
      "learning_rate": 0.00027720721442346387,
      "loss": 1.4763,
      "step": 26
    },
    {
      "epoch": 1.3,
      "learning_rate": 0.0002743556358832562,
      "loss": 1.0281,
      "step": 27
    },
    {
      "epoch": 1.35,
      "learning_rate": 0.0002713525491562421,
      "loss": 1.0988,
      "step": 28
    },
    {
      "epoch": 1.4,
      "learning_rate": 0.00026820161304100823,
      "loss": 1.1848,
      "step": 29
    },
    {
      "epoch": 1.45,
      "learning_rate": 0.00026490666646784665,
      "loss": 0.9901,
      "step": 30
    },
    {
      "epoch": 1.49,
      "learning_rate": 0.00026147172382160914,
      "loss": 0.8676,
      "step": 31
    },
    {
      "epoch": 1.54,
      "learning_rate": 0.00025790097005079764,
      "loss": 1.0521,
      "step": 32
    },
    {
      "epoch": 1.59,
      "learning_rate": 0.0002541987555688496,
      "loss": 1.0436,
      "step": 33
    },
    {
      "epoch": 1.64,
      "learning_rate": 0.0002503695909538287,
      "loss": 0.9511,
      "step": 34
    },
    {
      "epoch": 1.69,
      "learning_rate": 0.0002464181414529809,
      "loss": 1.2208,
      "step": 35
    },
    {
      "epoch": 1.73,
      "learning_rate": 0.0002423492212988487,
      "loss": 1.1128,
      "step": 36
    },
    {
      "epoch": 1.78,
      "learning_rate": 0.00023816778784387094,
      "loss": 1.114,
      "step": 37
    },
    {
      "epoch": 1.83,
      "learning_rate": 0.00023387893552061199,
      "loss": 0.9318,
      "step": 38
    },
    {
      "epoch": 1.88,
      "learning_rate": 0.0002294878896349807,
      "loss": 1.6424,
      "step": 39
    },
    {
      "epoch": 1.93,
      "learning_rate": 0.000225,
      "loss": 0.7759,
      "step": 40
    },
    {
      "epoch": 1.98,
      "learning_rate": 0.00022042073441788358,
      "loss": 0.8061,
      "step": 41
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_train_epochs": 5,
  "save_steps": 500,
  "total_flos": 1.0588396984467456e+17,
  "trial_name": null,
  "trial_params": null
}