fpadovani committed (verified)
Commit 660f605 · 1 Parent(s): 6f2d0c8

Training in progress, step 16000, checkpoint

checkpoint-16000/model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cfb4d92aae67095142cd993f370d16e9b74ba7082cea161e377a087970f378b1
+ oid sha256:7e4542eb6edf5803c2117f413af486ccb5c61be4dc86da6e830e8cf6956c6df2
  size 51007160
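
The `.safetensors`, `.pt`, and `.bin` entries in this commit are Git LFS pointer files, so only the `oid sha256:` and `size` lines change when a checkpoint file is rewritten. As a side note (not part of the commit), a downloaded blob can be checked against its pointer roughly like this, assuming the actual LFS file, not the pointer, has been pulled locally:

import hashlib
from pathlib import Path

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    # Stream the file so large checkpoints never have to fit in memory.
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Values copied from the new pointer for model.safetensors above.
expected_oid = "7e4542eb6edf5803c2117f413af486ccb5c61be4dc86da6e830e8cf6956c6df2"
expected_size = 51007160

blob = Path("checkpoint-16000/model.safetensors")  # assumes the resolved LFS blob is on disk
assert blob.stat().st_size == expected_size, "size mismatch"
assert sha256_of(blob) == expected_oid, "sha256 mismatch"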
checkpoint-16000/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4d15125b6705cbe245b837df36312d84397503384b1222e8d43f814b0f1f9ae6
+ oid sha256:fc119b503ce9dccc3dc0f5eb8ae2fb35348880e651655e58461a89577dab0722
  size 102078202
checkpoint-16000/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:561c3ffa6ceafece634ed38c4325e9601c1ff76720618438ee77ccb3af7b0238
+ oid sha256:4f0332c6e843b2ffc80fd1fc9bbd2207db391e189037ae29bfac425f4c46a2b4
  size 14244
checkpoint-16000/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f1d534a2bb9aa7e14d4f8fd025f70bb885236f8e21c6db210bf254630b820270
+ oid sha256:6ed6a51558dc3bf5f251384c39427d69588a614dfb8e848e6cf8740a289294e3
  size 1000
checkpoint-16000/tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
checkpoint-16000/trainer_state.json CHANGED
@@ -1,7 +1,7 @@
  {
- "best_metric": 4.033241271972656,
- "best_model_checkpoint": "/home/p318482/babyLM_controlled/models_trained/fr_clm/childes_42/checkpoint-16000",
- "epoch": 25.157232704402517,
+ "best_metric": 4.8005266189575195,
+ "best_model_checkpoint": "/home/p318482/babyLM_controlled/models_trained/de_clm/childes_42/checkpoint-16000",
+ "epoch": 16.771488469601678,
  "eval_steps": 2000,
  "global_step": 16000,
  "is_hyper_param_search": false,
@@ -9,102 +9,102 @@
  "is_world_process_zero": true,
  "log_history": [
  {
- "epoch": 3.1446540880503147,
- "eval_loss": 6.666613578796387,
- "eval_runtime": 0.6599,
- "eval_samples_per_second": 1380.413,
- "eval_steps_per_second": 86.37,
+ "epoch": 2.0964360587002098,
+ "eval_loss": 7.0908098220825195,
+ "eval_runtime": 0.9834,
+ "eval_samples_per_second": 1398.23,
+ "eval_steps_per_second": 87.453,
  "step": 2000
  },
  {
- "epoch": 6.289308176100629,
- "grad_norm": 1.431768536567688,
+ "epoch": 4.1928721174004195,
+ "grad_norm": 1.4699604511260986,
  "learning_rate": 1e-05,
- "loss": 6.5315,
+ "loss": 6.9765,
  "step": 4000
  },
  {
- "epoch": 6.289308176100629,
- "eval_loss": 5.101934432983398,
- "eval_runtime": 0.6406,
- "eval_samples_per_second": 1422.145,
- "eval_steps_per_second": 88.982,
+ "epoch": 4.1928721174004195,
+ "eval_loss": 5.874638557434082,
+ "eval_runtime": 0.973,
+ "eval_samples_per_second": 1413.214,
+ "eval_steps_per_second": 88.39,
  "step": 4000
  },
  {
- "epoch": 9.433962264150944,
- "eval_loss": 4.677049160003662,
- "eval_runtime": 0.6439,
- "eval_samples_per_second": 1414.724,
- "eval_steps_per_second": 88.517,
+ "epoch": 6.289308176100629,
+ "eval_loss": 5.544075965881348,
+ "eval_runtime": 0.9898,
+ "eval_samples_per_second": 1389.228,
+ "eval_steps_per_second": 86.89,
  "step": 6000
  },
  {
- "epoch": 12.578616352201259,
- "grad_norm": 2.6387760639190674,
- "learning_rate": 2e-05,
- "loss": 4.3407,
+ "epoch": 8.385744234800839,
+ "grad_norm": 2.451873779296875,
+ "learning_rate": 1.9997500000000003e-05,
+ "loss": 5.2182,
  "step": 8000
  },
  {
- "epoch": 12.578616352201259,
- "eval_loss": 4.4404096603393555,
- "eval_runtime": 0.6445,
- "eval_samples_per_second": 1413.55,
- "eval_steps_per_second": 88.444,
+ "epoch": 8.385744234800839,
+ "eval_loss": 5.278811454772949,
+ "eval_runtime": 0.9809,
+ "eval_samples_per_second": 1401.823,
+ "eval_steps_per_second": 87.678,
  "step": 8000
  },
  {
- "epoch": 15.723270440251572,
- "eval_loss": 4.286661624908447,
- "eval_runtime": 0.6446,
- "eval_samples_per_second": 1413.35,
- "eval_steps_per_second": 88.431,
+ "epoch": 10.482180293501049,
+ "eval_loss": 5.099233150482178,
+ "eval_runtime": 0.9694,
+ "eval_samples_per_second": 1418.396,
+ "eval_steps_per_second": 88.714,
  "step": 10000
  },
  {
- "epoch": 18.867924528301888,
- "grad_norm": 2.381523609161377,
+ "epoch": 12.578616352201259,
+ "grad_norm": 2.490121841430664,
  "learning_rate": 2.9995e-05,
- "loss": 3.8725,
+ "loss": 4.7379,
  "step": 12000
  },
  {
- "epoch": 18.867924528301888,
- "eval_loss": 4.171908855438232,
- "eval_runtime": 0.6441,
- "eval_samples_per_second": 1414.321,
- "eval_steps_per_second": 88.492,
+ "epoch": 12.578616352201259,
+ "eval_loss": 4.970981597900391,
+ "eval_runtime": 0.9729,
+ "eval_samples_per_second": 1413.251,
+ "eval_steps_per_second": 88.392,
  "step": 12000
  },
  {
- "epoch": 22.0125786163522,
- "eval_loss": 4.096552848815918,
- "eval_runtime": 0.6506,
- "eval_samples_per_second": 1400.196,
- "eval_steps_per_second": 87.608,
+ "epoch": 14.675052410901468,
+ "eval_loss": 4.875853061676025,
+ "eval_runtime": 0.9902,
+ "eval_samples_per_second": 1388.55,
+ "eval_steps_per_second": 86.848,
  "step": 14000
  },
  {
- "epoch": 25.157232704402517,
- "grad_norm": 2.4479799270629883,
- "learning_rate": 3.999e-05,
- "loss": 3.5953,
+ "epoch": 16.771488469601678,
+ "grad_norm": 2.485079765319824,
+ "learning_rate": 3.99925e-05,
+ "loss": 4.4249,
  "step": 16000
  },
  {
- "epoch": 25.157232704402517,
- "eval_loss": 4.033241271972656,
- "eval_runtime": 0.6465,
- "eval_samples_per_second": 1409.06,
- "eval_steps_per_second": 88.163,
+ "epoch": 16.771488469601678,
+ "eval_loss": 4.8005266189575195,
+ "eval_runtime": 0.9709,
+ "eval_samples_per_second": 1416.144,
+ "eval_steps_per_second": 88.573,
  "step": 16000
  }
  ],
  "logging_steps": 4000,
  "max_steps": 100000,
  "num_input_tokens_seen": 0,
- "num_train_epochs": 158,
+ "num_train_epochs": 105,
  "save_steps": 4000,
  "stateful_callbacks": {
  "TrainerControl": {
@@ -118,7 +118,7 @@
  "attributes": {}
  }
  },
- "total_flos": 4136656129228800.0,
+ "total_flos": 4137059128983552.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
checkpoint-16000/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e6d9a18dff8e8501ed8e1286b71e34a5ee0478c42b015fd65b8981e86e58a8c2
+ oid sha256:7e7557b0dc8ef07c9db255e1167d98819525b043deb4934f580e21d716914111
  size 5368