{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9984,
  "eval_steps": 500,
  "global_step": 156,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.032,
      "grad_norm": 0.3039781153202057,
      "learning_rate": 2.9924022525939684e-05,
      "loss": 0.678,
      "num_input_tokens_seen": 163840,
      "step": 5
    },
    {
      "epoch": 0.064,
      "grad_norm": 0.2683364450931549,
      "learning_rate": 2.9696859780634016e-05,
      "loss": 0.6551,
      "num_input_tokens_seen": 327680,
      "step": 10
    },
    {
      "epoch": 0.096,
      "grad_norm": 0.246008038520813,
      "learning_rate": 2.9320812997628184e-05,
      "loss": 0.6372,
      "num_input_tokens_seen": 491520,
      "step": 15
    },
    {
      "epoch": 0.128,
      "grad_norm": 0.23320266604423523,
      "learning_rate": 2.8799691654882365e-05,
      "loss": 0.6201,
      "num_input_tokens_seen": 655360,
      "step": 20
    },
    {
      "epoch": 0.16,
      "grad_norm": 0.24441155791282654,
      "learning_rate": 2.8138774883503317e-05,
      "loss": 0.5965,
      "num_input_tokens_seen": 819200,
      "step": 25
    },
    {
      "epoch": 0.192,
      "grad_norm": 0.2341088205575943,
      "learning_rate": 2.7344757988404845e-05,
      "loss": 0.5959,
      "num_input_tokens_seen": 983040,
      "step": 30
    },
    {
      "epoch": 0.224,
      "grad_norm": 0.23481066524982452,
      "learning_rate": 2.6425684622660387e-05,
      "loss": 0.6006,
      "num_input_tokens_seen": 1146880,
      "step": 35
    },
    {
      "epoch": 0.256,
      "grad_norm": 0.2456846982240677,
      "learning_rate": 2.5390865302643993e-05,
      "loss": 0.594,
      "num_input_tokens_seen": 1310720,
      "step": 40
    },
    {
      "epoch": 0.288,
      "grad_norm": 0.25728079676628113,
      "learning_rate": 2.425078308942815e-05,
      "loss": 0.5825,
      "num_input_tokens_seen": 1474560,
      "step": 45
    },
    {
      "epoch": 0.32,
      "grad_norm": 0.2505452334880829,
      "learning_rate": 2.3016987391917016e-05,
      "loss": 0.5871,
      "num_input_tokens_seen": 1638400,
      "step": 50
    },
    {
      "epoch": 0.352,
      "grad_norm": 0.27519550919532776,
      "learning_rate": 2.1701976967524388e-05,
      "loss": 0.5771,
      "num_input_tokens_seen": 1802240,
      "step": 55
    },
    {
      "epoch": 0.384,
      "grad_norm": 0.2705497741699219,
      "learning_rate": 2.0319073305638035e-05,
      "loss": 0.5544,
      "num_input_tokens_seen": 1966080,
      "step": 60
    },
    {
      "epoch": 0.416,
      "grad_norm": 0.2861919701099396,
      "learning_rate": 1.888228567653781e-05,
      "loss": 0.5768,
      "num_input_tokens_seen": 2129920,
      "step": 65
    },
    {
      "epoch": 0.448,
      "grad_norm": 0.29395216703414917,
      "learning_rate": 1.7406169212866405e-05,
      "loss": 0.5534,
      "num_input_tokens_seen": 2293760,
      "step": 70
    },
    {
      "epoch": 0.48,
      "grad_norm": 0.285727322101593,
      "learning_rate": 1.5905677461334292e-05,
      "loss": 0.5597,
      "num_input_tokens_seen": 2457600,
      "step": 75
    },
    {
      "epoch": 0.512,
      "grad_norm": 0.30645114183425903,
      "learning_rate": 1.4396010898358778e-05,
      "loss": 0.571,
      "num_input_tokens_seen": 2621440,
      "step": 80
    },
    {
      "epoch": 0.544,
      "grad_norm": 0.2912521958351135,
      "learning_rate": 1.2892462944223613e-05,
      "loss": 0.5572,
      "num_input_tokens_seen": 2785280,
      "step": 85
    },
    {
      "epoch": 0.576,
      "grad_norm": 0.3027022182941437,
      "learning_rate": 1.1410265035686639e-05,
      "loss": 0.5686,
      "num_input_tokens_seen": 2949120,
      "step": 90
    },
    {
      "epoch": 0.608,
      "grad_norm": 0.32110294699668884,
      "learning_rate": 9.964432326500933e-06,
      "loss": 0.5525,
      "num_input_tokens_seen": 3112960,
      "step": 95
    },
    {
      "epoch": 0.64,
      "grad_norm": 0.31834056973457336,
      "learning_rate": 8.569611578954186e-06,
      "loss": 0.5594,
      "num_input_tokens_seen": 3276800,
      "step": 100
    },
    {
      "epoch": 0.672,
      "grad_norm": 0.3223641812801361,
      "learning_rate": 7.239932787335147e-06,
      "loss": 0.5709,
      "num_input_tokens_seen": 3440640,
      "step": 105
    },
    {
      "epoch": 0.704,
      "grad_norm": 0.32236120104789734,
      "learning_rate": 5.988866036430314e-06,
      "loss": 0.5555,
      "num_input_tokens_seen": 3604480,
      "step": 110
    },
    {
      "epoch": 0.736,
      "grad_norm": 0.32125550508499146,
      "learning_rate": 4.829085045121636e-06,
      "loss": 0.5597,
      "num_input_tokens_seen": 3768320,
      "step": 115
    },
    {
      "epoch": 0.768,
      "grad_norm": 0.325810968875885,
      "learning_rate": 3.772338777433482e-06,
      "loss": 0.5432,
      "num_input_tokens_seen": 3932160,
      "step": 120
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.32014375925064087,
      "learning_rate": 2.829332421651404e-06,
      "loss": 0.5363,
      "num_input_tokens_seen": 4096000,
      "step": 125
    },
    {
      "epoch": 0.832,
      "grad_norm": 0.32001549005508423,
      "learning_rate": 2.0096189432334194e-06,
      "loss": 0.5582,
      "num_input_tokens_seen": 4259840,
      "step": 130
    },
    {
      "epoch": 0.864,
      "grad_norm": 0.33245849609375,
      "learning_rate": 1.321502310118649e-06,
      "loss": 0.5539,
      "num_input_tokens_seen": 4423680,
      "step": 135
    },
    {
      "epoch": 0.896,
      "grad_norm": 0.3493448495864868,
      "learning_rate": 7.719533707928178e-07,
      "loss": 0.5529,
      "num_input_tokens_seen": 4587520,
      "step": 140
    },
    {
      "epoch": 0.928,
      "grad_norm": 0.3279431462287903,
      "learning_rate": 3.665392372935922e-07,
      "loss": 0.5584,
      "num_input_tokens_seen": 4751360,
      "step": 145
    },
    {
      "epoch": 0.96,
      "grad_norm": 0.3298741281032562,
      "learning_rate": 1.0936688852919042e-07,
      "loss": 0.5503,
      "num_input_tokens_seen": 4915200,
      "step": 150
    },
    {
      "epoch": 0.992,
      "grad_norm": 0.33253204822540283,
      "learning_rate": 3.0415652272480776e-09,
      "loss": 0.5549,
      "num_input_tokens_seen": 5079040,
      "step": 155
    },
    {
      "epoch": 0.9984,
      "num_input_tokens_seen": 5111808,
      "step": 156,
      "total_flos": 2.1873318928633037e+17,
      "train_loss": 0.5760276004289969,
      "train_runtime": 1768.2955,
      "train_samples_per_second": 5.655,
      "train_steps_per_second": 0.088
    }
  ],
  "logging_steps": 5,
  "max_steps": 156,
  "num_input_tokens_seen": 5111808,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.1873318928633037e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}