NicholasCorrado committed
Commit ebae1a4 · verified · 1 parent: 4c99d17

Model save

Files changed (3)
  1. all_results.json +6 -6
  2. train_results.json +6 -6
  3. trainer_state.json +1097 -16
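
Most of the change sits in trainer_state.json (the full log_history). A minimal sketch for fetching that file at this revision and reading the per-step records with huggingface_hub; the repo id below is a placeholder, since the commit page does not show the repository name:

import json

from huggingface_hub import hf_hub_download

# Placeholder repository id; substitute the model repo this commit belongs to.
REPO_ID = "NicholasCorrado/<model-repo>"

# Fetch trainer_state.json as of this commit (use the full commit SHA if the
# short hash shown above is not accepted).
path = hf_hub_download(repo_id=REPO_ID, filename="trainer_state.json", revision="ebae1a4")

with open(path) as f:
    state = json.load(f)

# Keep only the per-step records; the trailing summary record reports
# train_loss/train_runtime instead of a per-step "loss".
records = [r for r in state["log_history"] if "loss" in r]
print(len(records), "logged steps")
print("final loss:", records[-1]["loss"])
print("final reward margin:", records[-1]["rewards/margins"])
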
all_results.json CHANGED
@@ -1,5 +1,5 @@
  {
- "epoch": 1.0,
+ "epoch": 0.9993060374739764,
  "eval_logits/chosen": -1.3140687942504883,
  "eval_logits/rejected": -1.1924079656600952,
  "eval_logps/chosen": -316.60687255859375,
@@ -14,9 +14,9 @@
  "eval_samples_per_second": 17.926,
  "eval_steps_per_second": 0.359,
  "total_flos": 0.0,
- "train_loss": 0.1732867956161499,
- "train_runtime": 87.5332,
- "train_samples": 50,
- "train_samples_per_second": 0.571,
- "train_steps_per_second": 0.011
+ "train_loss": 0.4493948830498589,
+ "train_runtime": 20175.981,
+ "train_samples": 184443,
+ "train_samples_per_second": 9.142,
+ "train_steps_per_second": 0.036
  }
train_results.json CHANGED
@@ -1,9 +1,9 @@
  {
- "epoch": 1.0,
+ "epoch": 0.9993060374739764,
  "total_flos": 0.0,
- "train_loss": 0.1732867956161499,
- "train_runtime": 87.5332,
- "train_samples": 50,
- "train_samples_per_second": 0.571,
- "train_steps_per_second": 0.011
+ "train_loss": 0.4493948830498589,
+ "train_runtime": 20175.981,
+ "train_samples": 184443,
+ "train_samples_per_second": 9.142,
+ "train_steps_per_second": 0.036
  }
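
The two summaries above are consistent with each other and internally: train_results.json repeats the training half of all_results.json, and the reported throughput follows from the sample count and wall-clock runtime. A small check, assuming both files are in the working directory:

import json

with open("all_results.json") as f:
    all_results = json.load(f)
with open("train_results.json") as f:
    train_results = json.load(f)

# train_results.json is the training summary without the eval_* metrics.
assert all(all_results[key] == value for key, value in train_results.items())

# Throughput follows from sample count and runtime:
# 184443 samples / 20175.981 s ~= 9.142 samples/s, as reported.
samples_per_second = train_results["train_samples"] / train_results["train_runtime"]
assert round(samples_per_second, 3) == train_results["train_samples_per_second"]
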
trainer_state.json CHANGED
@@ -1,21 +1,22 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 1.0,
+ "epoch": 0.9993060374739764,
  "eval_steps": 1000,
- "global_step": 1,
+ "global_step": 720,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
  {
- "epoch": 1.0,
- "learning_rate": 5e-07,
- "logits/chosen": -0.6792019605636597,
- "logits/rejected": -1.02703857421875,
- "logps/chosen": -368.00933837890625,
- "logps/rejected": -500.88104248046875,
- "loss": 0.1733,
+ "epoch": 0.0013879250520471894,
+ "grad_norm": 2.184646703877197,
+ "learning_rate": 6.9444444444444435e-09,
+ "logits/chosen": -1.157708764076233,
+ "logits/rejected": -1.0856982469558716,
+ "logps/chosen": -392.3704528808594,
+ "logps/rejected": -422.7169189453125,
+ "loss": 0.6931,
  "rewards/accuracies": 0.0,
  "rewards/chosen": 0.0,
  "rewards/margins": 0.0,
@@ -23,17 +24,1097 @@
  "step": 1
  },
  {
- "epoch": 1.0,
- "step": 1,
+ "epoch": 0.013879250520471894,
28
+ "grad_norm": 2.045186292766699,
29
+ "learning_rate": 6.944444444444444e-08,
30
+ "logits/chosen": -0.9201799035072327,
31
+ "logits/rejected": -0.9960015416145325,
32
+ "logps/chosen": -403.1753234863281,
33
+ "logps/rejected": -406.3473815917969,
34
+ "loss": 0.6931,
35
+ "rewards/accuracies": 0.3680555522441864,
36
+ "rewards/chosen": -0.0007031817804090679,
37
+ "rewards/margins": -0.0005899361567571759,
38
+ "rewards/rejected": -0.00011324579827487469,
39
+ "step": 10
40
+ },
41
+ {
42
+ "epoch": 0.027758501040943788,
43
+ "grad_norm": 2.0501238605790117,
44
+ "learning_rate": 1.3888888888888888e-07,
45
+ "logits/chosen": -0.9155173301696777,
46
+ "logits/rejected": -0.9719367027282715,
47
+ "logps/chosen": -393.12945556640625,
48
+ "logps/rejected": -407.33709716796875,
49
+ "loss": 0.6932,
50
+ "rewards/accuracies": 0.48124998807907104,
51
+ "rewards/chosen": 0.0007024414371699095,
52
+ "rewards/margins": 0.00011867408466059715,
53
+ "rewards/rejected": 0.0005837674252688885,
54
+ "step": 20
55
+ },
56
+ {
57
+ "epoch": 0.041637751561415685,
58
+ "grad_norm": 2.1931925008563304,
59
+ "learning_rate": 2.0833333333333333e-07,
60
+ "logits/chosen": -0.9494641423225403,
61
+ "logits/rejected": -1.0066778659820557,
62
+ "logps/chosen": -429.01910400390625,
63
+ "logps/rejected": -425.8172912597656,
64
+ "loss": 0.6929,
65
+ "rewards/accuracies": 0.559374988079071,
66
+ "rewards/chosen": 0.0010106399422511458,
67
+ "rewards/margins": 0.0010079689091071486,
68
+ "rewards/rejected": 2.671018592081964e-06,
69
+ "step": 30
70
+ },
71
+ {
72
+ "epoch": 0.055517002081887576,
73
+ "grad_norm": 2.196961132429192,
74
+ "learning_rate": 2.7777777777777776e-07,
75
+ "logits/chosen": -0.9400655627250671,
76
+ "logits/rejected": -1.0188666582107544,
77
+ "logps/chosen": -420.2276916503906,
78
+ "logps/rejected": -405.5096130371094,
79
+ "loss": 0.6923,
80
+ "rewards/accuracies": 0.574999988079071,
81
+ "rewards/chosen": 0.002629968337714672,
82
+ "rewards/margins": 0.001661272719502449,
83
+ "rewards/rejected": 0.0009686955017969012,
84
+ "step": 40
85
+ },
86
+ {
87
+ "epoch": 0.06939625260235947,
88
+ "grad_norm": 2.2620008213708194,
89
+ "learning_rate": 3.472222222222222e-07,
90
+ "logits/chosen": -0.9268127679824829,
91
+ "logits/rejected": -0.9413528442382812,
92
+ "logps/chosen": -407.0869140625,
93
+ "logps/rejected": -433.43035888671875,
94
+ "loss": 0.691,
95
+ "rewards/accuracies": 0.703125,
96
+ "rewards/chosen": 0.006026268471032381,
97
+ "rewards/margins": 0.005162273999303579,
98
+ "rewards/rejected": 0.0008639938896521926,
99
+ "step": 50
100
+ },
101
+ {
102
+ "epoch": 0.08327550312283137,
103
+ "grad_norm": 2.9642516438971254,
104
+ "learning_rate": 4.1666666666666667e-07,
105
+ "logits/chosen": -0.9514287710189819,
106
+ "logits/rejected": -1.0300030708312988,
107
+ "logps/chosen": -408.86181640625,
108
+ "logps/rejected": -417.53106689453125,
109
+ "loss": 0.6884,
110
+ "rewards/accuracies": 0.7281249761581421,
111
+ "rewards/chosen": 0.01152682676911354,
112
+ "rewards/margins": 0.00950426422059536,
113
+ "rewards/rejected": 0.002022563014179468,
114
+ "step": 60
115
+ },
116
+ {
117
+ "epoch": 0.09715475364330327,
118
+ "grad_norm": 2.0059535680251264,
119
+ "learning_rate": 4.861111111111111e-07,
120
+ "logits/chosen": -0.9322255849838257,
121
+ "logits/rejected": -0.9961916208267212,
122
+ "logps/chosen": -416.08197021484375,
123
+ "logps/rejected": -440.123291015625,
124
+ "loss": 0.6845,
125
+ "rewards/accuracies": 0.721875011920929,
126
+ "rewards/chosen": 0.02161681093275547,
127
+ "rewards/margins": 0.016515256837010384,
128
+ "rewards/rejected": 0.005101555027067661,
129
+ "step": 70
130
+ },
131
+ {
132
+ "epoch": 0.11103400416377515,
133
+ "grad_norm": 2.959027274351803,
134
+ "learning_rate": 4.998119881260575e-07,
135
+ "logits/chosen": -0.8885990381240845,
136
+ "logits/rejected": -0.9473252296447754,
137
+ "logps/chosen": -412.66253662109375,
138
+ "logps/rejected": -426.8203125,
139
+ "loss": 0.6784,
140
+ "rewards/accuracies": 0.7875000238418579,
141
+ "rewards/chosen": 0.03722007945179939,
142
+ "rewards/margins": 0.03375672921538353,
143
+ "rewards/rejected": 0.0034633490722626448,
144
+ "step": 80
145
+ },
146
+ {
147
+ "epoch": 0.12491325468424705,
148
+ "grad_norm": 2.896496746709743,
149
+ "learning_rate": 4.990486745229364e-07,
150
+ "logits/chosen": -0.9239265322685242,
151
+ "logits/rejected": -1.015549659729004,
152
+ "logps/chosen": -406.7080078125,
153
+ "logps/rejected": -408.1942138671875,
154
+ "loss": 0.6692,
155
+ "rewards/accuracies": 0.793749988079071,
156
+ "rewards/chosen": 0.04666774719953537,
157
+ "rewards/margins": 0.0515812449157238,
158
+ "rewards/rejected": -0.004913502838462591,
159
+ "step": 90
160
+ },
161
+ {
162
+ "epoch": 0.13879250520471895,
163
+ "grad_norm": 2.17437482262988,
164
+ "learning_rate": 4.977001008412112e-07,
165
+ "logits/chosen": -1.0140012502670288,
166
+ "logits/rejected": -1.0407038927078247,
167
+ "logps/chosen": -416.13800048828125,
168
+ "logps/rejected": -424.24468994140625,
169
+ "loss": 0.6619,
170
+ "rewards/accuracies": 0.746874988079071,
171
+ "rewards/chosen": 0.06093335896730423,
172
+ "rewards/margins": 0.05933469533920288,
173
+ "rewards/rejected": 0.0015986515209078789,
174
+ "step": 100
175
+ },
176
+ {
177
+ "epoch": 0.15267175572519084,
178
+ "grad_norm": 2.0661816686924537,
179
+ "learning_rate": 4.957694362057149e-07,
180
+ "logits/chosen": -0.9822956919670105,
181
+ "logits/rejected": -1.011765956878662,
182
+ "logps/chosen": -417.8843688964844,
183
+ "logps/rejected": -422.9627380371094,
184
+ "loss": 0.6518,
185
+ "rewards/accuracies": 0.7437499761581421,
186
+ "rewards/chosen": 0.05249622464179993,
187
+ "rewards/margins": 0.07984234392642975,
188
+ "rewards/rejected": -0.027346113696694374,
189
+ "step": 110
190
+ },
191
+ {
192
+ "epoch": 0.16655100624566274,
193
+ "grad_norm": 2.1244191981703135,
194
+ "learning_rate": 4.932612176449559e-07,
195
+ "logits/chosen": -0.9970356225967407,
196
+ "logits/rejected": -1.0578349828720093,
197
+ "logps/chosen": -407.3737487792969,
198
+ "logps/rejected": -426.0777893066406,
199
+ "loss": 0.6368,
200
+ "rewards/accuracies": 0.75,
201
+ "rewards/chosen": 0.04393979534506798,
202
+ "rewards/margins": 0.10807327926158905,
203
+ "rewards/rejected": -0.06413348764181137,
204
+ "step": 120
205
+ },
206
+ {
207
+ "epoch": 0.18043025676613464,
208
+ "grad_norm": 2.0595896023881703,
209
+ "learning_rate": 4.901813394291801e-07,
210
+ "logits/chosen": -0.9531866908073425,
211
+ "logits/rejected": -0.9673601984977722,
212
+ "logps/chosen": -416.93572998046875,
213
+ "logps/rejected": -443.47894287109375,
214
+ "loss": 0.6201,
215
+ "rewards/accuracies": 0.84375,
216
+ "rewards/chosen": 0.03279733285307884,
217
+ "rewards/margins": 0.1790144443511963,
218
+ "rewards/rejected": -0.14621710777282715,
219
+ "step": 130
220
+ },
221
+ {
222
+ "epoch": 0.19430950728660654,
223
+ "grad_norm": 2.169975326300987,
224
+ "learning_rate": 4.865370392189376e-07,
225
+ "logits/chosen": -0.9693125486373901,
226
+ "logits/rejected": -0.9949439167976379,
227
+ "logps/chosen": -396.1827697753906,
228
+ "logps/rejected": -439.85198974609375,
229
+ "loss": 0.6004,
230
+ "rewards/accuracies": 0.84375,
231
+ "rewards/chosen": 0.005037306807935238,
232
+ "rewards/margins": 0.23115964233875275,
233
+ "rewards/rejected": -0.22612233459949493,
234
+ "step": 140
235
+ },
236
+ {
237
+ "epoch": 0.2081887578070784,
238
+ "grad_norm": 2.439547013921171,
239
+ "learning_rate": 4.823368810567056e-07,
240
+ "logits/chosen": -1.0007877349853516,
241
+ "logits/rejected": -1.0211200714111328,
242
+ "logps/chosen": -408.7399597167969,
243
+ "logps/rejected": -449.01953125,
244
+ "loss": 0.5878,
245
+ "rewards/accuracies": 0.8062499761581421,
246
+ "rewards/chosen": -0.07737134397029877,
247
+ "rewards/margins": 0.25105008482933044,
248
+ "rewards/rejected": -0.3284214437007904,
249
+ "step": 150
250
+ },
251
+ {
252
+ "epoch": 0.2220680083275503,
253
+ "grad_norm": 2.191957070219856,
254
+ "learning_rate": 4.775907352415367e-07,
255
+ "logits/chosen": -0.9597848057746887,
256
+ "logits/rejected": -0.9754905700683594,
257
+ "logps/chosen": -404.9871826171875,
258
+ "logps/rejected": -453.3285217285156,
259
+ "loss": 0.572,
260
+ "rewards/accuracies": 0.78125,
261
+ "rewards/chosen": -0.14126327633857727,
262
+ "rewards/margins": 0.30146196484565735,
263
+ "rewards/rejected": -0.44272518157958984,
264
+ "step": 160
265
+ },
266
+ {
267
+ "epoch": 0.2359472588480222,
268
+ "grad_norm": 2.8446228567237726,
269
+ "learning_rate": 4.723097551340265e-07,
270
+ "logits/chosen": -0.8638654947280884,
271
+ "logits/rejected": -0.8823292851448059,
272
+ "logps/chosen": -451.3196716308594,
273
+ "logps/rejected": -487.5765075683594,
274
+ "loss": 0.5457,
275
+ "rewards/accuracies": 0.784375011920929,
276
+ "rewards/chosen": -0.2892600893974304,
277
+ "rewards/margins": 0.37542229890823364,
278
+ "rewards/rejected": -0.6646823287010193,
279
+ "step": 170
280
+ },
281
+ {
282
+ "epoch": 0.2498265093684941,
283
+ "grad_norm": 2.488211046081056,
284
+ "learning_rate": 4.6650635094610966e-07,
285
+ "logits/chosen": -0.9351462125778198,
286
+ "logits/rejected": -0.9417260885238647,
287
+ "logps/chosen": -442.58917236328125,
288
+ "logps/rejected": -491.6702575683594,
289
+ "loss": 0.5337,
290
+ "rewards/accuracies": 0.784375011920929,
291
+ "rewards/chosen": -0.40233302116394043,
292
+ "rewards/margins": 0.40838655829429626,
293
+ "rewards/rejected": -0.8107194900512695,
294
+ "step": 180
295
+ },
296
+ {
297
+ "epoch": 0.263705759888966,
298
+ "grad_norm": 3.2325426784137417,
299
+ "learning_rate": 4.6019416057727577e-07,
300
+ "logits/chosen": -0.9113900065422058,
301
+ "logits/rejected": -0.9249471426010132,
302
+ "logps/chosen": -454.93658447265625,
303
+ "logps/rejected": -505.32763671875,
304
+ "loss": 0.5093,
305
+ "rewards/accuracies": 0.78125,
306
+ "rewards/chosen": -0.5173531174659729,
307
+ "rewards/margins": 0.5457069277763367,
308
+ "rewards/rejected": -1.0630600452423096,
309
+ "step": 190
310
+ },
311
+ {
312
+ "epoch": 0.2775850104094379,
313
+ "grad_norm": 2.6935717929946374,
314
+ "learning_rate": 4.5338801756574185e-07,
315
+ "logits/chosen": -0.9233020544052124,
316
+ "logits/rejected": -0.936613917350769,
317
+ "logps/chosen": -477.2933654785156,
318
+ "logps/rejected": -543.9907836914062,
319
+ "loss": 0.5088,
320
+ "rewards/accuracies": 0.765625,
321
+ "rewards/chosen": -0.698850154876709,
322
+ "rewards/margins": 0.6066737174987793,
323
+ "rewards/rejected": -1.3055237531661987,
324
+ "step": 200
325
+ },
326
+ {
327
+ "epoch": 0.2914642609299098,
328
+ "grad_norm": 2.6234937800693237,
329
+ "learning_rate": 4.461039162298939e-07,
330
+ "logits/chosen": -0.9328230619430542,
331
+ "logits/rejected": -0.8883553743362427,
332
+ "logps/chosen": -474.24176025390625,
333
+ "logps/rejected": -560.80810546875,
334
+ "loss": 0.4809,
335
+ "rewards/accuracies": 0.8125,
336
+ "rewards/chosen": -0.7412362694740295,
337
+ "rewards/margins": 0.7680382132530212,
338
+ "rewards/rejected": -1.5092744827270508,
339
+ "step": 210
340
+ },
341
+ {
342
+ "epoch": 0.3053435114503817,
343
+ "grad_norm": 3.024837883769618,
344
+ "learning_rate": 4.3835897408191513e-07,
345
+ "logits/chosen": -0.9258328676223755,
346
+ "logits/rejected": -0.902255654335022,
347
+ "logps/chosen": -545.10205078125,
348
+ "logps/rejected": -611.480224609375,
349
+ "loss": 0.4716,
350
+ "rewards/accuracies": 0.7906249761581421,
351
+ "rewards/chosen": -0.9928766489028931,
352
+ "rewards/margins": 0.8125397562980652,
353
+ "rewards/rejected": -1.8054163455963135,
354
+ "step": 220
355
+ },
356
+ {
357
+ "epoch": 0.3192227619708536,
358
+ "grad_norm": 4.999154415916733,
359
+ "learning_rate": 4.301713916019286e-07,
360
+ "logits/chosen": -0.864261269569397,
361
+ "logits/rejected": -0.8518573045730591,
362
+ "logps/chosen": -521.3760986328125,
363
+ "logps/rejected": -621.6141967773438,
364
+ "loss": 0.4618,
365
+ "rewards/accuracies": 0.8343750238418579,
366
+ "rewards/chosen": -1.0177581310272217,
367
+ "rewards/margins": 1.0421375036239624,
368
+ "rewards/rejected": -2.0598955154418945,
369
+ "step": 230
370
+ },
371
+ {
372
+ "epoch": 0.3331020124913255,
373
+ "grad_norm": 4.221789042426902,
374
+ "learning_rate": 4.2156040946718343e-07,
375
+ "logits/chosen": -0.8536814451217651,
376
+ "logits/rejected": -0.855249285697937,
377
+ "logps/chosen": -554.1260986328125,
378
+ "logps/rejected": -660.2291259765625,
379
+ "loss": 0.4504,
380
+ "rewards/accuracies": 0.7749999761581421,
381
+ "rewards/chosen": -1.2824825048446655,
382
+ "rewards/margins": 0.9512511491775513,
383
+ "rewards/rejected": -2.233733654022217,
384
+ "step": 240
385
+ },
386
+ {
387
+ "epoch": 0.3469812630117974,
388
+ "grad_norm": 2.9920792128032283,
389
+ "learning_rate": 4.125462633367959e-07,
390
+ "logits/chosen": -0.9102590680122375,
391
+ "logits/rejected": -0.8657618761062622,
392
+ "logps/chosen": -548.0431518554688,
393
+ "logps/rejected": -677.654541015625,
394
+ "loss": 0.4387,
395
+ "rewards/accuracies": 0.809374988079071,
396
+ "rewards/chosen": -1.2187608480453491,
397
+ "rewards/margins": 1.2359611988067627,
398
+ "rewards/rejected": -2.4547219276428223,
399
+ "step": 250
400
+ },
401
+ {
402
+ "epoch": 0.3608605135322693,
403
+ "grad_norm": 3.276352885973741,
404
+ "learning_rate": 4.031501362983007e-07,
405
+ "logits/chosen": -0.9509126543998718,
406
+ "logits/rejected": -0.8994159698486328,
407
+ "logps/chosen": -542.1051635742188,
408
+ "logps/rejected": -668.0875244140625,
409
+ "loss": 0.44,
410
+ "rewards/accuracies": 0.815625011920929,
411
+ "rewards/chosen": -1.3456058502197266,
412
+ "rewards/margins": 1.152848243713379,
413
+ "rewards/rejected": -2.4984538555145264,
414
+ "step": 260
415
+ },
416
+ {
417
+ "epoch": 0.3747397640527412,
418
+ "grad_norm": 3.433959749322587,
419
+ "learning_rate": 3.933941090877615e-07,
420
+ "logits/chosen": -0.9043784141540527,
421
+ "logits/rejected": -0.8728653192520142,
422
+ "logps/chosen": -572.6734619140625,
423
+ "logps/rejected": -690.0535278320312,
424
+ "loss": 0.4269,
425
+ "rewards/accuracies": 0.796875,
426
+ "rewards/chosen": -1.49461030960083,
427
+ "rewards/margins": 1.185040831565857,
428
+ "rewards/rejected": -2.6796510219573975,
429
+ "step": 270
430
+ },
431
+ {
432
+ "epoch": 0.3886190145732131,
433
+ "grad_norm": 3.135829679670571,
434
+ "learning_rate": 3.833011082004228e-07,
435
+ "logits/chosen": -0.9181197285652161,
436
+ "logits/rejected": -0.8538551330566406,
437
+ "logps/chosen": -580.3753662109375,
438
+ "logps/rejected": -723.9002685546875,
439
+ "loss": 0.4243,
440
+ "rewards/accuracies": 0.762499988079071,
441
+ "rewards/chosen": -1.563520908355713,
442
+ "rewards/margins": 1.419384479522705,
443
+ "rewards/rejected": -2.982905387878418,
444
+ "step": 280
445
+ },
446
+ {
447
+ "epoch": 0.4024982650936849,
448
+ "grad_norm": 3.4471828982674606,
449
+ "learning_rate": 3.728948520138426e-07,
450
+ "logits/chosen": -0.8911579847335815,
451
+ "logits/rejected": -0.8040667772293091,
452
+ "logps/chosen": -581.972412109375,
453
+ "logps/rejected": -704.8370361328125,
454
+ "loss": 0.4129,
455
+ "rewards/accuracies": 0.8125,
456
+ "rewards/chosen": -1.598099946975708,
457
+ "rewards/margins": 1.2950611114501953,
458
+ "rewards/rejected": -2.893160820007324,
459
+ "step": 290
460
+ },
461
+ {
462
+ "epoch": 0.4163775156141568,
463
+ "grad_norm": 3.4538574024709185,
464
+ "learning_rate": 3.6219979505011555e-07,
465
+ "logits/chosen": -0.9019178152084351,
466
+ "logits/rejected": -0.8638097047805786,
467
+ "logps/chosen": -552.6265869140625,
468
+ "logps/rejected": -678.7213745117188,
469
+ "loss": 0.4264,
470
+ "rewards/accuracies": 0.815625011920929,
471
+ "rewards/chosen": -1.5648345947265625,
472
+ "rewards/margins": 1.3646188974380493,
473
+ "rewards/rejected": -2.9294533729553223,
474
+ "step": 300
475
+ },
476
+ {
477
+ "epoch": 0.4302567661346287,
478
+ "grad_norm": 4.0164936619199905,
479
+ "learning_rate": 3.512410705081684e-07,
480
+ "logits/chosen": -0.8717131614685059,
481
+ "logits/rejected": -0.8151350021362305,
482
+ "logps/chosen": -573.2371215820312,
483
+ "logps/rejected": -731.2991333007812,
484
+ "loss": 0.4135,
485
+ "rewards/accuracies": 0.8062499761581421,
486
+ "rewards/chosen": -1.5790045261383057,
487
+ "rewards/margins": 1.489929437637329,
488
+ "rewards/rejected": -3.0689339637756348,
489
+ "step": 310
490
+ },
491
+ {
492
+ "epoch": 0.4441360166551006,
493
+ "grad_norm": 3.5055099662080766,
494
+ "learning_rate": 3.400444312011776e-07,
495
+ "logits/chosen": -0.9052634239196777,
496
+ "logits/rejected": -0.8478328585624695,
497
+ "logps/chosen": -601.0723266601562,
498
+ "logps/rejected": -743.2720947265625,
499
+ "loss": 0.3968,
500
+ "rewards/accuracies": 0.809374988079071,
501
+ "rewards/chosen": -1.5770965814590454,
502
+ "rewards/margins": 1.5724782943725586,
503
+ "rewards/rejected": -3.1495749950408936,
504
+ "step": 320
505
+ },
506
+ {
507
+ "epoch": 0.4580152671755725,
508
+ "grad_norm": 3.965189906947426,
509
+ "learning_rate": 3.286361890379034e-07,
510
+ "logits/chosen": -0.8289991617202759,
511
+ "logits/rejected": -0.7707743644714355,
512
+ "logps/chosen": -592.8557739257812,
513
+ "logps/rejected": -759.8111572265625,
514
+ "loss": 0.397,
515
+ "rewards/accuracies": 0.8031250238418579,
516
+ "rewards/chosen": -1.7602096796035767,
517
+ "rewards/margins": 1.649737000465393,
518
+ "rewards/rejected": -3.409946918487549,
519
+ "step": 330
520
+ },
521
+ {
522
+ "epoch": 0.4718945176960444,
523
+ "grad_norm": 6.571433183841374,
524
+ "learning_rate": 3.1704315319015936e-07,
525
+ "logits/chosen": -0.8428624868392944,
526
+ "logits/rejected": -0.807705283164978,
527
+ "logps/chosen": -569.0560302734375,
528
+ "logps/rejected": -749.4913330078125,
529
+ "loss": 0.4054,
530
+ "rewards/accuracies": 0.8374999761581421,
531
+ "rewards/chosen": -1.711888074874878,
532
+ "rewards/margins": 1.6754896640777588,
533
+ "rewards/rejected": -3.387377977371216,
534
+ "step": 340
535
+ },
536
+ {
537
+ "epoch": 0.4857737682165163,
538
+ "grad_norm": 3.3895657051926116,
539
+ "learning_rate": 3.052925670917219e-07,
540
+ "logits/chosen": -0.8292319178581238,
541
+ "logits/rejected": -0.7592617273330688,
542
+ "logps/chosen": -615.757080078125,
543
+ "logps/rejected": -775.6668090820312,
544
+ "loss": 0.3867,
545
+ "rewards/accuracies": 0.7718750238418579,
546
+ "rewards/chosen": -1.9108030796051025,
547
+ "rewards/margins": 1.574349045753479,
548
+ "rewards/rejected": -3.48515248298645,
549
+ "step": 350
550
+ },
551
+ {
552
+ "epoch": 0.4996530187369882,
553
+ "grad_norm": 3.9806648944209155,
554
+ "learning_rate": 2.934120444167326e-07,
555
+ "logits/chosen": -0.822270393371582,
556
+ "logits/rejected": -0.7529654502868652,
557
+ "logps/chosen": -610.9036865234375,
558
+ "logps/rejected": -785.0739135742188,
559
+ "loss": 0.3914,
560
+ "rewards/accuracies": 0.815625011920929,
561
+ "rewards/chosen": -2.06144380569458,
562
+ "rewards/margins": 1.6444549560546875,
563
+ "rewards/rejected": -3.7058987617492676,
564
+ "step": 360
565
+ },
566
+ {
567
+ "epoch": 0.5135322692574601,
568
+ "grad_norm": 4.270423330616301,
569
+ "learning_rate": 2.814295041880407e-07,
570
+ "logits/chosen": -0.8429604768753052,
571
+ "logits/rejected": -0.7863871455192566,
572
+ "logps/chosen": -599.1904296875,
573
+ "logps/rejected": -767.7243041992188,
574
+ "loss": 0.391,
575
+ "rewards/accuracies": 0.7749999761581421,
576
+ "rewards/chosen": -1.957845687866211,
577
+ "rewards/margins": 1.5971133708953857,
578
+ "rewards/rejected": -3.5549590587615967,
579
+ "step": 370
580
+ },
581
+ {
582
+ "epoch": 0.527411519777932,
583
+ "grad_norm": 3.711197534301825,
584
+ "learning_rate": 2.6937310516798275e-07,
585
+ "logits/chosen": -0.8030338287353516,
586
+ "logits/rejected": -0.7218085527420044,
587
+ "logps/chosen": -635.15380859375,
588
+ "logps/rejected": -831.8131103515625,
589
+ "loss": 0.3798,
590
+ "rewards/accuracies": 0.8187500238418579,
591
+ "rewards/chosen": -2.005901336669922,
592
+ "rewards/margins": 1.8555805683135986,
593
+ "rewards/rejected": -3.8614819049835205,
594
+ "step": 380
595
+ },
596
+ {
597
+ "epoch": 0.5412907702984039,
598
+ "grad_norm": 4.065093944544315,
599
+ "learning_rate": 2.5727117968577785e-07,
600
+ "logits/chosen": -0.7184926867485046,
601
+ "logits/rejected": -0.6400619745254517,
602
+ "logps/chosen": -647.2830810546875,
603
+ "logps/rejected": -867.3894653320312,
604
+ "loss": 0.3737,
605
+ "rewards/accuracies": 0.8531249761581421,
606
+ "rewards/chosen": -2.1855947971343994,
607
+ "rewards/margins": 2.0819170475006104,
608
+ "rewards/rejected": -4.26751184463501,
609
+ "step": 390
610
+ },
611
+ {
612
+ "epoch": 0.5551700208188758,
613
+ "grad_norm": 4.664675650179661,
614
+ "learning_rate": 2.4515216705704393e-07,
615
+ "logits/chosen": -0.7386522889137268,
616
+ "logits/rejected": -0.6554276347160339,
617
+ "logps/chosen": -680.4814453125,
618
+ "logps/rejected": -875.1360473632812,
619
+ "loss": 0.3768,
620
+ "rewards/accuracies": 0.809374988079071,
621
+ "rewards/chosen": -2.458164691925049,
622
+ "rewards/margins": 1.9175488948822021,
623
+ "rewards/rejected": -4.37571382522583,
624
+ "step": 400
625
+ },
626
+ {
627
+ "epoch": 0.5690492713393477,
628
+ "grad_norm": 4.082366486193136,
629
+ "learning_rate": 2.330445467518977e-07,
630
+ "logits/chosen": -0.7022604942321777,
631
+ "logits/rejected": -0.5973988175392151,
632
+ "logps/chosen": -627.4280395507812,
633
+ "logps/rejected": -786.0429077148438,
634
+ "loss": 0.377,
635
+ "rewards/accuracies": 0.7749999761581421,
636
+ "rewards/chosen": -2.2749786376953125,
637
+ "rewards/margins": 1.6706234216690063,
638
+ "rewards/rejected": -3.94560170173645,
639
+ "step": 410
640
+ },
641
+ {
642
+ "epoch": 0.5829285218598196,
643
+ "grad_norm": 3.6701003128420386,
644
+ "learning_rate": 2.209767714686924e-07,
645
+ "logits/chosen": -0.672639787197113,
646
+ "logits/rejected": -0.5875508189201355,
647
+ "logps/chosen": -665.8011474609375,
648
+ "logps/rejected": -878.91015625,
649
+ "loss": 0.3537,
650
+ "rewards/accuracies": 0.8125,
651
+ "rewards/chosen": -2.3907861709594727,
652
+ "rewards/margins": 2.144279956817627,
653
+ "rewards/rejected": -4.5350661277771,
654
+ "step": 420
655
+ },
656
+ {
657
+ "epoch": 0.5968077723802915,
658
+ "grad_norm": 3.8553830986355764,
659
+ "learning_rate": 2.0897720027066897e-07,
660
+ "logits/chosen": -0.67729651927948,
661
+ "logits/rejected": -0.5586274862289429,
662
+ "logps/chosen": -661.219482421875,
663
+ "logps/rejected": -875.0428466796875,
664
+ "loss": 0.3715,
665
+ "rewards/accuracies": 0.815625011920929,
666
+ "rewards/chosen": -2.52473783493042,
667
+ "rewards/margins": 2.1635537147521973,
668
+ "rewards/rejected": -4.688291072845459,
669
+ "step": 430
670
+ },
671
+ {
672
+ "epoch": 0.6106870229007634,
673
+ "grad_norm": 4.080685306612081,
674
+ "learning_rate": 1.970740319426474e-07,
675
+ "logits/chosen": -0.65470290184021,
676
+ "logits/rejected": -0.5402953028678894,
677
+ "logps/chosen": -674.3743896484375,
678
+ "logps/rejected": -868.76611328125,
679
+ "loss": 0.3756,
680
+ "rewards/accuracies": 0.8062499761581421,
681
+ "rewards/chosen": -2.5917928218841553,
682
+ "rewards/margins": 1.9496173858642578,
683
+ "rewards/rejected": -4.54141092300415,
684
+ "step": 440
685
+ },
686
+ {
687
+ "epoch": 0.6245662734212353,
688
+ "grad_norm": 3.7177874253136913,
689
+ "learning_rate": 1.8529523872436977e-07,
690
+ "logits/chosen": -0.6337302327156067,
691
+ "logits/rejected": -0.5308721661567688,
692
+ "logps/chosen": -659.3401489257812,
693
+ "logps/rejected": -889.49462890625,
694
+ "loss": 0.3548,
695
+ "rewards/accuracies": 0.7875000238418579,
696
+ "rewards/chosen": -2.4972152709960938,
697
+ "rewards/margins": 2.2179877758026123,
698
+ "rewards/rejected": -4.715203285217285,
699
+ "step": 450
700
+ },
701
+ {
702
+ "epoch": 0.6384455239417072,
703
+ "grad_norm": 3.781558261259466,
704
+ "learning_rate": 1.7366850057622172e-07,
705
+ "logits/chosen": -0.627717137336731,
706
+ "logits/rejected": -0.5284063816070557,
707
+ "logps/chosen": -665.6905517578125,
708
+ "logps/rejected": -902.7052612304688,
709
+ "loss": 0.3565,
710
+ "rewards/accuracies": 0.8031250238418579,
711
+ "rewards/chosen": -2.4058690071105957,
712
+ "rewards/margins": 2.2597124576568604,
713
+ "rewards/rejected": -4.665581703186035,
714
+ "step": 460
715
+ },
716
+ {
717
+ "epoch": 0.6523247744621791,
718
+ "grad_norm": 3.624902658770024,
719
+ "learning_rate": 1.622211401318028e-07,
720
+ "logits/chosen": -0.5916509628295898,
721
+ "logits/rejected": -0.48228612542152405,
722
+ "logps/chosen": -683.482421875,
723
+ "logps/rejected": -909.9552612304688,
724
+ "loss": 0.3597,
725
+ "rewards/accuracies": 0.840624988079071,
726
+ "rewards/chosen": -2.451106071472168,
727
+ "rewards/margins": 2.257086992263794,
728
+ "rewards/rejected": -4.708193302154541,
729
+ "step": 470
730
+ },
731
+ {
732
+ "epoch": 0.666204024982651,
733
+ "grad_norm": 4.657777670569194,
734
+ "learning_rate": 1.5098005849021078e-07,
735
+ "logits/chosen": -0.5622087717056274,
736
+ "logits/rejected": -0.46162405610084534,
737
+ "logps/chosen": -661.2738037109375,
738
+ "logps/rejected": -859.2135009765625,
739
+ "loss": 0.3586,
740
+ "rewards/accuracies": 0.824999988079071,
741
+ "rewards/chosen": -2.3591208457946777,
742
+ "rewards/margins": 2.061002016067505,
743
+ "rewards/rejected": -4.4201226234436035,
744
+ "step": 480
745
+ },
746
+ {
747
+ "epoch": 0.6800832755031229,
748
+ "grad_norm": 6.251774756777437,
749
+ "learning_rate": 1.3997167199892385e-07,
750
+ "logits/chosen": -0.5363645553588867,
751
+ "logits/rejected": -0.39701658487319946,
752
+ "logps/chosen": -659.409912109375,
753
+ "logps/rejected": -862.7174072265625,
754
+ "loss": 0.3703,
755
+ "rewards/accuracies": 0.8187500238418579,
756
+ "rewards/chosen": -2.3470611572265625,
757
+ "rewards/margins": 2.0817208290100098,
758
+ "rewards/rejected": -4.428781986236572,
759
+ "step": 490
760
+ },
761
+ {
762
+ "epoch": 0.6939625260235948,
763
+ "grad_norm": 4.437192094325177,
764
+ "learning_rate": 1.2922185017584036e-07,
765
+ "logits/chosen": -0.47550851106643677,
766
+ "logits/rejected": -0.33102065324783325,
767
+ "logps/chosen": -642.2039794921875,
768
+ "logps/rejected": -921.5857543945312,
769
+ "loss": 0.341,
770
+ "rewards/accuracies": 0.856249988079071,
771
+ "rewards/chosen": -2.3800008296966553,
772
+ "rewards/margins": 2.652977466583252,
773
+ "rewards/rejected": -5.032978534698486,
774
+ "step": 500
775
+ },
776
+ {
777
+ "epoch": 0.7078417765440667,
778
+ "grad_norm": 4.426537023215993,
779
+ "learning_rate": 1.1875585491635998e-07,
780
+ "logits/chosen": -0.5427089333534241,
781
+ "logits/rejected": -0.3993372321128845,
782
+ "logps/chosen": -642.7872924804688,
783
+ "logps/rejected": -917.6959838867188,
784
+ "loss": 0.339,
785
+ "rewards/accuracies": 0.859375,
786
+ "rewards/chosen": -2.4717633724212646,
787
+ "rewards/margins": 2.438523054122925,
788
+ "rewards/rejected": -4.910286903381348,
789
+ "step": 510
790
+ },
791
+ {
792
+ "epoch": 0.7217210270645386,
793
+ "grad_norm": 3.8981108408032368,
794
+ "learning_rate": 1.0859828112836539e-07,
795
+ "logits/chosen": -0.4982399344444275,
796
+ "logits/rejected": -0.3629917502403259,
797
+ "logps/chosen": -662.3302001953125,
798
+ "logps/rejected": -924.0505981445312,
799
+ "loss": 0.3575,
800
+ "rewards/accuracies": 0.8374999761581421,
801
+ "rewards/chosen": -2.5424540042877197,
802
+ "rewards/margins": 2.429400682449341,
803
+ "rewards/rejected": -4.9718546867370605,
804
+ "step": 520
805
+ },
806
+ {
807
+ "epoch": 0.7356002775850105,
808
+ "grad_norm": 4.062590769922296,
809
+ "learning_rate": 9.877299893461455e-08,
810
+ "logits/chosen": -0.4675068259239197,
811
+ "logits/rejected": -0.32594671845436096,
812
+ "logps/chosen": -694.9034423828125,
813
+ "logps/rejected": -893.17724609375,
814
+ "loss": 0.3619,
815
+ "rewards/accuracies": 0.828125,
816
+ "rewards/chosen": -2.6744236946105957,
817
+ "rewards/margins": 2.1196742057800293,
818
+ "rewards/rejected": -4.794097900390625,
819
+ "step": 530
820
+ },
821
+ {
822
+ "epoch": 0.7494795281054824,
823
+ "grad_norm": 4.00277278125475,
824
+ "learning_rate": 8.930309757836516e-08,
825
+ "logits/chosen": -0.4969969689846039,
826
+ "logits/rejected": -0.33853963017463684,
827
+ "logps/chosen": -672.2896118164062,
828
+ "logps/rejected": -913.6337890625,
829
+ "loss": 0.3511,
830
+ "rewards/accuracies": 0.809374988079071,
831
+ "rewards/chosen": -2.6400508880615234,
832
+ "rewards/margins": 2.4135284423828125,
833
+ "rewards/rejected": -5.053578853607178,
834
+ "step": 540
835
+ },
836
+ {
837
+ "epoch": 0.7633587786259542,
838
+ "grad_norm": 4.354943956619566,
839
+ "learning_rate": 8.021083116405173e-08,
840
+ "logits/chosen": -0.45121559500694275,
841
+ "logits/rejected": -0.34631314873695374,
842
+ "logps/chosen": -652.8707885742188,
843
+ "logps/rejected": -885.6276245117188,
844
+ "loss": 0.358,
845
+ "rewards/accuracies": 0.793749988079071,
846
+ "rewards/chosen": -2.4929685592651367,
847
+ "rewards/margins": 2.304232120513916,
848
+ "rewards/rejected": -4.797201156616211,
849
+ "step": 550
850
+ },
851
+ {
852
+ "epoch": 0.7772380291464261,
853
+ "grad_norm": 4.835099186465556,
854
+ "learning_rate": 7.151756636052527e-08,
855
+ "logits/chosen": -0.4754953384399414,
856
+ "logits/rejected": -0.3613481819629669,
857
+ "logps/chosen": -677.6926879882812,
858
+ "logps/rejected": -924.0533447265625,
859
+ "loss": 0.3606,
860
+ "rewards/accuracies": 0.800000011920929,
861
+ "rewards/chosen": -2.6459193229675293,
862
+ "rewards/margins": 2.297471523284912,
863
+ "rewards/rejected": -4.943390846252441,
864
+ "step": 560
865
+ },
866
+ {
867
+ "epoch": 0.7911172796668979,
868
+ "grad_norm": 4.2131174135927765,
869
+ "learning_rate": 6.324373218975104e-08,
870
+ "logits/chosen": -0.44653385877609253,
871
+ "logits/rejected": -0.3059902787208557,
872
+ "logps/chosen": -659.1906127929688,
873
+ "logps/rejected": -913.2021484375,
874
+ "loss": 0.3647,
875
+ "rewards/accuracies": 0.824999988079071,
876
+ "rewards/chosen": -2.6062073707580566,
877
+ "rewards/margins": 2.3299379348754883,
878
+ "rewards/rejected": -4.936145782470703,
879
+ "step": 570
880
+ },
881
+ {
882
+ "epoch": 0.8049965301873698,
883
+ "grad_norm": 4.3409883619650795,
884
+ "learning_rate": 5.5408772018959996e-08,
885
+ "logits/chosen": -0.44951170682907104,
886
+ "logits/rejected": -0.3005351424217224,
887
+ "logps/chosen": -656.5030517578125,
888
+ "logps/rejected": -886.4778442382812,
889
+ "loss": 0.3535,
890
+ "rewards/accuracies": 0.815625011920929,
891
+ "rewards/chosen": -2.515747547149658,
892
+ "rewards/margins": 2.289696455001831,
893
+ "rewards/rejected": -4.805444240570068,
894
+ "step": 580
895
+ },
896
+ {
897
+ "epoch": 0.8188757807078417,
898
+ "grad_norm": 13.005915598801213,
899
+ "learning_rate": 4.8031097869072225e-08,
900
+ "logits/chosen": -0.4361554980278015,
901
+ "logits/rejected": -0.3123430609703064,
902
+ "logps/chosen": -669.0994873046875,
903
+ "logps/rejected": -921.1182861328125,
904
+ "loss": 0.3566,
905
+ "rewards/accuracies": 0.840624988079071,
906
+ "rewards/chosen": -2.5885722637176514,
907
+ "rewards/margins": 2.3980278968811035,
908
+ "rewards/rejected": -4.986600399017334,
909
+ "step": 590
910
+ },
911
+ {
912
+ "epoch": 0.8327550312283136,
913
+ "grad_norm": 4.507720819165163,
914
+ "learning_rate": 4.112804714676593e-08,
915
+ "logits/chosen": -0.4602123200893402,
916
+ "logits/rejected": -0.3107297122478485,
917
+ "logps/chosen": -671.499267578125,
918
+ "logps/rejected": -886.6002807617188,
919
+ "loss": 0.3403,
920
+ "rewards/accuracies": 0.8500000238418579,
921
+ "rewards/chosen": -2.505514144897461,
922
+ "rewards/margins": 2.2939445972442627,
923
+ "rewards/rejected": -4.7994585037231445,
924
+ "step": 600
925
+ },
926
+ {
927
+ "epoch": 0.8466342817487855,
928
+ "grad_norm": 4.108687652880907,
929
+ "learning_rate": 3.4715841901871545e-08,
930
+ "logits/chosen": -0.4734552800655365,
931
+ "logits/rejected": -0.31184083223342896,
932
+ "logps/chosen": -664.9038696289062,
933
+ "logps/rejected": -929.7863159179688,
934
+ "loss": 0.3487,
935
+ "rewards/accuracies": 0.846875011920929,
936
+ "rewards/chosen": -2.5965065956115723,
937
+ "rewards/margins": 2.5841736793518066,
938
+ "rewards/rejected": -5.180680751800537,
939
+ "step": 610
940
+ },
941
+ {
942
+ "epoch": 0.8605135322692574,
943
+ "grad_norm": 4.599039860177261,
944
+ "learning_rate": 2.8809550705835546e-08,
945
+ "logits/chosen": -0.43759673833847046,
946
+ "logits/rejected": -0.25794321298599243,
947
+ "logps/chosen": -715.013671875,
948
+ "logps/rejected": -970.8460083007812,
949
+ "loss": 0.3524,
950
+ "rewards/accuracies": 0.84375,
951
+ "rewards/chosen": -2.732679843902588,
952
+ "rewards/margins": 2.611907482147217,
953
+ "rewards/rejected": -5.344587802886963,
954
+ "step": 620
955
+ },
956
+ {
957
+ "epoch": 0.8743927827897293,
958
+ "grad_norm": 4.4811322085497105,
959
+ "learning_rate": 2.3423053240837514e-08,
960
+ "logits/chosen": -0.4395861029624939,
961
+ "logits/rejected": -0.27333635091781616,
962
+ "logps/chosen": -696.0693359375,
963
+ "logps/rejected": -928.7999877929688,
964
+ "loss": 0.3694,
965
+ "rewards/accuracies": 0.7875000238418579,
966
+ "rewards/chosen": -2.7578296661376953,
967
+ "rewards/margins": 2.2534213066101074,
968
+ "rewards/rejected": -5.011250972747803,
969
+ "step": 630
970
+ },
971
+ {
972
+ "epoch": 0.8882720333102012,
973
+ "grad_norm": 4.763510092667976,
974
+ "learning_rate": 1.8569007682777415e-08,
975
+ "logits/chosen": -0.4241456985473633,
976
+ "logits/rejected": -0.29944029450416565,
977
+ "logps/chosen": -663.63330078125,
978
+ "logps/rejected": -864.45703125,
979
+ "loss": 0.358,
980
+ "rewards/accuracies": 0.8500000238418579,
981
+ "rewards/chosen": -2.602560043334961,
982
+ "rewards/margins": 2.116223096847534,
983
+ "rewards/rejected": -4.718783378601074,
984
+ "step": 640
985
+ },
986
+ {
987
+ "epoch": 0.9021512838306731,
988
+ "grad_norm": 4.368284739209083,
989
+ "learning_rate": 1.4258820954781037e-08,
990
+ "logits/chosen": -0.4327312409877777,
991
+ "logits/rejected": -0.2682781219482422,
992
+ "logps/chosen": -700.5999145507812,
993
+ "logps/rejected": -931.2169799804688,
994
+ "loss": 0.3583,
995
+ "rewards/accuracies": 0.828125,
996
+ "rewards/chosen": -2.735988140106201,
997
+ "rewards/margins": 2.3255417346954346,
998
+ "rewards/rejected": -5.061530113220215,
999
+ "step": 650
1000
+ },
1001
+ {
1002
+ "epoch": 0.916030534351145,
1003
+ "grad_norm": 4.046149463634508,
1004
+ "learning_rate": 1.0502621921127774e-08,
1005
+ "logits/chosen": -0.43741026520729065,
1006
+ "logits/rejected": -0.2442634403705597,
1007
+ "logps/chosen": -671.8573608398438,
1008
+ "logps/rejected": -919.7029418945312,
1009
+ "loss": 0.3519,
1010
+ "rewards/accuracies": 0.846875011920929,
1011
+ "rewards/chosen": -2.6393163204193115,
1012
+ "rewards/margins": 2.4307634830474854,
1013
+ "rewards/rejected": -5.070079326629639,
1014
+ "step": 660
1015
+ },
1016
+ {
1017
+ "epoch": 0.9299097848716169,
1018
+ "grad_norm": 3.883568325725545,
1019
+ "learning_rate": 7.309237584595007e-09,
1020
+ "logits/chosen": -0.4095235764980316,
1021
+ "logits/rejected": -0.27290958166122437,
1022
+ "logps/chosen": -672.8810424804688,
1023
+ "logps/rejected": -929.7780151367188,
1024
+ "loss": 0.3433,
1025
+ "rewards/accuracies": 0.859375,
1026
+ "rewards/chosen": -2.6669869422912598,
1027
+ "rewards/margins": 2.416769504547119,
1028
+ "rewards/rejected": -5.083756446838379,
1029
+ "step": 670
1030
+ },
1031
+ {
1032
+ "epoch": 0.9437890353920888,
1033
+ "grad_norm": 5.273020682836859,
1034
+ "learning_rate": 4.6861723431538265e-09,
1035
+ "logits/chosen": -0.42638665437698364,
1036
+ "logits/rejected": -0.25011223554611206,
1037
+ "logps/chosen": -685.7161865234375,
1038
+ "logps/rejected": -900.3049926757812,
1039
+ "loss": 0.3666,
1040
+ "rewards/accuracies": 0.831250011920929,
1041
+ "rewards/chosen": -2.629455089569092,
1042
+ "rewards/margins": 2.211458206176758,
1043
+ "rewards/rejected": -4.84091329574585,
1044
+ "step": 680
1045
+ },
1046
+ {
1047
+ "epoch": 0.9576682859125607,
1048
+ "grad_norm": 5.214078177947886,
1049
+ "learning_rate": 2.639590354763882e-09,
1050
+ "logits/chosen": -0.41801247000694275,
1051
+ "logits/rejected": -0.29952767491340637,
1052
+ "logps/chosen": -651.67724609375,
1053
+ "logps/rejected": -901.2472534179688,
1054
+ "loss": 0.3506,
1055
+ "rewards/accuracies": 0.84375,
1056
+ "rewards/chosen": -2.55751371383667,
1057
+ "rewards/margins": 2.3576016426086426,
1058
+ "rewards/rejected": -4.9151153564453125,
1059
+ "step": 690
1060
+ },
1061
+ {
1062
+ "epoch": 0.9715475364330326,
1063
+ "grad_norm": 4.655188648736662,
1064
+ "learning_rate": 1.1743010517085427e-09,
1065
+ "logits/chosen": -0.4516308307647705,
1066
+ "logits/rejected": -0.2846836745738983,
1067
+ "logps/chosen": -696.3230590820312,
1068
+ "logps/rejected": -928.4945068359375,
1069
+ "loss": 0.3491,
1070
+ "rewards/accuracies": 0.831250011920929,
1071
+ "rewards/chosen": -2.7318358421325684,
1072
+ "rewards/margins": 2.3993608951568604,
1073
+ "rewards/rejected": -5.13119649887085,
1074
+ "step": 700
1075
+ },
1076
+ {
1077
+ "epoch": 0.9854267869535045,
1078
+ "grad_norm": 3.572705217532298,
1079
+ "learning_rate": 2.9374783851240923e-10,
1080
+ "logits/chosen": -0.39585572481155396,
1081
+ "logits/rejected": -0.26901108026504517,
1082
+ "logps/chosen": -655.4457397460938,
1083
+ "logps/rejected": -909.2194213867188,
1084
+ "loss": 0.3344,
1085
+ "rewards/accuracies": 0.84375,
1086
+ "rewards/chosen": -2.4239559173583984,
1087
+ "rewards/margins": 2.4643149375915527,
1088
+ "rewards/rejected": -4.888271331787109,
1089
+ "step": 710
1090
+ },
1091
+ {
1092
+ "epoch": 0.9993060374739764,
1093
+ "grad_norm": 4.876709666740222,
1094
+ "learning_rate": 0.0,
1095
+ "logits/chosen": -0.4101489186286926,
1096
+ "logits/rejected": -0.2496207058429718,
1097
+ "logps/chosen": -666.4955444335938,
1098
+ "logps/rejected": -903.6095581054688,
1099
+ "loss": 0.3544,
1100
+ "rewards/accuracies": 0.8218749761581421,
1101
+ "rewards/chosen": -2.61322283744812,
1102
+ "rewards/margins": 2.3339571952819824,
1103
+ "rewards/rejected": -4.947180271148682,
1104
+ "step": 720
1105
+ },
1106
+ {
1107
+ "epoch": 0.9993060374739764,
1108
+ "step": 720,
1109
  "total_flos": 0.0,
1110
+ "train_loss": 0.4493948830498589,
1111
+ "train_runtime": 20175.981,
1112
+ "train_samples_per_second": 9.142,
1113
+ "train_steps_per_second": 0.036
1114
  }
1115
  ],
1116
  "logging_steps": 10,
1117
+ "max_steps": 720,
1118
  "num_input_tokens_seen": 0,
1119
  "num_train_epochs": 1,
1120
  "save_steps": 100,