{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 100,
  "global_step": 478,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0020920502092050207,
      "grad_norm": 9.085185309257627,
      "learning_rate": 1.0416666666666666e-08,
      "logits/chosen": -2.7662220001220703,
      "logits/rejected": -2.7178127765655518,
      "logps/chosen": -269.6776123046875,
      "logps/rejected": -360.6510314941406,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.02092050209205021,
      "grad_norm": 9.1082652647099,
      "learning_rate": 1.0416666666666667e-07,
      "logits/chosen": -2.5920939445495605,
      "logits/rejected": -2.5626003742218018,
      "logps/chosen": -264.7879638671875,
      "logps/rejected": -251.56271362304688,
      "loss": 0.6931,
      "rewards/accuracies": 0.4652777910232544,
      "rewards/chosen": -0.00015384703874588013,
      "rewards/margins": 0.00021662651852238923,
      "rewards/rejected": -0.00037047354271635413,
      "step": 10
    },
    {
      "epoch": 0.04184100418410042,
      "grad_norm": 8.403069183005119,
      "learning_rate": 2.0833333333333333e-07,
      "logits/chosen": -2.6531224250793457,
      "logits/rejected": -2.6051571369171143,
      "logps/chosen": -281.4217224121094,
      "logps/rejected": -296.8309020996094,
      "loss": 0.6923,
      "rewards/accuracies": 0.5,
      "rewards/chosen": 0.0006401228019967675,
      "rewards/margins": 0.0008313875878229737,
      "rewards/rejected": -0.00019126484403386712,
      "step": 20
    },
    {
      "epoch": 0.06276150627615062,
      "grad_norm": 8.308611367087158,
      "learning_rate": 3.1249999999999997e-07,
      "logits/chosen": -2.6641345024108887,
      "logits/rejected": -2.591866970062256,
      "logps/chosen": -299.44696044921875,
      "logps/rejected": -263.68865966796875,
      "loss": 0.6886,
      "rewards/accuracies": 0.625,
      "rewards/chosen": 0.012143267318606377,
      "rewards/margins": 0.009724361822009087,
      "rewards/rejected": 0.0024189057294279337,
      "step": 30
    },
    {
      "epoch": 0.08368200836820083,
      "grad_norm": 8.158936380689568,
      "learning_rate": 4.1666666666666667e-07,
      "logits/chosen": -2.556858539581299,
      "logits/rejected": -2.521756172180176,
      "logps/chosen": -263.13677978515625,
      "logps/rejected": -245.13369750976562,
      "loss": 0.6778,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": 0.039376698434352875,
      "rewards/margins": 0.04692060127854347,
      "rewards/rejected": -0.007543901912868023,
      "step": 40
    },
    {
      "epoch": 0.10460251046025104,
      "grad_norm": 8.442774298195335,
      "learning_rate": 4.999733114418725e-07,
      "logits/chosen": -2.4983766078948975,
      "logits/rejected": -2.458930730819702,
      "logps/chosen": -279.6597595214844,
      "logps/rejected": -297.4883117675781,
      "loss": 0.6615,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": 0.0010005368385463953,
      "rewards/margins": 0.048170365393161774,
      "rewards/rejected": -0.04716982692480087,
      "step": 50
    },
    {
      "epoch": 0.12552301255230125,
      "grad_norm": 11.320329514836741,
      "learning_rate": 4.990398100856366e-07,
      "logits/chosen": -2.514209270477295,
      "logits/rejected": -2.4425783157348633,
      "logps/chosen": -273.6862487792969,
      "logps/rejected": -312.9122314453125,
      "loss": 0.6389,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -0.020198455080389977,
      "rewards/margins": 0.12713830173015594,
      "rewards/rejected": -0.14733675122261047,
      "step": 60
    },
    {
      "epoch": 0.14644351464435146,
      "grad_norm": 10.658450428160506,
      "learning_rate": 4.967775735898179e-07,
      "logits/chosen": -2.3650529384613037,
      "logits/rejected": -2.3627445697784424,
      "logps/chosen": -270.68896484375,
      "logps/rejected": -283.2526550292969,
      "loss": 0.6157,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.05055179074406624,
      "rewards/margins": 0.21437402069568634,
      "rewards/rejected": -0.26492583751678467,
      "step": 70
    },
    {
      "epoch": 0.16736401673640167,
      "grad_norm": 18.344270948626654,
      "learning_rate": 4.931986719649298e-07,
      "logits/chosen": -2.5614020824432373,
      "logits/rejected": -2.4784748554229736,
      "logps/chosen": -336.35430908203125,
      "logps/rejected": -303.9096374511719,
      "loss": 0.6025,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.1354917585849762,
      "rewards/margins": 0.26337119936943054,
      "rewards/rejected": -0.39886292815208435,
      "step": 80
    },
    {
      "epoch": 0.18828451882845187,
      "grad_norm": 19.12478751671118,
      "learning_rate": 4.883222001996351e-07,
      "logits/chosen": -2.134124755859375,
      "logits/rejected": -2.0814406871795654,
      "logps/chosen": -305.53082275390625,
      "logps/rejected": -345.31109619140625,
      "loss": 0.5741,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -0.4640178680419922,
      "rewards/margins": 0.46487826108932495,
      "rewards/rejected": -0.9288961291313171,
      "step": 90
    },
    {
      "epoch": 0.20920502092050208,
      "grad_norm": 17.356342239295923,
      "learning_rate": 4.821741763807186e-07,
      "logits/chosen": -1.3313448429107666,
      "logits/rejected": -1.1910110712051392,
      "logps/chosen": -308.7679138183594,
      "logps/rejected": -361.08013916015625,
      "loss": 0.5709,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.4839816987514496,
      "rewards/margins": 0.5560156106948853,
      "rewards/rejected": -1.0399973392486572,
      "step": 100
    },
    {
      "epoch": 0.20920502092050208,
      "eval_logits/chosen": -1.2465178966522217,
      "eval_logits/rejected": -1.0884699821472168,
      "eval_logps/chosen": -296.7220153808594,
      "eval_logps/rejected": -345.89581298828125,
      "eval_loss": 0.5765112042427063,
      "eval_rewards/accuracies": 0.73046875,
      "eval_rewards/chosen": -0.34146958589553833,
      "eval_rewards/margins": 0.49064701795578003,
      "eval_rewards/rejected": -0.8321166634559631,
      "eval_runtime": 40.3807,
      "eval_samples_per_second": 49.529,
      "eval_steps_per_second": 0.792,
      "step": 100
    },
    {
      "epoch": 0.2301255230125523,
      "grad_norm": 17.47373189781474,
      "learning_rate": 4.747874028753375e-07,
      "logits/chosen": -0.9215749502182007,
      "logits/rejected": -0.43307366967201233,
      "logps/chosen": -360.1551818847656,
      "logps/rejected": -366.24737548828125,
      "loss": 0.5682,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": -0.5557556748390198,
      "rewards/margins": 0.528251588344574,
      "rewards/rejected": -1.0840072631835938,
      "step": 110
    },
    {
      "epoch": 0.2510460251046025,
      "grad_norm": 20.093025180061602,
      "learning_rate": 4.662012913161997e-07,
      "logits/chosen": -0.3439692556858063,
      "logits/rejected": -0.04505181312561035,
      "logps/chosen": -336.732177734375,
      "logps/rejected": -372.484619140625,
      "loss": 0.5504,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.5553595423698425,
      "rewards/margins": 0.5710638761520386,
      "rewards/rejected": -1.1264234781265259,
      "step": 120
    },
    {
      "epoch": 0.2719665271966527,
      "grad_norm": 23.028382893640803,
      "learning_rate": 4.5646165232345103e-07,
      "logits/chosen": 0.32941126823425293,
      "logits/rejected": 0.7188884019851685,
      "logps/chosen": -367.50909423828125,
      "logps/rejected": -419.0060119628906,
      "loss": 0.535,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.8557965159416199,
      "rewards/margins": 0.6497102379798889,
      "rewards/rejected": -1.5055067539215088,
      "step": 130
    },
    {
      "epoch": 0.2928870292887029,
      "grad_norm": 18.677447832810206,
      "learning_rate": 4.456204510851956e-07,
      "logits/chosen": 0.21762561798095703,
      "logits/rejected": 0.6349714398384094,
      "logps/chosen": -381.5950927734375,
      "logps/rejected": -431.7596740722656,
      "loss": 0.534,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.6563121676445007,
      "rewards/margins": 0.6548308730125427,
      "rewards/rejected": -1.311143159866333,
      "step": 140
    },
    {
      "epoch": 0.3138075313807531,
      "grad_norm": 27.137772429027354,
      "learning_rate": 4.337355301007335e-07,
      "logits/chosen": 0.3337453007698059,
      "logits/rejected": 0.7608418464660645,
      "logps/chosen": -367.8205261230469,
      "logps/rejected": -415.28045654296875,
      "loss": 0.5402,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.8457827568054199,
      "rewards/margins": 0.5450310707092285,
      "rewards/rejected": -1.3908138275146484,
      "step": 150
    },
    {
      "epoch": 0.33472803347280333,
      "grad_norm": 26.991586965942954,
      "learning_rate": 4.2087030056579986e-07,
      "logits/chosen": 0.3724555969238281,
      "logits/rejected": 1.1069540977478027,
      "logps/chosen": -358.0096130371094,
      "logps/rejected": -428.70361328125,
      "loss": 0.5324,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -0.8497047424316406,
      "rewards/margins": 0.8634451031684875,
      "rewards/rejected": -1.7131500244140625,
      "step": 160
    },
    {
      "epoch": 0.35564853556485354,
      "grad_norm": 25.592749003897193,
      "learning_rate": 4.070934040463998e-07,
      "logits/chosen": 0.5049823522567749,
      "logits/rejected": 0.9404481649398804,
      "logps/chosen": -348.8735046386719,
      "logps/rejected": -392.8125915527344,
      "loss": 0.5363,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.9956283569335938,
      "rewards/margins": 0.620826244354248,
      "rewards/rejected": -1.6164544820785522,
      "step": 170
    },
    {
      "epoch": 0.37656903765690375,
      "grad_norm": 22.372745978564954,
      "learning_rate": 3.9247834624635404e-07,
      "logits/chosen": 0.6127285361289978,
      "logits/rejected": 1.2147839069366455,
      "logps/chosen": -340.34906005859375,
      "logps/rejected": -381.84661865234375,
      "loss": 0.5064,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.8403685688972473,
      "rewards/margins": 0.7025831937789917,
      "rewards/rejected": -1.5429518222808838,
      "step": 180
    },
    {
      "epoch": 0.39748953974895396,
      "grad_norm": 29.66314474064595,
      "learning_rate": 3.7710310482256523e-07,
      "logits/chosen": 0.3294413983821869,
      "logits/rejected": 0.9074909090995789,
      "logps/chosen": -347.7164611816406,
      "logps/rejected": -403.9298400878906,
      "loss": 0.5209,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.7645334601402283,
      "rewards/margins": 0.67705237865448,
      "rewards/rejected": -1.441585898399353,
      "step": 190
    },
    {
      "epoch": 0.41841004184100417,
      "grad_norm": 58.26403433971581,
      "learning_rate": 3.610497133404795e-07,
      "logits/chosen": 0.549667477607727,
      "logits/rejected": 1.1599102020263672,
      "logps/chosen": -346.0694580078125,
      "logps/rejected": -415.4115295410156,
      "loss": 0.5427,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.8615196943283081,
      "rewards/margins": 0.7718194723129272,
      "rewards/rejected": -1.6333389282226562,
      "step": 200
    },
    {
      "epoch": 0.41841004184100417,
      "eval_logits/chosen": 0.18153175711631775,
      "eval_logits/rejected": 0.9058605432510376,
      "eval_logps/chosen": -336.09857177734375,
      "eval_logps/rejected": -416.4310607910156,
      "eval_loss": 0.5255736708641052,
      "eval_rewards/accuracies": 0.76953125,
      "eval_rewards/chosen": -0.7352346181869507,
      "eval_rewards/margins": 0.8022347688674927,
      "eval_rewards/rejected": -1.5374693870544434,
      "eval_runtime": 40.3301,
      "eval_samples_per_second": 49.591,
      "eval_steps_per_second": 0.793,
      "step": 200
    },
    {
      "epoch": 0.4393305439330544,
      "grad_norm": 23.975315777665053,
      "learning_rate": 3.4440382358952115e-07,
      "logits/chosen": 0.23435361683368683,
      "logits/rejected": 1.2575860023498535,
      "logps/chosen": -365.595703125,
      "logps/rejected": -398.31182861328125,
      "loss": 0.5374,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -0.759240448474884,
      "rewards/margins": 0.711215615272522,
      "rewards/rejected": -1.4704558849334717,
      "step": 210
    },
    {
      "epoch": 0.4602510460251046,
      "grad_norm": 33.35111797134312,
      "learning_rate": 3.272542485937368e-07,
      "logits/chosen": 0.5184121131896973,
      "logits/rejected": 1.4770405292510986,
      "logps/chosen": -347.60296630859375,
      "logps/rejected": -407.5792541503906,
      "loss": 0.5233,
      "rewards/accuracies": 0.78125,
      "rewards/chosen": -0.8224745988845825,
      "rewards/margins": 0.7966977953910828,
      "rewards/rejected": -1.61917245388031,
      "step": 220
    },
    {
      "epoch": 0.4811715481171548,
      "grad_norm": 27.065916482787443,
      "learning_rate": 3.096924887558854e-07,
      "logits/chosen": 0.49935898184776306,
      "logits/rejected": 1.3911898136138916,
      "logps/chosen": -331.6248474121094,
      "logps/rejected": -421.27374267578125,
      "loss": 0.536,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -0.8267892003059387,
      "rewards/margins": 0.9082108736038208,
      "rewards/rejected": -1.7350002527236938,
      "step": 230
    },
    {
      "epoch": 0.502092050209205,
      "grad_norm": 22.856393344097995,
      "learning_rate": 2.9181224366319943e-07,
      "logits/chosen": 0.2771614193916321,
      "logits/rejected": 0.9905085563659668,
      "logps/chosen": -338.65069580078125,
      "logps/rejected": -394.545654296875,
      "loss": 0.5024,
      "rewards/accuracies": 0.78125,
      "rewards/chosen": -0.7491301894187927,
      "rewards/margins": 0.7059187293052673,
      "rewards/rejected": -1.4550487995147705,
      "step": 240
    },
    {
      "epoch": 0.5230125523012552,
      "grad_norm": 26.034393008774543,
      "learning_rate": 2.7370891215954565e-07,
      "logits/chosen": 0.8687924146652222,
      "logits/rejected": 1.8204635381698608,
      "logps/chosen": -394.45635986328125,
      "logps/rejected": -441.63739013671875,
      "loss": 0.4997,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.9626559019088745,
      "rewards/margins": 0.858391284942627,
      "rewards/rejected": -1.821047067642212,
      "step": 250
    },
    {
      "epoch": 0.5439330543933054,
      "grad_norm": 32.298336541514864,
      "learning_rate": 2.55479083351317e-07,
      "logits/chosen": 0.8348854184150696,
      "logits/rejected": 1.8038705587387085,
      "logps/chosen": -395.5223693847656,
      "logps/rejected": -442.0123596191406,
      "loss": 0.5091,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -1.0121302604675293,
      "rewards/margins": 0.8687250018119812,
      "rewards/rejected": -1.8808553218841553,
      "step": 260
    },
    {
      "epoch": 0.5648535564853556,
      "grad_norm": 23.028145780758834,
      "learning_rate": 2.3722002126275822e-07,
      "logits/chosen": 0.5836859941482544,
      "logits/rejected": 1.3243392705917358,
      "logps/chosen": -365.3858337402344,
      "logps/rejected": -417.47735595703125,
      "loss": 0.5181,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -0.8838497996330261,
      "rewards/margins": 0.7144038081169128,
      "rewards/rejected": -1.5982534885406494,
      "step": 270
    },
    {
      "epoch": 0.5857740585774058,
      "grad_norm": 24.69890342328338,
      "learning_rate": 2.19029145890313e-07,
      "logits/chosen": 0.8636754155158997,
      "logits/rejected": 1.8474575281143188,
      "logps/chosen": -362.3246154785156,
      "logps/rejected": -422.9788513183594,
      "loss": 0.5317,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -1.0066673755645752,
      "rewards/margins": 0.8012599945068359,
      "rewards/rejected": -1.8079273700714111,
      "step": 280
    },
    {
      "epoch": 0.606694560669456,
      "grad_norm": 23.579877718409463,
      "learning_rate": 2.0100351342479216e-07,
      "logits/chosen": 0.9099249839782715,
      "logits/rejected": 1.71127188205719,
      "logps/chosen": -365.8359375,
      "logps/rejected": -429.7777404785156,
      "loss": 0.5117,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -1.1478241682052612,
      "rewards/margins": 0.7301602363586426,
      "rewards/rejected": -1.8779844045639038,
      "step": 290
    },
    {
      "epoch": 0.6276150627615062,
      "grad_norm": 28.794362121773794,
      "learning_rate": 1.8323929841460178e-07,
      "logits/chosen": 0.5150594115257263,
      "logits/rejected": 1.7005367279052734,
      "logps/chosen": -400.82708740234375,
      "logps/rejected": -440.55181884765625,
      "loss": 0.4892,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.9979560971260071,
      "rewards/margins": 0.8070657849311829,
      "rewards/rejected": -1.80502188205719,
      "step": 300
    },
    {
      "epoch": 0.6276150627615062,
      "eval_logits/chosen": 0.5828189253807068,
      "eval_logits/rejected": 1.3891677856445312,
      "eval_logps/chosen": -351.67193603515625,
      "eval_logps/rejected": -444.7821960449219,
      "eval_loss": 0.5082466006278992,
      "eval_rewards/accuracies": 0.76953125,
      "eval_rewards/chosen": -0.8909686207771301,
      "eval_rewards/margins": 0.9300119280815125,
      "eval_rewards/rejected": -1.820980429649353,
      "eval_runtime": 40.346,
      "eval_samples_per_second": 49.571,
      "eval_steps_per_second": 0.793,
      "step": 300
    },
    {
      "epoch": 0.6485355648535565,
      "grad_norm": 33.12731092406144,
      "learning_rate": 1.6583128063291573e-07,
      "logits/chosen": 0.7290142774581909,
      "logits/rejected": 1.4886902570724487,
      "logps/chosen": -397.9615478515625,
      "logps/rejected": -434.9144592285156,
      "loss": 0.4935,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -1.0262950658798218,
      "rewards/margins": 0.7377363443374634,
      "rewards/rejected": -1.764031171798706,
      "step": 310
    },
    {
      "epoch": 0.6694560669456067,
      "grad_norm": 33.809250573756664,
      "learning_rate": 1.488723393865766e-07,
      "logits/chosen": 0.8037419319152832,
      "logits/rejected": 1.8283920288085938,
      "logps/chosen": -393.73876953125,
      "logps/rejected": -423.55279541015625,
      "loss": 0.4925,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -1.0004233121871948,
      "rewards/margins": 0.7867400050163269,
      "rewards/rejected": -1.7871633768081665,
      "step": 320
    },
    {
      "epoch": 0.6903765690376569,
      "grad_norm": 29.294631844662618,
      "learning_rate": 1.3245295796480788e-07,
      "logits/chosen": 0.5138927698135376,
      "logits/rejected": 1.4465028047561646,
      "logps/chosen": -355.19622802734375,
      "logps/rejected": -424.10052490234375,
      "loss": 0.4984,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.9297013282775879,
      "rewards/margins": 0.7162156701087952,
      "rewards/rejected": -1.6459171772003174,
      "step": 330
    },
    {
      "epoch": 0.7112970711297071,
      "grad_norm": 31.084849867717274,
      "learning_rate": 1.1666074087171627e-07,
      "logits/chosen": 0.8492459058761597,
      "logits/rejected": 1.7086740732192993,
      "logps/chosen": -376.3741455078125,
      "logps/rejected": -468.2669982910156,
      "loss": 0.5005,
      "rewards/accuracies": 0.78125,
      "rewards/chosen": -0.8633196949958801,
      "rewards/margins": 1.0182921886444092,
      "rewards/rejected": -1.8816118240356445,
      "step": 340
    },
    {
      "epoch": 0.7322175732217573,
      "grad_norm": 24.550583619455697,
      "learning_rate": 1.0157994641835734e-07,
      "logits/chosen": 0.9272977113723755,
      "logits/rejected": 1.8347976207733154,
      "logps/chosen": -351.08538818359375,
      "logps/rejected": -420.5586853027344,
      "loss": 0.4773,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.9292421340942383,
      "rewards/margins": 0.8692865371704102,
      "rewards/rejected": -1.7985286712646484,
      "step": 350
    },
    {
      "epoch": 0.7531380753138075,
      "grad_norm": 23.786028174418703,
      "learning_rate": 8.729103716819111e-08,
      "logits/chosen": 0.772596001625061,
      "logits/rejected": 1.983689308166504,
      "logps/chosen": -400.6702575683594,
      "logps/rejected": -450.78521728515625,
      "loss": 0.511,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.9801367521286011,
      "rewards/margins": 0.9152604341506958,
      "rewards/rejected": -1.8953969478607178,
      "step": 360
    },
    {
      "epoch": 0.7740585774058577,
      "grad_norm": 28.031131650473906,
      "learning_rate": 7.387025063449081e-08,
      "logits/chosen": 1.3180190324783325,
      "logits/rejected": 2.0549569129943848,
      "logps/chosen": -387.903076171875,
      "logps/rejected": -421.43560791015625,
      "loss": 0.5062,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -1.1257370710372925,
      "rewards/margins": 0.7061508893966675,
      "rewards/rejected": -1.83188796043396,
      "step": 370
    },
    {
      "epoch": 0.7949790794979079,
      "grad_norm": 25.874688989134494,
      "learning_rate": 6.138919252022435e-08,
      "logits/chosen": 1.4991798400878906,
      "logits/rejected": 2.2097418308258057,
      "logps/chosen": -361.59136962890625,
      "logps/rejected": -474.5025939941406,
      "loss": 0.5045,
      "rewards/accuracies": 0.78125,
      "rewards/chosen": -1.2211620807647705,
      "rewards/margins": 0.9370111227035522,
      "rewards/rejected": -2.1581733226776123,
      "step": 380
    },
    {
      "epoch": 0.8158995815899581,
      "grad_norm": 25.013956798098185,
      "learning_rate": 4.991445467064689e-08,
      "logits/chosen": 0.9765421152114868,
      "logits/rejected": 1.5557312965393066,
      "logps/chosen": -405.4990234375,
      "logps/rejected": -466.1669006347656,
      "loss": 0.4919,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -0.9986322522163391,
      "rewards/margins": 0.8234176635742188,
      "rewards/rejected": -1.822049856185913,
      "step": 390
    },
    {
      "epoch": 0.8368200836820083,
      "grad_norm": 25.325590503420294,
      "learning_rate": 3.9507259776993954e-08,
      "logits/chosen": 1.118195652961731,
      "logits/rejected": 2.1151347160339355,
      "logps/chosen": -369.673095703125,
      "logps/rejected": -455.16949462890625,
      "loss": 0.5037,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.991197943687439,
      "rewards/margins": 0.8906410336494446,
      "rewards/rejected": -1.8818390369415283,
      "step": 400
    },
    {
      "epoch": 0.8368200836820083,
      "eval_logits/chosen": 0.7959027290344238,
      "eval_logits/rejected": 1.6106007099151611,
      "eval_logps/chosen": -346.2210998535156,
      "eval_logps/rejected": -441.496826171875,
      "eval_loss": 0.5030511617660522,
      "eval_rewards/accuracies": 0.78515625,
      "eval_rewards/chosen": -0.8364605903625488,
      "eval_rewards/margins": 0.9516662955284119,
      "eval_rewards/rejected": -1.788127064704895,
      "eval_runtime": 40.3046,
      "eval_samples_per_second": 49.622,
      "eval_steps_per_second": 0.794,
      "step": 400
    },
    {
      "epoch": 0.8577405857740585,
      "grad_norm": 25.826488801828866,
      "learning_rate": 3.022313472693447e-08,
      "logits/chosen": 0.8946113586425781,
      "logits/rejected": 2.032439708709717,
      "logps/chosen": -391.67340087890625,
      "logps/rejected": -448.6036682128906,
      "loss": 0.5091,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": -0.9272828102111816,
      "rewards/margins": 0.8706603050231934,
      "rewards/rejected": -1.797943115234375,
      "step": 410
    },
    {
      "epoch": 0.8786610878661087,
      "grad_norm": 24.611045682765628,
      "learning_rate": 2.2111614344599684e-08,
      "logits/chosen": 0.7512882947921753,
      "logits/rejected": 1.5819581747055054,
      "logps/chosen": -391.5676574707031,
      "logps/rejected": -447.89959716796875,
      "loss": 0.4949,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -0.9452153444290161,
      "rewards/margins": 0.789088249206543,
      "rewards/rejected": -1.7343034744262695,
      "step": 420
    },
    {
      "epoch": 0.899581589958159,
      "grad_norm": 24.57477424595926,
      "learning_rate": 1.521597710086439e-08,
      "logits/chosen": 1.212896704673767,
      "logits/rejected": 1.9935243129730225,
      "logps/chosen": -380.76373291015625,
      "logps/rejected": -443.2093811035156,
      "loss": 0.4764,
      "rewards/accuracies": 0.78125,
      "rewards/chosen": -1.0168489217758179,
      "rewards/margins": 0.8378502130508423,
      "rewards/rejected": -1.8546991348266602,
      "step": 430
    },
    {
      "epoch": 0.9205020920502092,
      "grad_norm": 29.526622672975254,
      "learning_rate": 9.57301420397924e-09,
      "logits/chosen": 0.9320109486579895,
      "logits/rejected": 1.8708562850952148,
      "logps/chosen": -375.6236877441406,
      "logps/rejected": -440.24822998046875,
      "loss": 0.4967,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": -0.9130943417549133,
      "rewards/margins": 0.7984671592712402,
      "rewards/rejected": -1.7115614414215088,
      "step": 440
    },
    {
      "epoch": 0.9414225941422594,
      "grad_norm": 25.409754325281586,
      "learning_rate": 5.212833302556258e-09,
      "logits/chosen": 0.8921103477478027,
      "logits/rejected": 1.6804935932159424,
      "logps/chosen": -392.9017333984375,
      "logps/rejected": -487.7478942871094,
      "loss": 0.4971,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": -0.9917811155319214,
      "rewards/margins": 0.8150280117988586,
      "rewards/rejected": -1.8068091869354248,
      "step": 450
    },
    {
      "epoch": 0.9623430962343096,
      "grad_norm": 27.931385213072396,
      "learning_rate": 2.158697848236607e-09,
      "logits/chosen": 1.0774990320205688,
      "logits/rejected": 1.940333366394043,
      "logps/chosen": -366.88958740234375,
      "logps/rejected": -417.73590087890625,
      "loss": 0.5033,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.9498540759086609,
      "rewards/margins": 0.801009476184845,
      "rewards/rejected": -1.7508634328842163,
      "step": 460
    },
    {
      "epoch": 0.9832635983263598,
      "grad_norm": 26.97146711470604,
      "learning_rate": 4.269029751107489e-10,
      "logits/chosen": 0.9106823801994324,
      "logits/rejected": 1.9309165477752686,
      "logps/chosen": -368.4771728515625,
      "logps/rejected": -455.9451599121094,
      "loss": 0.4933,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -0.8981329202651978,
      "rewards/margins": 0.8376766443252563,
      "rewards/rejected": -1.735809564590454,
      "step": 470
    },
    {
      "epoch": 1.0,
      "step": 478,
      "total_flos": 0.0,
      "train_loss": 0.5395244165444474,
      "train_runtime": 4445.7731,
      "train_samples_per_second": 13.751,
      "train_steps_per_second": 0.108
    }
  ],
  "logging_steps": 10,
  "max_steps": 478,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}