{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 48,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.020833333333333332,
      "grad_norm": 6.704164995417714,
      "learning_rate": 1e-07,
      "logits/chosen": -2.8245413303375244,
      "logits/rejected": -2.8128416538238525,
      "logps/chosen": -277.95263671875,
      "logps/rejected": -263.10675048828125,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.20833333333333334,
      "grad_norm": 8.760615119514256,
      "learning_rate": 4.83504027183137e-07,
      "logits/chosen": -2.7760632038116455,
      "logits/rejected": -2.7525925636291504,
      "logps/chosen": -253.4949493408203,
      "logps/rejected": -221.0575408935547,
      "loss": 0.6908,
      "rewards/accuracies": 0.5694444179534912,
      "rewards/chosen": 0.005031693261116743,
      "rewards/margins": 0.005800557788461447,
      "rewards/rejected": -0.0007688638288527727,
      "step": 10
    },
    {
      "epoch": 0.4166666666666667,
      "grad_norm": 7.288463410297658,
      "learning_rate": 3.643105808261596e-07,
      "logits/chosen": -2.808706045150757,
      "logits/rejected": -2.7749691009521484,
      "logps/chosen": -272.81695556640625,
      "logps/rejected": -244.42822265625,
      "loss": 0.6733,
      "rewards/accuracies": 0.643750011920929,
      "rewards/chosen": 0.03518055006861687,
      "rewards/margins": 0.04298284649848938,
      "rewards/rejected": -0.007802293635904789,
      "step": 20
    },
    {
      "epoch": 0.625,
      "grad_norm": 7.409051370210203,
      "learning_rate": 1.8676665440207977e-07,
      "logits/chosen": -2.757873058319092,
      "logits/rejected": -2.7445333003997803,
      "logps/chosen": -285.27813720703125,
      "logps/rejected": -257.22314453125,
      "loss": 0.6524,
      "rewards/accuracies": 0.6343749761581421,
      "rewards/chosen": -0.03371132165193558,
      "rewards/margins": 0.0908183678984642,
      "rewards/rejected": -0.12452969700098038,
      "step": 30
    },
    {
      "epoch": 0.8333333333333334,
      "grad_norm": 8.006923126768978,
      "learning_rate": 4.1500545527530544e-08,
      "logits/chosen": -2.793574094772339,
      "logits/rejected": -2.7703700065612793,
      "logps/chosen": -300.51007080078125,
      "logps/rejected": -290.48675537109375,
      "loss": 0.634,
      "rewards/accuracies": 0.684374988079071,
      "rewards/chosen": -0.0866335853934288,
      "rewards/margins": 0.15011821687221527,
      "rewards/rejected": -0.23675179481506348,
      "step": 40
    },
    {
      "epoch": 1.0,
      "step": 48,
      "total_flos": 0.0,
      "train_loss": 0.6576150357723236,
      "train_runtime": 1338.4968,
      "train_samples_per_second": 9.135,
      "train_steps_per_second": 0.036
    }
  ],
  "logging_steps": 10,
  "max_steps": 48,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}