zerozeroz committed on
Commit a90397d · verified · 1 Parent(s): 9029ff2

Model save

README.md ADDED
@@ -0,0 +1,67 @@
+ ---
+ library_name: transformers
+ model_name: CodeLlama-7b-hf_1000rl_cpp
+ tags:
+ - generated_from_trainer
+ - trl
+ - grpo
+ licence: license
+ ---
+
+ # Model Card for CodeLlama-7b-hf_1000rl_cpp
+
+ This model is a fine-tuned version of [None](https://huggingface.co/None).
+ It has been trained using [TRL](https://github.com/huggingface/trl).
+
+ ## Quick start
+
+ ```python
+ from transformers import pipeline
+
+ question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
+ generator = pipeline("text-generation", model="zerozeroz/CodeLlama-7b-hf_1000rl_cpp", device="cuda")
+ output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
+ print(output["generated_text"])
+ ```
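+
+ If the tokenizer in this repository does not define a chat template (the CodeLlama base checkpoints typically do not), the chat-style call above will not work as-is; a plain string prompt is a simple alternative. This is only a minimal sketch, and the prompt is an illustration rather than part of the training setup:
+
+ ```python
+ from transformers import pipeline
+
+ generator = pipeline("text-generation", model="zerozeroz/CodeLlama-7b-hf_1000rl_cpp", device="cuda")
+ # Plain string prompt; no chat template required.
+ prompt = "// C++ function that returns the n-th Fibonacci number\nint fib(int n) {"
+ print(generator(prompt, max_new_tokens=128, return_full_text=False)[0]["generated_text"])
+ ```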
+
+ ## Training procedure
+
+ This model was trained with GRPO, a method introduced in [DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models](https://huggingface.co/papers/2402.03300).
+
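+ The run logged two reward functions, `correct_code_reward_func` and `len_reward_func` (their per-step values appear in `trainer_state.json` below). The following is only a minimal GRPO sketch with TRL's `GRPOTrainer`; the reward-function bodies, the dataset, and the base-model id are placeholders, not the actual training setup:
+
+ ```python
+ from datasets import load_dataset
+ from trl import GRPOConfig, GRPOTrainer
+
+ # Placeholder reward functions -- the real implementations are not included in this repo.
+ def correct_code_reward_func(completions, **kwargs):
+     # e.g. 1.0 if the generated C++ compiles and passes its tests, else 0.0
+     return [0.0 for _ in completions]
+
+ def len_reward_func(completions, **kwargs):
+     # e.g. a small bonus for concise completions
+     return [0.0 for _ in completions]
+
+ # Placeholder dataset; GRPOTrainer expects a "prompt" column.
+ dataset = load_dataset("json", data_files="train.json", split="train")
+
+ trainer = GRPOTrainer(
+     model="codellama/CodeLlama-7b-hf",  # assumed base model; the card above does not name it
+     reward_funcs=[correct_code_reward_func, len_reward_func],
+     args=GRPOConfig(output_dir="CodeLlama-7b-hf_1000rl_cpp"),
+     train_dataset=dataset,
+ )
+ trainer.train()
+ ```
+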
+ ### Framework versions
+
+ - TRL: 0.14.0
+ - Transformers: 4.48.1
+ - Pytorch: 2.5.1+cu121
+ - Datasets: 3.1.0
+ - Tokenizers: 0.21.0
+
+ ## Citations
+
+ Cite GRPO as:
+
+ ```bibtex
+ @article{zhihong2024deepseekmath,
+     title = {{DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models}},
+     author = {Zhihong Shao and Peiyi Wang and Qihao Zhu and Runxin Xu and Junxiao Song and Mingchuan Zhang and Y. K. Li and Y. Wu and Daya Guo},
+     year = 2024,
+     eprint = {arXiv:2402.03300},
+ }
+ ```
+
+ Cite TRL as:
+
+ ```bibtex
+ @misc{vonwerra2022trl,
+     title = {{TRL: Transformer Reinforcement Learning}},
+     author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallouédec},
+     year = 2020,
+     journal = {GitHub repository},
+     publisher = {GitHub},
+     howpublished = {\url{https://github.com/huggingface/trl}}
+ }
+ ```
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "total_flos": 0.0,
+     "train_loss": 6.70051409522318e-05,
+     "train_runtime": 7347.2678,
+     "train_samples": 338,
+     "train_samples_per_second": 0.138,
+     "train_steps_per_second": 0.023
+ }
config.json CHANGED
@@ -25,6 +25,6 @@
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.48.1",
- "use_cache": false,
+ "use_cache": true,
  "vocab_size": 32016
  }
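
The only change to config.json re-enables the key/value cache: `use_cache` is commonly switched off during training (for example when gradient checkpointing is enabled) and restored for inference, though the commit does not state the reason. A quick way to confirm the saved setting, assuming the standard transformers API:

```python
from transformers import AutoConfig

# Read the configuration shipped with this commit.
config = AutoConfig.from_pretrained("zerozeroz/CodeLlama-7b-hf_1000rl_cpp")
print(config.use_cache)  # True after this change, so generate() reuses past key/values
```
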
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+     "_from_model_config": true,
+     "bos_token_id": 1,
+     "eos_token_id": 2,
+     "transformers_version": "4.48.1"
+ }
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "total_flos": 0.0,
+     "train_loss": 6.70051409522318e-05,
+     "train_runtime": 7347.2678,
+     "train_samples": 338,
+     "train_samples_per_second": 0.138,
+     "train_steps_per_second": 0.023
+ }
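
trainer_state.json below records the per-step GRPO metrics (reward, reward_std, the two reward-function components, KL, and learning rate) for all 169 optimizer steps. A minimal sketch for plotting the mean reward from `log_history`, assuming a local copy of the file and that matplotlib is installed:

```python
import json

import matplotlib.pyplot as plt  # assumption: matplotlib is available

# Load the trainer state saved in this commit and pull the logged rewards.
with open("trainer_state.json") as f:
    state = json.load(f)

entries = [e for e in state["log_history"] if "reward" in e]
steps = [e["step"] for e in entries]
rewards = [e["reward"] for e in entries]

plt.plot(steps, rewards)
plt.xlabel("step")
plt.ylabel("mean reward")
plt.title("GRPO reward over training")
plt.show()
```
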
trainer_state.json ADDED
@@ -0,0 +1,2239 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 2.9734513274336285,
5
+ "eval_steps": 500,
6
+ "global_step": 169,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "completion_length": 186.12500381469727,
13
+ "epoch": 0.017699115044247787,
14
+ "grad_norm": 2.5752858647583037,
15
+ "kl": 0.0,
16
+ "learning_rate": 8.333333333333333e-08,
17
+ "loss": 0.0,
18
+ "reward": 0.48444899916648865,
19
+ "reward_std": 0.46400773525238037,
20
+ "rewards/correct_code_reward_func": 0.3125,
21
+ "rewards/len_reward_func": 0.17194896191358566,
22
+ "step": 1
23
+ },
24
+ {
25
+ "completion_length": 164.375,
26
+ "epoch": 0.035398230088495575,
27
+ "grad_norm": 10.223894549317365,
28
+ "kl": 0.0,
29
+ "learning_rate": 1.6666666666666665e-07,
30
+ "loss": 0.0,
31
+ "reward": 0.47450903058052063,
32
+ "reward_std": 0.40167468786239624,
33
+ "rewards/correct_code_reward_func": 0.2500000149011612,
34
+ "rewards/len_reward_func": 0.22450900077819824,
35
+ "step": 2
36
+ },
37
+ {
38
+ "completion_length": 164.4791717529297,
39
+ "epoch": 0.05309734513274336,
40
+ "grad_norm": 2.2899995314753827,
41
+ "kl": 4.023313522338867e-05,
42
+ "learning_rate": 2.5e-07,
43
+ "loss": 0.0,
44
+ "reward": 0.37769100069999695,
45
+ "reward_std": 0.5044284015893936,
46
+ "rewards/correct_code_reward_func": 0.2083333432674408,
47
+ "rewards/len_reward_func": 0.16935766488313675,
48
+ "step": 3
49
+ },
50
+ {
51
+ "completion_length": 190.12500762939453,
52
+ "epoch": 0.07079646017699115,
53
+ "grad_norm": 6.009103649538728,
54
+ "kl": 8.815526962280273e-05,
55
+ "learning_rate": 3.333333333333333e-07,
56
+ "loss": 0.0,
57
+ "reward": 0.37105801701545715,
58
+ "reward_std": 0.47604119777679443,
59
+ "rewards/correct_code_reward_func": 0.2083333395421505,
60
+ "rewards/len_reward_func": 0.16272468864917755,
61
+ "step": 4
62
+ },
63
+ {
64
+ "completion_length": 126.10416793823242,
65
+ "epoch": 0.08849557522123894,
66
+ "grad_norm": 2.3931977443517964,
67
+ "kl": 3.49879264831543e-05,
68
+ "learning_rate": 4.1666666666666667e-07,
69
+ "loss": 0.0,
70
+ "reward": 0.5486156344413757,
71
+ "reward_std": 0.40741610527038574,
72
+ "rewards/correct_code_reward_func": 0.3958333358168602,
73
+ "rewards/len_reward_func": 0.15278229862451553,
74
+ "step": 5
75
+ },
76
+ {
77
+ "completion_length": 166.45833587646484,
78
+ "epoch": 0.10619469026548672,
79
+ "grad_norm": 1.5486991749328145,
80
+ "kl": 5.221366882324219e-05,
81
+ "learning_rate": 5e-07,
82
+ "loss": 0.0,
83
+ "reward": 0.5515251755714417,
84
+ "reward_std": 0.47483181953430176,
85
+ "rewards/correct_code_reward_func": 0.2916666716337204,
86
+ "rewards/len_reward_func": 0.25985850393772125,
87
+ "step": 6
88
+ },
89
+ {
90
+ "completion_length": 161.58333587646484,
91
+ "epoch": 0.12389380530973451,
92
+ "grad_norm": 5.714527026195051,
93
+ "kl": 5.5789947509765625e-05,
94
+ "learning_rate": 4.999535676028337e-07,
95
+ "loss": 0.0,
96
+ "reward": 0.46977874636650085,
97
+ "reward_std": 0.461082324385643,
98
+ "rewards/correct_code_reward_func": 0.229166679084301,
99
+ "rewards/len_reward_func": 0.24061208218336105,
100
+ "step": 7
101
+ },
102
+ {
103
+ "completion_length": 161.89583587646484,
104
+ "epoch": 0.1415929203539823,
105
+ "grad_norm": 3.6484747523940286,
106
+ "kl": 0.00010442733764648438,
107
+ "learning_rate": 4.998142876590749e-07,
108
+ "loss": 0.0,
109
+ "reward": 0.2943377196788788,
110
+ "reward_std": 0.3771718442440033,
111
+ "rewards/correct_code_reward_func": 0.1666666679084301,
112
+ "rewards/len_reward_func": 0.12767105549573898,
113
+ "step": 8
114
+ },
115
+ {
116
+ "completion_length": 147.7916717529297,
117
+ "epoch": 0.1592920353982301,
118
+ "grad_norm": 14.356223088928283,
119
+ "kl": 0.00017464160919189453,
120
+ "learning_rate": 4.99582211905537e-07,
121
+ "loss": 0.0,
122
+ "reward": 0.6274224072694778,
123
+ "reward_std": 0.43979279696941376,
124
+ "rewards/correct_code_reward_func": 0.4166666865348816,
125
+ "rewards/len_reward_func": 0.21075571328401566,
126
+ "step": 9
127
+ },
128
+ {
129
+ "completion_length": 188.2916717529297,
130
+ "epoch": 0.17699115044247787,
131
+ "grad_norm": 3.0299854001506494,
132
+ "kl": 8.487701416015625e-05,
133
+ "learning_rate": 4.992574265488882e-07,
134
+ "loss": 0.0,
135
+ "reward": 0.502624899148941,
136
+ "reward_std": 0.35237132012844086,
137
+ "rewards/correct_code_reward_func": 0.3125000149011612,
138
+ "rewards/len_reward_func": 0.19012490659952164,
139
+ "step": 10
140
+ },
141
+ {
142
+ "completion_length": 109.1875,
143
+ "epoch": 0.19469026548672566,
144
+ "grad_norm": 1.8891984084011832,
145
+ "kl": 0.00060272216796875,
146
+ "learning_rate": 4.988400522336303e-07,
147
+ "loss": 0.0,
148
+ "reward": 0.6938449144363403,
149
+ "reward_std": 0.5790197551250458,
150
+ "rewards/correct_code_reward_func": 0.5208333432674408,
151
+ "rewards/len_reward_func": 0.1730116307735443,
152
+ "step": 11
153
+ },
154
+ {
155
+ "completion_length": 171.2916717529297,
156
+ "epoch": 0.21238938053097345,
157
+ "grad_norm": 2.7849518039157664,
158
+ "kl": 0.0006923675537109375,
159
+ "learning_rate": 4.983302439972828e-07,
160
+ "loss": 0.0,
161
+ "reward": 0.4239259362220764,
162
+ "reward_std": 0.48799367249011993,
163
+ "rewards/correct_code_reward_func": 0.20833333395421505,
164
+ "rewards/len_reward_func": 0.21559256315231323,
165
+ "step": 12
166
+ },
167
+ {
168
+ "completion_length": 196.2291717529297,
169
+ "epoch": 0.23008849557522124,
170
+ "grad_norm": 1.392005725111761,
171
+ "kl": 0.000652313232421875,
172
+ "learning_rate": 4.977281912127938e-07,
173
+ "loss": 0.0,
174
+ "reward": 0.5957696735858917,
175
+ "reward_std": 0.5921532511711121,
176
+ "rewards/correct_code_reward_func": 0.4166666716337204,
177
+ "rewards/len_reward_func": 0.17910300940275192,
178
+ "step": 13
179
+ },
180
+ {
181
+ "completion_length": 158.64583587646484,
182
+ "epoch": 0.24778761061946902,
183
+ "grad_norm": 1.5701172166666861,
184
+ "kl": 0.000614166259765625,
185
+ "learning_rate": 4.970341175181955e-07,
186
+ "loss": 0.0,
187
+ "reward": 0.46809761226177216,
188
+ "reward_std": 0.4979168623685837,
189
+ "rewards/correct_code_reward_func": 0.2708333432674408,
190
+ "rewards/len_reward_func": 0.19726425409317017,
191
+ "step": 14
192
+ },
193
+ {
194
+ "completion_length": 147.1041717529297,
195
+ "epoch": 0.26548672566371684,
196
+ "grad_norm": 2.032633618646926,
197
+ "kl": 0.0015106201171875,
198
+ "learning_rate": 4.962482807335314e-07,
199
+ "loss": 0.0,
200
+ "reward": 0.5865300893783569,
201
+ "reward_std": 0.5591276288032532,
202
+ "rewards/correct_code_reward_func": 0.4166666716337204,
203
+ "rewards/len_reward_func": 0.16986342146992683,
204
+ "step": 15
205
+ },
206
+ {
207
+ "completion_length": 177.9791717529297,
208
+ "epoch": 0.2831858407079646,
209
+ "grad_norm": 53.72073405864756,
210
+ "kl": 0.018054962158203125,
211
+ "learning_rate": 4.95370972765087e-07,
212
+ "loss": 0.0,
213
+ "reward": 0.8554503321647644,
214
+ "reward_std": 0.48426124453544617,
215
+ "rewards/correct_code_reward_func": 0.5625000298023224,
216
+ "rewards/len_reward_func": 0.2929503321647644,
217
+ "step": 16
218
+ },
219
+ {
220
+ "completion_length": 187.31250762939453,
221
+ "epoch": 0.3008849557522124,
222
+ "grad_norm": 2.4065750408913034,
223
+ "kl": 0.001964569091796875,
224
+ "learning_rate": 4.944025194969586e-07,
225
+ "loss": 0.0,
226
+ "reward": 0.36077703535556793,
227
+ "reward_std": 0.35280461609363556,
228
+ "rewards/correct_code_reward_func": 0.1041666716337204,
229
+ "rewards/len_reward_func": 0.25661035627126694,
230
+ "step": 17
231
+ },
232
+ {
233
+ "completion_length": 186.14583587646484,
234
+ "epoch": 0.3185840707964602,
235
+ "grad_norm": 20.459541697505053,
236
+ "kl": 0.021259307861328125,
237
+ "learning_rate": 4.933432806700003e-07,
238
+ "loss": 0.0,
239
+ "reward": 0.5004578679800034,
240
+ "reward_std": 0.42624031007289886,
241
+ "rewards/correct_code_reward_func": 0.2708333432674408,
242
+ "rewards/len_reward_func": 0.22962453216314316,
243
+ "step": 18
244
+ },
245
+ {
246
+ "completion_length": 184.2916717529297,
247
+ "epoch": 0.336283185840708,
248
+ "grad_norm": 3.4167543405430436,
249
+ "kl": 0.008358001708984375,
250
+ "learning_rate": 4.921936497481956e-07,
251
+ "loss": 0.0,
252
+ "reward": 0.3476429730653763,
253
+ "reward_std": 0.43119728565216064,
254
+ "rewards/correct_code_reward_func": 0.0833333358168602,
255
+ "rewards/len_reward_func": 0.2643096297979355,
256
+ "step": 19
257
+ },
258
+ {
259
+ "completion_length": 179.87500762939453,
260
+ "epoch": 0.35398230088495575,
261
+ "grad_norm": 2.8250290981037574,
262
+ "kl": 0.0052490234375,
263
+ "learning_rate": 4.909540537725006e-07,
264
+ "loss": 0.0,
265
+ "reward": 0.476805180311203,
266
+ "reward_std": 0.36166805028915405,
267
+ "rewards/correct_code_reward_func": 0.2708333432674408,
268
+ "rewards/len_reward_func": 0.2059718370437622,
269
+ "step": 20
270
+ },
271
+ {
272
+ "completion_length": 148.9375,
273
+ "epoch": 0.37168141592920356,
274
+ "grad_norm": 1.9459373869466972,
275
+ "kl": 0.002933502197265625,
276
+ "learning_rate": 4.896249532022171e-07,
277
+ "loss": 0.0,
278
+ "reward": 0.5851579010486603,
279
+ "reward_std": 0.3626042455434799,
280
+ "rewards/correct_code_reward_func": 0.3333333358168602,
281
+ "rewards/len_reward_func": 0.2518245801329613,
282
+ "step": 21
283
+ },
284
+ {
285
+ "completion_length": 213.8541717529297,
286
+ "epoch": 0.3893805309734513,
287
+ "grad_norm": 0.5828214607615373,
288
+ "kl": 0.0029754638671875,
289
+ "learning_rate": 4.882068417439492e-07,
290
+ "loss": 0.0,
291
+ "reward": 0.5553887784481049,
292
+ "reward_std": 0.5428541898727417,
293
+ "rewards/correct_code_reward_func": 0.291666679084301,
294
+ "rewards/len_reward_func": 0.26372211426496506,
295
+ "step": 22
296
+ },
297
+ {
298
+ "completion_length": 113.43750381469727,
299
+ "epoch": 0.40707964601769914,
300
+ "grad_norm": 1.0373945570716487,
301
+ "kl": 0.0034637451171875,
302
+ "learning_rate": 4.867002461682128e-07,
303
+ "loss": 0.0,
304
+ "reward": 0.49457328021526337,
305
+ "reward_std": 0.38711363077163696,
306
+ "rewards/correct_code_reward_func": 0.2291666716337204,
307
+ "rewards/len_reward_func": 0.26540662348270416,
308
+ "step": 23
309
+ },
310
+ {
311
+ "completion_length": 145.5416717529297,
312
+ "epoch": 0.4247787610619469,
313
+ "grad_norm": 0.8868802158083684,
314
+ "kl": 0.00489044189453125,
315
+ "learning_rate": 4.851057261137608e-07,
316
+ "loss": 0.0,
317
+ "reward": 0.7876249551773071,
318
+ "reward_std": 0.4593852013349533,
319
+ "rewards/correct_code_reward_func": 0.5208333432674408,
320
+ "rewards/len_reward_func": 0.26679155230522156,
321
+ "step": 24
322
+ },
323
+ {
324
+ "completion_length": 223.50000762939453,
325
+ "epoch": 0.4424778761061947,
326
+ "grad_norm": 2.7373544353808152,
327
+ "kl": 0.005214691162109375,
328
+ "learning_rate": 4.83423873879701e-07,
329
+ "loss": 0.0,
330
+ "reward": 0.45352864265441895,
331
+ "reward_std": 0.5192392915487289,
332
+ "rewards/correct_code_reward_func": 0.2291666679084301,
333
+ "rewards/len_reward_func": 0.22436197102069855,
334
+ "step": 25
335
+ },
336
+ {
337
+ "completion_length": 172.56250762939453,
338
+ "epoch": 0.46017699115044247,
339
+ "grad_norm": 1.5744378292792975,
340
+ "kl": 0.0035400390625,
341
+ "learning_rate": 4.816553142054805e-07,
342
+ "loss": 0.0,
343
+ "reward": 0.4348207265138626,
344
+ "reward_std": 0.40823744237422943,
345
+ "rewards/correct_code_reward_func": 0.14583333395421505,
346
+ "rewards/len_reward_func": 0.2889874130487442,
347
+ "step": 26
348
+ },
349
+ {
350
+ "completion_length": 163.5416717529297,
351
+ "epoch": 0.4778761061946903,
352
+ "grad_norm": 37.42131429307232,
353
+ "kl": 0.0279998779296875,
354
+ "learning_rate": 4.798007040388211e-07,
355
+ "loss": 0.0,
356
+ "reward": 0.44616006314754486,
357
+ "reward_std": 0.41116151213645935,
358
+ "rewards/correct_code_reward_func": 0.18750000558793545,
359
+ "rewards/len_reward_func": 0.25866006314754486,
360
+ "step": 27
361
+ },
362
+ {
363
+ "completion_length": 147.1458396911621,
364
+ "epoch": 0.49557522123893805,
365
+ "grad_norm": 3.226533563373661,
366
+ "kl": 0.013214111328125,
367
+ "learning_rate": 4.778607322916895e-07,
368
+ "loss": 0.0,
369
+ "reward": 0.5665057748556137,
370
+ "reward_std": 0.3880573436617851,
371
+ "rewards/correct_code_reward_func": 0.2708333432674408,
372
+ "rewards/len_reward_func": 0.29567238688468933,
373
+ "step": 28
374
+ },
375
+ {
376
+ "completion_length": 127.56250381469727,
377
+ "epoch": 0.5132743362831859,
378
+ "grad_norm": 4.362166865135135,
379
+ "kl": 0.01708984375,
380
+ "learning_rate": 4.758361195843951e-07,
381
+ "loss": 0.0,
382
+ "reward": 0.8003825545310974,
383
+ "reward_std": 0.510110080242157,
384
+ "rewards/correct_code_reward_func": 0.4583333432674408,
385
+ "rewards/len_reward_func": 0.34204918146133423,
386
+ "step": 29
387
+ },
388
+ {
389
+ "completion_length": 173.37500381469727,
390
+ "epoch": 0.5309734513274337,
391
+ "grad_norm": 0.8500372624160994,
392
+ "kl": 0.00797271728515625,
393
+ "learning_rate": 4.7372761797790825e-07,
394
+ "loss": 0.0,
395
+ "reward": 0.37487032264471054,
396
+ "reward_std": 0.32572413980960846,
397
+ "rewards/correct_code_reward_func": 0.1458333432674408,
398
+ "rewards/len_reward_func": 0.22903696447610855,
399
+ "step": 30
400
+ },
401
+ {
402
+ "completion_length": 119.14583587646484,
403
+ "epoch": 0.5486725663716814,
404
+ "grad_norm": 2.2352274664870624,
405
+ "kl": 0.014129638671875,
406
+ "learning_rate": 4.7153601069450146e-07,
407
+ "loss": 0.0,
408
+ "reward": 0.5202069282531738,
409
+ "reward_std": 0.5186183750629425,
410
+ "rewards/correct_code_reward_func": 0.291666679084301,
411
+ "rewards/len_reward_func": 0.22854027152061462,
412
+ "step": 31
413
+ },
414
+ {
415
+ "completion_length": 111.31250381469727,
416
+ "epoch": 0.5663716814159292,
417
+ "grad_norm": 1.1283808844332623,
418
+ "kl": 0.015625,
419
+ "learning_rate": 4.6926211182681294e-07,
420
+ "loss": 0.0,
421
+ "reward": 0.4815867692232132,
422
+ "reward_std": 0.294177770614624,
423
+ "rewards/correct_code_reward_func": 0.1458333432674408,
424
+ "rewards/len_reward_func": 0.3357534259557724,
425
+ "step": 32
426
+ },
427
+ {
428
+ "completion_length": 111.87500381469727,
429
+ "epoch": 0.584070796460177,
430
+ "grad_norm": 15.382447167413178,
431
+ "kl": 0.0740966796875,
432
+ "learning_rate": 4.6690676603544553e-07,
433
+ "loss": 0.0001,
434
+ "reward": 0.30563782900571823,
435
+ "reward_std": 0.5501309335231781,
436
+ "rewards/correct_code_reward_func": 0.1458333395421505,
437
+ "rewards/len_reward_func": 0.15980449318885803,
438
+ "step": 33
439
+ },
440
+ {
441
+ "completion_length": 74.31250190734863,
442
+ "epoch": 0.6017699115044248,
443
+ "grad_norm": 6.16835237121908,
444
+ "kl": 0.038818359375,
445
+ "learning_rate": 4.644708482352092e-07,
446
+ "loss": 0.0,
447
+ "reward": 0.7065277397632599,
448
+ "reward_std": 0.4479583501815796,
449
+ "rewards/correct_code_reward_func": 0.3958333358168602,
450
+ "rewards/len_reward_func": 0.3106943815946579,
451
+ "step": 34
452
+ },
453
+ {
454
+ "completion_length": 96.45833587646484,
455
+ "epoch": 0.6194690265486725,
456
+ "grad_norm": 6.369953012669968,
457
+ "kl": 0.035186767578125,
458
+ "learning_rate": 4.6195526327012623e-07,
459
+ "loss": 0.0,
460
+ "reward": 0.3565668910741806,
461
+ "reward_std": 0.39397968351840973,
462
+ "rewards/correct_code_reward_func": 0.08333333395421505,
463
+ "rewards/len_reward_func": 0.273233562707901,
464
+ "step": 35
465
+ },
466
+ {
467
+ "completion_length": 112.14583587646484,
468
+ "epoch": 0.6371681415929203,
469
+ "grad_norm": 3.3487214552142164,
470
+ "kl": 0.0289306640625,
471
+ "learning_rate": 4.5936094557731806e-07,
472
+ "loss": 0.0,
473
+ "reward": 0.7201341390609741,
474
+ "reward_std": 0.3427349328994751,
475
+ "rewards/correct_code_reward_func": 0.3541666865348816,
476
+ "rewards/len_reward_func": 0.3659674823284149,
477
+ "step": 36
478
+ },
479
+ {
480
+ "completion_length": 101.64583587646484,
481
+ "epoch": 0.6548672566371682,
482
+ "grad_norm": 5.120880381398817,
483
+ "kl": 0.05126953125,
484
+ "learning_rate": 4.566888588399006e-07,
485
+ "loss": 0.0001,
486
+ "reward": 0.5204495191574097,
487
+ "reward_std": 0.3199263662099838,
488
+ "rewards/correct_code_reward_func": 0.1250000037252903,
489
+ "rewards/len_reward_func": 0.39544953405857086,
490
+ "step": 37
491
+ },
492
+ {
493
+ "completion_length": 120.5625,
494
+ "epoch": 0.672566371681416,
495
+ "grad_norm": 10.359850576348157,
496
+ "kl": 0.09923553466796875,
497
+ "learning_rate": 4.5393999562901517e-07,
498
+ "loss": 0.0001,
499
+ "reward": 0.6964526474475861,
500
+ "reward_std": 0.5385148227214813,
501
+ "rewards/correct_code_reward_func": 0.4166666865348816,
502
+ "rewards/len_reward_func": 0.27978597581386566,
503
+ "step": 38
504
+ },
505
+ {
506
+ "completion_length": 110.85417175292969,
507
+ "epoch": 0.6902654867256637,
508
+ "grad_norm": 12.273703285519217,
509
+ "kl": 0.0859375,
510
+ "learning_rate": 4.511153770351288e-07,
511
+ "loss": 0.0001,
512
+ "reward": 0.551011860370636,
513
+ "reward_std": 0.36342084407806396,
514
+ "rewards/correct_code_reward_func": 0.1875000111758709,
515
+ "rewards/len_reward_func": 0.3635118901729584,
516
+ "step": 39
517
+ },
518
+ {
519
+ "completion_length": 68.77083587646484,
520
+ "epoch": 0.7079646017699115,
521
+ "grad_norm": 2.2127761337332514,
522
+ "kl": 0.03765869140625,
523
+ "learning_rate": 4.482160522887403e-07,
524
+ "loss": 0.0,
525
+ "reward": 0.7378278374671936,
526
+ "reward_std": 0.39128121733665466,
527
+ "rewards/correct_code_reward_func": 0.4375000149011612,
528
+ "rewards/len_reward_func": 0.3003278076648712,
529
+ "step": 40
530
+ },
531
+ {
532
+ "completion_length": 135.33333587646484,
533
+ "epoch": 0.7256637168141593,
534
+ "grad_norm": 7.600755671861986,
535
+ "kl": 0.1025390625,
536
+ "learning_rate": 4.4524309837063504e-07,
537
+ "loss": 0.0001,
538
+ "reward": 0.6550733149051666,
539
+ "reward_std": 0.5409570336341858,
540
+ "rewards/correct_code_reward_func": 0.3333333432674408,
541
+ "rewards/len_reward_func": 0.32173997163772583,
542
+ "step": 41
543
+ },
544
+ {
545
+ "completion_length": 87.35416793823242,
546
+ "epoch": 0.7433628318584071,
547
+ "grad_norm": 1.6503719126782672,
548
+ "kl": 0.0264892578125,
549
+ "learning_rate": 4.4219761961182965e-07,
550
+ "loss": 0.0,
551
+ "reward": 0.6292450726032257,
552
+ "reward_std": 0.4537566006183624,
553
+ "rewards/correct_code_reward_func": 0.2708333432674408,
554
+ "rewards/len_reward_func": 0.3584117591381073,
555
+ "step": 42
556
+ },
557
+ {
558
+ "completion_length": 65.29166793823242,
559
+ "epoch": 0.7610619469026548,
560
+ "grad_norm": 4.327974078868625,
561
+ "kl": 0.1019287109375,
562
+ "learning_rate": 4.390807472833584e-07,
563
+ "loss": 0.0001,
564
+ "reward": 0.5142460465431213,
565
+ "reward_std": 0.41541849076747894,
566
+ "rewards/correct_code_reward_func": 0.1875000074505806,
567
+ "rewards/len_reward_func": 0.32674603164196014,
568
+ "step": 43
569
+ },
570
+ {
571
+ "completion_length": 88.62500381469727,
572
+ "epoch": 0.7787610619469026,
573
+ "grad_norm": 2.8856078275542814,
574
+ "kl": 0.068115234375,
575
+ "learning_rate": 4.3589363917605234e-07,
576
+ "loss": 0.0001,
577
+ "reward": 0.48976023495197296,
578
+ "reward_std": 0.39692020416259766,
579
+ "rewards/correct_code_reward_func": 0.1875,
580
+ "rewards/len_reward_func": 0.30226024240255356,
581
+ "step": 44
582
+ },
583
+ {
584
+ "completion_length": 78.75000381469727,
585
+ "epoch": 0.7964601769911505,
586
+ "grad_norm": 1.6857493365819893,
587
+ "kl": 0.04052734375,
588
+ "learning_rate": 4.3263747917046697e-07,
589
+ "loss": 0.0,
590
+ "reward": 0.5636806637048721,
591
+ "reward_std": 0.5081076323986053,
592
+ "rewards/correct_code_reward_func": 0.2500000111758709,
593
+ "rewards/len_reward_func": 0.3136806935071945,
594
+ "step": 45
595
+ },
596
+ {
597
+ "completion_length": 78.50000190734863,
598
+ "epoch": 0.8141592920353983,
599
+ "grad_norm": 1.2755984783224172,
600
+ "kl": 0.0218505859375,
601
+ "learning_rate": 4.2931347679711924e-07,
602
+ "loss": 0.0,
603
+ "reward": 0.6265330016613007,
604
+ "reward_std": 0.38107292354106903,
605
+ "rewards/correct_code_reward_func": 0.2916666716337204,
606
+ "rewards/len_reward_func": 0.33486635982990265,
607
+ "step": 46
608
+ },
609
+ {
610
+ "completion_length": 80.29166793823242,
611
+ "epoch": 0.831858407079646,
612
+ "grad_norm": 1.1240225012044924,
613
+ "kl": 0.0335693359375,
614
+ "learning_rate": 4.259228667871962e-07,
615
+ "loss": 0.0,
616
+ "reward": 0.5936128199100494,
617
+ "reward_std": 0.4675884544849396,
618
+ "rewards/correct_code_reward_func": 0.25,
619
+ "rewards/len_reward_func": 0.34361281991004944,
620
+ "step": 47
621
+ },
622
+ {
623
+ "completion_length": 50.0,
624
+ "epoch": 0.8495575221238938,
625
+ "grad_norm": 8.693611957424073,
626
+ "kl": 0.2470703125,
627
+ "learning_rate": 4.224669086139029e-07,
628
+ "loss": 0.0002,
629
+ "reward": 0.49966277182102203,
630
+ "reward_std": 0.3804493248462677,
631
+ "rewards/correct_code_reward_func": 0.08333333395421505,
632
+ "rewards/len_reward_func": 0.41632944345474243,
633
+ "step": 48
634
+ },
635
+ {
636
+ "completion_length": 92.20833587646484,
637
+ "epoch": 0.8672566371681416,
638
+ "grad_norm": 1.2744927278879081,
639
+ "kl": 0.02978515625,
640
+ "learning_rate": 4.1894688602461913e-07,
641
+ "loss": 0.0,
642
+ "reward": 0.7709261626005173,
643
+ "reward_std": 0.3614697605371475,
644
+ "rewards/correct_code_reward_func": 0.3958333544433117,
645
+ "rewards/len_reward_func": 0.3750927746295929,
646
+ "step": 49
647
+ },
648
+ {
649
+ "completion_length": 127.62500381469727,
650
+ "epoch": 0.8849557522123894,
651
+ "grad_norm": 0.566625300559752,
652
+ "kl": 0.016326904296875,
653
+ "learning_rate": 4.153641065640402e-07,
654
+ "loss": 0.0,
655
+ "reward": 0.5525736510753632,
656
+ "reward_std": 0.42078813910484314,
657
+ "rewards/correct_code_reward_func": 0.20833333395421505,
658
+ "rewards/len_reward_func": 0.34424029290676117,
659
+ "step": 50
660
+ },
661
+ {
662
+ "completion_length": 65.375,
663
+ "epoch": 0.9026548672566371,
664
+ "grad_norm": 12.046039464547933,
665
+ "kl": 0.081298828125,
666
+ "learning_rate": 4.11719901088477e-07,
667
+ "loss": 0.0001,
668
+ "reward": 0.6643719673156738,
669
+ "reward_std": 0.4581465870141983,
670
+ "rewards/correct_code_reward_func": 0.3333333432674408,
671
+ "rewards/len_reward_func": 0.3310386538505554,
672
+ "step": 51
673
+ },
674
+ {
675
+ "completion_length": 103.58333587646484,
676
+ "epoch": 0.9203539823008849,
677
+ "grad_norm": 10.540579232965062,
678
+ "kl": 0.0799560546875,
679
+ "learning_rate": 4.080156232714976e-07,
680
+ "loss": 0.0001,
681
+ "reward": 0.6433776319026947,
682
+ "reward_std": 0.3912515640258789,
683
+ "rewards/correct_code_reward_func": 0.29166667722165585,
684
+ "rewards/len_reward_func": 0.3517109453678131,
685
+ "step": 52
686
+ },
687
+ {
688
+ "completion_length": 62.375003814697266,
689
+ "epoch": 0.9380530973451328,
690
+ "grad_norm": 2.819698340947531,
691
+ "kl": 0.115966796875,
692
+ "learning_rate": 4.0425264910109233e-07,
693
+ "loss": 0.0001,
694
+ "reward": 0.7070481181144714,
695
+ "reward_std": 0.44986239075660706,
696
+ "rewards/correct_code_reward_func": 0.3125,
697
+ "rewards/len_reward_func": 0.39454813301563263,
698
+ "step": 53
699
+ },
700
+ {
701
+ "completion_length": 59.0625,
702
+ "epoch": 0.9557522123893806,
703
+ "grad_norm": 3.22095354432732,
704
+ "kl": 0.1131591796875,
705
+ "learning_rate": 4.004323763685511e-07,
706
+ "loss": 0.0001,
707
+ "reward": 0.5839295089244843,
708
+ "reward_std": 0.26084955781698227,
709
+ "rewards/correct_code_reward_func": 0.14583333395421505,
710
+ "rewards/len_reward_func": 0.43809613585472107,
711
+ "step": 54
712
+ },
713
+ {
714
+ "completion_length": 125.58333969116211,
715
+ "epoch": 0.9734513274336283,
716
+ "grad_norm": 8.551645640447115,
717
+ "kl": 0.15509033203125,
718
+ "learning_rate": 3.9655622414924007e-07,
719
+ "loss": 0.0002,
720
+ "reward": 0.528711274266243,
721
+ "reward_std": 0.3454602509737015,
722
+ "rewards/correct_code_reward_func": 0.1666666716337204,
723
+ "rewards/len_reward_func": 0.3620445877313614,
724
+ "step": 55
725
+ },
726
+ {
727
+ "completion_length": 60.000003814697266,
728
+ "epoch": 0.9911504424778761,
729
+ "grad_norm": 38.727062970707685,
730
+ "kl": 0.20166015625,
731
+ "learning_rate": 3.92625632275474e-07,
732
+ "loss": 0.0002,
733
+ "reward": 0.891117125749588,
734
+ "reward_std": 0.36153580248355865,
735
+ "rewards/correct_code_reward_func": 0.5000000149011612,
736
+ "rewards/len_reward_func": 0.39111708104610443,
737
+ "step": 56
738
+ },
739
+ {
740
+ "completion_length": 91.54167175292969,
741
+ "epoch": 1.0,
742
+ "grad_norm": 38.727062970707685,
743
+ "kl": 0.1865234375,
744
+ "learning_rate": 3.886420608016766e-07,
745
+ "loss": 0.0001,
746
+ "reward": 0.5094150304794312,
747
+ "reward_std": 0.4194638133049011,
748
+ "rewards/correct_code_reward_func": 0.1666666716337204,
749
+ "rewards/len_reward_func": 0.3427482843399048,
750
+ "step": 57
751
+ },
752
+ {
753
+ "completion_length": 96.41666793823242,
754
+ "epoch": 1.0176991150442478,
755
+ "grad_norm": 13.425409986987928,
756
+ "kl": 0.04998779296875,
757
+ "learning_rate": 3.846069894620305e-07,
758
+ "loss": 0.0001,
759
+ "reward": 0.5195242762565613,
760
+ "reward_std": 0.3198210597038269,
761
+ "rewards/correct_code_reward_func": 0.125,
762
+ "rewards/len_reward_func": 0.3945242464542389,
763
+ "step": 58
764
+ },
765
+ {
766
+ "completion_length": 79.3125,
767
+ "epoch": 1.0353982300884956,
768
+ "grad_norm": 1.7429704138905477,
769
+ "kl": 0.0509033203125,
770
+ "learning_rate": 3.8052191712081595e-07,
771
+ "loss": 0.0001,
772
+ "reward": 0.655539482831955,
773
+ "reward_std": 0.2987876646220684,
774
+ "rewards/correct_code_reward_func": 0.1875,
775
+ "rewards/len_reward_func": 0.46803948283195496,
776
+ "step": 59
777
+ },
778
+ {
779
+ "completion_length": 61.187503814697266,
780
+ "epoch": 1.0530973451327434,
781
+ "grad_norm": 7.538322601947877,
782
+ "kl": 0.1343994140625,
783
+ "learning_rate": 3.763883612156441e-07,
784
+ "loss": 0.0001,
785
+ "reward": 0.7049243450164795,
786
+ "reward_std": 0.5336888134479523,
787
+ "rewards/correct_code_reward_func": 0.3125000149011612,
788
+ "rewards/len_reward_func": 0.3924243599176407,
789
+ "step": 60
790
+ },
791
+ {
792
+ "completion_length": 71.45833587646484,
793
+ "epoch": 1.0707964601769913,
794
+ "grad_norm": 1.628031440232102,
795
+ "kl": 0.053955078125,
796
+ "learning_rate": 3.7220785719379095e-07,
797
+ "loss": 0.0001,
798
+ "reward": 0.5856566280126572,
799
+ "reward_std": 0.31656239926815033,
800
+ "rewards/correct_code_reward_func": 0.2083333395421505,
801
+ "rewards/len_reward_func": 0.37732329964637756,
802
+ "step": 61
803
+ },
804
+ {
805
+ "completion_length": 80.64583587646484,
806
+ "epoch": 1.0884955752212389,
807
+ "grad_norm": 0.8807120675882707,
808
+ "kl": 0.05035400390625,
809
+ "learning_rate": 3.6798195794184134e-07,
810
+ "loss": 0.0001,
811
+ "reward": 0.5311767309904099,
812
+ "reward_std": 0.2926383763551712,
813
+ "rewards/correct_code_reward_func": 0.1041666716337204,
814
+ "rewards/len_reward_func": 0.42701007425785065,
815
+ "step": 62
816
+ },
817
+ {
818
+ "completion_length": 112.29167175292969,
819
+ "epoch": 1.1061946902654867,
820
+ "grad_norm": 1.0053639016562932,
821
+ "kl": 0.07916259765625,
822
+ "learning_rate": 3.6371223320885484e-07,
823
+ "loss": 0.0001,
824
+ "reward": 0.4587331861257553,
825
+ "reward_std": 0.33583496510982513,
826
+ "rewards/correct_code_reward_func": 0.08333333395421505,
827
+ "rewards/len_reward_func": 0.3753998577594757,
828
+ "step": 63
829
+ },
830
+ {
831
+ "completion_length": 83.20833587646484,
832
+ "epoch": 1.1238938053097345,
833
+ "grad_norm": 1.7713543449970612,
834
+ "kl": 0.08245849609375,
835
+ "learning_rate": 3.594002690232682e-07,
836
+ "loss": 0.0001,
837
+ "reward": 0.6741357147693634,
838
+ "reward_std": 0.43141913414001465,
839
+ "rewards/correct_code_reward_func": 0.2708333432674408,
840
+ "rewards/len_reward_func": 0.4033023715019226,
841
+ "step": 64
842
+ },
843
+ {
844
+ "completion_length": 84.10416984558105,
845
+ "epoch": 1.1415929203539823,
846
+ "grad_norm": 25.050556888869668,
847
+ "kl": 0.12774658203125,
848
+ "learning_rate": 3.5504766710375047e-07,
849
+ "loss": 0.0001,
850
+ "reward": 0.6237881779670715,
851
+ "reward_std": 0.28890369832515717,
852
+ "rewards/correct_code_reward_func": 0.2083333432674408,
853
+ "rewards/len_reward_func": 0.41545483469963074,
854
+ "step": 65
855
+ },
856
+ {
857
+ "completion_length": 82.27083587646484,
858
+ "epoch": 1.1592920353982301,
859
+ "grad_norm": 1.3229069926746502,
860
+ "kl": 0.0810546875,
861
+ "learning_rate": 3.506560442642299e-07,
862
+ "loss": 0.0001,
863
+ "reward": 0.7232638597488403,
864
+ "reward_std": 0.37123511731624603,
865
+ "rewards/correct_code_reward_func": 0.3541666716337204,
866
+ "rewards/len_reward_func": 0.36909720301628113,
867
+ "step": 66
868
+ },
869
+ {
870
+ "completion_length": 58.00000190734863,
871
+ "epoch": 1.176991150442478,
872
+ "grad_norm": 14.439064098343355,
873
+ "kl": 0.1383056640625,
874
+ "learning_rate": 3.462270318133136e-07,
875
+ "loss": 0.0001,
876
+ "reward": 0.9435772597789764,
877
+ "reward_std": 0.2863286882638931,
878
+ "rewards/correct_code_reward_func": 0.520833358168602,
879
+ "rewards/len_reward_func": 0.42274393141269684,
880
+ "step": 67
881
+ },
882
+ {
883
+ "completion_length": 71.10416984558105,
884
+ "epoch": 1.1946902654867257,
885
+ "grad_norm": 7.987124238678158,
886
+ "kl": 0.122314453125,
887
+ "learning_rate": 3.41762274948323e-07,
888
+ "loss": 0.0001,
889
+ "reward": 0.7681821584701538,
890
+ "reward_std": 0.3728492707014084,
891
+ "rewards/correct_code_reward_func": 0.3541666865348816,
892
+ "rewards/len_reward_func": 0.41401544213294983,
893
+ "step": 68
894
+ },
895
+ {
896
+ "completion_length": 60.66666793823242,
897
+ "epoch": 1.2123893805309733,
898
+ "grad_norm": 2.9983488159761817,
899
+ "kl": 0.0537109375,
900
+ "learning_rate": 3.372634321441702e-07,
901
+ "loss": 0.0001,
902
+ "reward": 0.7418330609798431,
903
+ "reward_std": 0.38279734551906586,
904
+ "rewards/correct_code_reward_func": 0.3541666865348816,
905
+ "rewards/len_reward_func": 0.38766637444496155,
906
+ "step": 69
907
+ },
908
+ {
909
+ "completion_length": 80.62500381469727,
910
+ "epoch": 1.2300884955752212,
911
+ "grad_norm": 3.023771746536167,
912
+ "kl": 0.08154296875,
913
+ "learning_rate": 3.3273217453730205e-07,
914
+ "loss": 0.0001,
915
+ "reward": 0.6804832518100739,
916
+ "reward_std": 0.4174923449754715,
917
+ "rewards/correct_code_reward_func": 0.2708333432674408,
918
+ "rewards/len_reward_func": 0.40964992344379425,
919
+ "step": 70
920
+ },
921
+ {
922
+ "completion_length": 69.5,
923
+ "epoch": 1.247787610619469,
924
+ "grad_norm": 1.4050656113052118,
925
+ "kl": 0.0853271484375,
926
+ "learning_rate": 3.281701853049416e-07,
927
+ "loss": 0.0001,
928
+ "reward": 0.7291666865348816,
929
+ "reward_std": 0.32618680596351624,
930
+ "rewards/correct_code_reward_func": 0.2708333432674408,
931
+ "rewards/len_reward_func": 0.4583333432674408,
932
+ "step": 71
933
+ },
934
+ {
935
+ "completion_length": 74.47916793823242,
936
+ "epoch": 1.2654867256637168,
937
+ "grad_norm": 2.8179580865899894,
938
+ "kl": 0.0390625,
939
+ "learning_rate": 3.23579159039856e-07,
940
+ "loss": 0.0,
941
+ "reward": 0.75091952085495,
942
+ "reward_std": 0.4079275578260422,
943
+ "rewards/correct_code_reward_func": 0.2916666716337204,
944
+ "rewards/len_reward_func": 0.45925286412239075,
945
+ "step": 72
946
+ },
947
+ {
948
+ "completion_length": 65.52083587646484,
949
+ "epoch": 1.2831858407079646,
950
+ "grad_norm": 1.1299435463555585,
951
+ "kl": 0.0399169921875,
952
+ "learning_rate": 3.1896080112088477e-07,
953
+ "loss": 0.0,
954
+ "reward": 0.7291666865348816,
955
+ "reward_std": 0.40629878640174866,
956
+ "rewards/correct_code_reward_func": 0.2708333395421505,
957
+ "rewards/len_reward_func": 0.4583333432674408,
958
+ "step": 73
959
+ },
960
+ {
961
+ "completion_length": 57.000003814697266,
962
+ "epoch": 1.3008849557522124,
963
+ "grad_norm": 24.705642459397488,
964
+ "kl": 0.17333984375,
965
+ "learning_rate": 3.1431682707946117e-07,
966
+ "loss": 0.0002,
967
+ "reward": 0.5623736083507538,
968
+ "reward_std": 0.24504420161247253,
969
+ "rewards/correct_code_reward_func": 0.12500000558793545,
970
+ "rewards/len_reward_func": 0.4373735785484314,
971
+ "step": 74
972
+ },
973
+ {
974
+ "completion_length": 78.33333587646484,
975
+ "epoch": 1.3185840707964602,
976
+ "grad_norm": 0.7830442669221305,
977
+ "kl": 0.0224609375,
978
+ "learning_rate": 3.096489619623621e-07,
979
+ "loss": 0.0,
980
+ "reward": 0.7488380074501038,
981
+ "reward_std": 0.30931201577186584,
982
+ "rewards/correct_code_reward_func": 0.2500000149011612,
983
+ "rewards/len_reward_func": 0.49883799254894257,
984
+ "step": 75
985
+ },
986
+ {
987
+ "completion_length": 87.60417175292969,
988
+ "epoch": 1.336283185840708,
989
+ "grad_norm": 2.1746791001290044,
990
+ "kl": 0.04168701171875,
991
+ "learning_rate": 3.049589396909239e-07,
992
+ "loss": 0.0,
993
+ "reward": 0.5558712333440781,
994
+ "reward_std": 0.20058585330843925,
995
+ "rewards/correct_code_reward_func": 0.1041666716337204,
996
+ "rewards/len_reward_func": 0.45170454680919647,
997
+ "step": 76
998
+ },
999
+ {
1000
+ "completion_length": 72.14583587646484,
1001
+ "epoch": 1.3539823008849559,
1002
+ "grad_norm": 0.41651138876768207,
1003
+ "kl": 0.0919189453125,
1004
+ "learning_rate": 3.0024850241696126e-07,
1005
+ "loss": 0.0001,
1006
+ "reward": 0.6021915674209595,
1007
+ "reward_std": 0.22243696451187134,
1008
+ "rewards/correct_code_reward_func": 0.1458333358168602,
1009
+ "rewards/len_reward_func": 0.4563582092523575,
1010
+ "step": 77
1011
+ },
1012
+ {
1013
+ "completion_length": 54.916666984558105,
1014
+ "epoch": 1.3716814159292037,
1015
+ "grad_norm": 8.88612982008444,
1016
+ "kl": 0.106689453125,
1017
+ "learning_rate": 2.9551939987562865e-07,
1018
+ "loss": 0.0001,
1019
+ "reward": 0.6803071200847626,
1020
+ "reward_std": 0.2964301370084286,
1021
+ "rewards/correct_code_reward_func": 0.2291666679084301,
1022
+ "rewards/len_reward_func": 0.4511404484510422,
1023
+ "step": 78
1024
+ },
1025
+ {
1026
+ "completion_length": 93.91666793823242,
1027
+ "epoch": 1.3893805309734513,
1028
+ "grad_norm": 3.7969039322604865,
1029
+ "kl": 0.086761474609375,
1030
+ "learning_rate": 2.9077338873546564e-07,
1031
+ "loss": 0.0001,
1032
+ "reward": 0.7916666865348816,
1033
+ "reward_std": 0.34503278136253357,
1034
+ "rewards/correct_code_reward_func": 0.2916666716337204,
1035
+ "rewards/len_reward_func": 0.5,
1036
+ "step": 79
1037
+ },
1038
+ {
1039
+ "completion_length": 56.08333396911621,
1040
+ "epoch": 1.407079646017699,
1041
+ "grad_norm": 3.0759876398616743,
1042
+ "kl": 0.076171875,
1043
+ "learning_rate": 2.860122319458661e-07,
1044
+ "loss": 0.0001,
1045
+ "reward": 0.8750000596046448,
1046
+ "reward_std": 0.36751919984817505,
1047
+ "rewards/correct_code_reward_func": 0.3958333432674408,
1048
+ "rewards/len_reward_func": 0.4791666716337204,
1049
+ "step": 80
1050
+ },
1051
+ {
1052
+ "completion_length": 77.58333587646484,
1053
+ "epoch": 1.424778761061947,
1054
+ "grad_norm": 5.267849973823214,
1055
+ "kl": 0.0604248046875,
1056
+ "learning_rate": 2.8123769808221403e-07,
1057
+ "loss": 0.0001,
1058
+ "reward": 0.9166666865348816,
1059
+ "reward_std": 0.4720035046339035,
1060
+ "rewards/correct_code_reward_func": 0.4375000298023224,
1061
+ "rewards/len_reward_func": 0.4791666716337204,
1062
+ "step": 81
1063
+ },
1064
+ {
1065
+ "completion_length": 86.18750190734863,
1066
+ "epoch": 1.4424778761061947,
1067
+ "grad_norm": 16.256704067320886,
1068
+ "kl": 0.05712890625,
1069
+ "learning_rate": 2.764515606889307e-07,
1070
+ "loss": 0.0001,
1071
+ "reward": 0.8077563941478729,
1072
+ "reward_std": 0.3568233400583267,
1073
+ "rewards/correct_code_reward_func": 0.3958333432674408,
1074
+ "rewards/len_reward_func": 0.41192303597927094,
1075
+ "step": 82
1076
+ },
1077
+ {
1078
+ "completion_length": 106.89583587646484,
1079
+ "epoch": 1.4601769911504425,
1080
+ "grad_norm": 2.2341312424056214,
1081
+ "kl": 0.021087646484375,
1082
+ "learning_rate": 2.716555976206747e-07,
1083
+ "loss": 0.0,
1084
+ "reward": 0.7582251131534576,
1085
+ "reward_std": 0.3638576865196228,
1086
+ "rewards/correct_code_reward_func": 0.2916666716337204,
1087
+ "rewards/len_reward_func": 0.46655844151973724,
1088
+ "step": 83
1089
+ },
1090
+ {
1091
+ "completion_length": 48.41666793823242,
1092
+ "epoch": 1.4778761061946903,
1093
+ "grad_norm": 1.3490761289725859,
1094
+ "kl": 0.05279541015625,
1095
+ "learning_rate": 2.66851590381942e-07,
1096
+ "loss": 0.0001,
1097
+ "reward": 0.7612736523151398,
1098
+ "reward_std": 0.23646536469459534,
1099
+ "rewards/correct_code_reward_func": 0.2708333358168602,
1100
+ "rewards/len_reward_func": 0.490440309047699,
1101
+ "step": 84
1102
+ },
1103
+ {
1104
+ "completion_length": 60.187503814697266,
1105
+ "epoch": 1.495575221238938,
1106
+ "grad_norm": 4.538439276716702,
1107
+ "kl": 0.0347900390625,
1108
+ "learning_rate": 2.620413234653093e-07,
1109
+ "loss": 0.0,
1110
+ "reward": 0.8333333432674408,
1111
+ "reward_std": 0.438393235206604,
1112
+ "rewards/correct_code_reward_func": 0.3541666716337204,
1113
+ "rewards/len_reward_func": 0.4791666716337204,
1114
+ "step": 85
1115
+ },
1116
+ {
1117
+ "completion_length": 138.50000762939453,
1118
+ "epoch": 1.5132743362831858,
1119
+ "grad_norm": 0.9112971760232713,
1120
+ "kl": 0.021392822265625,
1121
+ "learning_rate": 2.5722658368856814e-07,
1122
+ "loss": 0.0,
1123
+ "reward": 0.6596881151199341,
1124
+ "reward_std": 0.4098925292491913,
1125
+ "rewards/correct_code_reward_func": 0.1875000074505806,
1126
+ "rewards/len_reward_func": 0.4721881300210953,
1127
+ "step": 86
1128
+ },
1129
+ {
1130
+ "completion_length": 67.89583587646484,
1131
+ "epoch": 1.5309734513274336,
1132
+ "grad_norm": 1.5750222049346805,
1133
+ "kl": 0.033447265625,
1134
+ "learning_rate": 2.5240915953099515e-07,
1135
+ "loss": 0.0,
1136
+ "reward": 0.9335185885429382,
1137
+ "reward_std": 0.32536589354276657,
1138
+ "rewards/correct_code_reward_func": 0.4375000149011612,
1139
+ "rewards/len_reward_func": 0.49601852893829346,
1140
+ "step": 87
1141
+ },
1142
+ {
1143
+ "completion_length": 56.395835876464844,
1144
+ "epoch": 1.5486725663716814,
1145
+ "grad_norm": 0.8224337332016152,
1146
+ "kl": 0.0401611328125,
1147
+ "learning_rate": 2.4759084046900483e-07,
1148
+ "loss": 0.0,
1149
+ "reward": 0.7500000298023224,
1150
+ "reward_std": 0.2342708334326744,
1151
+ "rewards/correct_code_reward_func": 0.3125000149011612,
1152
+ "rewards/len_reward_func": 0.4375000149011612,
1153
+ "step": 88
1154
+ },
1155
+ {
1156
+ "completion_length": 55.250003814697266,
1157
+ "epoch": 1.5663716814159292,
1158
+ "grad_norm": 1.7919585934332407,
1159
+ "kl": 0.046142578125,
1160
+ "learning_rate": 2.427734163114319e-07,
1161
+ "loss": 0.0,
1162
+ "reward": 0.8125000298023224,
1163
+ "reward_std": 0.3177132308483124,
1164
+ "rewards/correct_code_reward_func": 0.3125000149011612,
1165
+ "rewards/len_reward_func": 0.5,
1166
+ "step": 89
1167
+ },
1168
+ {
1169
+ "completion_length": 78.27083587646484,
1170
+ "epoch": 1.584070796460177,
1171
+ "grad_norm": 1.6894819978109317,
1172
+ "kl": 0.0758056640625,
1173
+ "learning_rate": 2.3795867653469067e-07,
1174
+ "loss": 0.0001,
1175
+ "reward": 0.8326875865459442,
1176
+ "reward_std": 0.27240147814154625,
1177
+ "rewards/correct_code_reward_func": 0.35416667722165585,
1178
+ "rewards/len_reward_func": 0.47852087020874023,
1179
+ "step": 90
1180
+ },
1181
+ {
1182
+ "completion_length": 102.83333969116211,
1183
+ "epoch": 1.6017699115044248,
1184
+ "grad_norm": 22.765338134366964,
1185
+ "kl": 0.07025146484375,
1186
+ "learning_rate": 2.3314840961805802e-07,
1187
+ "loss": 0.0001,
1188
+ "reward": 0.686832070350647,
1189
+ "reward_std": 0.2984095811843872,
1190
+ "rewards/correct_code_reward_func": 0.2083333432674408,
1191
+ "rewards/len_reward_func": 0.47849874198436737,
1192
+ "step": 91
1193
+ },
1194
+ {
1195
+ "completion_length": 54.31250190734863,
1196
+ "epoch": 1.6194690265486726,
1197
+ "grad_norm": 1.6877966926685302,
1198
+ "kl": 0.0550537109375,
1199
+ "learning_rate": 2.2834440237932533e-07,
1200
+ "loss": 0.0001,
1201
+ "reward": 0.8750000298023224,
1202
+ "reward_std": 0.38857419788837433,
1203
+ "rewards/correct_code_reward_func": 0.4166666865348816,
1204
+ "rewards/len_reward_func": 0.4583333432674408,
1205
+ "step": 92
1206
+ },
1207
+ {
1208
+ "completion_length": 120.70833969116211,
1209
+ "epoch": 1.6371681415929205,
1210
+ "grad_norm": 10.817524146996126,
1211
+ "kl": 0.0389404296875,
1212
+ "learning_rate": 2.2354843931106932e-07,
1213
+ "loss": 0.0,
1214
+ "reward": 0.7042862474918365,
1215
+ "reward_std": 0.3615289777517319,
1216
+ "rewards/correct_code_reward_func": 0.2083333432674408,
1217
+ "rewards/len_reward_func": 0.49595288932323456,
1218
+ "step": 93
1219
+ },
1220
+ {
1221
+ "completion_length": 44.250000953674316,
1222
+ "epoch": 1.6548672566371683,
1223
+ "grad_norm": 28.86285310594792,
1224
+ "kl": 0.064697265625,
1225
+ "learning_rate": 2.1876230191778597e-07,
1226
+ "loss": 0.0001,
1227
+ "reward": 0.8750000298023224,
1228
+ "reward_std": 0.3857583850622177,
1229
+ "rewards/correct_code_reward_func": 0.3750000149011612,
1230
+ "rewards/len_reward_func": 0.5,
1231
+ "step": 94
1232
+ },
1233
+ {
1234
+ "completion_length": 53.62500190734863,
1235
+ "epoch": 1.672566371681416,
1236
+ "grad_norm": 3.9097872367150397,
1237
+ "kl": 0.0594482421875,
1238
+ "learning_rate": 2.1398776805413398e-07,
1239
+ "loss": 0.0001,
1240
+ "reward": 0.8750000298023224,
1241
+ "reward_std": 0.3233579397201538,
1242
+ "rewards/correct_code_reward_func": 0.3750000149011612,
1243
+ "rewards/len_reward_func": 0.5,
1244
+ "step": 95
1245
+ },
1246
+ {
1247
+ "completion_length": 65.00000190734863,
1248
+ "epoch": 1.6902654867256637,
1249
+ "grad_norm": 1.5650440517517894,
1250
+ "kl": 0.0361328125,
1251
+ "learning_rate": 2.092266112645343e-07,
1252
+ "loss": 0.0,
1253
+ "reward": 0.8589357733726501,
1254
+ "reward_std": 0.3421449810266495,
1255
+ "rewards/correct_code_reward_func": 0.3750000149011612,
1256
+ "rewards/len_reward_func": 0.48393575847148895,
1257
+ "step": 96
1258
+ },
1259
+ {
1260
+ "completion_length": 68.06250381469727,
1261
+ "epoch": 1.7079646017699115,
1262
+ "grad_norm": 2.91895596774148,
1263
+ "kl": 0.0269775390625,
1264
+ "learning_rate": 2.044806001243714e-07,
1265
+ "loss": 0.0,
1266
+ "reward": 0.9035088419914246,
1267
+ "reward_std": 0.48539453744888306,
1268
+ "rewards/correct_code_reward_func": 0.4375000298023224,
1269
+ "rewards/len_reward_func": 0.466008797287941,
1270
+ "step": 97
1271
+ },
1272
+ {
1273
+ "completion_length": 73.70833396911621,
1274
+ "epoch": 1.7256637168141593,
1275
+ "grad_norm": 24.67219025250006,
1276
+ "kl": 0.0465087890625,
1277
+ "learning_rate": 1.997514975830388e-07,
1278
+ "loss": 0.0,
1279
+ "reward": 0.8952526450157166,
1280
+ "reward_std": 0.29202311858534813,
1281
+ "rewards/correct_code_reward_func": 0.3958333432674408,
1282
+ "rewards/len_reward_func": 0.49941930174827576,
1283
+ "step": 98
1284
+ },
1285
+ {
1286
+ "completion_length": 97.75000381469727,
1287
+ "epoch": 1.7433628318584071,
1288
+ "grad_norm": 1.6430763507769786,
1289
+ "kl": 0.0406494140625,
1290
+ "learning_rate": 1.9504106030907603e-07,
1291
+ "loss": 0.0,
1292
+ "reward": 0.7537775337696075,
1293
+ "reward_std": 0.3211555927991867,
1294
+ "rewards/correct_code_reward_func": 0.2916666865348816,
1295
+ "rewards/len_reward_func": 0.46211084723472595,
1296
+ "step": 99
1297
+ },
1298
+ {
1299
+ "completion_length": 52.62500190734863,
1300
+ "epoch": 1.7610619469026547,
1301
+ "grad_norm": 3.9570608772498286,
1302
+ "kl": 0.0634765625,
1303
+ "learning_rate": 1.9035103803763792e-07,
1304
+ "loss": 0.0001,
1305
+ "reward": 0.8125,
1306
+ "reward_std": 0.3857453167438507,
1307
+ "rewards/correct_code_reward_func": 0.3125,
1308
+ "rewards/len_reward_func": 0.5,
1309
+ "step": 100
1310
+ },
1311
+ {
1312
+ "completion_length": 81.35416793823242,
1313
+ "epoch": 1.7787610619469025,
1314
+ "grad_norm": 2.731852842110385,
1315
+ "kl": 0.033447265625,
1316
+ "learning_rate": 1.8568317292053891e-07,
1317
+ "loss": 0.0,
1318
+ "reward": 0.9219505190849304,
1319
+ "reward_std": 0.2703188881278038,
1320
+ "rewards/correct_code_reward_func": 0.4375000149011612,
1321
+ "rewards/len_reward_func": 0.48445048928260803,
1322
+ "step": 101
1323
+ },
1324
+ {
1325
+ "completion_length": 83.31250381469727,
1326
+ "epoch": 1.7964601769911503,
1327
+ "grad_norm": 2.4613714386546595,
1328
+ "kl": 0.022613525390625,
1329
+ "learning_rate": 1.8103919887911523e-07,
1330
+ "loss": 0.0,
1331
+ "reward": 0.7291666865348816,
1332
+ "reward_std": 0.30859364569187164,
1333
+ "rewards/correct_code_reward_func": 0.2291666716337204,
1334
+ "rewards/len_reward_func": 0.5,
1335
+ "step": 102
1336
+ },
1337
+ {
1338
+ "completion_length": 79.33333587646484,
1339
+ "epoch": 1.8141592920353982,
1340
+ "grad_norm": 4.215262416117561,
1341
+ "kl": 0.040771484375,
1342
+ "learning_rate": 1.7642084096014402e-07,
1343
+ "loss": 0.0,
1344
+ "reward": 0.7859848439693451,
1345
+ "reward_std": 0.4127810299396515,
1346
+ "rewards/correct_code_reward_func": 0.291666679084301,
1347
+ "rewards/len_reward_func": 0.4943181872367859,
1348
+ "step": 103
1349
+ },
1350
+ {
1351
+ "completion_length": 58.52083396911621,
1352
+ "epoch": 1.831858407079646,
1353
+ "grad_norm": 4.745442984631991,
1354
+ "kl": 0.08740234375,
1355
+ "learning_rate": 1.7182981469505847e-07,
1356
+ "loss": 0.0001,
1357
+ "reward": 0.8333333730697632,
1358
+ "reward_std": 0.45660629868507385,
1359
+ "rewards/correct_code_reward_func": 0.354166679084301,
1360
+ "rewards/len_reward_func": 0.4791666716337204,
1361
+ "step": 104
1362
+ },
1363
+ {
1364
+ "completion_length": 112.64583587646484,
1365
+ "epoch": 1.8495575221238938,
1366
+ "grad_norm": 1.7418907068386247,
1367
+ "kl": 0.0325927734375,
1368
+ "learning_rate": 1.672678254626979e-07,
1369
+ "loss": 0.0,
1370
+ "reward": 0.6364757716655731,
1371
+ "reward_std": 0.3013785034418106,
1372
+ "rewards/correct_code_reward_func": 0.18750000558793545,
1373
+ "rewards/len_reward_func": 0.4489757716655731,
1374
+ "step": 105
1375
+ },
1376
+ {
1377
+ "completion_length": 57.5,
1378
+ "epoch": 1.8672566371681416,
1379
+ "grad_norm": 3.579303393703891,
1380
+ "kl": 0.0543212890625,
1381
+ "learning_rate": 1.6273656785582984e-07,
1382
+ "loss": 0.0001,
1383
+ "reward": 0.8536822199821472,
1384
+ "reward_std": 0.45756329596042633,
1385
+ "rewards/correct_code_reward_func": 0.3958333432674408,
1386
+ "rewards/len_reward_func": 0.45784883201122284,
1387
+ "step": 106
1388
+ },
1389
+ {
1390
+ "completion_length": 83.52083587646484,
1391
+ "epoch": 1.8849557522123894,
1392
+ "grad_norm": 2.1418835681881925,
1393
+ "kl": 0.0758056640625,
1394
+ "learning_rate": 1.5823772505167698e-07,
1395
+ "loss": 0.0001,
1396
+ "reward": 0.8958333432674408,
1397
+ "reward_std": 0.49633626639842987,
1398
+ "rewards/correct_code_reward_func": 0.4375000149011612,
1399
+ "rewards/len_reward_func": 0.4583333432674408,
1400
+ "step": 107
1401
+ },
1402
+ {
1403
+ "completion_length": 94.20833587646484,
1404
+ "epoch": 1.9026548672566372,
1405
+ "grad_norm": 32.45107688241668,
1406
+ "kl": 0.025146484375,
1407
+ "learning_rate": 1.5377296818668635e-07,
1408
+ "loss": 0.0,
1409
+ "reward": 0.854166716337204,
1410
+ "reward_std": 0.37177951633930206,
1411
+ "rewards/correct_code_reward_func": 0.3958333432674408,
1412
+ "rewards/len_reward_func": 0.4583333432674408,
1413
+ "step": 108
1414
+ },
1415
+ {
1416
+ "completion_length": 72.89583396911621,
1417
+ "epoch": 1.920353982300885,
1418
+ "grad_norm": 1.7762554154885124,
1419
+ "kl": 0.04364013671875,
1420
+ "learning_rate": 1.4934395573577013e-07,
1421
+ "loss": 0.0,
1422
+ "reward": 0.7892157137393951,
1423
+ "reward_std": 0.3116895519196987,
1424
+ "rewards/correct_code_reward_func": 0.3125000149011612,
1425
+ "rewards/len_reward_func": 0.47671568393707275,
1426
+ "step": 109
1427
+ },
1428
+ {
1429
+ "completion_length": 66.39583587646484,
1430
+ "epoch": 1.9380530973451329,
1431
+ "grad_norm": 1.3218901012489044,
1432
+ "kl": 0.02496337890625,
1433
+ "learning_rate": 1.4495233289624958e-07,
1434
+ "loss": 0.0,
1435
+ "reward": 0.7881554365158081,
1436
+ "reward_std": 0.3200981765985489,
1437
+ "rewards/correct_code_reward_func": 0.2916666716337204,
1438
+ "rewards/len_reward_func": 0.4964887797832489,
1439
+ "step": 110
1440
+ },
1441
+ {
1442
+ "completion_length": 58.291669845581055,
1443
+ "epoch": 1.9557522123893807,
1444
+ "grad_norm": 0.9195423141452592,
1445
+ "kl": 0.066162109375,
1446
+ "learning_rate": 1.4059973097673185e-07,
1447
+ "loss": 0.0001,
1448
+ "reward": 0.7083333432674408,
1449
+ "reward_std": 0.3382536321878433,
1450
+ "rewards/correct_code_reward_func": 0.2500000111758709,
1451
+ "rewards/len_reward_func": 0.4583333432674408,
1452
+ "step": 111
1453
+ },
1454
+ {
1455
+ "completion_length": 94.41667175292969,
1456
+ "epoch": 1.9734513274336283,
1457
+ "grad_norm": 89.52879883380525,
1458
+ "kl": 0.0601806640625,
1459
+ "learning_rate": 1.3628776679114514e-07,
1460
+ "loss": 0.0001,
1461
+ "reward": 0.7916666865348816,
1462
+ "reward_std": 0.31285394728183746,
1463
+ "rewards/correct_code_reward_func": 0.3125,
1464
+ "rewards/len_reward_func": 0.4791666716337204,
1465
+ "step": 112
1466
+ },
1467
+ {
1468
+ "completion_length": 100.27083587646484,
1469
+ "epoch": 1.991150442477876,
1470
+ "grad_norm": 2.390953296031483,
1471
+ "kl": 0.056396484375,
1472
+ "learning_rate": 1.320180420581587e-07,
1473
+ "loss": 0.0001,
1474
+ "reward": 0.6097994744777679,
1475
+ "reward_std": 0.18817735463380814,
1476
+ "rewards/correct_code_reward_func": 0.12500000558793545,
1477
+ "rewards/len_reward_func": 0.48479947447776794,
1478
+ "step": 113
1479
+ },
1480
+ {
1481
+ "completion_length": 63.875,
1482
+ "epoch": 2.0,
1483
+ "grad_norm": 3.601264581967262,
1484
+ "kl": 0.02001953125,
1485
+ "learning_rate": 1.2779214280620908e-07,
1486
+ "loss": 0.0,
1487
+ "reward": 0.6666666865348816,
1488
+ "reward_std": 0.17817416787147522,
1489
+ "rewards/correct_code_reward_func": 0.1666666716337204,
1490
+ "rewards/len_reward_func": 0.5,
1491
+ "step": 114
1492
+ },
1493
+ {
1494
+ "completion_length": 48.64583396911621,
1495
+ "epoch": 2.017699115044248,
1496
+ "grad_norm": 3.9192662388145028,
1497
+ "kl": 0.05908203125,
1498
+ "learning_rate": 1.2361163878435594e-07,
1499
+ "loss": 0.0001,
1500
+ "reward": 0.6176470816135406,
1501
+ "reward_std": 0.2277354598045349,
1502
+ "rewards/correct_code_reward_func": 0.12500000558793545,
1503
+ "rewards/len_reward_func": 0.49264705181121826,
1504
+ "step": 115
1505
+ },
1506
+ {
1507
+ "completion_length": 51.41666793823242,
1508
+ "epoch": 2.0353982300884956,
1509
+ "grad_norm": 2.2242727716589967,
1510
+ "kl": 0.04486083984375,
1511
+ "learning_rate": 1.1947808287918403e-07,
1512
+ "loss": 0.0,
1513
+ "reward": 0.7083333730697632,
1514
+ "reward_std": 0.2994871214032173,
1515
+ "rewards/correct_code_reward_func": 0.2083333395421505,
1516
+ "rewards/len_reward_func": 0.5,
1517
+ "step": 116
1518
+ },
1519
+ {
1520
+ "completion_length": 69.04166793823242,
1521
+ "epoch": 2.0530973451327434,
1522
+ "grad_norm": 5.407021499840827,
1523
+ "kl": 0.0469970703125,
1524
+ "learning_rate": 1.1539301053796949e-07,
1525
+ "loss": 0.0,
1526
+ "reward": 0.9791666865348816,
1527
+ "reward_std": 0.2946278229355812,
1528
+ "rewards/correct_code_reward_func": 0.4791666865348816,
1529
+ "rewards/len_reward_func": 0.5,
1530
+ "step": 117
1531
+ },
1532
+ {
1533
+ "completion_length": 47.83333396911621,
1534
+ "epoch": 2.0707964601769913,
1535
+ "grad_norm": 29.2301422488625,
1536
+ "kl": 0.08349609375,
1537
+ "learning_rate": 1.1135793919832334e-07,
1538
+ "loss": 0.0001,
1539
+ "reward": 0.9467871785163879,
1540
+ "reward_std": 0.43978360295295715,
1541
+ "rewards/correct_code_reward_func": 0.4791666716337204,
1542
+ "rewards/len_reward_func": 0.46762049198150635,
1543
+ "step": 118
1544
+ },
1545
+ {
1546
+ "completion_length": 90.41666793823242,
1547
+ "epoch": 2.088495575221239,
1548
+ "grad_norm": 1.804477992356325,
1549
+ "kl": 0.02587890625,
1550
+ "learning_rate": 1.0737436772452602e-07,
1551
+ "loss": 0.0,
1552
+ "reward": 0.7708333432674408,
1553
+ "reward_std": 0.3584126979112625,
1554
+ "rewards/correct_code_reward_func": 0.2708333358168602,
1555
+ "rewards/len_reward_func": 0.5,
1556
+ "step": 119
1557
+ },
1558
+ {
1559
+ "completion_length": 150.75000762939453,
1560
+ "epoch": 2.106194690265487,
1561
+ "grad_norm": 24.510631619520034,
1562
+ "kl": 0.0252685546875,
1563
+ "learning_rate": 1.0344377585075997e-07,
1564
+ "loss": 0.0,
1565
+ "reward": 0.684719979763031,
1566
+ "reward_std": 0.30992700159549713,
1567
+ "rewards/correct_code_reward_func": 0.1875000074505806,
1568
+ "rewards/len_reward_func": 0.4972199648618698,
1569
+ "step": 120
1570
+ },
1571
+ {
1572
+ "completion_length": 79.85416793823242,
1573
+ "epoch": 2.1238938053097347,
1574
+ "grad_norm": 2.667553033080654,
1575
+ "kl": 0.0318603515625,
1576
+ "learning_rate": 9.95676236314489e-08,
1577
+ "loss": 0.0,
1578
+ "reward": 0.8333333432674408,
1579
+ "reward_std": 0.31142252683639526,
1580
+ "rewards/correct_code_reward_func": 0.3333333432674408,
1581
+ "rewards/len_reward_func": 0.5,
1582
+ "step": 121
1583
+ },
1584
+ {
1585
+ "completion_length": 91.56250381469727,
1586
+ "epoch": 2.1415929203539825,
1587
+ "grad_norm": 100.44717449922435,
1588
+ "kl": 0.2076416015625,
1589
+ "learning_rate": 9.574735089890764e-08,
1590
+ "loss": 0.0002,
1591
+ "reward": 0.7708333432674408,
1592
+ "reward_std": 0.30231600999832153,
1593
+ "rewards/correct_code_reward_func": 0.2916666716337204,
1594
+ "rewards/len_reward_func": 0.4791666716337204,
1595
+ "step": 122
1596
+ },
1597
+ {
1598
+ "completion_length": 87.70833396911621,
1599
+ "epoch": 2.15929203539823,
1600
+ "grad_norm": 77.23064615733736,
1601
+ "kl": 0.22283935546875,
1602
+ "learning_rate": 9.198437672850248e-08,
1603
+ "loss": 0.0002,
1604
+ "reward": 0.770833358168602,
1605
+ "reward_std": 0.3205290399491787,
1606
+ "rewards/correct_code_reward_func": 0.2916666865348816,
1607
+ "rewards/len_reward_func": 0.4791666716337204,
1608
+ "step": 123
1609
+ },
1610
+ {
1611
+ "completion_length": 62.312503814697266,
1612
+ "epoch": 2.1769911504424777,
1613
+ "grad_norm": 17.734240204261667,
1614
+ "kl": 0.0535888671875,
1615
+ "learning_rate": 8.8280098911523e-08,
1616
+ "loss": 0.0001,
1617
+ "reward": 0.875,
1618
+ "reward_std": 0.2994871288537979,
1619
+ "rewards/correct_code_reward_func": 0.3958333432674408,
1620
+ "rewards/len_reward_func": 0.4791666716337204,
1621
+ "step": 124
1622
+ },
1623
+ {
1624
+ "completion_length": 65.41666793823242,
1625
+ "epoch": 2.1946902654867255,
1626
+ "grad_norm": 28.039058681720235,
1627
+ "kl": 0.0321044921875,
1628
+ "learning_rate": 8.463589343595976e-08,
1629
+ "loss": 0.0,
1630
+ "reward": 1.0833333730697632,
1631
+ "reward_std": 0.5049939155578613,
1632
+ "rewards/correct_code_reward_func": 0.5833333432674408,
1633
+ "rewards/len_reward_func": 0.5,
1634
+ "step": 125
1635
+ },
1636
+ {
1637
+ "completion_length": 98.64583396911621,
1638
+ "epoch": 2.2123893805309733,
1639
+ "grad_norm": 6.872794457305926,
1640
+ "kl": 0.0435791015625,
1641
+ "learning_rate": 8.105311397538084e-08,
1642
+ "loss": 0.0,
1643
+ "reward": 0.7077258378267288,
1644
+ "reward_std": 0.37921955436468124,
1645
+ "rewards/correct_code_reward_func": 0.25000000558793545,
1646
+ "rewards/len_reward_func": 0.4577258378267288,
1647
+ "step": 126
1648
+ },
1649
+ {
1650
+ "completion_length": 71.22916984558105,
1651
+ "epoch": 2.230088495575221,
1652
+ "grad_norm": 2.9985031046208857,
1653
+ "kl": 0.0357666015625,
1654
+ "learning_rate": 7.753309138609704e-08,
1655
+ "loss": 0.0,
1656
+ "reward": 0.7608647048473358,
1657
+ "reward_std": 0.35784420371055603,
1658
+ "rewards/correct_code_reward_func": 0.2708333358168602,
1659
+ "rewards/len_reward_func": 0.490031361579895,
1660
+ "step": 127
1661
+ },
1662
+ {
1663
+ "completion_length": 72.87500190734863,
1664
+ "epoch": 2.247787610619469,
1665
+ "grad_norm": 1.1881439057403123,
1666
+ "kl": 0.03509521484375,
1667
+ "learning_rate": 7.407713321280375e-08,
1668
+ "loss": 0.0,
1669
+ "reward": 0.854166716337204,
1670
+ "reward_std": 0.30231600999832153,
1671
+ "rewards/correct_code_reward_func": 0.354166679084301,
1672
+ "rewards/len_reward_func": 0.5,
1673
+ "step": 128
1674
+ },
1675
+ {
1676
+ "completion_length": 95.39583587646484,
1677
+ "epoch": 2.265486725663717,
1678
+ "grad_norm": 2.071026140631432,
1679
+ "kl": 0.0494384765625,
1680
+ "learning_rate": 7.06865232028808e-08,
1681
+ "loss": 0.0,
1682
+ "reward": 0.8162688612937927,
1683
+ "reward_std": 0.29165610671043396,
1684
+ "rewards/correct_code_reward_func": 0.3541666716337204,
1685
+ "rewards/len_reward_func": 0.46210217475891113,
1686
+ "step": 129
1687
+ },
1688
+ {
1689
+ "completion_length": 78.43750381469727,
1690
+ "epoch": 2.2831858407079646,
1691
+ "grad_norm": 0.7368071897691737,
1692
+ "kl": 0.027099609375,
1693
+ "learning_rate": 6.736252082953306e-08,
1694
+ "loss": 0.0,
1695
+ "reward": 0.8333333432674408,
1696
+ "reward_std": 0.2903675436973572,
1697
+ "rewards/correct_code_reward_func": 0.3333333358168602,
1698
+ "rewards/len_reward_func": 0.5,
1699
+ "step": 130
1700
+ },
1701
+ {
1702
+ "completion_length": 73.45833587646484,
1703
+ "epoch": 2.3008849557522124,
1704
+ "grad_norm": 2.1125816683606726,
1705
+ "kl": 0.0389404296875,
1706
+ "learning_rate": 6.410636082394771e-08,
1707
+ "loss": 0.0,
1708
+ "reward": 0.5384955406188965,
1709
+ "reward_std": 0.272975854575634,
1710
+ "rewards/correct_code_reward_func": 0.0833333358168602,
1711
+ "rewards/len_reward_func": 0.4551621973514557,
1712
+ "step": 131
1713
+ },
1714
+ {
1715
+ "completion_length": 101.02083587646484,
1716
+ "epoch": 2.3185840707964602,
1717
+ "grad_norm": 3.1749370092384415,
1718
+ "kl": 0.0380859375,
1719
+ "learning_rate": 6.091925271664156e-08,
1720
+ "loss": 0.0,
1721
+ "reward": 0.6666666716337204,
1722
+ "reward_std": 0.2903675250709057,
1723
+ "rewards/correct_code_reward_func": 0.1875,
1724
+ "rewards/len_reward_func": 0.4791666716337204,
1725
+ "step": 132
1726
+ },
1727
+ {
1728
+ "completion_length": 64.22916793823242,
1729
+ "epoch": 2.336283185840708,
1730
+ "grad_norm": 2.773648686745547,
1731
+ "kl": 0.11944580078125,
1732
+ "learning_rate": 5.7802380388170344e-08,
1733
+ "loss": 0.0001,
1734
+ "reward": 0.8125,
1735
+ "reward_std": 0.37034809589385986,
1736
+ "rewards/correct_code_reward_func": 0.3333333358168602,
1737
+ "rewards/len_reward_func": 0.4791666716337204,
1738
+ "step": 133
1739
+ },
1740
+ {
1741
+ "completion_length": 56.64583396911621,
1742
+ "epoch": 2.353982300884956,
1743
+ "grad_norm": 3.7650155223115793,
1744
+ "kl": 0.0516357421875,
1745
+ "learning_rate": 5.4756901629364886e-08,
1746
+ "loss": 0.0001,
1747
+ "reward": 1.129325270652771,
1748
+ "reward_std": 0.32795289158821106,
1749
+ "rewards/correct_code_reward_func": 0.6458333432674408,
1750
+ "rewards/len_reward_func": 0.4834919422864914,
1751
+ "step": 134
1752
+ },
1753
+ {
1754
+ "completion_length": 70.1875,
1755
+ "epoch": 2.3716814159292037,
1756
+ "grad_norm": 1.0135949886271995,
1757
+ "kl": 0.042236328125,
1758
+ "learning_rate": 5.178394771125968e-08,
1759
+ "loss": 0.0,
1760
+ "reward": 0.7218915224075317,
1761
+ "reward_std": 0.2874666675925255,
1762
+ "rewards/correct_code_reward_func": 0.2291666716337204,
1763
+ "rewards/len_reward_func": 0.49272486567497253,
1764
+ "step": 135
1765
+ },
1766
+ {
1767
+ "completion_length": 132.7916717529297,
1768
+ "epoch": 2.3893805309734515,
1769
+ "grad_norm": 1.3477438258822867,
1770
+ "kl": 0.03106689453125,
1771
+ "learning_rate": 4.888462296487128e-08,
1772
+ "loss": 0.0,
1773
+ "reward": 0.9166666865348816,
1774
+ "reward_std": 0.42218445241451263,
1775
+ "rewards/correct_code_reward_func": 0.4166666865348816,
1776
+ "rewards/len_reward_func": 0.5,
1777
+ "step": 136
1778
+ },
1779
+ {
1780
+ "completion_length": 60.22916793823242,
1781
+ "epoch": 2.4070796460176993,
1782
+ "grad_norm": 1.2650991231847002,
1783
+ "kl": 0.058837890625,
1784
+ "learning_rate": 4.606000437098476e-08,
1785
+ "loss": 0.0001,
1786
+ "reward": 0.8958333730697632,
1787
+ "reward_std": 0.2041093409061432,
1788
+ "rewards/correct_code_reward_func": 0.4166666716337204,
1789
+ "rewards/len_reward_func": 0.4791666716337204,
1790
+ "step": 137
1791
+ },
1792
+ {
1793
+ "completion_length": 64.6875,
1794
+ "epoch": 2.4247787610619467,
1795
+ "grad_norm": 2.901815806581681,
1796
+ "kl": 0.03662109375,
1797
+ "learning_rate": 4.3311141160099377e-08,
1798
+ "loss": 0.0,
1799
+ "reward": 0.9150572419166565,
1800
+ "reward_std": 0.3222523480653763,
1801
+ "rewards/correct_code_reward_func": 0.4166666716337204,
1802
+ "rewards/len_reward_func": 0.4983905553817749,
1803
+ "step": 138
1804
+ },
1805
+ {
1806
+ "completion_length": 92.79166984558105,
1807
+ "epoch": 2.442477876106195,
1808
+ "grad_norm": 0.6516445128630077,
1809
+ "kl": 0.03466796875,
1810
+ "learning_rate": 4.063905442268201e-08,
1811
+ "loss": 0.0,
1812
+ "reward": 0.6041666865348816,
1813
+ "reward_std": 0.16340987384319305,
1814
+ "rewards/correct_code_reward_func": 0.1041666679084301,
1815
+ "rewards/len_reward_func": 0.5,
1816
+ "step": 139
1817
+ },
1818
+ {
1819
+ "completion_length": 67.60416793823242,
1820
+ "epoch": 2.4601769911504423,
1821
+ "grad_norm": 1.8554639671156326,
1822
+ "kl": 0.0423583984375,
1823
+ "learning_rate": 3.8044736729873795e-08,
1824
+ "loss": 0.0,
1825
+ "reward": 0.9469756484031677,
1826
+ "reward_std": 0.43488872051239014,
1827
+ "rewards/correct_code_reward_func": 0.479166679084301,
1828
+ "rewards/len_reward_func": 0.46780893206596375,
1829
+ "step": 140
1830
+ },
1831
+ {
1832
+ "completion_length": 77.45833396911621,
1833
+ "epoch": 2.47787610619469,
1834
+ "grad_norm": 3.8300963552021754,
1835
+ "kl": 0.04541015625,
1836
+ "learning_rate": 3.552915176479071e-08,
1837
+ "loss": 0.0,
1838
+ "reward": 1.0416666865348816,
1839
+ "reward_std": 0.320542111992836,
1840
+ "rewards/correct_code_reward_func": 0.5416666716337204,
1841
+ "rewards/len_reward_func": 0.5,
1842
+ "step": 141
1843
+ },
1844
+ {
1845
+ "completion_length": 58.708335876464844,
1846
+ "epoch": 2.495575221238938,
1847
+ "grad_norm": 1.6137020616307847,
1848
+ "kl": 0.06591796875,
1849
+ "learning_rate": 3.309323396455446e-08,
1850
+ "loss": 0.0001,
1851
+ "reward": 0.9375000596046448,
1852
+ "reward_std": 0.2658637687563896,
1853
+ "rewards/correct_code_reward_func": 0.4583333432674408,
1854
+ "rewards/len_reward_func": 0.4791666716337204,
1855
+ "step": 142
1856
+ },
1857
+ {
1858
+ "completion_length": 86.12500381469727,
1859
+ "epoch": 2.5132743362831858,
1860
+ "grad_norm": 3.390075353900997,
1861
+ "kl": 0.0819091796875,
1862
+ "learning_rate": 3.073788817318707e-08,
1863
+ "loss": 0.0001,
1864
+ "reward": 0.8744058609008789,
1865
+ "reward_std": 0.429522842168808,
1866
+ "rewards/correct_code_reward_func": 0.3750000149011612,
1867
+ "rewards/len_reward_func": 0.4994058310985565,
1868
+ "step": 143
1869
+ },
1870
+ {
1871
+ "completion_length": 76.89583587646484,
1872
+ "epoch": 2.5309734513274336,
1873
+ "grad_norm": 17.278809046793295,
1874
+ "kl": 0.2576904296875,
1875
+ "learning_rate": 2.846398930549859e-08,
1876
+ "loss": 0.0003,
1877
+ "reward": 0.7083333432674408,
1878
+ "reward_std": 0.1178511306643486,
1879
+ "rewards/correct_code_reward_func": 0.20833333395421505,
1880
+ "rewards/len_reward_func": 0.5,
1881
+ "step": 144
1882
+ },
1883
+ {
1884
+ "completion_length": 86.625,
1885
+ "epoch": 2.5486725663716814,
1886
+ "grad_norm": 4.630135346925831,
1887
+ "kl": 0.0435791015625,
1888
+ "learning_rate": 2.62723820220917e-08,
1889
+ "loss": 0.0,
1890
+ "reward": 0.7428782880306244,
1891
+ "reward_std": 0.3244539424777031,
1892
+ "rewards/correct_code_reward_func": 0.2708333432674408,
1893
+ "rewards/len_reward_func": 0.4720449447631836,
1894
+ "step": 145
1895
+ },
1896
+ {
1897
+ "completion_length": 60.54166793823242,
1898
+ "epoch": 2.566371681415929,
1899
+ "grad_norm": 1.7403036344539455,
1900
+ "kl": 0.067626953125,
1901
+ "learning_rate": 2.416388041560491e-08,
1902
+ "loss": 0.0001,
1903
+ "reward": 1.0208333730697632,
1904
+ "reward_std": 0.24056155234575272,
1905
+ "rewards/correct_code_reward_func": 0.520833358168602,
1906
+ "rewards/len_reward_func": 0.5,
1907
+ "step": 146
1908
+ },
1909
+ {
1910
+ "completion_length": 72.68750190734863,
1911
+ "epoch": 2.584070796460177,
1912
+ "grad_norm": 8.835725978147865,
1913
+ "kl": 0.03240966796875,
1914
+ "learning_rate": 2.2139267708310454e-08,
1915
+ "loss": 0.0,
1916
+ "reward": 1.0000000596046448,
1917
+ "reward_std": 0.42218445241451263,
1918
+ "rewards/correct_code_reward_func": 0.5000000149011612,
1919
+ "rewards/len_reward_func": 0.5,
1920
+ "step": 147
1921
+ },
1922
+ {
1923
+ "completion_length": 65.31250381469727,
1924
+ "epoch": 2.601769911504425,
1925
+ "grad_norm": 21.996556093676393,
1926
+ "kl": 0.144287109375,
1927
+ "learning_rate": 2.019929596117889e-08,
1928
+ "loss": 0.0001,
1929
+ "reward": 0.9583333730697632,
1930
+ "reward_std": 0.4446708858013153,
1931
+ "rewards/correct_code_reward_func": 0.4583333432674408,
1932
+ "rewards/len_reward_func": 0.5,
1933
+ "step": 148
1934
+ },
1935
+ {
1936
+ "completion_length": 54.270835876464844,
1937
+ "epoch": 2.6194690265486726,
1938
+ "grad_norm": 4.1936490058839695,
1939
+ "kl": 0.099609375,
1940
+ "learning_rate": 1.8344685794519504e-08,
1941
+ "loss": 0.0001,
1942
+ "reward": 1.036210298538208,
1943
+ "reward_std": 0.37449972331523895,
1944
+ "rewards/correct_code_reward_func": 0.5416666716337204,
1945
+ "rewards/len_reward_func": 0.49454365670681,
1946
+ "step": 149
1947
+ },
1948
+ {
1949
+ "completion_length": 99.29166793823242,
1950
+ "epoch": 2.6371681415929205,
1951
+ "grad_norm": 63.75890524377285,
1952
+ "kl": 0.2841796875,
1953
+ "learning_rate": 1.6576126120299043e-08,
1954
+ "loss": 0.0003,
1955
+ "reward": 0.8559730350971222,
1956
+ "reward_std": 0.3255860209465027,
1957
+ "rewards/correct_code_reward_func": 0.3750000149011612,
1958
+ "rewards/len_reward_func": 0.4809730350971222,
1959
+ "step": 150
1960
+ },
1961
+ {
1962
+ "completion_length": 107.31250381469727,
1963
+ "epoch": 2.6548672566371683,
1964
+ "grad_norm": 36.89829026803691,
1965
+ "kl": 0.7239990234375,
1966
+ "learning_rate": 1.4894273886239206e-08,
1967
+ "loss": 0.0007,
1968
+ "reward": 0.6250000298023224,
1969
+ "reward_std": 0.3382536321878433,
1970
+ "rewards/correct_code_reward_func": 0.1666666716337204,
1971
+ "rewards/len_reward_func": 0.4583333432674408,
1972
+ "step": 151
1973
+ },
1974
+ {
1975
+ "completion_length": 64.35416793823242,
1976
+ "epoch": 2.672566371681416,
1977
+ "grad_norm": 2.096221752318607,
1978
+ "kl": 0.052490234375,
1979
+ "learning_rate": 1.3299753831787192e-08,
1980
+ "loss": 0.0001,
1981
+ "reward": 0.75,
1982
+ "reward_std": 0.38857419788837433,
1983
+ "rewards/correct_code_reward_func": 0.25,
1984
+ "rewards/len_reward_func": 0.5,
1985
+ "step": 152
1986
+ },
1987
+ {
1988
+ "completion_length": 96.64583587646484,
1989
+ "epoch": 2.6902654867256635,
1990
+ "grad_norm": 0.6573939843127445,
1991
+ "kl": 0.032470703125,
1992
+ "learning_rate": 1.1793158256050706e-08,
1993
+ "loss": 0.0,
1994
+ "reward": 0.7916666865348816,
1995
+ "reward_std": 0.3521219789981842,
1996
+ "rewards/correct_code_reward_func": 0.3125000074505806,
1997
+ "rewards/len_reward_func": 0.4791666716337204,
1998
+ "step": 153
1999
+ },
2000
+ {
2001
+ "completion_length": 57.45833396911621,
2002
+ "epoch": 2.7079646017699117,
2003
+ "grad_norm": 2.8347099035822936,
2004
+ "kl": 0.051513671875,
2005
+ "learning_rate": 1.0375046797782867e-08,
2006
+ "loss": 0.0001,
2007
+ "reward": 0.8958333730697632,
2008
+ "reward_std": 0.204109326004982,
2009
+ "rewards/correct_code_reward_func": 0.395833358168602,
2010
+ "rewards/len_reward_func": 0.5,
2011
+ "step": 154
2012
+ },
2013
+ {
2014
+ "completion_length": 97.02083587646484,
2015
+ "epoch": 2.725663716814159,
2016
+ "grad_norm": 89.9892277373503,
2017
+ "kl": 0.531982421875,
2018
+ "learning_rate": 9.045946227499297e-09,
2019
+ "loss": 0.0005,
2020
+ "reward": 0.9904971122741699,
2021
+ "reward_std": 0.4233323186635971,
2022
+ "rewards/correct_code_reward_func": 0.5208333432674408,
2023
+ "rewards/len_reward_func": 0.46966375410556793,
2024
+ "step": 155
2025
+ },
2026
+ {
2027
+ "completion_length": 75.37500381469727,
2028
+ "epoch": 2.7433628318584073,
2029
+ "grad_norm": 41.09603274193404,
2030
+ "kl": 0.0634765625,
2031
+ "learning_rate": 7.806350251804483e-09,
2032
+ "loss": 0.0001,
2033
+ "reward": 0.8541666865348816,
2034
+ "reward_std": 0.3492931127548218,
2035
+ "rewards/correct_code_reward_func": 0.3541666716337204,
2036
+ "rewards/len_reward_func": 0.5,
2037
+ "step": 156
2038
+ },
2039
+ {
2040
+ "completion_length": 92.85417175292969,
2041
+ "epoch": 2.7610619469026547,
2042
+ "grad_norm": 1.148928120100101,
2043
+ "kl": 0.0374755859375,
2044
+ "learning_rate": 6.6567193299996996e-09,
2045
+ "loss": 0.0,
2046
+ "reward": 0.7610294222831726,
2047
+ "reward_std": 0.485231876373291,
2048
+ "rewards/correct_code_reward_func": 0.3125,
2049
+ "rewards/len_reward_func": 0.4485294073820114,
2050
+ "step": 157
2051
+ },
2052
+ {
2053
+ "completion_length": 98.41667175292969,
2054
+ "epoch": 2.7787610619469025,
2055
+ "grad_norm": 0.2382852510613484,
2056
+ "kl": 0.02642822265625,
2057
+ "learning_rate": 5.597480503041485e-09,
2058
+ "loss": 0.0,
2059
+ "reward": 0.5000000149011612,
2060
+ "reward_std": 0.1178511306643486,
2061
+ "rewards/correct_code_reward_func": 0.0416666679084301,
2062
+ "rewards/len_reward_func": 0.4583333432674408,
2063
+ "step": 158
2064
+ },
2065
+ {
2066
+ "completion_length": 77.20833587646484,
2067
+ "epoch": 2.7964601769911503,
2068
+ "grad_norm": 6.517708531460766,
2069
+ "kl": 0.0968017578125,
2070
+ "learning_rate": 4.629027234912986e-09,
2071
+ "loss": 0.0001,
2072
+ "reward": 0.9473089575767517,
2073
+ "reward_std": 0.46372126042842865,
2074
+ "rewards/correct_code_reward_func": 0.5208333432674408,
2075
+ "rewards/len_reward_func": 0.4264756143093109,
2076
+ "step": 159
2077
+ },
2078
+ {
2079
+ "completion_length": 80.27083396911621,
2080
+ "epoch": 2.814159292035398,
2081
+ "grad_norm": 3.3787178462046454,
2082
+ "kl": 0.0364990234375,
2083
+ "learning_rate": 3.751719266468584e-09,
2084
+ "loss": 0.0,
2085
+ "reward": 0.8541666865348816,
2086
+ "reward_std": 0.13607724383473396,
2087
+ "rewards/correct_code_reward_func": 0.3541666679084301,
2088
+ "rewards/len_reward_func": 0.5,
2089
+ "step": 160
2090
+ },
2091
+ {
2092
+ "completion_length": 77.83333587646484,
2093
+ "epoch": 2.831858407079646,
2094
+ "grad_norm": 1.0432210255226626,
2095
+ "kl": 0.04864501953125,
2096
+ "learning_rate": 2.9658824818044325e-09,
2097
+ "loss": 0.0,
2098
+ "reward": 0.5659293830394745,
2099
+ "reward_std": 0.21801644191145897,
2100
+ "rewards/correct_code_reward_func": 0.08333333395421505,
2101
+ "rewards/len_reward_func": 0.4825960248708725,
2102
+ "step": 161
2103
+ },
2104
+ {
2105
+ "completion_length": 68.77083587646484,
2106
+ "epoch": 2.849557522123894,
2107
+ "grad_norm": 4.445236186593027,
2108
+ "kl": 0.061279296875,
2109
+ "learning_rate": 2.271808787206092e-09,
2110
+ "loss": 0.0001,
2111
+ "reward": 1.0243507027626038,
2112
+ "reward_std": 0.4195573776960373,
2113
+ "rewards/correct_code_reward_func": 0.6041666865348816,
2114
+ "rewards/len_reward_func": 0.4201839864253998,
2115
+ "step": 162
2116
+ },
2117
+ {
2118
+ "completion_length": 86.83333587646484,
2119
+ "epoch": 2.8672566371681416,
2120
+ "grad_norm": 4.1412440697628705,
2121
+ "kl": 0.5286865234375,
2122
+ "learning_rate": 1.6697560027171543e-09,
2123
+ "loss": 0.0005,
2124
+ "reward": 0.6875000149011612,
2125
+ "reward_std": 0.23144195601344109,
2126
+ "rewards/correct_code_reward_func": 0.2083333432674408,
2127
+ "rewards/len_reward_func": 0.4791666716337204,
2128
+ "step": 163
2129
+ },
2130
+ {
2131
+ "completion_length": 72.6875,
2132
+ "epoch": 2.8849557522123894,
2133
+ "grad_norm": 7.176170560573175,
2134
+ "kl": 0.04962158203125,
2135
+ "learning_rate": 1.1599477663696843e-09,
2136
+ "loss": 0.0,
2137
+ "reward": 0.7519232630729675,
2138
+ "reward_std": 0.26646677777171135,
2139
+ "rewards/correct_code_reward_func": 0.27083333395421505,
2140
+ "rewards/len_reward_func": 0.4810899347066879,
2141
+ "step": 164
2142
+ },
2143
+ {
2144
+ "completion_length": 87.64583587646484,
2145
+ "epoch": 2.9026548672566372,
2146
+ "grad_norm": 1.9523625673288243,
2147
+ "kl": 0.05029296875,
2148
+ "learning_rate": 7.425734511116998e-10,
2149
+ "loss": 0.0001,
2150
+ "reward": 0.6250000298023224,
2151
+ "reward_std": 0.22233545035123825,
2152
+ "rewards/correct_code_reward_func": 0.1250000037252903,
2153
+ "rewards/len_reward_func": 0.5,
2154
+ "step": 165
2155
+ },
2156
+ {
2157
+ "completion_length": 79.41666793823242,
2158
+ "epoch": 2.920353982300885,
2159
+ "grad_norm": 29.99743474150218,
2160
+ "kl": 0.346923828125,
2161
+ "learning_rate": 4.17788094463023e-10,
2162
+ "loss": 0.0003,
2163
+ "reward": 0.5346376597881317,
2164
+ "reward_std": 0.21488384157419205,
2165
+ "rewards/correct_code_reward_func": 0.06250000186264515,
2166
+ "rewards/len_reward_func": 0.4721376597881317,
2167
+ "step": 166
2168
+ },
2169
+ {
2170
+ "completion_length": 71.14583587646484,
2171
+ "epoch": 2.938053097345133,
2172
+ "grad_norm": 2.049279192654788,
2173
+ "kl": 0.037353515625,
2174
+ "learning_rate": 1.857123409250705e-10,
2175
+ "loss": 0.0,
2176
+ "reward": 0.8958333730697632,
2177
+ "reward_std": 0.24056155234575272,
2178
+ "rewards/correct_code_reward_func": 0.3958333432674408,
2179
+ "rewards/len_reward_func": 0.5,
2180
+ "step": 167
2181
+ },
2182
+ {
2183
+ "completion_length": 57.000003814697266,
2184
+ "epoch": 2.9557522123893807,
2185
+ "grad_norm": 11.198599869127536,
2186
+ "kl": 0.0416259765625,
2187
+ "learning_rate": 4.6432397166285e-11,
2188
+ "loss": 0.0,
2189
+ "reward": 0.9583333730697632,
2190
+ "reward_std": 0.29602527618408203,
2191
+ "rewards/correct_code_reward_func": 0.4791666716337204,
2192
+ "rewards/len_reward_func": 0.4791666716337204,
2193
+ "step": 168
2194
+ },
2195
+ {
2196
+ "completion_length": 50.625,
2197
+ "epoch": 2.9734513274336285,
2198
+ "grad_norm": 26.983307381671835,
2199
+ "kl": 0.271484375,
2200
+ "learning_rate": 0.0,
2201
+ "loss": 0.0003,
2202
+ "reward": 0.9375,
2203
+ "reward_std": 0.39486490190029144,
2204
+ "rewards/correct_code_reward_func": 0.4375,
2205
+ "rewards/len_reward_func": 0.5,
2206
+ "step": 169
2207
+ },
2208
+ {
2209
+ "epoch": 2.9734513274336285,
2210
+ "step": 169,
2211
+ "total_flos": 0.0,
2212
+ "train_loss": 6.70051409522318e-05,
2213
+ "train_runtime": 7347.2678,
2214
+ "train_samples_per_second": 0.138,
2215
+ "train_steps_per_second": 0.023
2216
+ }
2217
+ ],
2218
+ "logging_steps": 1,
2219
+ "max_steps": 169,
2220
+ "num_input_tokens_seen": 0,
2221
+ "num_train_epochs": 4,
2222
+ "save_steps": 50,
2223
+ "stateful_callbacks": {
2224
+ "TrainerControl": {
2225
+ "args": {
2226
+ "should_epoch_stop": false,
2227
+ "should_evaluate": false,
2228
+ "should_log": false,
2229
+ "should_save": true,
2230
+ "should_training_stop": true
2231
+ },
2232
+ "attributes": {}
2233
+ }
2234
+ },
2235
+ "total_flos": 0.0,
2236
+ "train_batch_size": 1,
2237
+ "trial_name": null,
2238
+ "trial_params": null
2239
+ }
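
The `log_history` records above capture, for each optimization step, the mean completion length, KL, learning rate, and the GRPO reward split into its two components (`rewards/correct_code_reward_func` and `rewards/len_reward_func`). As a minimal sketch (not part of this commit), the snippet below shows one way these records could be read back from a downloaded copy of the file; the local filename `trainer_state.json` is an assumption.

```python
import json

# Minimal sketch: inspect the GRPO metrics stored in trainer_state.json.
# Assumes the file has been downloaded next to this script.
with open("trainer_state.json") as f:
    state = json.load(f)

for record in state["log_history"]:
    # The closing summary entry carries no per-step reward fields; skip it.
    if "reward" not in record:
        continue
    print(
        f"step {record['step']:>3} | "
        f"reward {record['reward']:.3f} | "
        f"correct_code {record['rewards/correct_code_reward_func']:.3f} | "
        f"len {record['rewards/len_reward_func']:.3f}"
    )
```

Filtering on the presence of the `reward` key skips the final summary entry, which only reports aggregate statistics such as `train_runtime` and `train_steps_per_second`.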