weijie210 committed
Commit a49977d · verified · 1 Parent(s): 1b41a7e

Model save

README.md ADDED
@@ -0,0 +1,69 @@
+ ---
+ license: apache-2.0
+ base_model: weijie210/mistral_gsm8k_sft
+ tags:
+ - trl
+ - dpo
+ - generated_from_trainer
+ model-index:
+ - name: mistral_gsm8k_dpo_0
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # mistral_gsm8k_dpo_0
+
+ This model is a fine-tuned version of [weijie210/mistral_gsm8k_sft](https://huggingface.co/weijie210/mistral_gsm8k_sft) on the None dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.0108
+ - Rewards/chosen: -1.6999
+ - Rewards/rejected: -22.5187
+ - Rewards/accuracies: 0.9957
+ - Rewards/margins: 20.8188
+ - Logps/rejected: -238.0359
+ - Logps/chosen: -48.7549
+ - Logits/rejected: -2.6146
+ - Logits/chosen: -2.6687
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-07
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 4
+ - total_train_batch_size: 32
+ - total_eval_batch_size: 32
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 2
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 4.36.1
+ - Pytorch 2.0.1+cu117
+ - Datasets 2.16.1
+ - Tokenizers 0.15.0
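
The card above lists the full DPO recipe: 5e-7 peak learning rate with linear decay and 10% warmup, per-device batch 8 on 4 GPUs (effective batch 32), 2 epochs. As a point of reference, here is a minimal sketch of how such a run could be set up with TRL's `DPOTrainer` (0.7-era API, contemporary with Transformers 4.36.1). The preference pairs, `beta`, and `bf16` flag are assumptions; none of them are recorded in this commit.

```python
from datasets import Dataset
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments
from trl import DPOTrainer

base = "weijie210/mistral_gsm8k_sft"
model = AutoModelForCausalLM.from_pretrained(base)      # policy to optimize
ref_model = AutoModelForCausalLM.from_pretrained(base)  # frozen reference
tokenizer = AutoTokenizer.from_pretrained(base)

# Placeholder preference pairs; the real dataset is not recorded in this commit.
pairs = Dataset.from_dict({
    "prompt":   ["Natalia picked 5 apples and bought 3 more. How many now?"],
    "chosen":   ["She has 5 + 3 = 8 apples.\n#### 8"],
    "rejected": ["She has 15 apples.\n#### 15"],
})

args = TrainingArguments(
    output_dir="mistral_gsm8k_dpo_0",
    learning_rate=5e-7,
    lr_scheduler_type="linear",
    warmup_ratio=0.1,
    num_train_epochs=2,
    per_device_train_batch_size=8,  # x 4 GPUs = effective batch 32
    per_device_eval_batch_size=8,
    seed=42,
    bf16=True,                      # assumption, implied by torch_dtype in config.json
)

trainer = DPOTrainer(
    model=model,
    ref_model=ref_model,
    beta=0.1,                       # assumption: TRL's default
    args=args,
    train_dataset=pairs,
    tokenizer=tokenizer,
)
trainer.train()
```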
all_results.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "epoch": 2.0,
+   "eval_logits/chosen": -2.668743848800659,
+   "eval_logits/rejected": -2.614588499069214,
+   "eval_logps/chosen": -48.75492858886719,
+   "eval_logps/rejected": -238.03585815429688,
+   "eval_loss": 0.010777823626995087,
+   "eval_rewards/accuracies": 0.9956896305084229,
+   "eval_rewards/chosen": -1.6998504400253296,
+   "eval_rewards/margins": 20.81882667541504,
+   "eval_rewards/rejected": -22.5186767578125,
+   "eval_runtime": 231.3578,
+   "eval_samples": 905,
+   "eval_samples_per_second": 3.912,
+   "eval_steps_per_second": 0.125,
+   "train_loss": 0.06552632141962543,
+   "train_runtime": 3687.7187,
+   "train_samples": 4570,
+   "train_samples_per_second": 2.478,
+   "train_steps_per_second": 0.078
+ }
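
A quick sanity check on how these fields relate to one another, using only numbers copied from the JSON above:

```python
import math

chosen, rejected = -1.6998504400253296, -22.5186767578125
print(chosen - rejected)    # 20.81882... = eval_rewards/margins (up to float rounding)

print(905 / 231.3578)                   # 3.911... -> eval_samples_per_second (3.912)
print(math.ceil(905 / 32) / 231.3578)   # 29 eval steps -> 0.125 eval_steps_per_second
```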
config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "_name_or_path": "weijie210/mistral_gsm8k_sft",
+   "architectures": [
+     "MistralForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "initializer_range": 0.02,
+   "intermediate_size": 14336,
+   "max_position_embeddings": 32768,
+   "model_type": "mistral",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 32,
+   "num_key_value_heads": 8,
+   "rms_norm_eps": 1e-05,
+   "rope_theta": 10000.0,
+   "sliding_window": 4096,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.36.1",
+   "use_cache": false,
+   "vocab_size": 32000
+ }
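
The config fixes the architecture exactly (grouped-query attention: 32 query heads sharing 8 KV heads of dimension 128), so the parameter count can be derived by hand from standard Mistral/Llama weight shapes (no biases, untied embeddings). At 2 bytes per bfloat16 parameter, the result lands exactly on the `total_size` recorded in `model.safetensors.index.json` below.

```python
V, H, I, L, A, KV = 32000, 4096, 14336, 32, 32, 8  # vocab, hidden, mlp, layers, heads, kv heads
head_dim = H // A                                   # 128
kv_dim = KV * head_dim                              # 1024

attn = H * H + H * kv_dim + H * kv_dim + H * H      # q, k, v, o projections
mlp = 3 * H * I                                     # gate, up, down projections
norms = 2 * H                                       # input + post-attention RMSNorm
per_layer = attn + mlp + norms                      # 218,112,000

total = L * per_layer + 2 * V * H + H               # + embed_tokens, lm_head, final norm
print(total)      # 7,241,732,096 parameters (~7.2B)
print(total * 2)  # 14,483,464,192 bytes in bfloat16 = total_size in the index below
```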
eval_results.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "epoch": 2.0,
+   "eval_logits/chosen": -2.668743848800659,
+   "eval_logits/rejected": -2.614588499069214,
+   "eval_logps/chosen": -48.75492858886719,
+   "eval_logps/rejected": -238.03585815429688,
+   "eval_loss": 0.010777823626995087,
+   "eval_rewards/accuracies": 0.9956896305084229,
+   "eval_rewards/chosen": -1.6998504400253296,
+   "eval_rewards/margins": 20.81882667541504,
+   "eval_rewards/rejected": -22.5186767578125,
+   "eval_runtime": 231.3578,
+   "eval_samples": 905,
+   "eval_samples_per_second": 3.912,
+   "eval_steps_per_second": 0.125
+ }
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "transformers_version": "4.36.1"
+ }
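
The generation config pins only the BOS/EOS ids, leaving decoding settings to the caller. A minimal inference sketch, assuming the repo id is `weijie210/mistral_gsm8k_dpo_0` (inferred from the model-index name in the README, not confirmed by this commit):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "weijie210/mistral_gsm8k_dpo_0"  # assumed repo id
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(
    repo, torch_dtype=torch.bfloat16, device_map="auto"
)

messages = [{"role": "user",
             "content": "A book costs $12 and a pen costs $3. How much do 2 books and 4 pens cost?"}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

out = model.generate(inputs, max_new_tokens=256)
print(tokenizer.decode(out[0][inputs.shape[-1]:], skip_special_tokens=True))
```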
model-00001-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e1fc4ae69f0657ea215db6e5e9e12ac0bc3c89237fe4ad55907e57c6d333781d
+ size 4943162336
model-00002-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8cca397186b414340bf11e0e97b31da81a39d7360906409f0ecdd05568167285
+ size 4999819336
model-00003-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0a8172879d63ccfe57f48341c65c1644ab15cd1220f2ab8396334cd0262cdbda
+ size 4540516344
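
The three `.safetensors` entries above are git-lfs pointer files, not the weights themselves: each records only the LFS spec version, the SHA-256 of the actual blob (`oid`), and its byte size. After downloading a shard, the digest can be verified against the pointer:

```python
import hashlib

# oid copied from the model-00001-of-00003.safetensors pointer above
expected = "e1fc4ae69f0657ea215db6e5e9e12ac0bc3c89237fe4ad55907e57c6d333781d"

h = hashlib.sha256()
with open("model-00001-of-00003.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash 1 MiB at a time
        h.update(chunk)

assert h.hexdigest() == expected, "shard is corrupt or incomplete"
```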
model.safetensors.index.json ADDED
@@ -0,0 +1,298 @@
+ {
+   "metadata": {
+     "total_size": 14483464192
+   },
+   "weight_map": {
+     "lm_head.weight": "model-00003-of-00003.safetensors",
+     "model.embed_tokens.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.10.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.11.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.2.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.20.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.22.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.22.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.22.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.23.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.3.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.30.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.4.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.norm.weight": "model-00003-of-00003.safetensors"
+   }
+ }
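
The index maps each of the 291 tensors to one of the three shards so `from_pretrained` can load them lazily. Note that `total_size` counts tensor bytes only; the three shard files above sum to roughly 34 KB more, because each `.safetensors` file carries its own JSON header. A small inspection sketch:

```python
import json
from collections import Counter

with open("model.safetensors.index.json") as f:
    index = json.load(f)

print(index["metadata"]["total_size"])        # 14483464192
print(Counter(index["weight_map"].values()))  # tensors per shard

# Shard file sizes from the LFS pointers above exceed total_size by the headers:
print(4943162336 + 4999819336 + 4540516344 - 14483464192)  # 33824 bytes
```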
runs/Feb09_01-36-34_node01/events.out.tfevents.1707414076.node01.399158.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ee60cddfb1ac1820c072d6bdc496cbcc48b7f81d92b6a922dc7acc5f40fc90a9
+ size 17330
runs/Feb09_01-36-34_node01/events.out.tfevents.1707416827.node01.399158.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:97d1be9fb48069a55da6b4117006b2648f68d4c77c66b5aeb09fbff2ec281c8b
+ size 828
runs/Feb09_09-39-43_node01/events.out.tfevents.1707442937.node01.450204.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1620b5311503f983bb4e73f4079a205b24f0325e119d8b9a509153e2f65eac4d
+ size 6308
runs/Feb09_10-55-15_node01/events.out.tfevents.1707447447.node01.464573.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9ce1ffabb1c82c213970d9e6a0a2e9aaa20480ecc433ef92e0808bda1867d905
+ size 15708
runs/Feb10_10-59-01_node01/events.out.tfevents.1707534108.node01.620006.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:713ebec4324a203229690878836683a6027e05826aff9b595ff0eb4ec022b143
+ size 23036
runs/Feb10_10-59-01_node01/events.out.tfevents.1707537988.node01.620006.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:16467635a65d859117f4fceedccc16d0fb4c25d977e82fe3f8bdfada4c23f12b
+ size 828
runs/Feb12_11-33-36_node01/events.out.tfevents.1707709012.node01.922658.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5856e57efa9759fa827209b1d10dcca1e9e25a635172d1558511cbef92aef294
+ size 23036
runs/Feb12_11-33-36_node01/events.out.tfevents.1707712931.node01.922658.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:67ae926f1a1dbe008d1a4a01f20c64f38d6aa2489ce5c8fbe34a2807e2e6b0c3
+ size 828
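
These eight LFS pointers hold TensorBoard event logs from five launches on `node01` between Feb 9 and Feb 12. They can be read back with the `tensorboard` package; the scalar tag names below are guesses based on the metric keys in `trainer_state.json` and the `train/` prefix the Trainer's TensorBoard callback applies.

```python
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("runs/Feb12_11-33-36_node01")  # accepts a run directory
acc.Reload()

print(acc.Tags()["scalars"])             # list whatever scalar tags were logged
for event in acc.Scalars("train/loss"):  # assumed tag name
    print(event.step, event.value)
```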
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+ size 493443
tokenizer_config.json ADDED
@@ -0,0 +1,47 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [],
+   "bos_token": "<s>",
+   "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "legacy": true,
+   "max_length": 2048,
+   "model_max_length": 4096,
+   "pad_token": "</s>",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "stride": 0,
+   "tokenizer_class": "LlamaTokenizer",
+   "truncation_side": "left",
+   "truncation_strategy": "longest_first",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false
+ }
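
The `chat_template` here is the Zephyr-style `<|user|>` / `<|assistant|>` format, with the EOS token closing every turn. Rendering it without tokenization shows the exact prompt layout the model was trained to expect (repo id assumed as above):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("weijie210/mistral_gsm8k_dpo_0")  # assumed repo id

messages = [{"role": "user", "content": "What is 12 * 7?"}]
print(tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True))
# <|user|>
# What is 12 * 7?</s>
# <|assistant|>
```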
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 2.0,
+   "train_loss": 0.06552632141962543,
+   "train_runtime": 3687.7187,
+   "train_samples": 4570,
+   "train_samples_per_second": 2.478,
+   "train_steps_per_second": 0.078
+ }
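
These figures reconcile with the recipe in the README: 4,570 preference pairs at an effective batch of 32 give 143 optimizer steps per epoch, hence 286 over 2 epochs, matching `global_step` in `trainer_state.json` below.

```python
import math

steps = math.ceil(4570 / 32) * 2   # 143 steps/epoch * 2 epochs = 286
print(steps)
print(steps / 3687.7187)           # 0.0776 -> reported train_steps_per_second 0.078
print(4570 * 2 / 3687.7187)        # 2.478  -> reported train_samples_per_second
```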
trainer_state.json ADDED
@@ -0,0 +1,436 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 2.0,
+   "eval_steps": 500,
+   "global_step": 286,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.01,
+       "learning_rate": 1.7241379310344825e-08,
+       "logits/chosen": -2.46767258644104,
+       "logits/rejected": -2.4595911502838135,
+       "logps/chosen": -22.810455322265625,
+       "logps/rejected": -22.677112579345703,
+       "loss": 0.6931,
+       "rewards/accuracies": 0.0,
+       "rewards/chosen": 0.0,
+       "rewards/margins": 0.0,
+       "rewards/rejected": 0.0,
+       "step": 1
+     },
+     {
+       "epoch": 0.07,
+       "learning_rate": 1.7241379310344828e-07,
+       "logits/chosen": -2.815382719039917,
+       "logits/rejected": -2.7576191425323486,
+       "logps/chosen": -19.62689971923828,
+       "logps/rejected": -11.512568473815918,
+       "loss": 0.6817,
+       "rewards/accuracies": 0.6944444179534912,
+       "rewards/chosen": -0.002836173167452216,
+       "rewards/margins": 0.020304910838603973,
+       "rewards/rejected": -0.023141082376241684,
+       "step": 10
+     },
+     {
+       "epoch": 0.14,
+       "learning_rate": 3.4482758620689656e-07,
+       "logits/chosen": -2.760253429412842,
+       "logits/rejected": -2.776731014251709,
+       "logps/chosen": -18.500263214111328,
+       "logps/rejected": -24.879409790039062,
+       "loss": 0.3663,
+       "rewards/accuracies": 0.925000011920929,
+       "rewards/chosen": -0.05979099124670029,
+       "rewards/margins": 1.2453944683074951,
+       "rewards/rejected": -1.3051855564117432,
+       "step": 20
+     },
+     {
+       "epoch": 0.21,
+       "learning_rate": 4.980544747081712e-07,
+       "logits/chosen": -2.8509573936462402,
+       "logits/rejected": -2.8971199989318848,
+       "logps/chosen": -20.137048721313477,
+       "logps/rejected": -61.92778778076172,
+       "loss": 0.1236,
+       "rewards/accuracies": 0.9750000238418579,
+       "rewards/chosen": -0.16065621376037598,
+       "rewards/margins": 4.86615514755249,
+       "rewards/rejected": -5.026811599731445,
+       "step": 30
+     },
+     {
+       "epoch": 0.28,
+       "learning_rate": 4.785992217898833e-07,
+       "logits/chosen": -2.792508602142334,
+       "logits/rejected": -2.8558077812194824,
+       "logps/chosen": -23.098939895629883,
+       "logps/rejected": -88.87196350097656,
+       "loss": 0.0921,
+       "rewards/accuracies": 0.987500011920929,
+       "rewards/chosen": -0.44465622305870056,
+       "rewards/margins": 7.166872978210449,
+       "rewards/rejected": -7.611530303955078,
+       "step": 40
+     },
+     {
+       "epoch": 0.35,
+       "learning_rate": 4.591439688715953e-07,
+       "logits/chosen": -2.8872122764587402,
+       "logits/rejected": -2.8914027214050293,
+       "logps/chosen": -21.817670822143555,
+       "logps/rejected": -82.24397277832031,
+       "loss": 0.0947,
+       "rewards/accuracies": 0.9750000238418579,
+       "rewards/chosen": -0.6026766896247864,
+       "rewards/margins": 6.256636619567871,
+       "rewards/rejected": -6.85931396484375,
+       "step": 50
+     },
+     {
+       "epoch": 0.42,
+       "learning_rate": 4.3968871595330735e-07,
+       "logits/chosen": -2.8102235794067383,
+       "logits/rejected": -2.867000102996826,
+       "logps/chosen": -22.098957061767578,
+       "logps/rejected": -106.98995208740234,
+       "loss": 0.0684,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -0.6044292449951172,
+       "rewards/margins": 8.731573104858398,
+       "rewards/rejected": -9.336004257202148,
+       "step": 60
+     },
+     {
+       "epoch": 0.49,
+       "learning_rate": 4.202334630350194e-07,
+       "logits/chosen": -2.838426113128662,
+       "logits/rejected": -2.8441996574401855,
+       "logps/chosen": -32.25341033935547,
+       "logps/rejected": -112.56993103027344,
+       "loss": 0.0467,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -0.9181193113327026,
+       "rewards/margins": 8.931884765625,
+       "rewards/rejected": -9.850004196166992,
+       "step": 70
+     },
+     {
+       "epoch": 0.56,
+       "learning_rate": 4.007782101167315e-07,
+       "logits/chosen": -2.7943294048309326,
+       "logits/rejected": -2.81146240234375,
+       "logps/chosen": -28.41353416442871,
+       "logps/rejected": -115.3572769165039,
+       "loss": 0.0531,
+       "rewards/accuracies": 0.987500011920929,
+       "rewards/chosen": -0.8534424901008606,
+       "rewards/margins": 9.425840377807617,
+       "rewards/rejected": -10.27928352355957,
+       "step": 80
+     },
+     {
+       "epoch": 0.63,
+       "learning_rate": 3.813229571984435e-07,
+       "logits/chosen": -2.6642768383026123,
+       "logits/rejected": -2.695777416229248,
+       "logps/chosen": -30.20281410217285,
+       "logps/rejected": -115.2649917602539,
+       "loss": 0.0493,
+       "rewards/accuracies": 0.987500011920929,
+       "rewards/chosen": -0.9560843706130981,
+       "rewards/margins": 9.298072814941406,
+       "rewards/rejected": -10.254158020019531,
+       "step": 90
+     },
+     {
+       "epoch": 0.7,
+       "learning_rate": 3.618677042801556e-07,
+       "logits/chosen": -2.61277437210083,
+       "logits/rejected": -2.7204833030700684,
+       "logps/chosen": -33.576595306396484,
+       "logps/rejected": -150.16848754882812,
+       "loss": 0.0391,
+       "rewards/accuracies": 0.987500011920929,
+       "rewards/chosen": -1.3517389297485352,
+       "rewards/margins": 12.079813003540039,
+       "rewards/rejected": -13.431551933288574,
+       "step": 100
+     },
+     {
+       "epoch": 0.77,
+       "learning_rate": 3.4241245136186767e-07,
+       "logits/chosen": -2.7867584228515625,
+       "logits/rejected": -2.7667887210845947,
+       "logps/chosen": -24.368350982666016,
+       "logps/rejected": -107.03987884521484,
+       "loss": 0.0492,
+       "rewards/accuracies": 0.987500011920929,
+       "rewards/chosen": -0.7897183895111084,
+       "rewards/margins": 8.651564598083496,
+       "rewards/rejected": -9.441283226013184,
+       "step": 110
+     },
+     {
+       "epoch": 0.84,
+       "learning_rate": 3.2295719844357975e-07,
+       "logits/chosen": -2.6104767322540283,
+       "logits/rejected": -2.675987482070923,
+       "logps/chosen": -33.704322814941406,
+       "logps/rejected": -135.02169799804688,
+       "loss": 0.023,
+       "rewards/accuracies": 0.987500011920929,
+       "rewards/chosen": -1.1443722248077393,
+       "rewards/margins": 11.034332275390625,
+       "rewards/rejected": -12.178705215454102,
+       "step": 120
+     },
+     {
+       "epoch": 0.91,
+       "learning_rate": 3.0350194552529177e-07,
+       "logits/chosen": -2.568190574645996,
+       "logits/rejected": -2.6318023204803467,
+       "logps/chosen": -30.85686683654785,
+       "logps/rejected": -167.8513946533203,
+       "loss": 0.0389,
+       "rewards/accuracies": 0.987500011920929,
+       "rewards/chosen": -1.3852249383926392,
+       "rewards/margins": 14.069249153137207,
+       "rewards/rejected": -15.454473495483398,
+       "step": 130
+     },
+     {
+       "epoch": 0.98,
+       "learning_rate": 2.8404669260700384e-07,
+       "logits/chosen": -2.5317270755767822,
+       "logits/rejected": -2.593646764755249,
+       "logps/chosen": -33.05012512207031,
+       "logps/rejected": -199.79788208007812,
+       "loss": 0.0281,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -1.1634151935577393,
+       "rewards/margins": 17.322010040283203,
+       "rewards/rejected": -18.485424041748047,
+       "step": 140
+     },
+     {
+       "epoch": 1.05,
+       "learning_rate": 2.645914396887159e-07,
+       "logits/chosen": -2.6736645698547363,
+       "logits/rejected": -2.65024995803833,
+       "logps/chosen": -32.11931610107422,
+       "logps/rejected": -172.3567352294922,
+       "loss": 0.011,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -1.398026704788208,
+       "rewards/margins": 14.626693725585938,
+       "rewards/rejected": -16.02471923828125,
+       "step": 150
+     },
+     {
+       "epoch": 1.12,
+       "learning_rate": 2.45136186770428e-07,
+       "logits/chosen": -2.6248397827148438,
+       "logits/rejected": -2.6503357887268066,
+       "logps/chosen": -35.05925369262695,
+       "logps/rejected": -219.28378295898438,
+       "loss": 0.0095,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -1.4844539165496826,
+       "rewards/margins": 19.174663543701172,
+       "rewards/rejected": -20.659120559692383,
+       "step": 160
+     },
+     {
+       "epoch": 1.19,
+       "learning_rate": 2.2568093385214007e-07,
+       "logits/chosen": -2.6490864753723145,
+       "logits/rejected": -2.6252989768981934,
+       "logps/chosen": -31.650588989257812,
+       "logps/rejected": -194.4810333251953,
+       "loss": 0.0158,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -0.9476087689399719,
+       "rewards/margins": 17.060314178466797,
+       "rewards/rejected": -18.007923126220703,
+       "step": 170
+     },
+     {
+       "epoch": 1.26,
+       "learning_rate": 2.0622568093385212e-07,
+       "logits/chosen": -2.7093279361724854,
+       "logits/rejected": -2.638579845428467,
+       "logps/chosen": -32.82453155517578,
+       "logps/rejected": -185.39230346679688,
+       "loss": 0.0104,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -1.2247194051742554,
+       "rewards/margins": 15.979866027832031,
+       "rewards/rejected": -17.204586029052734,
+       "step": 180
+     },
+     {
+       "epoch": 1.33,
+       "learning_rate": 1.867704280155642e-07,
+       "logits/chosen": -2.6887717247009277,
+       "logits/rejected": -2.6667675971984863,
+       "logps/chosen": -34.072509765625,
+       "logps/rejected": -194.00869750976562,
+       "loss": 0.0099,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -1.21720290184021,
+       "rewards/margins": 16.749324798583984,
+       "rewards/rejected": -17.96652603149414,
+       "step": 190
+     },
+     {
+       "epoch": 1.4,
+       "learning_rate": 1.6731517509727624e-07,
+       "logits/chosen": -2.6063685417175293,
+       "logits/rejected": -2.5614001750946045,
+       "logps/chosen": -41.58473587036133,
+       "logps/rejected": -219.73104858398438,
+       "loss": 0.0121,
+       "rewards/accuracies": 0.987500011920929,
+       "rewards/chosen": -1.829288125038147,
+       "rewards/margins": 18.672809600830078,
+       "rewards/rejected": -20.502098083496094,
+       "step": 200
+     },
+     {
+       "epoch": 1.47,
+       "learning_rate": 1.4785992217898832e-07,
+       "logits/chosen": -2.7379167079925537,
+       "logits/rejected": -2.674070119857788,
+       "logps/chosen": -35.14494323730469,
+       "logps/rejected": -182.973388671875,
+       "loss": 0.0093,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -1.3816758394241333,
+       "rewards/margins": 15.531547546386719,
+       "rewards/rejected": -16.913225173950195,
+       "step": 210
+     },
+     {
+       "epoch": 1.54,
+       "learning_rate": 1.2840466926070036e-07,
+       "logits/chosen": -2.7067456245422363,
+       "logits/rejected": -2.632633686065674,
+       "logps/chosen": -34.483577728271484,
+       "logps/rejected": -222.17990112304688,
+       "loss": 0.0054,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -0.9836966395378113,
+       "rewards/margins": 19.769222259521484,
+       "rewards/rejected": -20.752918243408203,
+       "step": 220
+     },
+     {
+       "epoch": 1.61,
+       "learning_rate": 1.0894941634241245e-07,
+       "logits/chosen": -2.608846664428711,
+       "logits/rejected": -2.604660749435425,
+       "logps/chosen": -32.35722732543945,
+       "logps/rejected": -246.7973175048828,
+       "loss": 0.0076,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -1.2495921850204468,
+       "rewards/margins": 22.08835220336914,
+       "rewards/rejected": -23.33794403076172,
+       "step": 230
+     },
+     {
+       "epoch": 1.68,
+       "learning_rate": 8.949416342412451e-08,
+       "logits/chosen": -2.607797622680664,
+       "logits/rejected": -2.566305637359619,
+       "logps/chosen": -27.041118621826172,
+       "logps/rejected": -209.3827667236328,
+       "loss": 0.0122,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -0.9218173027038574,
+       "rewards/margins": 18.870208740234375,
+       "rewards/rejected": -19.79202651977539,
+       "step": 240
+     },
+     {
+       "epoch": 1.75,
+       "learning_rate": 7.003891050583658e-08,
+       "logits/chosen": -2.518440008163452,
+       "logits/rejected": -2.5331220626831055,
+       "logps/chosen": -31.538745880126953,
+       "logps/rejected": -247.35238647460938,
+       "loss": 0.004,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -1.2074544429779053,
+       "rewards/margins": 22.2752742767334,
+       "rewards/rejected": -23.48272705078125,
+       "step": 250
+     },
+     {
+       "epoch": 1.82,
+       "learning_rate": 5.058365758754864e-08,
+       "logits/chosen": -2.5735764503479004,
+       "logits/rejected": -2.52424693107605,
+       "logps/chosen": -28.21148681640625,
+       "logps/rejected": -196.92958068847656,
+       "loss": 0.0018,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -1.0159205198287964,
+       "rewards/margins": 17.290904998779297,
+       "rewards/rejected": -18.306825637817383,
+       "step": 260
+     },
+     {
+       "epoch": 1.89,
+       "learning_rate": 3.11284046692607e-08,
+       "logits/chosen": -2.643744945526123,
+       "logits/rejected": -2.626131772994995,
+       "logps/chosen": -29.778423309326172,
+       "logps/rejected": -234.2376251220703,
+       "loss": 0.0074,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -1.0768438577651978,
+       "rewards/margins": 20.66169548034668,
+       "rewards/rejected": -21.738544464111328,
+       "step": 270
+     },
+     {
+       "epoch": 1.96,
+       "learning_rate": 1.1673151750972762e-08,
+       "logits/chosen": -2.467341899871826,
+       "logits/rejected": -2.524524211883545,
+       "logps/chosen": -29.820302963256836,
+       "logps/rejected": -301.8487243652344,
+       "loss": 0.0019,
+       "rewards/accuracies": 1.0,
+       "rewards/chosen": -0.9357167482376099,
+       "rewards/margins": 27.8673152923584,
+       "rewards/rejected": -28.80303382873535,
+       "step": 280
+     },
+     {
+       "epoch": 2.0,
+       "step": 286,
+       "total_flos": 0.0,
+       "train_loss": 0.06552632141962543,
+       "train_runtime": 3687.7187,
+       "train_samples_per_second": 2.478,
+       "train_steps_per_second": 0.078
+     }
+   ],
+   "logging_steps": 10,
+   "max_steps": 286,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 2,
+   "save_steps": 500,
+   "total_flos": 0.0,
+   "train_batch_size": 8,
+   "trial_name": null,
+   "trial_params": null
+ }
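
The `log_history` shows the expected DPO signature: the loss starts at 0.6931 (ln 2, the chance-level value when both implicit rewards are zero) and collapses toward zero as the chosen-vs-rejected margin widens from 0 to roughly 28. A sketch to trace that trajectory from the saved state:

```python
import json

with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "rewards/margins" in entry:  # skip the final summary entry
        print(f"step {entry['step']:>3}: loss {entry['loss']:.4f}  "
              f"margin {entry['rewards/margins']:6.2f}")
```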
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1a8f95b6a65812eada38d9e7e87e8e68d0988e75cacd5214d60fdee95413b15e
+ size 5371