jcmei committed
Commit f1c8b89 · verified · 1 Parent(s): f2cc023

Model save
README.md CHANGED
@@ -2,15 +2,10 @@
  license: mit
  base_model: HuggingFaceH4/mistral-7b-sft-beta
  tags:
- - alignment-handbook
- - trl
- - dpo
- - generated_from_trainer
  - trl
  - dpo
+ - alignment-handbook
  - generated_from_trainer
- datasets:
- - HuggingFaceH4/ultrafeedback_binarized
  model-index:
  - name: DPO-Zephyr-7B
    results: []
@@ -21,7 +16,7 @@ should probably proofread and complete it, then remove this comment. -->

  # DPO-Zephyr-7B

- This model is a fine-tuned version of [HuggingFaceH4/mistral-7b-sft-beta](https://huggingface.co/HuggingFaceH4/mistral-7b-sft-beta) on the HuggingFaceH4/ultrafeedback_binarized dataset.
+ This model is a fine-tuned version of [HuggingFaceH4/mistral-7b-sft-beta](https://huggingface.co/HuggingFaceH4/mistral-7b-sft-beta) on the None dataset.

  ## Model description

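For context on the card above, here is a minimal loading sketch for the checkpoint with transformers. The repo id `jcmei/DPO-Zephyr-7B` is an assumption (the actual namespace is not stated in this commit); the bfloat16 dtype mirrors `"torch_dtype": "bfloat16"` in config.json.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "jcmei/DPO-Zephyr-7B"  # hypothetical repo id, not confirmed by the diff

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    torch_dtype=torch.bfloat16,  # matches the dtype recorded in config.json
)

prompt = "Explain direct preference optimization in one sentence."
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```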
all_results.json CHANGED
@@ -1,9 +1,9 @@
  {
      "epoch": 0.99581589958159,
      "total_flos": 0.0,
-     "train_loss": 0.6207737401753914,
-     "train_runtime": 2197.4341,
+     "train_loss": 0.09242776261658228,
+     "train_runtime": 417.0202,
      "train_samples": 15283,
-     "train_samples_per_second": 6.955,
-     "train_steps_per_second": 0.054
+     "train_samples_per_second": 36.648,
+     "train_steps_per_second": 0.285
  }
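A quick consistency sketch on the updated numbers, using only values from this file: throughput should be roughly `train_samples / train_runtime`.

```python
# Recompute throughput from the updated all_results.json values.
train_samples = 15283
train_runtime = 417.0202  # seconds

samples_per_sec = train_samples / train_runtime
print(round(samples_per_sec, 3))  # ~36.648, consistent with the logged train_samples_per_second
```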
config.json CHANGED
@@ -21,6 +21,6 @@
      "tie_word_embeddings": false,
      "torch_dtype": "bfloat16",
      "transformers_version": "4.40.2",
-     "use_cache": true,
+     "use_cache": false,
      "vocab_size": 32000
  }
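The `use_cache` flag controls whether the decoder caches past key/value states; it is commonly persisted as `false` for runs that use gradient checkpointing (an assumption here, not stated in the diff). A minimal sketch of re-enabling it at load time for generation, again with a hypothetical repo id:

```python
from transformers import AutoConfig, AutoModelForCausalLM

repo_id = "jcmei/DPO-Zephyr-7B"  # hypothetical repo id

config = AutoConfig.from_pretrained(repo_id)
config.use_cache = True  # re-enable KV caching for faster autoregressive decoding
model = AutoModelForCausalLM.from_pretrained(repo_id, config=config)
```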
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:26cca4b287c978b5da15afb90e3213e3c13eada6ce39254e1af4af6124a65797
+ oid sha256:2fd2a19ccb0893f580a7235f0f8fd0fd9a589129b99b84c3395259a864ae8c57
  size 4943162336
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3f6001ee57f534204dade687209d56b14fab77802aabbf7342040c93123d5415
+ oid sha256:cdcfd1a54675ca8e337ea7dd8b603926c8a5b117479c8c4dd42792b8f743f605
  size 4999819336
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:dddc589e77483ccca76ae5c994b2e991a0658f090b51b2a10ad407152b70e722
+ oid sha256:98370412ab5b475b5891780bc29108d4652c88f67530631146785e0f11bd27f2
  size 4540516344
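The three entries above are Git LFS pointer files: each records only the SHA-256 (oid) and byte size of a weight shard, not the shard itself. A small sketch for checking a locally downloaded shard against its pointer (assumes the shard sits in the working directory):

```python
import hashlib
from pathlib import Path

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    """Stream the file so multi-GB shards do not have to fit in memory."""
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

shard = Path("model-00003-of-00003.safetensors")
expected = "98370412ab5b475b5891780bc29108d4652c88f67530631146785e0f11bd27f2"  # new oid from the diff
assert shard.stat().st_size == 4540516344  # size recorded in the pointer
assert sha256_of(shard) == expected
```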
train_results.json CHANGED
@@ -1,9 +1,9 @@
  {
      "epoch": 0.99581589958159,
      "total_flos": 0.0,
-     "train_loss": 0.6207737401753914,
-     "train_runtime": 2197.4341,
+     "train_loss": 0.09242776261658228,
+     "train_runtime": 417.0202,
      "train_samples": 15283,
-     "train_samples_per_second": 6.955,
-     "train_steps_per_second": 0.054
+     "train_samples_per_second": 36.648,
+     "train_steps_per_second": 0.285
  }
trainer_state.json CHANGED
@@ -175,27 +175,27 @@
      },
      {
          "epoch": 0.9205020920502092,
-         "grad_norm": 15.784629310581684,
+         "grad_norm": 15.709464172960688,
          "learning_rate": 8.677580722139671e-09,
-         "logits/chosen": -2.7726333141326904,
-         "logits/rejected": -2.7129316329956055,
-         "logps/chosen": -367.2535095214844,
-         "logps/rejected": -343.4007568359375,
-         "loss": 0.5944,
+         "logits/chosen": -2.7726309299468994,
+         "logits/rejected": -2.7129008769989014,
+         "logps/chosen": -367.25946044921875,
+         "logps/rejected": -343.40789794921875,
+         "loss": 0.5946,
          "rewards/accuracies": 0.699999988079071,
-         "rewards/chosen": -0.4677084982395172,
-         "rewards/margins": 0.3370305001735687,
-         "rewards/rejected": -0.8047389984130859,
+         "rewards/chosen": -0.4677678644657135,
+         "rewards/margins": 0.3370424211025238,
+         "rewards/rejected": -0.8048103451728821,
          "step": 110
      },
      {
          "epoch": 0.99581589958159,
          "step": 119,
          "total_flos": 0.0,
-         "train_loss": 0.6207737401753914,
-         "train_runtime": 2197.4341,
-         "train_samples_per_second": 6.955,
-         "train_steps_per_second": 0.054
+         "train_loss": 0.09242776261658228,
+         "train_runtime": 417.0202,
+         "train_samples_per_second": 36.648,
+         "train_steps_per_second": 0.285
      }
  ],
  "logging_steps": 10,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e53e3bf661f988f74d812917404c9504100734c564b6d07061a95819b6487c21
+ oid sha256:c65cd7b5c101d95a5dfadd2ce76f2e9250c4bc66203ed4673ccb1be24637b335
  size 6264
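training_args.bin is the pickled training-arguments object saved by the Trainer, which is why its hash changes here while the size stays 6264 bytes. A sketch of inspecting it locally (assumes compatible transformers/trl versions are installed; because it is a pickled Python object rather than a tensor file, weights-only loading must be disabled, which requires trusting the checkpoint source):

```python
import torch

# Load the pickled arguments object; weights_only=False is required for non-tensor pickles.
args = torch.load("training_args.bin", weights_only=False)
print(type(args).__name__)  # e.g. TrainingArguments (or a TRL subclass such as DPOConfig)
print(args.num_train_epochs, args.learning_rate)
```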