jcmei committed
Commit 17bb92c · verified · 1 Parent(s): 165dcf4

Model save
README.md CHANGED
@@ -2,16 +2,10 @@
 license: mit
 base_model: HuggingFaceH4/mistral-7b-sft-beta
 tags:
-- alignment-handbook
-- trl
-- dpo
-- generated_from_trainer
 - trl
 - dpo
 - alignment-handbook
 - generated_from_trainer
-datasets:
-- HuggingFaceH4/ultrafeedback_binarized
 model-index:
 - name: DPO-Zephyr-7B
   results: []
@@ -22,7 +16,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # DPO-Zephyr-7B
 
-This model is a fine-tuned version of [HuggingFaceH4/mistral-7b-sft-beta](https://huggingface.co/HuggingFaceH4/mistral-7b-sft-beta) on the HuggingFaceH4/ultrafeedback_binarized dataset.
+This model is a fine-tuned version of [HuggingFaceH4/mistral-7b-sft-beta](https://huggingface.co/HuggingFaceH4/mistral-7b-sft-beta) on the None dataset.
 
 ## Model description
 
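The hunks above deduplicate the tags list and drop the `datasets:` entry from the YAML front matter; the card was apparently regenerated without dataset metadata, which is why the description now reads "on the None dataset." A minimal sketch for reading back the resulting card metadata with `huggingface_hub` (the repo id `jcmei/DPO-Zephyr-7B` is an assumption inferred from the committer and model name, not stated in this commit):

```python
# Minimal sketch: read back the model-card front matter after this commit.
# The repo id below is an assumption; substitute the actual repository.
from huggingface_hub import ModelCard

card = ModelCard.load("jcmei/DPO-Zephyr-7B")
meta = card.data.to_dict()

print(meta.get("base_model"))  # expected: HuggingFaceH4/mistral-7b-sft-beta
print(meta.get("tags"))        # expected: ['trl', 'dpo', 'alignment-handbook', 'generated_from_trainer']
print(meta.get("datasets"))    # expected: None, since the datasets field was removed
```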
all_results.json CHANGED
@@ -1,9 +1,9 @@
 {
     "epoch": 0.99581589958159,
     "total_flos": 0.0,
-    "train_loss": 0.09242776261658228,
-    "train_runtime": 417.0202,
+    "train_loss": 0.09238386955581793,
+    "train_runtime": 386.322,
     "train_samples": 15283,
-    "train_samples_per_second": 36.648,
-    "train_steps_per_second": 0.285
+    "train_samples_per_second": 39.56,
+    "train_steps_per_second": 0.308
 }
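The updated throughput figures are internally consistent with the sample count and runtime; a quick arithmetic check (119 is the final step count reported in trainer_state.json further down):

```python
# Sanity-check the reported throughput against the other fields in all_results.json.
train_samples = 15283
train_runtime = 386.322  # seconds
train_steps = 119        # final "step" in trainer_state.json

print(round(train_samples / train_runtime, 2))  # 39.56  -> matches train_samples_per_second
print(round(train_steps / train_runtime, 3))    # 0.308  -> matches train_steps_per_second
```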
config.json CHANGED
@@ -21,6 +21,6 @@
   "tie_word_embeddings": false,
   "torch_dtype": "bfloat16",
   "transformers_version": "4.40.2",
-  "use_cache": true,
+  "use_cache": false,
   "vocab_size": 32000
 }
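`use_cache` only affects generation (the key/value cache); it is commonly saved as `false` in checkpoints written during training, e.g. when gradient checkpointing is enabled, and can simply be re-enabled at inference time. A minimal sketch, where `path_or_repo` is a placeholder for this checkpoint's local path or Hub id:

```python
# Minimal sketch: load this checkpoint for inference and turn the KV cache back on.
# "path_or_repo" is a placeholder (local directory or Hub id), not stated in this commit.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

path_or_repo = "path/to/DPO-Zephyr-7B"
tokenizer = AutoTokenizer.from_pretrained(path_or_repo)
model = AutoModelForCausalLM.from_pretrained(path_or_repo, torch_dtype=torch.bfloat16)
model.config.use_cache = True  # saved config has "use_cache": false; generation is faster with the cache

inputs = tokenizer("Hello!", return_tensors="pt")
output = model.generate(**inputs, max_new_tokens=16)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```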
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2fd2a19ccb0893f580a7235f0f8fd0fd9a589129b99b84c3395259a864ae8c57
+oid sha256:af488688f56cda56ff87ad1eec035fa9ae3a85bca93bdbe59211aa9ae3eae187
 size 4943162336
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cdcfd1a54675ca8e337ea7dd8b603926c8a5b117479c8c4dd42792b8f743f605
+oid sha256:3768b0b7c81899c59e9afb844018c1b1f295d77c8c47e5c118e3562a776ae5a7
 size 4999819336
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:98370412ab5b475b5891780bc29108d4652c88f67530631146785e0f11bd27f2
+oid sha256:16f3d87f94195cc6683db7aeafd3e916dad7e931f54a05872d55a851ad2d0ed0
 size 4540516344
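All three weight shards are Git LFS pointer files (spec v1); only the `oid sha256:` changes while each shard keeps the same byte size, i.e. the weights were rewritten in place. A downloaded shard can be checked against its pointer, as sketched below (the local path is an assumption):

```python
# Minimal sketch: verify a downloaded safetensors shard against its LFS pointer.
import hashlib
import os

shard_path = "model-00001-of-00003.safetensors"  # assumed local path
expected_oid = "af488688f56cda56ff87ad1eec035fa9ae3a85bca93bdbe59211aa9ae3eae187"
expected_size = 4943162336

h = hashlib.sha256()
with open(shard_path, "rb") as f:
    for chunk in iter(lambda: f.read(1024 * 1024), b""):
        h.update(chunk)

assert os.path.getsize(shard_path) == expected_size, "size mismatch"
assert h.hexdigest() == expected_oid, "sha256 mismatch"
print("shard matches the LFS pointer")
```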
train_results.json CHANGED
@@ -1,9 +1,9 @@
 {
     "epoch": 0.99581589958159,
     "total_flos": 0.0,
-    "train_loss": 0.09242776261658228,
-    "train_runtime": 417.0202,
+    "train_loss": 0.09238386955581793,
+    "train_runtime": 386.322,
     "train_samples": 15283,
-    "train_samples_per_second": 36.648,
-    "train_steps_per_second": 0.285
+    "train_samples_per_second": 39.56,
+    "train_steps_per_second": 0.308
 }
trainer_state.json CHANGED
@@ -175,27 +175,27 @@
     },
     {
       "epoch": 0.9205020920502092,
-      "grad_norm": 15.709464172960688,
+      "grad_norm": 15.746470079783288,
       "learning_rate": 8.677580722139671e-09,
-      "logits/chosen": -2.7726309299468994,
-      "logits/rejected": -2.7129008769989014,
-      "logps/chosen": -367.25946044921875,
-      "logps/rejected": -343.40789794921875,
-      "loss": 0.5946,
+      "logits/chosen": -2.7727127075195312,
+      "logits/rejected": -2.7130322456359863,
+      "logps/chosen": -367.272216796875,
+      "logps/rejected": -343.4369201660156,
+      "loss": 0.5945,
       "rewards/accuracies": 0.699999988079071,
-      "rewards/chosen": -0.4677678644657135,
-      "rewards/margins": 0.3370424211025238,
-      "rewards/rejected": -0.8048103451728821,
+      "rewards/chosen": -0.46789541840553284,
+      "rewards/margins": 0.3372054696083069,
+      "rewards/rejected": -0.8051007986068726,
       "step": 110
     },
     {
       "epoch": 0.99581589958159,
       "step": 119,
       "total_flos": 0.0,
-      "train_loss": 0.09242776261658228,
-      "train_runtime": 417.0202,
-      "train_samples_per_second": 36.648,
-      "train_steps_per_second": 0.285
+      "train_loss": 0.09238386955581793,
+      "train_runtime": 386.322,
+      "train_samples_per_second": 39.56,
+      "train_steps_per_second": 0.308
     }
   ],
   "logging_steps": 10,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c65cd7b5c101d95a5dfadd2ce76f2e9250c4bc66203ed4673ccb1be24637b335
+oid sha256:128ab99ba4e1911f9502d9e9cba3a105bb6a758aa83704d9f3e92a77614e5773
 size 6264
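training_args.bin is the pickled training-arguments object saved by the Trainer (hence the unchanged 6264-byte size; only its hash differs). A minimal sketch for inspecting it, assuming an environment where the corresponding transformers/trl classes are importable so the pickle can be resolved:

```python
# Minimal sketch: inspect the pickled training arguments.
# Assumes transformers (and trl, if the object is a TRL config subclass) are installed;
# weights_only=False is required on newer PyTorch to unpickle arbitrary Python objects.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(type(args).__name__)  # a TrainingArguments (sub)class
print(args.learning_rate, args.per_device_train_batch_size, args.num_train_epochs)
```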