Wenboz committed on
Commit
bb945d2
·
verified ·
1 Parent(s): dc42fbc

Model save

Browse files
Files changed (4) hide show
  1. README.md +61 -0
  2. all_results.json +9 -0
  3. train_results.json +9 -0
  4. trainer_state.json +58 -0
README.md ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ base_model: princeton-nlp/Llama-3-Base-8B-SFT
3
+ library_name: peft
4
+ tags:
5
+ - trl
6
+ - dpo
7
+ - generated_from_trainer
8
+ model-index:
9
+ - name: zephyr-7b-wpo-lora
10
+ results: []
11
+ ---
12
+
13
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
14
+ should probably proofread and complete it, then remove this comment. -->
15
+
16
+ # zephyr-7b-wpo-lora
17
+
18
+ This model is a fine-tuned version of [princeton-nlp/Llama-3-Base-8B-SFT](https://huggingface.co/princeton-nlp/Llama-3-Base-8B-SFT) on the None dataset.
19
+
20
+ ## Model description
21
+
22
+ More information needed
23
+
24
+ ## Intended uses & limitations
25
+
26
+ More information needed
27
+
28
+ ## Training and evaluation data
29
+
30
+ More information needed
31
+
32
+ ## Training procedure
33
+
34
+ ### Training hyperparameters
35
+
36
+ The following hyperparameters were used during training:
37
+ - learning_rate: 5e-06
38
+ - train_batch_size: 4
39
+ - eval_batch_size: 8
40
+ - seed: 42
41
+ - distributed_type: multi-GPU
42
+ - num_devices: 4
43
+ - gradient_accumulation_steps: 4
44
+ - total_train_batch_size: 64
45
+ - total_eval_batch_size: 32
46
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
47
+ - lr_scheduler_type: cosine
48
+ - lr_scheduler_warmup_ratio: 0.1
49
+ - num_epochs: 1
50
+
51
+ ### Training results
52
+
53
+
54
+
55
+ ### Framework versions
56
+
57
+ - PEFT 0.7.1
58
+ - Transformers 4.44.2
59
+ - Pytorch 2.2.1+cu121
60
+ - Datasets 2.14.6
61
+ - Tokenizers 0.19.1
all_results.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 0.8,
3
+ "total_flos": 0.0,
4
+ "train_loss": 0.693049430847168,
5
+ "train_runtime": 130.1581,
6
+ "train_samples": 240,
7
+ "train_samples_per_second": 1.844,
8
+ "train_steps_per_second": 0.023
9
+ }
train_results.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 0.8,
3
+ "total_flos": 0.0,
4
+ "train_loss": 0.693049430847168,
5
+ "train_runtime": 130.1581,
6
+ "train_samples": 240,
7
+ "train_samples_per_second": 1.844,
8
+ "train_steps_per_second": 0.023
9
+ }
trainer_state.json ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 0.8,
5
+ "eval_steps": 100,
6
+ "global_step": 3,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.26666666666666666,
13
+ "grad_norm": 0.16037533305228002,
14
+ "learning_rate": 5e-06,
15
+ "logits/chosen": -0.496604323387146,
16
+ "logits/rejected": -0.4112662672996521,
17
+ "logps/chosen": -242.63560485839844,
18
+ "logps/ref_response": -0.496604323387146,
19
+ "logps/rejected": -275.38885498046875,
20
+ "loss": 0.6931,
21
+ "rewards/accuracies": 0.0,
22
+ "rewards/chosen": 0.0,
23
+ "rewards/margins": 0.0,
24
+ "rewards/rejected": 0.0,
25
+ "step": 1
26
+ },
27
+ {
28
+ "epoch": 0.8,
29
+ "step": 3,
30
+ "total_flos": 0.0,
31
+ "train_loss": 0.693049430847168,
32
+ "train_runtime": 130.1581,
33
+ "train_samples_per_second": 1.844,
34
+ "train_steps_per_second": 0.023
35
+ }
36
+ ],
37
+ "logging_steps": 10,
38
+ "max_steps": 3,
39
+ "num_input_tokens_seen": 0,
40
+ "num_train_epochs": 1,
41
+ "save_steps": 100000,
42
+ "stateful_callbacks": {
43
+ "TrainerControl": {
44
+ "args": {
45
+ "should_epoch_stop": false,
46
+ "should_evaluate": false,
47
+ "should_log": false,
48
+ "should_save": true,
49
+ "should_training_stop": true
50
+ },
51
+ "attributes": {}
52
+ }
53
+ },
54
+ "total_flos": 0.0,
55
+ "train_batch_size": 4,
56
+ "trial_name": null,
57
+ "trial_params": null
58
+ }