yyx123 committed
Commit 92c088c
1 Parent(s): aa95c18

Model save

README.md CHANGED
@@ -2,13 +2,9 @@
  license: other
  library_name: peft
  tags:
- - alignment-handbook
- - generated_from_trainer
  - trl
  - sft
  - generated_from_trainer
- datasets:
- - ruozhiba
  base_model: 01-ai/Yi-6B
  model-index:
  - name: Yi-6B-ruozhiba3
@@ -20,7 +16,7 @@ should probably proofread and complete it, then remove this comment. -->
 
  # Yi-6B-ruozhiba3
 
- This model is a fine-tuned version of [01-ai/Yi-6B](https://huggingface.co/01-ai/Yi-6B) on the ruozhiba dataset.
+ This model is a fine-tuned version of [01-ai/Yi-6B](https://huggingface.co/01-ai/Yi-6B) on the None dataset.
  It achieves the following results on the evaluation set:
  - Loss: 4.3351
 
@@ -48,7 +44,7 @@ The following hyperparameters were used during training:
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
  - lr_scheduler_type: cosine
  - lr_scheduler_warmup_ratio: 0.1
- - num_epochs: 3
+ - num_epochs: 20
 
  ### Training results
 
adapter_config.json CHANGED
@@ -19,13 +19,13 @@
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
+ "v_proj",
  "up_proj",
- "o_proj",
- "down_proj",
- "gate_proj",
  "q_proj",
+ "gate_proj",
+ "down_proj",
  "k_proj",
- "v_proj"
+ "o_proj"
  ],
  "task_type": "CAUSAL_LM"
  }
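Note that the same seven modules appear on both sides of this hunk; only their serialization order changed (peft treats `target_modules` as a set, so the order is not meaningful). A minimal sketch of the equivalent `LoraConfig`, with `r` and `lora_alpha` as placeholders since the rank settings are not part of this hunk:

```python
from peft import LoraConfig

# Sketch only: target_modules and task_type come from the diffed
# adapter_config.json; r and lora_alpha are assumed placeholder values.
lora_config = LoraConfig(
    r=16,           # assumption: rank not shown in this hunk
    lora_alpha=32,  # assumption: alpha not shown in this hunk
    target_modules=["v_proj", "up_proj", "q_proj", "gate_proj",
                    "down_proj", "k_proj", "o_proj"],
    task_type="CAUSAL_LM",
)
```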
all_results.json CHANGED
@@ -1,13 +1,13 @@
  {
  "epoch": 20.0,
  "eval_loss": 4.335062026977539,
- "eval_runtime": 6.4371,
+ "eval_runtime": 6.2462,
  "eval_samples": 23,
- "eval_samples_per_second": 3.573,
- "eval_steps_per_second": 0.932,
+ "eval_samples_per_second": 3.682,
+ "eval_steps_per_second": 0.961,
  "train_loss": 0.0,
- "train_runtime": 10.2256,
+ "train_runtime": 10.1895,
  "train_samples": 217,
- "train_samples_per_second": 63.664,
- "train_steps_per_second": 16.136
+ "train_samples_per_second": 425.927,
+ "train_steps_per_second": 107.954
  }
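The large jump in training throughput (63.664 to 425.927 samples/s) is not faster hardware; it is consistent with `Trainer` computing throughput over all epochs, so the change tracks `num_epochs` going from 3 to 20 at a nearly identical runtime. A quick consistency check using only numbers from this commit:

```python
# Throughput appears to be (train_samples * num_epochs) / train_runtime,
# and steps/s to be max_steps / train_runtime.
print(217 * 20 / 10.1895)  # ~425.93 -> "train_samples_per_second": 425.927
print(1100 / 10.1895)      # ~107.95 -> "train_steps_per_second": 107.954
print(23 / 6.2462)         # ~3.68   -> "eval_samples_per_second": 3.682
```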
eval_results.json CHANGED
@@ -1,8 +1,8 @@
  {
  "epoch": 20.0,
  "eval_loss": 4.335062026977539,
- "eval_runtime": 6.4371,
+ "eval_runtime": 6.2462,
  "eval_samples": 23,
- "eval_samples_per_second": 3.573,
- "eval_steps_per_second": 0.932
+ "eval_samples_per_second": 3.682,
+ "eval_steps_per_second": 0.961
  }
train_results.json CHANGED
@@ -1,8 +1,8 @@
  {
  "epoch": 20.0,
  "train_loss": 0.0,
- "train_runtime": 10.2256,
+ "train_runtime": 10.1895,
  "train_samples": 217,
- "train_samples_per_second": 63.664,
- "train_steps_per_second": 16.136
+ "train_samples_per_second": 425.927,
+ "train_steps_per_second": 107.954
  }
trainer_state.json CHANGED
@@ -501,15 +501,15 @@
  "step": 1100,
  "total_flos": 3.807078373542298e+16,
  "train_loss": 0.0,
- "train_runtime": 10.2256,
- "train_samples_per_second": 63.664,
- "train_steps_per_second": 16.136
+ "train_runtime": 10.1895,
+ "train_samples_per_second": 425.927,
+ "train_steps_per_second": 107.954
  }
  ],
  "logging_steps": 20,
- "max_steps": 165,
+ "max_steps": 1100,
  "num_input_tokens_seen": 0,
- "num_train_epochs": 3,
+ "num_train_epochs": 20,
  "save_steps": 20,
  "total_flos": 3.807078373542298e+16,
  "train_batch_size": 4,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b591bed5d250490d702e6db47039b4083880a6eccfa19736ca1c3fe16762de21
+ oid sha256:d9f736ab86e14c5ce1673e8cdbda8ef77a9397529a9bc1b33bab2a4b8bbb8729
  size 4728
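`training_args.bin` is stored via Git LFS, so the diff above shows only the pointer file: a new sha256 oid at an unchanged size of 4728 bytes. The underlying blob is a pickled `TrainingArguments` object saved by `Trainer`; a sketch of inspecting it after downloading the actual file (not just the pointer):

```python
import torch

# weights_only=False is needed on newer torch versions because this is a
# pickled Python object, not a tensor checkpoint.
args = torch.load("training_args.bin", weights_only=False)
print(args.num_train_epochs)  # expected: 20 after this commit
```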