chchen committed
Commit 406fbbb · verified · 1 Parent(s): 06188f3

Training in progress, step 168

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
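The new rule routes `tokenizer.json` through Git LFS, so a plain clone holds only a small pointer file of the `version`/`oid`/`size` form visible for the binary files later in this commit. A minimal detection sketch, assuming the standard LFS pointer layout:

```python
def is_lfs_pointer(path: str) -> bool:
    # Real LFS pointers are small text files that begin with the spec URL,
    # exactly like the version/oid/size stanzas for the binaries below.
    with open(path, "rb") as f:
        return f.read(100).startswith(b"version https://git-lfs.github.com/spec/v1")

# In a clone without `git lfs pull`, tokenizer.json is still just a pointer.
print(is_lfs_pointer("tokenizer.json"))
```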
.ipynb_checkpoints/llama3_lora_sft-checkpoint.yaml ADDED
@@ -0,0 +1,43 @@
+ ### model
+ model_name_or_path: mistralai/Mistral-Nemo-Instruct-2407
+
+ ### method
+ stage: sft
+ do_train: true
+ finetuning_type: lora
+ lora_target: all
+
+ ### dataset
+ dataset: bct_non_cot_sft_1000
+ dataset_dir: data_private
+ template: mistral
+ cutoff_len: 1024
+ # max_samples: 1000
+ overwrite_cache: true
+ preprocessing_num_workers: 16
+
+ ### output
+ output_dir: saves/Mistral-Nemo-12B-Instruct/lora/sft
+ logging_steps: 10
+ save_steps: 500
+ plot_loss: true
+ overwrite_output_dir: true
+ save_total_limit: 3
+ load_best_model_at_end: true
+ push_to_hub: true
+ hub_model_id: chchen/Mistral-Nemo-12B-Instruct-SFT
+
+ ### train
+ per_device_train_batch_size: 2
+ gradient_accumulation_steps: 8
+ learning_rate: 0.000005
+ num_train_epochs: 3.0
+ lr_scheduler_type: cosine
+ warmup_ratio: 0.1
+ fp16: true
+
+ ### eval
+ val_size: 0.1
+ per_device_eval_batch_size: 2
+ evaluation_strategy: steps
+ eval_steps: 500
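These hyperparameters reproduce the step count in the commit title. A back-of-the-envelope sketch, assuming a single GPU and 1,000 examples in `bct_non_cot_sft_1000` (the dataset name and the commented-out `max_samples: 1000` both point that way), using the HF `Trainer` convention of flooring partial accumulation steps:

```python
import math

# Assumption: 1,000 examples; val_size 0.1 leaves 900 for training.
train_examples = int(1000 * (1 - 0.1))           # 900

micro_batches = math.ceil(train_examples / 2)    # per_device_train_batch_size: 2 -> 450
steps_per_epoch = micro_batches // 8             # gradient_accumulation_steps: 8 -> 56
total_steps = math.ceil(3.0 * steps_per_epoch)   # num_train_epochs: 3.0

print(total_steps)  # 168 -- matching "Training in progress, step 168"
```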
adapter_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "mistralai/Mistral-Nemo-Instruct-2407",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_dropout": 0.0,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "k_proj",
+     "down_proj",
+     "q_proj",
+     "o_proj",
+     "v_proj",
+     "gate_proj",
+     "up_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_dora": false,
+   "use_rslora": false
+ }
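For reference, an adapter with this config loads on top of the base model via PEFT. A minimal sketch; the hub id is the `hub_model_id` from the training config, and the dtype/device choices are illustrative:

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base = AutoModelForCausalLM.from_pretrained(
    "mistralai/Mistral-Nemo-Instruct-2407",
    torch_dtype=torch.float16,          # matches the fp16 training setting
    device_map="auto",
)
# Attach the r=8 / alpha=16 LoRA weights from this repo.
model = PeftModel.from_pretrained(base, "chchen/Mistral-Nemo-12B-Instruct-SFT")
tokenizer = AutoTokenizer.from_pretrained("chchen/Mistral-Nemo-12B-Instruct-SFT")

# Optionally fold the adapter into the base weights for plain inference.
model = model.merge_and_unload()
```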
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:41dba1a3813b753b664642d6a5bdebf06876732d2cc16c6bc18f22cdb1f9e654
+ size 114106856
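The pointer's ~114 MB size is consistent with an fp32 r=8 adapter over all linear projections. A rough check, assuming Mistral-Nemo's published dimensions (40 layers, hidden size 5120, 32 query / 8 KV heads of head dim 128, FFN 14336):

```python
hidden, ffn, head_dim, n_heads, n_kv, layers, r = 5120, 14336, 128, 32, 8, 40, 8

# LoRA adds r * (d_in + d_out) parameters per adapted linear layer.
def lora_params(d_in, d_out):
    return r * (d_in + d_out)

per_layer = (
    lora_params(hidden, n_heads * head_dim)      # q_proj
    + 2 * lora_params(hidden, n_kv * head_dim)   # k_proj, v_proj
    + lora_params(n_heads * head_dim, hidden)    # o_proj
    + 2 * lora_params(hidden, ffn)               # gate_proj, up_proj
    + lora_params(ffn, hidden)                   # down_proj
)
total = per_layer * layers   # 28,508,160 parameters
print(total * 4)             # ~114,032,640 bytes in fp32, close to the
                             # 114,106,856-byte file (rest is safetensors header)
```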
llama3_lora_sft.yaml ADDED
@@ -0,0 +1,43 @@
+ ### model
+ model_name_or_path: mistralai/Mistral-Nemo-Instruct-2407
+
+ ### method
+ stage: sft
+ do_train: true
+ finetuning_type: lora
+ lora_target: all
+
+ ### dataset
+ dataset: bct_non_cot_sft_1000
+ dataset_dir: data_private
+ template: mistral
+ cutoff_len: 1024
+ # max_samples: 1000
+ overwrite_cache: true
+ preprocessing_num_workers: 16
+
+ ### output
+ output_dir: saves/Mistral-Nemo-12B-Instruct/lora/sft
+ logging_steps: 10
+ save_steps: 500
+ plot_loss: true
+ overwrite_output_dir: true
+ save_total_limit: 3
+ load_best_model_at_end: true
+ push_to_hub: true
+ hub_model_id: chchen/Mistral-Nemo-12B-Instruct-SFT
+
+ ### train
+ per_device_train_batch_size: 2
+ gradient_accumulation_steps: 8
+ learning_rate: 0.000005
+ num_train_epochs: 3.0
+ lr_scheduler_type: cosine
+ warmup_ratio: 0.1
+ fp16: true
+
+ ### eval
+ val_size: 0.1
+ per_device_eval_batch_size: 2
+ evaluation_strategy: steps
+ eval_steps: 500
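This file is identical to the `.ipynb_checkpoints` autosave above (the `llama3_` prefix is only the config's filename; it fine-tunes Mistral-Nemo). With LLaMA-Factory such a config is typically launched as `llamafactory-cli train llama3_lora_sft.yaml`; a small pre-flight sketch for sanity-checking the YAML:

```python
import yaml

# Load the config and print the fields that determine the run.
with open("llama3_lora_sft.yaml") as f:
    cfg = yaml.safe_load(f)

for key in ("model_name_or_path", "finetuning_type", "lora_target",
            "learning_rate", "num_train_epochs"):
    print(f"{key}: {cfg[key]}")

# Effective batch size per optimizer step: 2 * 8 = 16 sequences.
print("effective batch size:",
      cfg["per_device_train_batch_size"] * cfg["gradient_accumulation_steps"])
```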
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "</s>",
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
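Note that `pad_token` reuses the EOS string `</s>` instead of a dedicated pad token, a common choice for models that ship without one. A quick check, assuming the tokenizer published in this repo:

```python
from transformers import AutoTokenizer

# Hub id taken from hub_model_id in the training config above.
tok = AutoTokenizer.from_pretrained("chchen/Mistral-Nemo-12B-Instruct-SFT")

print(tok.bos_token, tok.eos_token, tok.pad_token)  # <s> </s> </s>
# Because pad == eos, padded positions are excluded from the loss via the
# attention mask / label masking rather than a dedicated pad id.
assert tok.pad_token_id == tok.eos_token_id
```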
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b0240ce510f08e6c2041724e9043e33be9d251d1e4a4d94eb68cd47b954b61d2
+ size 17078292
tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff
 
trainer_log.jsonl ADDED
@@ -0,0 +1,17 @@
+ {"current_steps": 10, "total_steps": 168, "loss": 1.2612, "learning_rate": 2.647058823529412e-06, "epoch": 0.17777777777777778, "percentage": 5.95, "elapsed_time": "0:00:29", "remaining_time": "0:07:48"}
+ {"current_steps": 20, "total_steps": 168, "loss": 1.2578, "learning_rate": 4.997836020254328e-06, "epoch": 0.35555555555555557, "percentage": 11.9, "elapsed_time": "0:00:57", "remaining_time": "0:07:06"}
+ {"current_steps": 30, "total_steps": 168, "loss": 1.0594, "learning_rate": 4.922489359292928e-06, "epoch": 0.5333333333333333, "percentage": 17.86, "elapsed_time": "0:01:25", "remaining_time": "0:06:32"}
+ {"current_steps": 40, "total_steps": 168, "loss": 0.7524, "learning_rate": 4.7426609101991605e-06, "epoch": 0.7111111111111111, "percentage": 23.81, "elapsed_time": "0:01:53", "remaining_time": "0:06:03"}
+ {"current_steps": 50, "total_steps": 168, "loss": 0.5438, "learning_rate": 4.466106660773884e-06, "epoch": 0.8888888888888888, "percentage": 29.76, "elapsed_time": "0:02:21", "remaining_time": "0:05:34"}
+ {"current_steps": 60, "total_steps": 168, "loss": 0.481, "learning_rate": 4.104754375481665e-06, "epoch": 1.0666666666666667, "percentage": 35.71, "elapsed_time": "0:02:49", "remaining_time": "0:05:05"}
+ {"current_steps": 70, "total_steps": 168, "loss": 0.2657, "learning_rate": 3.674189151845515e-06, "epoch": 1.2444444444444445, "percentage": 41.67, "elapsed_time": "0:03:17", "remaining_time": "0:04:36"}
+ {"current_steps": 80, "total_steps": 168, "loss": 0.1834, "learning_rate": 3.1929812363354766e-06, "epoch": 1.4222222222222223, "percentage": 47.62, "elapsed_time": "0:03:45", "remaining_time": "0:04:08"}
+ {"current_steps": 90, "total_steps": 168, "loss": 0.164, "learning_rate": 2.68188509100236e-06, "epoch": 1.6, "percentage": 53.57, "elapsed_time": "0:04:13", "remaining_time": "0:03:40"}
+ {"current_steps": 100, "total_steps": 168, "loss": 0.1039, "learning_rate": 2.1629442550539283e-06, "epoch": 1.7777777777777777, "percentage": 59.52, "elapsed_time": "0:04:41", "remaining_time": "0:03:11"}
+ {"current_steps": 110, "total_steps": 168, "loss": 0.1299, "learning_rate": 1.6585406086279847e-06, "epoch": 1.9555555555555557, "percentage": 65.48, "elapsed_time": "0:05:10", "remaining_time": "0:02:43"}
+ {"current_steps": 120, "total_steps": 168, "loss": 0.0871, "learning_rate": 1.1904290439459974e-06, "epoch": 2.1333333333333333, "percentage": 71.43, "elapsed_time": "0:05:38", "remaining_time": "0:02:15"}
+ {"current_steps": 130, "total_steps": 168, "loss": 0.1068, "learning_rate": 7.787991784095e-07, "epoch": 2.311111111111111, "percentage": 77.38, "elapsed_time": "0:06:05", "remaining_time": "0:01:46"}
+ {"current_steps": 140, "total_steps": 168, "loss": 0.1238, "learning_rate": 4.414045778845144e-07, "epoch": 2.488888888888889, "percentage": 83.33, "elapsed_time": "0:06:33", "remaining_time": "0:01:18"}
+ {"current_steps": 150, "total_steps": 168, "loss": 0.1024, "learning_rate": 1.927970467097573e-07, "epoch": 2.6666666666666665, "percentage": 89.29, "elapsed_time": "0:07:01", "remaining_time": "0:00:50"}
+ {"current_steps": 160, "total_steps": 168, "loss": 0.0923, "learning_rate": 4.369900944435734e-08, "epoch": 2.8444444444444446, "percentage": 95.24, "elapsed_time": "0:07:29", "remaining_time": "0:00:22"}
+ {"current_steps": 168, "total_steps": 168, "epoch": 2.986666666666667, "percentage": 100.0, "elapsed_time": "0:07:53", "remaining_time": "0:00:00"}
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5607cec29c654bea9d011aef860bb72d887f8478ae20b5a2d11f46368b515c7b
+ size 5432
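Finally, `training_args.bin` is the pickled `TrainingArguments` object the HF `Trainer` saves alongside checkpoints, not model weights. A sketch for inspecting it (recent PyTorch defaults `torch.load` to `weights_only=True`, so the flag must be relaxed for pickled objects):

```python
import torch

# Unpickle the saved TrainingArguments (safe here only because the file
# comes from a trusted repo; torch.load with weights_only=False runs pickle).
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs, args.lr_scheduler_type)
```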