duyphu committed (verified)
Commit 61c56f2 · 1 Parent(s): 73191c6

Training in progress, step 1, checkpoint

last-checkpoint/adapter_config.json CHANGED
@@ -20,13 +20,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "o_proj",
+    "down_proj",
     "k_proj",
     "gate_proj",
+    "o_proj",
     "up_proj",
     "v_proj",
-    "q_proj",
-    "down_proj"
+    "q_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bcf8f3f81077c72b30757b447323ff03eafe77a4c7a7c16fc24907fdc8bfe29d
+oid sha256:da7a7bfa3c9e322c18f110a1dd21f7919c4e2959c2fa1b459904044f9946a0a8
 size 30026872
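
The binary checkpoint files in this commit are tracked with Git LFS, so each diff only touches the pointer file: the sha256 oid changes while the size line may stay the same. A minimal sketch of verifying a downloaded file against the new pointer (the local path is an assumption):

import hashlib

path = "last-checkpoint/adapter_model.safetensors"  # assumed local path
expected = "da7a7bfa3c9e322c18f110a1dd21f7919c4e2959c2fa1b459904044f9946a0a8"

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        h.update(chunk)

print("match" if h.hexdigest() == expected else "mismatch")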
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:eb97cff48ebb4707b5c7966da00071b163a2170a1b90811e7f23c85e3eb6df90
+oid sha256:3722569925c9a26ca6bd9d0a34b2e3cee883cb8e9cae7a6c78947c515a35018b
 size 15611412
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6035abaef6269d017307cb69d94afde51c7a814a09f6c71bcbc401d0d2da6877
+oid sha256:81b0fa30ce7d6caf58ef5c2227062e5ee5947fe33564486703caa8d601663ea6
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b1df0528620c07325b8faa7567e59b0c1e86a1f1ee6af1245a69c6c0463fe4e2
+oid sha256:ae751897b8e87ff08962a91d1d3485984775a96aa89e29a1caac3d6f449228f7
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.02658160552897395,
-  "eval_steps": 10,
-  "global_step": 50,
+  "epoch": 0.000531632110579479,
+  "eval_steps": 1,
+  "global_step": 1,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -11,127 +11,17 @@
     {
       "epoch": 0.000531632110579479,
       "eval_loss": 2.794513702392578,
-      "eval_runtime": 35.2595,
-      "eval_samples_per_second": 22.462,
-      "eval_steps_per_second": 11.231,
+      "eval_runtime": 26.5875,
+      "eval_samples_per_second": 29.788,
+      "eval_steps_per_second": 14.894,
       "step": 1
-    },
-    {
-      "epoch": 0.002658160552897395,
-      "grad_norm": 2.5087876319885254,
-      "learning_rate": 5e-05,
-      "loss": 2.9129,
-      "step": 5
-    },
-    {
-      "epoch": 0.00531632110579479,
-      "grad_norm": 3.172278881072998,
-      "learning_rate": 0.0001,
-      "loss": 2.5148,
-      "step": 10
-    },
-    {
-      "epoch": 0.00531632110579479,
-      "eval_loss": 2.32590651512146,
-      "eval_runtime": 26.5891,
-      "eval_samples_per_second": 29.787,
-      "eval_steps_per_second": 14.893,
-      "step": 10
-    },
-    {
-      "epoch": 0.007974481658692184,
-      "grad_norm": 2.6018869876861572,
-      "learning_rate": 9.619397662556435e-05,
-      "loss": 1.7981,
-      "step": 15
-    },
-    {
-      "epoch": 0.01063264221158958,
-      "grad_norm": 1.8246312141418457,
-      "learning_rate": 8.535533905932738e-05,
-      "loss": 1.145,
-      "step": 20
-    },
-    {
-      "epoch": 0.01063264221158958,
-      "eval_loss": 0.6814725399017334,
-      "eval_runtime": 27.0108,
-      "eval_samples_per_second": 29.322,
-      "eval_steps_per_second": 14.661,
-      "step": 20
-    },
-    {
-      "epoch": 0.013290802764486975,
-      "grad_norm": 2.5452466011047363,
-      "learning_rate": 6.91341716182545e-05,
-      "loss": 0.6943,
-      "step": 25
-    },
-    {
-      "epoch": 0.01594896331738437,
-      "grad_norm": 1.912396788597107,
-      "learning_rate": 5e-05,
-      "loss": 0.3943,
-      "step": 30
-    },
-    {
-      "epoch": 0.01594896331738437,
-      "eval_loss": 0.2496986836194992,
-      "eval_runtime": 27.3235,
-      "eval_samples_per_second": 28.986,
-      "eval_steps_per_second": 14.493,
-      "step": 30
-    },
-    {
-      "epoch": 0.018607123870281767,
-      "grad_norm": 0.5929796099662781,
-      "learning_rate": 3.086582838174551e-05,
-      "loss": 0.1247,
-      "step": 35
-    },
-    {
-      "epoch": 0.02126528442317916,
-      "grad_norm": 1.806302547454834,
-      "learning_rate": 1.4644660940672627e-05,
-      "loss": 0.1757,
-      "step": 40
-    },
-    {
-      "epoch": 0.02126528442317916,
-      "eval_loss": 0.1834142655134201,
-      "eval_runtime": 27.0169,
-      "eval_samples_per_second": 29.315,
-      "eval_steps_per_second": 14.657,
-      "step": 40
-    },
-    {
-      "epoch": 0.023923444976076555,
-      "grad_norm": 2.3250198364257812,
-      "learning_rate": 3.8060233744356633e-06,
-      "loss": 0.2178,
-      "step": 45
-    },
-    {
-      "epoch": 0.02658160552897395,
-      "grad_norm": 0.5584317445755005,
-      "learning_rate": 0.0,
-      "loss": 0.0977,
-      "step": 50
-    },
-    {
-      "epoch": 0.02658160552897395,
-      "eval_loss": 0.17906339466571808,
-      "eval_runtime": 26.7116,
-      "eval_samples_per_second": 29.65,
-      "eval_steps_per_second": 14.825,
-      "step": 50
     }
   ],
   "logging_steps": 5,
-  "max_steps": 50,
+  "max_steps": 1,
   "num_input_tokens_seen": 0,
   "num_train_epochs": 1,
-  "save_steps": 13,
+  "save_steps": 1,
   "stateful_callbacks": {
     "TrainerControl": {
       "args": {
@@ -144,7 +34,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1581936279552000.0,
+  "total_flos": 31638725591040.0,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4d8014bdbf365b7c80c19de9c9ffd0ae1c588b167a2374fea0ff34c2127a39f0
+oid sha256:91f112afae4e2301bbefc56d64d917f6205e0d3af916c3d8ee71db3d6b725c73
 size 6776