tomtseng committed (verified)
Commit 8e62afa · Parent(s): e9650d8

Model save
README.md CHANGED
@@ -38,10 +38,10 @@ The following hyperparameters were used during training:
  - eval_batch_size: 2
  - seed: 42
  - distributed_type: multi-GPU
- - num_devices: 4
+ - num_devices: 2
  - gradient_accumulation_steps: 4
- - total_train_batch_size: 16
- - total_eval_batch_size: 8
+ - total_train_batch_size: 8
+ - total_eval_batch_size: 4
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
  - lr_scheduler_type: cosine
  - num_epochs: 1
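The batch-size totals in this hunk follow from the other hyperparameters: total_train_batch_size = per_device_train_batch_size × num_devices × gradient_accumulation_steps, and total_eval_batch_size = eval_batch_size × num_devices. A minimal sketch of that arithmetic, assuming a per-device train batch size of 1 (not shown in this hunk, but implied by the totals):

```python
# Sketch of how the totals in the diff are derived; per_device_train_batch_size
# does not appear in this hunk, so its value (1) is inferred from the arithmetic.
per_device_train_batch_size = 1   # assumed: 8 / (2 devices * 4 accumulation steps)
per_device_eval_batch_size = 2    # "eval_batch_size: 2" above
gradient_accumulation_steps = 4

def totals(num_devices: int) -> tuple[int, int]:
    """Return (total_train_batch_size, total_eval_batch_size) for a device count."""
    total_train = per_device_train_batch_size * num_devices * gradient_accumulation_steps
    total_eval = per_device_eval_batch_size * num_devices
    return total_train, total_eval

print(totals(4))  # (16, 8) -- the values removed in this diff
print(totals(2))  # (8, 4)  -- the values added in this diff
```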
adapter_config.json CHANGED
@@ -16,9 +16,9 @@
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
- "q_proj",
  "v_proj",
  "k_proj",
+ "q_proj",
  "o_proj"
  ],
  "task_type": "CAUSAL_LM"
runs/Feb02_00-01-42_r2d2-devbox-lcd4w/events.out.tfevents.1738454980.r2d2-devbox-lcd4w.3320.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:53c23a606d7910d79ac889c98f39a9801484a08693aa7fb74b0498c0414da87b
- size 4612
+ oid sha256:783bc1f40d3231e9b3ecbdb5522267828060ae96cb143d061c9ded7668e37998
+ size 4766
runs/Feb02_00-24-35_r2d2-devbox-lcd4w/events.out.tfevents.1738455895.r2d2-devbox-lcd4w.8462.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:19b6858d84c4ed397c142972a32f4bb4e86da3c199bdabdf5d9a4e241cdcfddc
+ size 4612
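The runs/ and training_args.bin entries are Git LFS pointer files rather than the artifacts themselves: each records the SHA-256 oid and byte size of the tracked blob, so these hunks only swap hashes and sizes. A small sketch for checking a downloaded artifact against such a pointer, assuming the three-line format shown above (the local paths are illustrative, not from this repository):

```python
# Sketch: verify a downloaded artifact against its Git LFS pointer file.
# Pointer format assumed: "version <url>", "oid sha256:<hex>", "size <bytes>".
import hashlib
from pathlib import Path

def verify_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    # Parse "key value" lines of the pointer file into a dict.
    fields = dict(
        line.split(" ", 1)
        for line in Path(pointer_path).read_text().splitlines()
        if " " in line
    )
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])
    data = Path(blob_path).read_bytes()
    return hashlib.sha256(data).hexdigest() == expected_oid and len(data) == expected_size

# Hypothetical usage with local copies of the pointer and the real file:
# verify_lfs_pointer("training_args.bin.pointer", "training_args.bin")
```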
step_0/README.md CHANGED
@@ -203,4 +203,11 @@ Carbon emissions can be estimated using the [Machine Learning Impact calculator]
  ### Framework versions
 
 
+ - PEFT 0.6.1
+ ## Training procedure
+
+
+ ### Framework versions
+
+
  - PEFT 0.6.1
step_0/adapter_config.json CHANGED
@@ -16,9 +16,9 @@
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
- "q_proj",
  "v_proj",
  "k_proj",
+ "q_proj",
  "o_proj"
  ],
  "task_type": "CAUSAL_LM"
step_0/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c227d230575fc99ea17e41c04d005ea238f73b5b4b99dba46c598857dfb6cf95
+ oid sha256:3a322bac975ed907e0f4ed69d712e860be8f2468f1c1aa656133f87c43c3e4ff
  size 5816
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c227d230575fc99ea17e41c04d005ea238f73b5b4b99dba46c598857dfb6cf95
+ oid sha256:3a322bac975ed907e0f4ed69d712e860be8f2468f1c1aa656133f87c43c3e4ff
  size 5816
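Both training_args.bin pointers change their oid while keeping size 5816, i.e. the serialized arguments changed content but not length. A minimal sketch for inspecting the file locally, assuming the usual convention that it was written with torch.save by transformers.Trainer (the attribute names come from transformers.TrainingArguments, not from this diff):

```python
# Sketch: load and inspect the serialized training arguments. Assumes
# training_args.bin was written via torch.save; weights_only=False is needed
# on recent torch versions to unpickle a full Python object.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.num_train_epochs)             # expected to match "num_epochs: 1" above
print(args.gradient_accumulation_steps)  # expected to match 4 above
print(args.lr_scheduler_type)            # expected to match "cosine" above
```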