tomtseng committed
Commit 39b746b (verified) · 1 Parent(s): 8e62afa

Model save

README.md CHANGED
@@ -38,10 +38,10 @@ The following hyperparameters were used during training:
  - eval_batch_size: 2
  - seed: 42
  - distributed_type: multi-GPU
- - num_devices: 2
+ - num_devices: 3
  - gradient_accumulation_steps: 4
- - total_train_batch_size: 8
- - total_eval_batch_size: 4
+ - total_train_batch_size: 12
+ - total_eval_batch_size: 6
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
  - lr_scheduler_type: cosine
  - num_epochs: 1
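
The batch-size totals in the updated hunk follow the usual Trainer convention of per-device batch size × number of devices × gradient accumulation steps (no accumulation for eval). A quick sketch of that arithmetic, assuming a per-device train batch size of 1, which is not shown in this hunk but is implied by 12 / (3 × 4):

```python
# Sketch of the Trainer-style batch-size arithmetic behind the README totals.
# per_device_train_batch_size is not shown in this hunk; 1 is inferred from 12 / (3 * 4).
num_devices = 3
gradient_accumulation_steps = 4
per_device_train_batch_size = 1  # inferred, not stated in the diff
per_device_eval_batch_size = 2   # "eval_batch_size: 2" above

total_train_batch_size = per_device_train_batch_size * num_devices * gradient_accumulation_steps
total_eval_batch_size = per_device_eval_batch_size * num_devices

assert total_train_batch_size == 12
assert total_eval_batch_size == 6
```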
adapter_config.json CHANGED
@@ -16,10 +16,10 @@
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
- "v_proj",
- "k_proj",
  "q_proj",
- "o_proj"
+ "v_proj",
+ "o_proj",
+ "k_proj"
  ],
  "task_type": "CAUSAL_LM"
  }
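
This adapter_config.json change only reorders target_modules; PEFT applies LoRA to every listed module name regardless of order, so the set of adapted projections (q/k/v/o) is unchanged. A minimal sketch of an equivalent LoraConfig, assuming placeholder values for r and lora_alpha since they do not appear in this hunk:

```python
# Minimal sketch, not the repo's exact config: only target_modules and task_type
# appear in this hunk; r and lora_alpha are placeholder values.
from peft import LoraConfig

config = LoraConfig(
    r=16,            # placeholder, not shown in the diff
    lora_alpha=32,   # placeholder, not shown in the diff
    target_modules=["q_proj", "v_proj", "o_proj", "k_proj"],  # order does not matter
    task_type="CAUSAL_LM",
)
```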
runs/Feb02_00-26-47_r2d2-devbox-lcd4w/events.out.tfevents.1738456028.r2d2-devbox-lcd4w.8834.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ae65fb0c86079162973984e326b62b6dd47894eda744a427b9a83b2bff019610
+ size 4612
runs/Feb02_00-28-28_r2d2-devbox-lcd4w/events.out.tfevents.1738456128.r2d2-devbox-lcd4w.9194.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4864531968e144b07be3bb9b14e64d100dcf88ee501da644dd8cebac98cce240
+ size 4612
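
The two added files under runs/ are Git LFS pointers for TensorBoard event logs; only the pointer (oid and size) is stored in the repo, so the logs need a `git lfs pull` before they can be read. A hedged sketch of inspecting them with TensorBoard's EventAccumulator, where the scalar tag train/loss is an assumption about what the Trainer logged:

```python
# Hedged sketch for inspecting the tfevents logs after `git lfs pull`.
# The scalar tag "train/loss" is an assumption; list the real tags with acc.Tags().
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("runs/Feb02_00-28-28_r2d2-devbox-lcd4w")
acc.Reload()
print(acc.Tags())  # shows which scalar tags were actually logged
for event in acc.Scalars("train/loss"):
    print(event.step, event.value)
```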
step_0/README.md CHANGED
@@ -210,4 +210,11 @@ Carbon emissions can be estimated using the [Machine Learning Impact calculator]
  ### Framework versions


+ - PEFT 0.6.1
+ ## Training procedure
+
+
+ ### Framework versions
+
+
  - PEFT 0.6.1
step_0/adapter_config.json CHANGED
@@ -16,10 +16,10 @@
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
- "v_proj",
- "k_proj",
  "q_proj",
- "o_proj"
+ "v_proj",
+ "o_proj",
+ "k_proj"
  ],
  "task_type": "CAUSAL_LM"
  }
step_0/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3a322bac975ed907e0f4ed69d712e860be8f2468f1c1aa656133f87c43c3e4ff
+ oid sha256:313830ae44ff799a704dc5781ea87d293f695aab9f4ae8a9000bbc674c36db6c
  size 5816
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3a322bac975ed907e0f4ed69d712e860be8f2468f1c1aa656133f87c43c3e4ff
+ oid sha256:313830ae44ff799a704dc5781ea87d293f695aab9f4ae8a9000bbc674c36db6c
  size 5816
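
Only the LFS oid of training_args.bin changes, which is expected: the file is conventionally a torch-serialized TrainingArguments object, and the hyperparameters above were edited. A hedged sketch of loading it for inspection (weights_only=False is needed on recent PyTorch because the file is a pickled Python object; only do this for files you trust):

```python
# Hedged sketch: training_args.bin is conventionally a torch-serialized TrainingArguments.
# weights_only=False is required on newer PyTorch because the file is a pickled object;
# only load files you trust.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.num_train_epochs, args.gradient_accumulation_steps)
```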