Tatvajsh committed
Commit b319949 · 1 Parent(s): f850e58

Tatvajsh/Lllama_AHS_V_6.6

Files changed (4)
  1. README.md +1 -1
  2. adapter_config.json +5 -5
  3. adapter_model.bin +1 -1
  4. training_args.bin +1 -1
README.md CHANGED
@@ -38,7 +38,7 @@ The following hyperparameters were used during training:
  - seed: 42
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
  - lr_scheduler_type: linear
- - training_steps: 100
+ - training_steps: 500
  - mixed_precision_training: Native AMP

  ### Training results
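For context, the hyperparameters listed in this README hunk map roughly onto a transformers TrainingArguments object. The sketch below is illustrative only: the output directory, and anything else not listed in the hunk (learning rate, batch size), are assumptions.

```python
# Illustrative sketch of the updated training setup; not the repo's actual script.
# Only the values shown in the README diff (seed, Adam betas/epsilon, scheduler,
# training steps, Native AMP) are sourced; output_dir is an assumed name.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="Lllama_AHS_V_6.6",  # assumed, taken from the commit title
    seed=42,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="linear",
    max_steps=500,                  # raised from 100 to 500 in this commit
    fp16=True,                      # "Native AMP" mixed-precision training
)
```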
adapter_config.json CHANGED
@@ -16,14 +16,14 @@
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
- "base_layer",
- "q_proj",
- "v_proj",
+ "down_proj",
  "o_proj",
- "gate_proj",
+ "v_proj",
  "up_proj",
+ "q_proj",
  "k_proj",
- "down_proj"
+ "gate_proj",
+ "base_layer"
  ],
  "task_type": "CAUSAL_LM"
  }
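This adapter_config.json change only reorders the target_modules list; the set of targeted modules itself is unchanged. A minimal peft sketch that would produce an equivalent config is shown below; the rank, alpha, and dropout values are assumptions, since they fall outside the displayed hunk.

```python
# Minimal sketch of a LoRA config matching the updated target_modules list.
# r, lora_alpha, and lora_dropout are assumptions -- the diff hunk does not show them.
from peft import LoraConfig

lora_config = LoraConfig(
    task_type="CAUSAL_LM",
    target_modules=[
        "down_proj",
        "o_proj",
        "v_proj",
        "up_proj",
        "q_proj",
        "k_proj",
        "gate_proj",
        "base_layer",
    ],
    r=16,            # assumed rank
    lora_alpha=32,   # assumed scaling
    lora_dropout=0.05,
)
```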
adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:18dafa6c0ee06c46901f52d9eed0b943e355197fe1341923551c2481c0391409
+ oid sha256:26ae46498cc6ea63d753a209c8a9d49b8fc6d9e12e70aed0c6a4be18e45c5aef
  size 101834237
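adapter_model.bin is stored as a Git LFS pointer, so only its sha256 oid changes here; the file size is identical. If this repo is published under the ID in the commit title (an assumption), pulling the updated adapter might look roughly like the sketch below.

```python
# Hedged sketch: assumes "Tatvajsh/Lllama_AHS_V_6.6" is the Hub repo ID and that
# the base model recorded in adapter_config.json is resolvable from the Hub.
from peft import AutoPeftModelForCausalLM

model = AutoPeftModelForCausalLM.from_pretrained("Tatvajsh/Lllama_AHS_V_6.6")
```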
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1f9955b04800dc6c2cf924ada51ee03b2e139da1bb54bddd1445c63daa7185fc
+ oid sha256:4fd357cd3434ca8ab3b16b6eee237402522b87a2e930ce73f350b77ea8380a45
  size 4027