lesso committed
Commit 45ca2dd · verified · 1 Parent(s): 4874012

End of training

README.md CHANGED
@@ -104,7 +104,7 @@ xformers_attention: null
 
  This model is a fine-tuned version of [OpenBuddy/openbuddy-llama2-13b-v8.1-fp16](https://huggingface.co/OpenBuddy/openbuddy-llama2-13b-v8.1-fp16) on the None dataset.
  It achieves the following results on the evaluation set:
- - Loss: 1.8668
+ - Loss: 1.9381
 
  ## Model description
 
@@ -127,11 +127,8 @@ The following hyperparameters were used during training:
  - train_batch_size: 1
  - eval_batch_size: 1
  - seed: 42
- - distributed_type: multi-GPU
- - num_devices: 2
  - gradient_accumulation_steps: 4
- - total_train_batch_size: 8
- - total_eval_batch_size: 2
+ - total_train_batch_size: 4
  - optimizer: Use OptimizerNames.ADAMW_HF with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
  - lr_scheduler_type: cosine
  - training_steps: 1000
@@ -141,7 +138,7 @@ The following hyperparameters were used during training:
 
  | Training Loss | Epoch | Step | Validation Loss |
  |:-------------:|:------:|:----:|:---------------:|
- | 2.3434 | 0.8903 | 1000 | 1.8668 |
+ | 1.9986 | 0.4452 | 1000 | 1.9381 |
 
 
  ### Framework versions
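
The removed `distributed_type: multi-GPU` and `num_devices: 2` entries, together with the drop from `total_train_batch_size: 8` to `4`, are consistent with the usual relation total train batch size = per-device batch size × gradient accumulation steps × number of devices. A minimal sketch of that arithmetic, using only the values shown in the diff:

```python
# Effective (total) train batch size = per-device batch size
# x gradient accumulation steps x number of devices.
def effective_batch_size(per_device: int, grad_accum: int, num_devices: int) -> int:
    return per_device * grad_accum * num_devices

# Removed multi-GPU configuration: 1 x 4 x 2 = 8
print(effective_batch_size(per_device=1, grad_accum=4, num_devices=2))  # 8

# New single-device configuration: 1 x 4 x 1 = 4
print(effective_batch_size(per_device=1, grad_accum=4, num_devices=1))  # 4
```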
adapter_config.json CHANGED
@@ -20,13 +20,13 @@
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
- "gate_proj",
- "down_proj",
- "o_proj",
  "k_proj",
- "up_proj",
  "q_proj",
- "v_proj"
+ "gate_proj",
+ "v_proj",
+ "up_proj",
+ "down_proj",
+ "o_proj"
  ],
  "task_type": "CAUSAL_LM",
  "use_dora": false,
adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2a8775195766ad999de849f644c69c735425569de5337cdde8e27bc8545cc22b
+ oid sha256:1835348e9409e9c023f2a8aebf3055cfc984b2554f4576183eab86c69f4ef916
  size 125375434
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:83103d7faa29829787db70f1dcb8853ffad46570fc3c5447a70eb6fb01bd18b5
+ oid sha256:5ec2b5c9910f19f9d31fb3ada3b2068564872af1a45733257482d529d6718c37
  size 125248064
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7a0d45884620ceef6085956828d6091f386b853e712570dbb107fbd8ba655102
+ oid sha256:4c85d9b31c5e60e07d0d797918d00e942e0788a5ca9744133f73c8b957eb78f2
  size 6776
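
The adapter_model.bin, adapter_model.safetensors, and training_args.bin changes are Git-LFS pointer updates for the retrained adapter weights and training arguments. A minimal sketch of loading a LoRA adapter like this on top of the base model with peft; the adapter path is a placeholder, not this repository's actual id:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "OpenBuddy/openbuddy-llama2-13b-v8.1-fp16"
adapter_path = "path/to/this-adapter-repo"  # placeholder: substitute the repo id or a local directory

tokenizer = AutoTokenizer.from_pretrained(base_id)
base_model = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype="auto")

# Attach the LoRA adapter weights (adapter_model.safetensors) to the base model.
model = PeftModel.from_pretrained(base_model, adapter_path)
```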