Farouk committed
Commit 0eb810b · 1 Parent(s): 4cae84f

Training in progress, step 6000

adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4e479a2816f8579f7d795f26c6e76d4c6617567fcbef52ee3f437547a461fa1c
+oid sha256:fcb8e0e5b9b35a1744defdf16c1328ef8088022cb7d068af80b5615e4bcbbb88
 size 871609293
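The adapter weights live in Git LFS: the repository itself tracks only a small pointer file holding the object's sha256 oid and byte size, while the 871 MB payload sits in LFS storage. As a quick integrity check after downloading, the file can be hashed and compared against the pointer's oid; a minimal sketch (the file path is illustrative):

```python
import hashlib

def matches_lfs_pointer(path: str, expected_oid: str) -> bool:
    """Hash a downloaded file and compare it to the sha256 oid
    recorded in its Git LFS pointer."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# oid taken from the pointer above (step-6000 adapter weights)
print(matches_lfs_pointer(
    "adapter_model.bin",
    "fcb8e0e5b9b35a1744defdf16c1328ef8088022cb7d068af80b5615e4bcbbb88",
))
```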
checkpoint-4200/adapter_model/adapter_model/README.md CHANGED
@@ -103,6 +103,17 @@ The following `bitsandbytes` quantization config was used during training:
 - bnb_4bit_use_double_quant: True
 - bnb_4bit_compute_dtype: bfloat16
 
+The following `bitsandbytes` quantization config was used during training:
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+
 The following `bitsandbytes` quantization config was used during training:
 - load_in_8bit: False
 - load_in_4bit: True
@@ -124,5 +135,6 @@ The following `bitsandbytes` quantization config was used during training:
 - PEFT 0.4.0
 - PEFT 0.4.0
 - PEFT 0.4.0
+- PEFT 0.4.0
 
 - PEFT 0.4.0
checkpoint-4200/adapter_model/adapter_model/adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2a81912e202e5ef1e0abe63acfe2600dcb02f3768a37b847ef40a41ebbb64f69
+oid sha256:4e479a2816f8579f7d795f26c6e76d4c6617567fcbef52ee3f437547a461fa1c
 size 871609293
checkpoint-6000/README.md ADDED
@@ -0,0 +1,20 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+### Framework versions
+
+
+- PEFT 0.4.0
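This auto-generated README records the 4-bit NF4 quantization used during training (a QLoRA-style setup). A config with these fields would typically be built via `transformers.BitsAndBytesConfig`; a minimal sketch, noting that the int8 fields listed in the README are library defaults and need not be passed explicitly:

```python
import torch
from transformers import BitsAndBytesConfig

# Mirrors the config recorded in the README: 4-bit NF4 weights,
# nested (double) quantization, bfloat16 compute for matmuls.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)
```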
checkpoint-6000/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+{
+  "auto_mapping": null,
+  "base_model_name_or_path": "codellama/CodeLlama-34b-Python-hf",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 16.0,
+  "lora_dropout": 0.1,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 64,
+  "revision": null,
+  "target_modules": [
+    "o_proj",
+    "k_proj",
+    "down_proj",
+    "gate_proj",
+    "up_proj",
+    "v_proj",
+    "q_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
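The adapter config describes a LoRA adapter (rank 64, alpha 16, dropout 0.1) over all attention and MLP projections of CodeLlama-34b-Python. The equivalent `peft` objects would look roughly like this; loading the checkpoint as shown assumes the files above sit in a local `checkpoint-6000/` directory and reuses the `bnb_config` sketch from earlier:

```python
from peft import LoraConfig, PeftModel
from transformers import AutoModelForCausalLM

# Equivalent of the adapter_config.json above.
lora_config = LoraConfig(
    r=64,
    lora_alpha=16.0,
    lora_dropout=0.1,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=["o_proj", "k_proj", "down_proj", "gate_proj",
                    "up_proj", "v_proj", "q_proj"],
)

# For inference with this checkpoint: load the quantized base model,
# then attach the saved adapter weights on top.
base = AutoModelForCausalLM.from_pretrained(
    "codellama/CodeLlama-34b-Python-hf",
    quantization_config=bnb_config,  # from the sketch above
    device_map="auto",
)
model = PeftModel.from_pretrained(base, "checkpoint-6000")
```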
checkpoint-6000/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fcb8e0e5b9b35a1744defdf16c1328ef8088022cb7d068af80b5615e4bcbbb88
+size 871609293
checkpoint-6000/added_tokens.json ADDED
@@ -0,0 +1,3 @@
+{
+  "[PAD]": 32000
+}
checkpoint-6000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b32b3268d3631a07608172e664cf36645f9449beb12fe27843b21588fb9b26fd
+size 873873439
checkpoint-6000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d8ed64404762cbf088ea54979b215b846a7cd6fd488280e0f7bd52ca15e13fea
+size 14511
checkpoint-6000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:375ff21cc0ac3c3d2481c3e10491bf0755513bd8242939e41e1aee1a2d5b88f8
+size 627
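optimizer.pt, scheduler.pt, and rng_state.pth capture the optimizer moments, the LR-schedule position, and the random-number-generator state, which is what lets a run continue from step 6000 rather than restart. With the `transformers` Trainer this is a one-liner; a sketch, assuming a `trainer` already configured with the same model and training arguments as the original run:

```python
# Resume exactly where step 6000 left off: the Trainer restores the
# optimizer, LR scheduler, RNG state, and step counter from the checkpoint.
trainer.train(resume_from_checkpoint="checkpoint-6000")
```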
checkpoint-6000/special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+{
+  "bos_token": "<s>",
+  "eos_token": "</s>",
+  "pad_token": "[PAD]",
+  "unk_token": "<unk>"
+}
checkpoint-6000/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
checkpoint-6000/tokenizer_config.json ADDED
@@ -0,0 +1,35 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "bos_token": {
+    "__type": "AddedToken",
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "clean_up_tokenization_spaces": false,
+  "eos_token": {
+    "__type": "AddedToken",
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "legacy": null,
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": null,
+  "padding_side": "right",
+  "sp_model_kwargs": {},
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": {
+    "__type": "AddedToken",
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
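Together, tokenizer.model, added_tokens.json, special_tokens_map.json, and this config define the CodeLlama tokenizer with a `[PAD]` token appended at id 32000 (just past the base vocabulary), padding on the right as is usual for causal-LM fine-tuning. Loading the checkpoint directory should pick all of this up without any manual token additions; a minimal sketch:

```python
from transformers import AutoTokenizer

# added_tokens.json / special_tokens_map.json supply the [PAD] token,
# so no add_special_tokens call should be needed here.
tokenizer = AutoTokenizer.from_pretrained("checkpoint-6000")
print(tokenizer.pad_token, tokenizer.pad_token_id)  # expected: [PAD] 32000
print(tokenizer.padding_side)                       # expected: right
```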
checkpoint-6000/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-6000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fe7d931ebfbcece1009124b9eae98d1a465edd703240c0655ee9bb17db395973
+size 6011