Added output adapter files from fine-tuning
#1 by sriram-g · opened
- checkpoint-672/README.md +34 -0
- checkpoint-672/adapter_config.json +21 -0
- checkpoint-672/adapter_model.bin +3 -0
- checkpoint-672/optimizer.pt +3 -0
- checkpoint-672/rng_state.pth +3 -0
- checkpoint-672/scheduler.pt +3 -0
- checkpoint-672/special_tokens_map.json +24 -0
- checkpoint-672/tokenizer.json +0 -0
- checkpoint-672/tokenizer.model +3 -0
- checkpoint-672/tokenizer_config.json +32 -0
- checkpoint-672/trainer_state.json +91 -0
- checkpoint-672/training_args.bin +3 -0
checkpoint-672/README.md
ADDED
@@ -0,0 +1,34 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: False
+- bnb_4bit_compute_dtype: float16
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: False
+- bnb_4bit_compute_dtype: float16
+### Framework versions
+
+- PEFT 0.6.0.dev0
+
+- PEFT 0.6.0.dev0
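For reference, a minimal sketch of how the quantization settings recorded in this README map onto the `transformers` API; the base-model name is taken from `adapter_config.json` below, and the snippet is an illustration, not part of this PR:

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Mirrors the bitsandbytes settings recorded in checkpoint-672/README.md
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=False,
    bnb_4bit_compute_dtype=torch.float16,
)

# Base model name comes from adapter_config.json in this checkpoint
base_model = AutoModelForCausalLM.from_pretrained(
    "TinyPixel/Llama-2-7B-bf16-sharded",
    quantization_config=bnb_config,
    device_map="auto",
)
```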
checkpoint-672/adapter_config.json
ADDED
@@ -0,0 +1,21 @@
+{
+  "auto_mapping": null,
+  "base_model_name_or_path": "TinyPixel/Llama-2-7B-bf16-sharded",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 32,
+  "lora_dropout": 0.05,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 16,
+  "revision": null,
+  "target_modules": [
+    "q_proj",
+    "v_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+]
+}
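A minimal sketch of attaching this LoRA adapter (r=16, alpha=32, targeting `q_proj`/`v_proj`) to the quantized base model with `peft`; it assumes the checkpoint directory has been downloaded locally and that `base_model` is the 4-bit model from the sketch above:

```python
from peft import PeftModel

# PeftModel reads adapter_config.json and adapter_model.bin
# from the checkpoint directory.
model = PeftModel.from_pretrained(base_model, "checkpoint-672")
model.eval()
```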
checkpoint-672/adapter_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b96108b9b122965ce48ef33916a24aee0f5ad6f5e7148489fdc758b1952870d5
+size 33600461
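The `.bin`/`.pt`/`.pth` entries in this diff are Git LFS pointer files (version line, sha256 oid, byte size), not the binary payloads themselves. A hypothetical sketch of verifying a downloaded blob against its pointer fields:

```python
import hashlib
from pathlib import Path

def verify_lfs_blob(blob_path: str, expected_oid: str, expected_size: int) -> bool:
    """Check a downloaded file against the oid/size fields of its LFS pointer."""
    data = Path(blob_path).read_bytes()
    return (len(data) == expected_size
            and hashlib.sha256(data).hexdigest() == expected_oid)

# Values copied from the adapter_model.bin pointer above
print(verify_lfs_blob(
    "checkpoint-672/adapter_model.bin",
    "b96108b9b122965ce48ef33916a24aee0f5ad6f5e7148489fdc758b1952870d5",
    33600461,
))
```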
checkpoint-672/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e30215ee1b9c2d172d7a5732b94eab2adeec75a9f471ddb287eba6e47bba2f15
+size 67216581
checkpoint-672/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c7ba545e04e51e43cba6e578fb17332802d38a7cc94b401982295e96d50ff730
+size 14511
checkpoint-672/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:40a35f013979a507c07573a899158aa07173000d9fe1743b865c9c20c330a96e
+size 627
checkpoint-672/special_tokens_map.json
ADDED
@@ -0,0 +1,24 @@
+{
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "</s>",
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
checkpoint-672/tokenizer.json
ADDED
The diff for this file is too large to render.
checkpoint-672/tokenizer.model
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
checkpoint-672/tokenizer_config.json
ADDED
@@ -0,0 +1,32 @@
+{
+  "bos_token": {
+    "__type": "AddedToken",
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "clean_up_tokenization_spaces": false,
+  "eos_token": {
+    "__type": "AddedToken",
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "model_max_length": 340,
+  "pad_token": null,
+  "sp_model_kwargs": {},
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": {
+    "__type": "AddedToken",
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "use_default_system_prompt": true
+}
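A short sketch of loading the tokenizer from this checkpoint. Note that `tokenizer_config.json` leaves `pad_token` null while `special_tokens_map.json` maps it to `</s>`, so padding should fall back to the EOS token:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("checkpoint-672")
print(tok.pad_token, tok.eos_token)  # both expected to resolve to </s>
print(tok.model_max_length)          # 340, as set in tokenizer_config.json
```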
checkpoint-672/trainer_state.json
ADDED
@@ -0,0 +1,91 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 7.319391634980988,
+  "eval_steps": 500,
+  "global_step": 672,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.2,
+      "learning_rate": 4.928909952606635e-05,
+      "loss": 1.7844,
+      "step": 52
+    },
+    {
+      "epoch": 1.08,
+      "learning_rate": 9.85781990521327e-05,
+      "loss": 1.0308,
+      "step": 104
+    },
+    {
+      "epoch": 1.27,
+      "learning_rate": 0.00014786729857819904,
+      "loss": 0.832,
+      "step": 156
+    },
+    {
+      "epoch": 2.15,
+      "learning_rate": 0.0001971563981042654,
+      "loss": 0.7868,
+      "step": 208
+    },
+    {
+      "epoch": 3.03,
+      "learning_rate": 0.0001948230322239831,
+      "loss": 0.7672,
+      "step": 260
+    },
+    {
+      "epoch": 3.23,
+      "learning_rate": 0.00018932910723718966,
+      "loss": 0.7315,
+      "step": 312
+    },
+    {
+      "epoch": 4.11,
+      "learning_rate": 0.0001838351822503962,
+      "loss": 0.6889,
+      "step": 364
+    },
+    {
+      "epoch": 4.3,
+      "learning_rate": 0.00017834125726360275,
+      "loss": 0.6646,
+      "step": 416
+    },
+    {
+      "epoch": 5.18,
+      "learning_rate": 0.00017284733227680932,
+      "loss": 0.6056,
+      "step": 468
+    },
+    {
+      "epoch": 6.06,
+      "learning_rate": 0.00016735340729001586,
+      "loss": 0.5724,
+      "step": 520
+    },
+    {
+      "epoch": 6.26,
+      "learning_rate": 0.0001618594823032224,
+      "loss": 0.5251,
+      "step": 572
+    },
+    {
+      "epoch": 7.14,
+      "learning_rate": 0.00015636555731642895,
+      "loss": 0.4665,
+      "step": 624
+    }
+  ],
+  "logging_steps": 52,
+  "max_steps": 2104,
+  "num_train_epochs": 8,
+  "save_steps": 500,
+  "total_flos": 3.682319674638336e+16,
+  "trial_name": null,
+  "trial_params": null
+}
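This checkpoint was saved at global step 672 of 2104, with the logged training loss falling from 1.78 to 0.47. A minimal sketch of pulling the loss curve out of `trainer_state.json`:

```python
import json

with open("checkpoint-672/trainer_state.json") as f:
    state = json.load(f)

# Each log_history entry holds epoch, learning_rate, loss, and step
for entry in state["log_history"]:
    print(f'step {entry["step"]:4d}  loss {entry["loss"]:.4f}  '
          f'lr {entry["learning_rate"]:.2e}')
```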
checkpoint-672/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ccd040e94deeb93fb5664fab182f4eecb9002a7a1647a63d23aadfd2405bfd72
+size 4091
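`training_args.bin` is a pickled `TrainingArguments` object. A hedged sketch of inspecting it, assuming a compatible `transformers` install (recent PyTorch defaults `torch.load` to `weights_only=True`, so the flag is set explicitly):

```python
import torch

# Unpickling runs code from the transformers classes it references;
# only do this for checkpoints you trust.
args = torch.load("checkpoint-672/training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs)
```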