kallaballa committed
Commit • 64e04bb • 1 Parent(s): c8751e5
config for llm-tuner
Files changed:
- README.md +27 -0
- adapter-config.json +20 -0
- adapter_config.json +21 -0
- adapter_model.bin +3 -0
- config.json +6 -0
- fusion-config.json +8 -0
- llm-tuner-config.json +21 -0
README.md
CHANGED
@@ -1,3 +1,30 @@
 ---
 license: apache-2.0
+language:
+- en
+pipeline_tag: text2text-generation
+tags:
+- alpaca
+- llama
+- chat
+- gpt4
 ---
+
+This repository comes with a LoRA checkpoint that turns LLaMA into a chatbot-like language model. The checkpoint is the output of an instruction-following fine-tuning run with the following settings on an 8xA100 (40G) DGX system.
+
+- Training script: borrowed from the official [Alpaca-LoRA](https://github.com/tloen/alpaca-lora) implementation
+- Training command:
+```shell
+python finetune.py \
+    --base_model='decapoda-research/llama-7b-hf' \
+    --data_path='alpaca_data_gpt4.json' \
+    --num_epochs=10 \
+    --cutoff_len=512 \
+    --group_by_length \
+    --output_dir='./gpt4-alpaca-lora-7b' \
+    --lora_target_modules='[q_proj,k_proj,v_proj,o_proj]' \
+    --lora_r=16 \
+    --batch_size=... \
+    --micro_batch_size=...
+```
+
+You can see how the training went in the W&B report [here](https://wandb.ai/chansung18/gpt4_alpaca_lora/runs/nl1xi6ru?workspace=user-chansung18).
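For reference, a minimal sketch of how a LoRA checkpoint like the one described above is typically loaded for inference with the Hugging Face `peft` library. The base model id and adapter repo id come from the configs in this commit; the prompt format and generation settings are illustrative assumptions, not part of the commit.

```python
# Minimal sketch: load the base LLaMA model and attach the LoRA adapter with peft.
from transformers import LlamaForCausalLM, LlamaTokenizer
from peft import PeftModel

base_id = "decapoda-research/llama-7b-hf"    # base model from adapter_config.json
adapter_id = "chansung/gpt4-alpaca-lora-7b"  # adapter repo referenced in config.json

tokenizer = LlamaTokenizer.from_pretrained(base_id)
model = LlamaForCausalLM.from_pretrained(base_id, device_map="auto")
model = PeftModel.from_pretrained(model, adapter_id)  # injects the LoRA weights
model.eval()

# Alpaca-style prompt (assumption about the expected format)
prompt = "### Instruction:\nExplain what LoRA is.\n\n### Response:\n"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```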
adapter-config.json
ADDED
@@ -0,0 +1,20 @@
+{
+    "base_model_name_or_path": "decapoda-research/llama-7b-hf",
+    "bias": "none",
+    "enable_lora": null,
+    "fan_in_fan_out": false,
+    "inference_mode": true,
+    "init_lora_weights": true,
+    "lora_alpha": 16,
+    "lora_dropout": 0.05,
+    "merge_weights": false,
+    "modules_to_save": null,
+    "peft_type": "LORA",
+    "r": 16,
+    "target_modules": [
+        "q_proj",
+        "k_proj",
+        "v_proj",
+        "o_proj"
+    ]
+}
adapter_config.json
ADDED
@@ -0,0 +1,21 @@
+{
+    "base_model_name_or_path": "decapoda-research/llama-7b-hf",
+    "bias": "none",
+    "enable_lora": null,
+    "fan_in_fan_out": false,
+    "inference_mode": true,
+    "init_lora_weights": true,
+    "lora_alpha": 16,
+    "lora_dropout": 0.05,
+    "merge_weights": false,
+    "modules_to_save": null,
+    "peft_type": "LORA",
+    "r": 16,
+    "target_modules": [
+        "q_proj",
+        "k_proj",
+        "v_proj",
+        "o_proj"
+    ],
+    "task_type": "CAUSAL_LM"
+}
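The two adapter configs above differ only in that adapter_config.json also carries "task_type": "CAUSAL_LM". A rough sketch of the equivalent `peft.LoraConfig` follows; keys such as `enable_lora` and `merge_weights` come from older peft releases and are omitted here, so treat this as an approximation rather than the exact object peft reconstructs from the file.

```python
# Sketch: a LoraConfig that roughly corresponds to adapter_config.json.
from peft import LoraConfig, TaskType

lora_config = LoraConfig(
    r=16,                          # "r": 16
    lora_alpha=16,                 # "lora_alpha": 16
    lora_dropout=0.05,             # "lora_dropout": 0.05
    bias="none",                   # "bias": "none"
    fan_in_fan_out=False,          # "fan_in_fan_out": false
    init_lora_weights=True,        # "init_lora_weights": true
    inference_mode=True,           # "inference_mode": true
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
    task_type=TaskType.CAUSAL_LM,  # "task_type": "CAUSAL_LM"
)
```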
adapter_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3117d8d2972e4733004f36da6a6cbce3238ae9bb5001da2d253b2bb00a103987
+size 67201357
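adapter_model.bin is stored as a Git LFS pointer, so the actual weights resolve to the object whose SHA-256 and size are listed above. A small sketch for checking that a locally downloaded copy matches the pointer; the local path is an assumption.

```python
# Sketch: verify a downloaded adapter_model.bin against the LFS pointer above.
import hashlib
from pathlib import Path

EXPECTED_SHA256 = "3117d8d2972e4733004f36da6a6cbce3238ae9bb5001da2d253b2bb00a103987"
EXPECTED_SIZE = 67201357  # bytes, from the pointer file

path = Path("adapter_model.bin")  # hypothetical local copy
digest = hashlib.sha256(path.read_bytes()).hexdigest()

assert path.stat().st_size == EXPECTED_SIZE, "size mismatch"
assert digest == EXPECTED_SHA256, "checksum mismatch"
print("adapter_model.bin matches the LFS pointer")
```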
config.json
ADDED
@@ -0,0 +1,6 @@
+{
+    "_name_or_path": "chansung/gpt4-alpaca-lora-7b",
+    "adapter_type": "lora",
+    "adapter_fusion": "dynamic",
+    "task_type": "text2text-generation"
+}
fusion-config.json
ADDED
@@ -0,0 +1,8 @@
+{
+    "fusion": "dynamic",
+    "fusion_params": {
+        "temperature": 1.0,
+        "trainable": true,
+        "initial_weights": null
+    }
+}
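config.json and fusion-config.json declare a "dynamic" adapter fusion with a temperature of 1.0 and trainable weights, but neither file spells out the mixing rule. One common reading is a temperature-scaled softmax over per-adapter scores; the sketch below illustrates that interpretation only and is not taken from llm-tuner or peft.

```python
# Sketch: one plausible reading of "dynamic" fusion with a temperature --
# mix several adapter outputs with (optionally learnable) softmax weights.
import torch
import torch.nn as nn


class DynamicFusion(nn.Module):
    def __init__(self, num_adapters: int, temperature: float = 1.0,
                 trainable: bool = True, initial_weights=None):
        super().__init__()
        init = torch.zeros(num_adapters) if initial_weights is None else torch.tensor(initial_weights)
        self.logits = nn.Parameter(init, requires_grad=trainable)
        self.temperature = temperature

    def forward(self, adapter_outputs: list[torch.Tensor]) -> torch.Tensor:
        # adapter_outputs: one tensor per adapter, all with identical shape
        weights = torch.softmax(self.logits / self.temperature, dim=0)
        stacked = torch.stack(adapter_outputs, dim=0)
        return (weights.view(-1, *([1] * (stacked.dim() - 1))) * stacked).sum(dim=0)
```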
llm-tuner-config.json
ADDED
@@ -0,0 +1,21 @@
+{
+    "data_path": "path/to/your/data/file",
+    "num_epochs": 3,
+    "cutoff_len": 512,
+    "group_by_length": true,
+    "output_dir": "path/to/your/output/directory",
+    "batch_size": 8,
+    "micro_batch_size": 1,
+    "lr": 5e-5,
+    "warmup_steps": 100,
+    "weight_decay": 0.01,
+    "adam_epsilon": 1e-8,
+    "max_grad_norm": 1.0,
+    "logging_steps": 50,
+    "save_steps": 500,
+    "eval_steps": 500,
+    "overwrite_output_dir": true,
+    "do_train": true,
+    "do_eval": true,
+    "do_predict": true
+}
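Most keys in llm-tuner-config.json mirror standard Hugging Face training options, so one plausible way to consume the file is to load it and map the overlapping fields onto `transformers.TrainingArguments`. The mapping below is a sketch under that assumption, not the loader llm-tuner actually uses; keys like `data_path` and `cutoff_len` would be handled by the training script itself, and deriving gradient accumulation from `batch_size / micro_batch_size` follows the Alpaca-LoRA convention rather than anything stated in this commit.

```python
# Sketch: read llm-tuner-config.json and map the shared fields onto TrainingArguments.
import json
from transformers import TrainingArguments

with open("llm-tuner-config.json") as f:
    cfg = json.load(f)

training_args = TrainingArguments(
    output_dir=cfg["output_dir"],
    overwrite_output_dir=cfg["overwrite_output_dir"],
    num_train_epochs=cfg["num_epochs"],
    per_device_train_batch_size=cfg["micro_batch_size"],
    gradient_accumulation_steps=cfg["batch_size"] // cfg["micro_batch_size"],
    learning_rate=cfg["lr"],
    warmup_steps=cfg["warmup_steps"],
    weight_decay=cfg["weight_decay"],
    adam_epsilon=cfg["adam_epsilon"],
    max_grad_norm=cfg["max_grad_norm"],
    logging_steps=cfg["logging_steps"],
    save_steps=cfg["save_steps"],
    eval_steps=cfg["eval_steps"],
    group_by_length=cfg["group_by_length"],
    do_train=cfg["do_train"],
    do_eval=cfg["do_eval"],
    do_predict=cfg["do_predict"],
)
```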