Upload 7 files

- README.md +16 -13
- adapter_config.json +4 -4
- adapter_model.bin +1 -1
- tokenizer_config.json +0 -1
README.md
CHANGED

```diff
@@ -29,8 +29,17 @@ model_type: MistralForCausalLM
 tokenizer_type: LlamaTokenizer
 is_mistral_derived_model: true
 
+# git clone https://github.com/OpenAccess-AI-Collective/axolotl
+# cd axolotl
+
+# pip3 install packaging
+# pip3 install -e '.[flash-attn,deepspeed]'
+
 # accelerate launch -m axolotl.cli.train ./llama_7b_config.yaml
 
+# accelerate launch -m axolotl.cli.inference ./llama_7b_config.yaml \
+#     --lora_model_dir="dohonba/mistral_7b_fingpt"
+
 load_in_8bit: true
 load_in_4bit: false
 strict: false
```
```diff
@@ -49,9 +58,6 @@ dataset_prepared_path:
 val_set_size: 0.05
 output_dir: ./lora-out
 
-chat_template: chatml
-default_system_message: You are a helpful assistant, specialising in financial text sentiment and emotional analysis. # Currently only supports chatml.
-
 sequence_len: 512
 sample_packing: true
 pad_to_sequence_len: true
```
```diff
@@ -71,7 +77,7 @@ wandb_name:
 wandb_log_model:
 
 gradient_accumulation_steps: 1
-micro_batch_size:
+micro_batch_size: 14
 # max_steps: 1000
 num_epochs: 2
 optimizer: adamw_bnb_8bit
```
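For context on the micro_batch_size change: in axolotl, micro_batch_size is the per-device batch, so the effective global batch is micro_batch_size × gradient_accumulation_steps × number of devices. A quick sanity check of what this commit's values imply, assuming the single-device case (the diff does not state a world size):

```python
# Rough sanity check of the effective batch size implied by this commit.
micro_batch_size = 14            # set in this commit
gradient_accumulation_steps = 1  # unchanged in the config
num_devices = 1                  # assumption, not stated in the diff

effective_batch_size = micro_batch_size * gradient_accumulation_steps * num_devices
print(effective_batch_size)  # 14 -- matches the train_batch_size: 14 reported below
```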
```diff
@@ -94,7 +100,7 @@ flash_attention: true
 s2_attention:
 
 warmup_steps: 50
-evals_per_epoch:
+evals_per_epoch: 0
 eval_table_size:
 eval_table_max_new_tokens: 128
 saves_per_epoch: 1
```
```diff
@@ -115,7 +121,7 @@ special_tokens:
 
 This model is a fine-tuned version of [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) on the None dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.
+- Loss: 0.0917
 
 ## Model description
 
```
```diff
@@ -135,8 +141,8 @@ More information needed
 
 The following hyperparameters were used during training:
 - learning_rate: 0.0002
-- train_batch_size:
-- eval_batch_size:
+- train_batch_size: 14
+- eval_batch_size: 14
 - seed: 42
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: cosine
```
```diff
@@ -147,11 +153,8 @@ The following hyperparameters were used during training:
 
 | Training Loss | Epoch | Step | Validation Loss |
 |:-------------:|:-----:|:----:|:---------------:|
-|
-| 0.
-| 0.0956 | 1.0 | 494 | 0.0998 |
-| 0.0705 | 1.48 | 741 | 0.0990 |
-| 0.0858 | 1.98 | 988 | 0.0942 |
+| 0.08 | 1.02 | 566 | 0.0986 |
+| 0.0919 | 1.98 | 1110 | 0.0917 |
 
 
 ### Framework versions
```
adapter_config.json
CHANGED

```diff
@@ -19,13 +19,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
+    "v_proj",
     "up_proj",
-    "q_proj",
     "down_proj",
-    "v_proj",
-    "o_proj",
     "gate_proj",
-    "k_proj"
+    "k_proj",
+    "o_proj",
+    "q_proj"
   ],
   "task_type": "CAUSAL_LM"
 }
```
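The target_modules change is a pure reordering: the same seven projection matrices (q/k/v/o plus the three MLP projections) are adapted before and after, so the adapter's coverage is unchanged. A minimal sketch, not the author's published code, of attaching this adapter with peft; the repo id is taken from the inference command in the README diff:

```python
# Sketch: attach the LoRA adapter in this repo to the Mistral-7B base model.
# Requires transformers, peft, and (for 8-bit loading) bitsandbytes.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_model = AutoModelForCausalLM.from_pretrained(
    "mistralai/Mistral-7B-v0.1",
    load_in_8bit=True,   # mirrors load_in_8bit: true in the training config
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")

# "dohonba/mistral_7b_fingpt" is the --lora_model_dir from the README's
# inference command; peft resolves it as a Hugging Face Hub repo id.
model = PeftModel.from_pretrained(base_model, "dohonba/mistral_7b_fingpt")
model.eval()
```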
adapter_model.bin
CHANGED

```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:ed44fe38ab406e5b8a358047c9754533cfe79a547f5ae7d159b73dc5b5ba2319
 size 335705741
```
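adapter_model.bin is stored through Git LFS, so the tracked file is only a pointer; the oid line is the SHA-256 of the real payload. A quick way to confirm a downloaded copy matches the pointer in this commit:

```python
# Verify a downloaded adapter_model.bin against the LFS pointer's sha256 oid.
import hashlib

EXPECTED = "ed44fe38ab406e5b8a358047c9754533cfe79a547f5ae7d159b73dc5b5ba2319"

digest = hashlib.sha256()
with open("adapter_model.bin", "rb") as f:           # ~335 MB, so hash in chunks
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

assert digest.hexdigest() == EXPECTED, "file does not match the LFS pointer"
```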
tokenizer_config.json
CHANGED

```diff
@@ -29,7 +29,6 @@
   },
   "additional_special_tokens": [],
   "bos_token": "<s>",
-  "chat_template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = 'You are a helpful assistant, specialising in financial text sentiment and emotional analysis.' %}{% endif %}{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in loop_messages %}{% if loop.index0 == 0 %}{{'<|im_start|>system\n' + system_message + '<|im_end|>\n'}}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
   "clean_up_tokenization_spaces": false,
   "eos_token": "</s>",
   "legacy": true,
```
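With chat_template gone from tokenizer_config.json (and chat_template: chatml / default_system_message gone from the axolotl config in the README diff), callers now have to supply the ChatML formatting themselves. A minimal sketch of doing that at inference time, reusing the exact Jinja string this commit removed; loading the tokenizer from this repo's id (taken from the README's inference command) is an assumption:

```python
# Sketch: re-apply the removed ChatML template at inference time. The template
# string below is the Jinja dropped from tokenizer_config.json above; it also
# restores the removed default system message when none is passed.
from transformers import AutoTokenizer

CHATML_TEMPLATE = "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = 'You are a helpful assistant, specialising in financial text sentiment and emotional analysis.' %}{% endif %}{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in loop_messages %}{% if loop.index0 == 0 %}{{'<|im_start|>system\n' + system_message + '<|im_end|>\n'}}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"

tokenizer = AutoTokenizer.from_pretrained("dohonba/mistral_7b_fingpt")
messages = [
    {"role": "user", "content": "Classify the sentiment: 'Shares fell 12% after the earnings miss.'"},
]
prompt = tokenizer.apply_chat_template(
    messages, chat_template=CHATML_TEMPLATE, tokenize=False, add_generation_prompt=True
)
print(prompt)  # ChatML-formatted string, ready to pass to the model
```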