wangd12 committed
Commit 967a407 · verified · 1 Parent(s): 202b705

Add/update LoRA model: xbrl_extract_llama_3_1_8b_8bits_r8

Files changed (3)
  1. README.md +124 -0
  2. adapter_config.json +35 -0
  3. adapter_model.safetensors +3 -0
README.md ADDED
@@ -0,0 +1,124 @@
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ [<img src="https://raw.githubusercontent.com/axolotl-ai-cloud/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/axolotl-ai-cloud/axolotl)
+ <details><summary>See axolotl config</summary>
+
+ axolotl version: `0.9.1.post1`
+ ```yaml
+ base_model: meta-llama/Llama-3.1-8B-Instruct
+ model_type: LlamaForCausalLM
+ tokenizer_type: AutoTokenizer
+ gradient_accumulation_steps: 8
+ micro_batch_size: 1
+ num_epochs: 1
+ optimizer: adamw_bnb_8bit
+ lr_scheduler: cosine
+ learning_rate: 0.0001
+ load_in_8bit: true
+ load_in_4bit: false
+ adapter: lora
+ lora_model_dir: null
+ lora_r: 8
+ lora_alpha: 16
+ lora_dropout: 0.05
+ lora_target_modules:
+   - q_proj
+   - v_proj
+   - k_proj
+ datasets:
+   - path: /workspace/FinLoRA/data/train/xbrl_extract_train.jsonl
+     type:
+       system_prompt: ''
+       field_system: system
+       field_instruction: context
+       field_output: target
+       format: '[INST] {instruction} [/INST]'
+       no_input_format: '[INST] {instruction} [/INST]'
+ dataset_prepared_path: null
+ val_set_size: 0.02
+ output_dir: /workspace/FinLoRA/lora/axolotl-output/xbrl_extract_llama_3_1_8b_8bits_r8
+ peft_use_dora: false
+ peft_use_rslora: false
+ sequence_len: 4096
+ sample_packing: false
+ pad_to_sequence_len: false
+ wandb_project: finlora_models
+ wandb_entity: null
+ wandb_watch: gradients
+ wandb_name: xbrl_extract_llama_3_1_8b_8bits_r8
+ wandb_log_model: 'false'
+ bf16: auto
+ tf32: false
+ gradient_checkpointing: true
+ resume_from_checkpoint: null
+ logging_steps: 500
+ flash_attention: false
+ deepspeed: deepspeed_configs/zero1.json
+ warmup_steps: 10
+ evals_per_epoch: 4
+ saves_per_epoch: 1
+ weight_decay: 0.0
+ special_tokens:
+   pad_token: <|end_of_text|>
+ chat_template: llama3
+
+ ```
+
+ </details><br>
+
+ # workspace/FinLoRA/lora/axolotl-output/xbrl_extract_llama_3_1_8b_8bits_r8
+
+ This model is a fine-tuned version of [meta-llama/Llama-3.1-8B-Instruct](https://huggingface.co/meta-llama/Llama-3.1-8B-Instruct) on the /workspace/FinLoRA/data/train/xbrl_extract_train.jsonl dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.0025
+
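+ As a rough usage sketch (not part of the auto-generated card): the adapter can be loaded on top of the 8-bit base model with PEFT. The repository id and the prompt below are assumptions and should be adjusted to the actual published adapter and to the XBRL extraction prompt format used in training.
+
+ ```python
+ # Minimal inference sketch. The adapter repo id below is an assumption; adjust as needed.
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
+ from peft import PeftModel
+
+ base_id = "meta-llama/Llama-3.1-8B-Instruct"
+ adapter_id = "wangd12/xbrl_extract_llama_3_1_8b_8bits_r8"  # assumed repo id
+
+ tokenizer = AutoTokenizer.from_pretrained(base_id)
+ base = AutoModelForCausalLM.from_pretrained(
+     base_id,
+     quantization_config=BitsAndBytesConfig(load_in_8bit=True),  # matches load_in_8bit: true above
+     device_map="auto",
+ )
+ model = PeftModel.from_pretrained(base, adapter_id)
+ model.eval()
+
+ prompt = "..."  # an XBRL extraction instruction in the training format (not documented here)
+ inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+ with torch.no_grad():
+     out = model.generate(**inputs, max_new_tokens=64)
+ print(tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
+ ```
+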
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0001
+ - train_batch_size: 1
+ - eval_batch_size: 1
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 4
+ - gradient_accumulation_steps: 8
+ - total_train_batch_size: 32
+ - total_eval_batch_size: 4
+ - optimizer: AdamW (8-bit, bitsandbytes) with betas=(0.9, 0.999), epsilon=1e-08, and no additional optimizer arguments
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_steps: 10
+ - num_epochs: 1.0
+
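+ The total train batch size of 32 is the product of micro_batch_size (1) × gradient_accumulation_steps (8) × num_devices (4).
+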
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss |
+ |:-------------:|:------:|:----:|:---------------:|
+ | No log | 0.0038 | 1 | 1.6299 |
+ | No log | 0.2526 | 67 | 0.0075 |
+ | No log | 0.5052 | 134 | 0.0037 |
+ | No log | 0.7578 | 201 | 0.0025 |
+
+
+ ### Framework versions
+
+ - PEFT 0.15.2
+ - Transformers 4.51.3
+ - Pytorch 2.8.0.dev20250319+cu128
+ - Datasets 3.5.1
+ - Tokenizers 0.21.1
adapter_config.json ADDED
@@ -0,0 +1,35 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "meta-llama/Llama-3.1-8B-Instruct",
+   "bias": "none",
+   "corda_config": null,
+   "eva_config": null,
+   "exclude_modules": null,
+   "fan_in_fan_out": null,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_bias": false,
+   "lora_dropout": 0.05,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "q_proj",
+     "v_proj",
+     "k_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "trainable_token_indices": null,
+   "use_dora": false,
+   "use_rslora": false
+ }
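
For reference, the adapter configuration above corresponds roughly to the following PEFT `LoraConfig` (a sketch for illustration, setting only the fields that differ from their defaults; it is not a file produced by the trainer):

```python
# Illustrative reconstruction of adapter_config.json as a peft.LoraConfig (sketch only).
from peft import LoraConfig

lora_config = LoraConfig(
    r=8,
    lora_alpha=16,
    lora_dropout=0.05,
    target_modules=["q_proj", "v_proj", "k_proj"],
    bias="none",
    task_type="CAUSAL_LM",
)
```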
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cb3c3622e2c20b903a37a51149121196cc2a6ae83e63ca97d859d3389bbc5025
+ size 9462656
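
The 9,462,656-byte adapter is consistent with this configuration: assuming Llama-3.1-8B's 32 decoder layers, 4096 hidden size, and 1024-dimensional K/V projections (grouped-query attention), rank-8 LoRA on q_proj, k_proj, and v_proj gives about 4.7M parameters, roughly 9.4 MB at 2 bytes per parameter plus safetensors metadata:

```python
# Back-of-the-envelope size check using assumed Llama-3.1-8B shapes.
hidden, kv_dim, layers, r = 4096, 1024, 32, 8

q = r * (hidden + hidden)   # q_proj: A is r x 4096, B is 4096 x r
kv = r * (hidden + kv_dim)  # k_proj / v_proj: A is r x 4096, B is 1024 x r

params = layers * (q + 2 * kv)   # 4,718,592 LoRA parameters
print(params, params * 2)        # ~9,437,184 bytes at 2 bytes/param, close to the 9,462,656-byte file
```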