SystemAdmin123 committed · Commit ed5b375 · verified · 1 Parent(s): 7cd62b8

Training in progress, step 100

Files changed (3):
  1. axolotl_config.yaml +13 -7
  2. model.safetensors +1 -1
  3. training_args.bin +2 -2
axolotl_config.yaml CHANGED
@@ -1,5 +1,5 @@
 base_model: Maykeye/TinyLLama-v0
-batch_size: 32
+batch_size: 128
 bf16: true
 chat_template: tokenizer_default_fallback_alpaca
 datasets:
@@ -13,29 +13,35 @@ datasets:
   no_input_format: '{instruction}'
   system_format: '{system}'
   system_prompt: ''
-eval_steps: 20
+device_map: auto
+eval_sample_packing: false
+eval_steps: 200
 flash_attention: true
-gpu_memory_limit: 80GiB
+gradient_checkpointing: true
 group_by_length: true
 hub_model_id: SystemAdmin123/TinyLLama-v0
 hub_strategy: checkpoint
 learning_rate: 0.0002
 logging_steps: 10
 lr_scheduler: cosine
-max_steps: 2500
-micro_batch_size: 2
+max_steps: 10000
+micro_batch_size: 32
 model_type: AutoModelForCausalLM
 num_epochs: 100
 optimizer: adamw_bnb_8bit
-output_dir: /root/.sn56/axolotl/outputs/TinyLLama-v0
+output_dir: /root/.sn56/axolotl/tmp/TinyLLama-v0
 pad_to_sequence_len: true
 resize_token_embeddings_to_32x: false
-save_steps: 40
+sample_packing: true
+save_steps: 200
 save_total_limit: 1
 sequence_len: 2048
 special_tokens:
   pad_token: </s>
 tokenizer_type: LlamaTokenizerFast
+torch_dtype: bf16
+training_args_kwargs:
+  hub_private_repo: true
 trust_remote_code: true
 val_set_size: 0.1
 wandb_entity: ''
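For context on the batch-size change: as I read axolotl's config conventions, batch_size is the total effective batch and micro_batch_size is the per-device batch, with gradient accumulation derived from their ratio. A minimal sketch of that arithmetic, using the new values from this commit; the single-GPU assumption is mine, since the GPU count is not recorded here:

```python
# Sketch (not part of the commit): effective-batch arithmetic implied by the new config.
batch_size = 128        # total effective batch size (new value in this commit)
micro_batch_size = 32   # per-device batch size (new value in this commit)
num_gpus = 1            # assumption: single-GPU run; not recorded in the config

# The total batch must divide evenly across devices and accumulation steps.
assert batch_size % (micro_batch_size * num_gpus) == 0, "batch_size must divide evenly"
gradient_accumulation_steps = batch_size // (micro_batch_size * num_gpus)
print(gradient_accumulation_steps)  # -> 4
```

The commit also turns on sample_packing, which packs multiple short examples into each 2048-token window for throughput, while eval_sample_packing: false keeps evaluation batches unpacked.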
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ad456f579311a0f5eb96bbef4cbf70a4e5eeec10c557f40792d944631dd9d796
+oid sha256:319278c870d9a5ea4178fca00222c5beeba5f93cdf8be6bc64256c87594b8647
 size 9250704
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:10220c4137ea48c69f7a997accbc718211651f88c44fd5064aaf86fc05beb658
-size 6712
+oid sha256:49d83a9a919016b89c2e22a0a9e1bea7cd88625f15ca678835191dd97e6ead48
+size 6840
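The two binary files above are Git LFS pointers: the repository stores only an oid (a SHA-256 of the file contents) and a size, so a downloaded artifact can be checked against the pointer. A minimal sketch of that check, not from the repo; the local file path is a placeholder, and the expected values are copied from the new model.safetensors pointer in this commit:

```python
# Sketch: verify a downloaded file against the Git LFS pointer recorded above.
import hashlib
import os

def lfs_sha256(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file and return its hex SHA-256, the value stored as the pointer's oid."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while chunk := f.read(chunk_size):
            h.update(chunk)
    return h.hexdigest()

# Expected values copied from the new model.safetensors pointer in this commit.
expected_oid = "319278c870d9a5ea4178fca00222c5beeba5f93cdf8be6bc64256c87594b8647"
expected_size = 9250704

path = "model.safetensors"  # placeholder: wherever the checkpoint was downloaded
assert os.path.getsize(path) == expected_size, "size mismatch with LFS pointer"
assert lfs_sha256(path) == expected_oid, "sha256 mismatch with LFS pointer"
print("model.safetensors matches its LFS pointer")
```

The same check applies to training_args.bin with its oid 49d83a9a919016b89c2e22a0a9e1bea7cd88625f15ca678835191dd97e6ead48 and size 6840.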