mhenrichsen committed
Commit: 8b30f67
Parent: 235f839

Upload folder using huggingface_hub

Files changed (1): llama.yml +70 -0
llama.yml ADDED
@@ -0,0 +1,70 @@
+base_model: NousResearch/Meta-Llama-3-8B
+model_type: LlamaForCausalLM
+tokenizer_type: AutoTokenizer # PreTrainedTokenizerFast
+
+load_in_8bit: false
+load_in_4bit: true
+strict: false
+
+datasets:
+  - path: alexandrainst/lexdk-open
+    type: completion
+  - path: syvai/wiki-da
+    type: completion
+
+dataset_prepared_path: last_run_prepared
+val_set_size: 0.001
+output_dir: ./out/qlora-llama3-70b
+
+adapter: qlora
+
+lora_r: 32
+lora_alpha: 16
+lora_dropout: 0.05
+lora_target_linear: true
+lora_fan_in_fan_out:
+lora_target_modules:
+
+wandb_project: llama-3
+wandb_entity:
+wandb_watch:
+wandb_name:
+wandb_log_model:
+
+sequence_len: 8192
+sample_packing: true
+pad_to_sequence_len: true
+eval_sample_packing: false
+
+gradient_accumulation_steps: 1
+micro_batch_size: 1
+num_epochs: 2
+optimizer: adamw_bnb_8bit
+lr_scheduler: cosine
+learning_rate: 0.0002
+
+train_on_inputs: false
+group_by_length: false
+bf16: true
+fp16: false
+tf32: false
+
+gradient_checkpointing: true
+early_stopping_patience:
+resume_from_checkpoint:
+local_rank:
+logging_steps: 1
+xformers_attention:
+flash_attention: true
+
+warmup_steps: 10
+evals_per_epoch: 10
+eval_table_size:
+saves_per_epoch: 2
+debug:
+deepspeed:
+weight_decay: 0.1
+fsdp:
+fsdp_config:
+special_tokens:
+  pad_token: <|end_of_text|>
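
Both datasets use axolotl's completion type, i.e. raw text for language-model training rather than prompt/response pairs. A minimal sketch for inspecting one of them before training, assuming the dataset exposes a plain-text column (the completion loader reads a "text" field by default; a differently named column would need field: set in the config):

from datasets import load_dataset

# Peek at the first training record of alexandrainst/lexdk-open.
ds = load_dataset("alexandrainst/lexdk-open", split="train")
print(ds.column_names)  # confirm which column holds the raw text
print(ds[0])            # one record; sample_packing concatenates these into 8192-token sequences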
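With the file saved as llama.yml, training would typically be started through axolotl's CLI, e.g. accelerate launch -m axolotl.cli.train llama.yml. Running python -m axolotl.cli.preprocess llama.yml first tokenizes and packs the datasets into dataset_prepared_path (last_run_prepared above), so the train step can skip that work.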
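After training, the QLoRA adapter written to output_dir can be attached to the 4-bit base model for a quick smoke test. A minimal sketch using transformers and peft, mirroring the quantization settings above (the prompt is illustrative; the training data is Danish):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

# Load the base model in 4-bit, matching load_in_4bit: true above.
bnb = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16)
base = AutoModelForCausalLM.from_pretrained(
    "NousResearch/Meta-Llama-3-8B",
    quantization_config=bnb,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("NousResearch/Meta-Llama-3-8B")

# Attach the trained adapter from output_dir.
model = PeftModel.from_pretrained(base, "./out/qlora-llama3-70b")

inputs = tokenizer("Danmark er", return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=50)
print(tokenizer.decode(out[0], skip_special_tokens=True))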