Delta-Vector committed
Commit 297e743 · verified · 1 Parent(s): 7b7477c

Upload kto-4b.yml

Files changed (1)
  1. kto-4b.yml +109 -0
kto-4b.yml ADDED
@@ -0,0 +1,109 @@
+ base_model: NewEden/Hamanasu-4B-R2
+ model_type: AutoModelForCausalLM
+ tokenizer_type: AutoTokenizer
+
+ load_in_8bit: false
+ load_in_4bit: false
+ strict: false
+
+ hub_model_id: NewEden/KTO-4B
+ hub_strategy: "all_checkpoints"
+ push_dataset_to_hub:
+ hf_use_auth_token: true
+
+ chat_template: chatml
+
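+ # KTO preference tuning: rl_beta scales the policy-vs-reference log-prob
+ # ratio inside the loss, and kto_desirable_weight down-weights the loss on
+ # desirable examples relative to undesirable ones (see the sketch after the diff).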
+ rl: kto
+ rl_beta: 0.2
+ kto_desirable_weight: 0.2
+
+ datasets:
+   - path: NewEden/Opus-accepted-hermes-rejected-shuffled
+     split: train
+     type: chatml.argilla
+   - path: NewEden/KTO-IF-Dans
+     split: train
+     type: chatml.argilla
+   - path: NewEden/Purpura-Arkhaios-CC-KTO
+     split: train
+     type: chatml.argilla
+ dataset_prepared_path: last_run_prepared
+
+ shuffle_merged_datasets: true
+ val_set_size: 0.0
+ output_dir: ./outputs/out
+
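+ # LoRA on all linear projections; the effective update scaling is
+ # lora_alpha / lora_r = 32/64 = 0.5, damping updates relative to alpha == r.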
+ adapter: lora
+ lora_model_dir:
+
+ lora_r: 64
+ lora_alpha: 32
+ lora_dropout: 0.0
+ lora_target_linear: true
+ lora_fan_in_fan_out:
+ lora_target_modules:
+   - gate_proj
+   - down_proj
+   - up_proj
+   - q_proj
+   - v_proj
+   - k_proj
+   - o_proj
+
+ sequence_len: 32768
+ sample_packing: false
+ eval_sample_packing: false
+ pad_to_sequence_len: false
+
+ wandb_project: tavbussy
+ wandb_entity:
+ wandb_watch:
+ wandb_name: kto-1
+ wandb_log_model:
+
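+ # Effective batch size = micro_batch_size x gradient_accumulation_steps x world
+ # size (8 sequences per optimizer step per GPU here).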
+ gradient_accumulation_steps: 2
+ micro_batch_size: 4
+ num_epochs: 1
+ optimizer: paged_adamw_8bit
+ lr_scheduler: cosine
+ learning_rate: 1e-5
+ max_grad_norm: 0.001
+ weight_decay: 0.02
+
+ train_on_inputs: false
+ group_by_length: false
+ bf16: auto
+ fp16:
+ tf32: true
+
+ gradient_checkpointing: true
+ remove_unused_columns: false
+ early_stopping_patience:
+ resume_from_checkpoint:
+ local_rank:
+ logging_steps: 1
+ xformers_attention:
+ flash_attention: true
+
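+ # Note: val_set_size is 0.0 above, so no eval split is held out and
+ # evals_per_epoch appears to be inert in this run.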
+ warmup_steps: 35
+ evals_per_epoch: 2
+ eval_table_size:
+ eval_max_new_tokens:
+ saves_per_epoch: 2
+
+ debug:
+ deepspeed: ./deepspeed_configs/zero3_bf16.json
+ fsdp:
+ fsdp_config:
+
+ special_tokens:
+   pad_token: <|finetune_right_pad_id|>
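
For reference, rl: kto selects Kahneman-Tversky Optimization, which scores each completion on its own against a frozen reference model instead of requiring paired preferences. Below is a minimal PyTorch sketch of the objective that rl_beta and kto_desirable_weight feed into, assuming the TRL-style formulation that Axolotl wraps; the kto_loss function, its in-batch KL estimate, and the toy inputs are illustrative, not Axolotl internals.

import torch

# Minimal KTO objective sketch (TRL-style formulation). Axolotl/TRL estimate
# the KL term from mismatched completions across processes; the in-batch mean
# here is a simplified stand-in.
def kto_loss(
    policy_logps: torch.Tensor,     # log pi_theta(y|x), one value per completion
    reference_logps: torch.Tensor,  # log pi_ref(y|x) from the frozen reference
    is_desirable: torch.Tensor,     # bool mask: True where the completion is accepted
    beta: float = 0.2,              # rl_beta in the YAML above
    desirable_weight: float = 0.2,  # kto_desirable_weight in the YAML above
    undesirable_weight: float = 1.0,
) -> torch.Tensor:
    logratios = policy_logps - reference_logps
    # Baseline KL estimate: detached and clamped at zero so it shifts the
    # decision boundary without contributing gradients.
    kl = logratios.mean().clamp(min=0).detach()
    desirable_losses = 1 - torch.sigmoid(beta * (logratios - kl))
    undesirable_losses = 1 - torch.sigmoid(beta * (kl - logratios))
    losses = torch.where(
        is_desirable,
        desirable_weight * desirable_losses,
        undesirable_weight * undesirable_losses,
    )
    return losses.mean()

# Toy batch: two desirable and two undesirable completions.
policy = torch.tensor([-10.0, -12.0, -15.0, -9.0])
reference = torch.tensor([-11.0, -12.5, -14.0, -9.5])
mask = torch.tensor([True, True, False, False])
print(kto_loss(policy, reference, mask))

A run with this file would typically be launched through Axolotl's CLI, e.g. accelerate launch -m axolotl.cli.train kto-4b.yml, with the ZeRO-3 setup picked up from the deepspeed: key; adjust the invocation to your install.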