almersawi committed (verified)
Commit dc81d00 · Parent: 713b969

Upload cfg.yaml

Files changed (1): cfg.yaml (+119 -0)
cfg.yaml ADDED
@@ -0,0 +1,119 @@
+ architecture:
+   backbone_dtype: bfloat16
+   force_embedding_gradients: false
+   gradient_checkpointing: true
+   intermediate_dropout: 0.0
+   pretrained: true
+   pretrained_weights: ''
+ augmentation:
+   neftune_noise_alpha: 0.0
+   random_parent_probability: 0.0
+   skip_parent_probability: 0.0
+   token_mask_probability: 0.0
+ dataset:
+   add_eos_token_to_answer: false
+   add_eos_token_to_prompt: false
+   add_eos_token_to_system: false
+   answer_column: content
+   chatbot_author: ''
+   chatbot_name: ''
+   data_sample: 1.0
+   data_sample_choice:
+   - Train
+   - Validation
+   limit_chained_samples: false
+   mask_prompt_labels: false
+   parent_id_column: None
+   personalize: false
+   prompt_column:
+   - metadata
+   system_column: None
+   text_answer_separator: ''
+   text_prompt_start: ''
+   text_system_start: ''
+   train_dataframe: /app/train_df.csv
+   validation_dataframe: ''
+   validation_size: 0.01
+   validation_strategy: automatic
+ environment:
+   compile_model: false
+   deepspeed_offload_optimizer: false
+   deepspeed_reduce_bucket_size: 10000001.0
+   deepspeed_stage3_max_live_parameters: 2000000000.0
+   deepspeed_stage3_max_reuse_distance: 10000001.0
+   deepspeed_stage3_param_persistence_threshold: 10000001.0
+   deepspeed_stage3_prefetch_bucket_size: 1000000000.0
+   find_unused_parameters: false
+   gpus:
+   - '0'
+   - '1'
+   - '2'
+   - '3'
+   huggingface_branch: main
+   mixed_precision: false
+   number_of_workers: 8
+   seed: -1
+   trust_remote_code: false
+   use_deepspeed: true
+ experiment_name: llama-13b-chat
+ llm_backbone: meta-llama/Llama-2-13b-hf
+ logging:
+   logger: None
+   neptune_project: ''
+ output_directory: /app/output
+ prediction:
+   batch_size_inference: 0
+   do_sample: false
+   max_length_inference: 256
+   metric: Perplexity
+   metric_gpt_model: gpt-3.5-turbo-0301
+   metric_gpt_template: general
+   min_length_inference: 2
+   num_beams: 1
+   num_history: 4
+   repetition_penalty: 1.2000000477
+   stop_tokens: ''
+   temperature: 0.0
+   top_k: 0
+   top_p: 1.0
+ problem_type: text_causal_language_modeling
+ tokenizer:
+   add_prefix_space: false
+   add_prompt_answer_tokens: false
+   max_length: 512
+   max_length_answer: 256
+   max_length_prompt: 256
+   padding_quantile: 1.0
+   use_fast: true
+ tracking_mode: during_epoch
+ training:
+   alpha: 0.9900000095
+   batch_size: 2
+   beta1: 0.8999999762
+   beta2: 0.9990000129
+   differential_learning_rate: 1.0e-05
+   differential_learning_rate_layers: []
+   drop_last_batch: true
+   epochs: 1
+   eps: 1.0e-08
+   evaluate_before_training: false
+   evaluation_epochs: 1.0
+   grad_accumulation: 1
+   gradient_clip: 0.5
+   learning_rate: 0.0001
+   lora: true
+   lora_alpha: 16
+   lora_dropout: 0.0500000007
+   lora_r: 4
+   lora_target_modules: []
+   loss_function: TokenAveragedCrossEntropy
+   momentum: 0.0
+   nesterov: false
+   optimizer: AdamW
+   rho: 0.8999999762
+   save_best_checkpoint: false
+   schedule: Cosine
+   train_validation_data: false
+   use_flash_attention_2: false
+   warmup_epochs: 0.0
+   weight_decay: 0.0
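
The key layout (llm_backbone, problem_type: text_causal_language_modeling, the training.lora_* block) suggests an H2O LLM Studio experiment export, though the commit itself does not say so. For readers who want to consume this file outside that tool, here is a minimal Python sketch that loads the YAML and maps the training.lora_* values onto a peft.LoraConfig. The local file path, the LoraConfig mapping, and the fallback for the empty lora_target_modules list are illustrative assumptions, not the tool's own loading code.

import yaml
from peft import LoraConfig

# Read the uploaded experiment configuration. The file name and key paths
# below match the upload; everything else in this sketch is an assumption.
with open("cfg.yaml") as f:
    cfg = yaml.safe_load(f)

print(cfg["llm_backbone"])        # meta-llama/Llama-2-13b-hf
print(cfg["training"]["lora_r"])  # 4

# Hypothetical mapping of the training.lora_* keys onto a peft.LoraConfig.
# An empty lora_target_modules list is read here as "fall back to PEFT's
# per-architecture defaults", which is an assumption, not confirmed logic.
train = cfg["training"]
lora_config = LoraConfig(
    r=train["lora_r"],                   # 4
    lora_alpha=train["lora_alpha"],      # 16
    lora_dropout=train["lora_dropout"],  # ~0.05
    target_modules=train["lora_target_modules"] or None,
    task_type="CAUSAL_LM",
)

With lora: true, lora_r: 4, and lora_alpha: 16, the adapter updates are scaled by alpha/r = 4; the sketch only shows how those fields could be wired into PEFT, not how this training run was actually launched.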