Ning Sun committed on
Commit aeaa9de
1 Parent(s): dc85b32

initial upload

Files changed (2)
  1. config.yaml +207 -0
  2. model.ckpt +3 -0
config.yaml ADDED
@@ -0,0 +1,207 @@
+ # lightning.pytorch==2.4.0
+ seed_everything: 42
+ trainer:
+   accelerator: auto
+   strategy:
+     class_path: lightning.pytorch.strategies.DDPStrategy
+     init_args:
+       accelerator: null
+       parallel_devices: null
+       cluster_environment: null
+       checkpoint_io: null
+       precision_plugin: null
+       ddp_comm_state: null
+       ddp_comm_hook: null
+       ddp_comm_wrapper: null
+       model_averaging_period: null
+       process_group_backend: null
+       timeout: 0:30:00
+       start_method: popen
+       output_device: null
+       dim: 0
+       broadcast_buffers: true
+       process_group: null
+       bucket_cap_mb: 25
+       find_unused_parameters: false
+       check_reduction: false
+       gradient_as_bucket_view: false
+       static_graph: false
+       delay_all_reduce_named_params: null
+       param_to_hook_all_reduce: null
+       mixed_precision: null
+       device_mesh: null
+   devices: auto
+   num_nodes: 8
+   precision: 32
+   logger:
+     class_path: lightning.pytorch.loggers.WandbLogger
+     init_args:
+       name: 7B_splice_reconstruction
+       save_dir: logs
+       version: null
+       offline: false
+       dir: null
+       id: null
+       anonymous: null
+       project: GBFT_DNAFM_GUE
+       log_model: false
+       experiment: null
+       prefix: ''
+       checkpoint_name: null
+       job_type: null
+       config: null
+       entity: null
+       reinit: null
+       tags: null
+       group: null
+       notes: null
+       magic: null
+       config_exclude_keys: null
+       config_include_keys: null
+       mode: null
+       allow_val_change: null
+       resume: null
+       force: null
+       tensorboard: null
+       sync_tensorboard: null
+       monitor_gym: null
+       save_code: true
+       settings: null
+   callbacks:
+   - class_path: lightning.pytorch.callbacks.LearningRateMonitor
+     init_args:
+       logging_interval: step
+       log_momentum: false
+       log_weight_decay: false
+   - class_path: lightning.pytorch.callbacks.ModelCheckpoint # save ckpt at the end of each epoch, and save the best val_mcc ckpt
+     init_args:
+       dirpath: null
+       filename: epoch_{epoch}-val_mcc:{val_mcc:.3f}
+       monitor: val_mcc
+       verbose: false
+       save_last: true
+       save_top_k: 1
+       save_weights_only: false
+       mode: max
+       auto_insert_metric_name: true
+       every_n_train_steps: null
+       train_time_interval: null
+       every_n_epochs: 1
+       save_on_train_epoch_end: null
+       enable_version_counter: true
+   - class_path: lightning.pytorch.callbacks.early_stopping.EarlyStopping
+     dict_kwargs:
+       monitor: val_mcc
+       mode: max
+       patience: 10
+   fast_dev_run: false
+   max_epochs: 20
+   min_epochs: null
+   max_steps: -1
+   min_steps: null
+   max_time: null
+   limit_train_batches: null
+   limit_val_batches: null
+   limit_test_batches: null
+   limit_predict_batches: null
+   overfit_batches: 0.0
+   val_check_interval: null
+   check_val_every_n_epoch: 1
+   num_sanity_val_steps: null
+   log_every_n_steps: 50
+   enable_checkpointing: null
+   enable_progress_bar: null
+   enable_model_summary: null
+   accumulate_grad_batches: 1
+   gradient_clip_val: 1
+   gradient_clip_algorithm: null
+   deterministic: null
+   benchmark: null
+   inference_mode: true
+   use_distributed_sampler: true
+   profiler:
+     class_path: lightning.pytorch.profilers.PyTorchProfiler
+     init_args:
+       dirpath: null
+       filename: null
+       group_by_input_shapes: false
+       emit_nvtx: false
+       export_to_chrome: true
+       row_limit: 20
+       sort_by_key: null
+       record_module_names: true
+       table_kwargs: null
+       record_shapes: false
+     dict_kwargs:
+       profile_memory: true
+   detect_anomaly: false
+   barebones: false
+   plugins: null
+   sync_batchnorm: false
+   reload_dataloaders_every_n_epochs: 0
+   default_root_dir: logs
+ model:
+   class_path: genbio_finetune.tasks.SequenceClassification
+   init_args:
+     backbone:
+       class_path: genbio_finetune.models.dnafm
+       init_args:
+         from_scratch: false
+         use_peft: true
+         save_peft_only: true
+         lora_r: 16
+         lora_alpha: 32
+         lora_dropout: 0.1
+         config_overwrites: null
+         model_init_args: null
+         max_length: 402
+     adapter:
+       class_path: genbio_finetune.models.MLPPoolAdapter
+       init_args:
+         pooling: mean_pooling
+         hidden_sizes:
+         - 128
+         bias: true
+         dropout: 0.1
+         dropout_in_middle: false
+     n_classes: 3
+     optimizer:
+       class_path: torch.optim.AdamW
+       init_args:
+         lr: 0.0005
+         betas:
+         - 0.9
+         - 0.95
+         eps: 1.0e-08
+         weight_decay: 0.1
+         amsgrad: false
+         maximize: false
+         foreach: null
+         capturable: false
+         differentiable: false
+         fused: null
+     lr_scheduler:
+       class_path: genbio_finetune.lr_schedulers.CosineWithWarmup
+       init_args:
+         warmup_ratio: 0.1
+     use_legacy_adapter: false
+     strict_loading: true
+     reset_optimizer_states: false
+ data:
+   class_path: genbio_finetune.data.GUEClassification
+   init_args:
+     hf_name: leannmlindsey/GUE
+     task: splice_reconstructed
+     x_col: sequence
+     y_col: label
+     train_split_name: train
+     test_split_name: test
+     valid_split_name: null
+     valid_split_size: 0.1
+     batch_size: 4
+     shuffle: true
+     sampler: null
+     num_workers: 0
+     pin_memory: true
+     persistent_workers: false
+ ckpt_path: null
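
Note: config.yaml above is a LightningCLI-style configuration (class_path/init_args nesting under trainer, model, and data). The entry-point script is not part of this commit, so the launcher below is only a minimal sketch of how such a config is typically consumed, assuming a plain LightningCLI wrapper:

    # launcher.py -- hypothetical entry point, not included in this commit
    from lightning.pytorch.cli import LightningCLI

    def main():
        # With no fixed model/datamodule class, the CLI instantiates whatever
        # model.class_path and data.class_path name in the YAML config.
        LightningCLI(save_config_kwargs={"overwrite": True})

    if __name__ == "__main__":
        main()

Invoked as "python launcher.py fit --config config.yaml", this would run the fine-tuning job described above: DDP across 8 nodes, LoRA (r=16, alpha=32) on the DNA foundation-model backbone, and early stopping on val_mcc.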
model.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2ec8e18576a89093246482ed5d8520dbf64994c9a5b534712b6c256bf6c75c05
+ size 107394014
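
Note: model.ckpt is tracked with Git LFS, so the diff only adds the pointer file; the actual ~107 MB checkpoint lives in LFS storage. One way to fetch both files is the huggingface_hub client; the repo_id below is a placeholder for wherever this repository is hosted:

    # fetch_files.py -- sketch only; replace the repo_id placeholder with the real repository
    from huggingface_hub import hf_hub_download

    config_path = hf_hub_download(repo_id="your-namespace/your-repo", filename="config.yaml")
    ckpt_path = hf_hub_download(repo_id="your-namespace/your-repo", filename="model.ckpt")
    print(config_path, ckpt_path)

Alternatively, a plain git clone followed by "git lfs pull" resolves the pointer into the real checkpoint.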