Training in progress, step 3401, checkpoint
- last-checkpoint/adapter_config.json +1 -1
- last-checkpoint/adapter_model.safetensors +1 -1
- last-checkpoint/global_step3401/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt +3 -0
- last-checkpoint/global_step3401/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt +3 -0
- last-checkpoint/global_step3401/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt +3 -0
- last-checkpoint/global_step3401/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt +3 -0
- last-checkpoint/global_step3401/zero_pp_rank_0_mp_rank_00_model_states.pt +3 -0
- last-checkpoint/global_step3401/zero_pp_rank_1_mp_rank_00_model_states.pt +3 -0
- last-checkpoint/global_step3401/zero_pp_rank_2_mp_rank_00_model_states.pt +3 -0
- last-checkpoint/global_step3401/zero_pp_rank_3_mp_rank_00_model_states.pt +3 -0
- last-checkpoint/latest +1 -1
- last-checkpoint/scheduler.pt +1 -1
- last-checkpoint/trainer_state.json +6 -6
- last-checkpoint/training_args.bin +1 -1
last-checkpoint/adapter_config.json
CHANGED
@@ -19,7 +19,7 @@
   "r": 8,
   "rank_pattern": {},
   "revision": null,
-  "target_modules": "^(?!.*patch_embed).*(?:
+  "target_modules": "^(?!.*patch_embed).*(?:proj|v_proj|fc1|gate_proj|down_proj|o_proj|fc2|k_proj|q_proj|up_proj|qkv).*",
   "task_type": "CAUSAL_LM",
   "use_dora": false,
   "use_rslora": false
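The only change to adapter_config.json above is the target_modules regex. As a hedged sketch (not part of this commit; names are illustrative), peft treats a string-valued target_modules as a regular expression matched against full module names, so a config equivalent to the new value could be declared like this:

```python
from peft import LoraConfig

# Sketch only: a LoraConfig mirroring the adapter_config.json values shown above.
# With a string, peft matches `target_modules` as a regex against each module's
# full name, so this pattern skips the vision patch_embed layers while adapting
# the attention/MLP projection layers listed in the alternation.
lora_config = LoraConfig(
    r=8,
    task_type="CAUSAL_LM",
    target_modules=(
        r"^(?!.*patch_embed).*"
        r"(?:proj|v_proj|fc1|gate_proj|down_proj|o_proj|fc2|k_proj|q_proj|up_proj|qkv).*"
    ),
)
```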
last-checkpoint/adapter_model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:302b9ce048dad99dc59725f9bc543136929b03fd0548bef61608ff98a26b885e
 size 29034840
last-checkpoint/global_step3401/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:860876f98bf03c575d9bc5921783fd219c7ce0e0f47f53cc31e943c2bc5f81dc
+size 43429616
last-checkpoint/global_step3401/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af76082060d99cabb6cf54a5de33e91f9afcbe1cb03f27effeb57c5272eeba35
+size 43429616
last-checkpoint/global_step3401/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c2ebe21d90d4da9ac7c8938d709c4e4ac5a423f047f062156c5cc024ec022684
+size 43429616
last-checkpoint/global_step3401/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0a9dd47364015c06839e58d41804e93f4f3eb84000c395948f049763066d044c
+size 43429616
last-checkpoint/global_step3401/zero_pp_rank_0_mp_rank_00_model_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:70455332cd654c8606fdab3c958e34cfb2dfc91b03c03e361d5d2e7734630486
+size 637299
last-checkpoint/global_step3401/zero_pp_rank_1_mp_rank_00_model_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:570bfba1d3e07de3836b00bdea62b95fe6d5d0e0e65c10bd9766a23c4f6ac8c5
+size 637171
last-checkpoint/global_step3401/zero_pp_rank_2_mp_rank_00_model_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:984640bbf0cdbe7ead13f1a20c28e262fcc60b3d15d4abc95f25a927d576c7ca
+size 637171
last-checkpoint/global_step3401/zero_pp_rank_3_mp_rank_00_model_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:305bc562bdc0a7614ebce33e5124dbeba838eea4588e98e9e12061348423eec5
+size 637171
last-checkpoint/latest
CHANGED
@@ -1 +1 @@
-
+global_step3401
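The global_step3401 files added above are DeepSpeed ZeRO shards (one optimizer-state and one model-state file per rank), and the latest file now points at that tag. As a hedged sketch, assuming DeepSpeed is installed and the checkpoint has been pulled locally, its zero_to_fp32 helper can consolidate those shards into a single fp32 state dict:

```python
# Sketch only: consolidate the per-rank ZeRO shards listed above into one
# fp32 state dict. When no tag is passed, DeepSpeed reads the `latest` file
# in the checkpoint directory (here pointing at global_step3401).
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

state_dict = get_fp32_state_dict_from_zero_checkpoint("last-checkpoint")
print(f"recovered {len(state_dict)} tensors")
```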
last-checkpoint/scheduler.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:3f2d6472ebcb070058b4055bf37ad09560e10692efc14730a79abd74cca6a737
 size 1064
last-checkpoint/trainer_state.json
CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.1869634985923767,
   "best_model_checkpoint": "saves/CADICA_qwenvl_direction_then_DetectAndClassify_scale6/lora/sft/checkpoint-2350",
-  "epoch": 1.
+  "epoch": 1.0047267355982274,
   "eval_steps": 50,
-  "global_step":
+  "global_step": 3401,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -6062,8 +6062,8 @@
     }
   ],
   "logging_steps": 5,
-  "max_steps":
-  "num_input_tokens_seen":
+  "max_steps": 3400,
+  "num_input_tokens_seen": 35316128,
   "num_train_epochs": 2,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -6073,12 +6073,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop":
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos":
+  "total_flos": 2329910849044480.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null
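The trainer_state.json diff above records the final bookkeeping for this run: global_step 3401, epoch of roughly 1.0047, max_steps 3400, and should_training_stop flipped to true. A minimal, illustrative check (assuming the checkpoint folder is available locally) just reads those fields back:

```python
import json

# Illustrative only: inspect the fields updated in this commit.
with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print(state["global_step"])   # 3401
print(state["epoch"])         # 1.0047267355982274
print(state["max_steps"])     # 3400
print(state["best_metric"])   # 0.1869634985923767
```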
last-checkpoint/training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:85339a802dfd06cbba7d2568099cd0116c5715fbc57a8d66871ebfbd39c462c6
 size 7480