diff --git a/llava-v1.6-13b-unk-vqa-v1.0/checkpoint-1000/model-00005-of-00006.safetensors b/llava-v1.6-13b-unk-vqa-v1.0/checkpoint-1000/model-00005-of-00006.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..100158647b40c744d09eca5dee2ef8e91223ad76
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.0/checkpoint-1000/model-00005-of-00006.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5722a0d8a0b65ffbcb357956880e989ba745f65dfd8431858ec397f32d53d0e1
+size 4933722216
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/config.json b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..b6d238b44bac3c6fa409ed9a9075907037571058
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/config.json
@@ -0,0 +1,74 @@
+{
+ "_name_or_path": "../pretrained-models/llava-v1.6-vicuna-13b",
+ "architectures": [
+ "LlavaLlamaForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "freeze_mm_mlp_adapter": false,
+ "freeze_mm_vision_resampler": false,
+ "hidden_act": "silu",
+ "hidden_size": 5120,
+ "image_aspect_ratio": "pad",
+ "image_crop_resolution": 224,
+ "image_grid_pinpoints": [
+ [
+ 336,
+ 672
+ ],
+ [
+ 672,
+ 336
+ ],
+ [
+ 672,
+ 672
+ ],
+ [
+ 1008,
+ 336
+ ],
+ [
+ 336,
+ 1008
+ ]
+ ],
+ "image_split_resolution": 224,
+ "initializer_range": 0.02,
+ "intermediate_size": 13824,
+ "max_length": 4096,
+ "max_position_embeddings": 4096,
+ "mm_hidden_size": 1024,
+ "mm_patch_merge_type": "flat",
+ "mm_projector_lr": null,
+ "mm_projector_type": "mlp2x_gelu",
+ "mm_resampler_type": null,
+ "mm_use_im_patch_token": false,
+ "mm_use_im_start_end": false,
+ "mm_vision_select_feature": "patch",
+ "mm_vision_select_layer": -2,
+ "mm_vision_tower": "openai/clip-vit-large-patch14-336",
+ "mm_vision_tower_lr": 2e-06,
+ "model_type": "llava_llama",
+ "num_attention_heads": 40,
+ "num_hidden_layers": 40,
+ "num_key_value_heads": 40,
+ "pad_token_id": 0,
+ "pretraining_tp": 1,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 10000.0,
+ "tie_word_embeddings": false,
+ "tokenizer_model_max_length": 2048,
+ "tokenizer_padding_side": "right",
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.37.2",
+ "tune_mm_mlp_adapter": false,
+ "tune_mm_vision_resampler": false,
+ "unfreeze_mm_vision_tower": true,
+ "use_cache": false,
+ "use_mm_proj": true,
+ "vocab_size": 32000
+}
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/generation_config.json b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..70ffa4e13b28eca9f452207a778bb73c036f3d03
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/generation_config.json
@@ -0,0 +1,8 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "max_length": 4096,
+ "pad_token_id": 0,
+ "transformers_version": "4.37.2"
+}
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/latest b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/latest
new file mode 100644
index 0000000000000000000000000000000000000000..744ae7dbad571b6f37ec6c7066549494261bb59e
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/latest
@@ -0,0 +1 @@
+global_step100
\ No newline at end of file
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/model.safetensors.index.json b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..5cb0534e5af0581cc99cf491b62264df614dd647
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/model.safetensors.index.json
@@ -0,0 +1,766 @@
+{
+ "metadata": {
+ "total_size": 26701688832
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00006-of-00006.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00006.safetensors",
+ "model.image_newline": "model-00001-of-00006.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00006.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00006.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00006.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00006.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00006.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00006.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.28.input_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.28.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.28.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.28.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.28.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.28.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.28.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.28.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.28.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.29.input_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.29.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.29.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.29.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.29.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.29.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.29.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.29.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.29.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00006.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00006.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.30.input_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.30.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.30.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.30.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.30.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.30.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.30.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.30.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.30.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.31.input_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.31.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.31.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.31.mlp.up_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.31.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.31.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.31.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.31.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.31.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.32.input_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.32.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.32.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.32.mlp.up_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.32.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.32.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.32.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.32.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.32.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.33.input_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.33.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.33.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.33.mlp.up_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.33.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.33.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.33.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.33.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.33.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.34.input_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.34.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.34.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.34.mlp.up_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.34.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.34.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.34.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.34.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.34.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.35.input_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.35.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.35.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.35.mlp.up_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.35.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.35.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.35.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.35.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.35.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.36.input_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.36.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.36.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.36.mlp.up_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.36.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.36.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.36.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.36.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.36.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.37.input_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.37.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.37.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.37.mlp.up_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.37.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.37.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.37.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.37.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.37.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.38.input_layernorm.weight": "model-00006-of-00006.safetensors",
+ "model.layers.38.mlp.down_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.38.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.38.mlp.up_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.38.post_attention_layernorm.weight": "model-00006-of-00006.safetensors",
+ "model.layers.38.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.38.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.38.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.38.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.39.input_layernorm.weight": "model-00006-of-00006.safetensors",
+ "model.layers.39.mlp.down_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.39.mlp.gate_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.39.mlp.up_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.39.post_attention_layernorm.weight": "model-00006-of-00006.safetensors",
+ "model.layers.39.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.39.self_attn.o_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.39.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.39.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00006.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00006.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00006.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00006.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00006.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00006.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
+ "model.mm_projector.0.bias": "model-00006-of-00006.safetensors",
+ "model.mm_projector.0.weight": "model-00006-of-00006.safetensors",
+ "model.mm_projector.2.bias": "model-00006-of-00006.safetensors",
+ "model.mm_projector.2.weight": "model-00006-of-00006.safetensors",
+ "model.norm.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.embeddings.class_embedding": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.embeddings.patch_embedding.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.embeddings.position_embedding.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.post_layernorm.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.post_layernorm.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.pre_layrnorm.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.pre_layrnorm.weight": "model-00006-of-00006.safetensors"
+ }
+}
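The index file above only maps parameter names to shard files; the weights themselves live in the model-0000X-of-00006.safetensors LFS objects. A minimal sketch of loading the sharded weights by hand from this index (assuming the safetensors package and a local copy of the checkpoint directory; in practice transformers' from_pretrained resolves the index automatically):

    import json
    from safetensors.torch import load_file

    # hypothetical local path to the checkpoint directory added in this diff
    ckpt_dir = "llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100"

    with open(f"{ckpt_dir}/model.safetensors.index.json") as f:
        index = json.load(f)

    # weight_map: parameter name -> shard file; load each shard exactly once
    state_dict = {}
    for shard in sorted(set(index["weight_map"].values())):
        state_dict.update(load_file(f"{ckpt_dir}/{shard}"))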
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/rng_state_2.pth b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..ea499e285c97cca07fedd34662c3d4ab44ff6f47
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f4e481d4ef1546694da7337f6bb6c658b866dcb79b85deeb477da0d27ebe851e
+size 15984
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/rng_state_3.pth b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..aeb38f92f106ac3f08bae4f82179a8a12243bccb
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:353c60be37ea56fc992fca446598ceca5d1fd002aa3bd6dbb9ad740e6f47ebb3
+size 15984
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/rng_state_4.pth b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/rng_state_4.pth
new file mode 100644
index 0000000000000000000000000000000000000000..9d5856cb7a3f15092fa5593507022316916f648e
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/rng_state_4.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e9107fe964ba7205e354084b85210e5a5ea1c98cfd4d38adb9cd3926945dcae4
+size 15984
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/rng_state_6.pth b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/rng_state_6.pth
new file mode 100644
index 0000000000000000000000000000000000000000..a9fd0364bb8f1a8e91eca45be5e1b6672b4d9afd
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/rng_state_6.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:afd5516048e20f36959601574e29e40106085a7d3cdc7bf425ce5e84633490e6
+size 15984
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/scheduler.pt b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..fb6372408918017849d562bdfead314b0390dd30
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a4357adac8296985cb4b98d4cc54cbe1338e42459aa4150e5eb8b32da703ed47
+size 1064
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/special_tokens_map.json b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..14761dcf1466dc232bd41de9c21d4c617b15755e
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/tokenizer.model b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/tokenizer_config.json b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..2d53c0f8edb049fa98763ee75652fafa68bf7f42
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/tokenizer_config.json
@@ -0,0 +1,42 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "legacy": false,
+ "model_max_length": 2048,
+ "pad_token": "",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "",
+ "use_default_system_prompt": false
+}
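The tokenizer files above (tokenizer.model, tokenizer_config.json, special_tokens_map.json) are standard LlamaTokenizer artifacts with model_max_length 2048 and right-side padding. A hedged sketch of loading them straight from this checkpoint directory (assuming transformers is installed and the path points at a local copy):

    from transformers import AutoTokenizer

    # hypothetical local path; point this at wherever the checkpoint lives
    tok = AutoTokenizer.from_pretrained(
        "llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100",
        use_fast=False,  # the checkpoint ships a SentencePiece tokenizer.model
    )
    print(tok.model_max_length, tok.padding_side)  # 2048, "right"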
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/trainer_state.json b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..e751856b7e9167dbf5ba89c0541c77c2e81c52b5
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/trainer_state.json
@@ -0,0 +1,621 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.19230769230769232,
+ "eval_steps": 500,
+ "global_step": 100,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.0,
+ "learning_rate": 6.25e-07,
+ "loss": 3.1205,
+ "step": 1
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 1.25e-06,
+ "loss": 3.2041,
+ "step": 2
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 1.8750000000000003e-06,
+ "loss": 3.0927,
+ "step": 3
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 2.5e-06,
+ "loss": 3.1433,
+ "step": 4
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 3.125e-06,
+ "loss": 3.036,
+ "step": 5
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 3.7500000000000005e-06,
+ "loss": 2.4568,
+ "step": 6
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 4.3750000000000005e-06,
+ "loss": 1.2898,
+ "step": 7
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 5e-06,
+ "loss": 0.97,
+ "step": 8
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 5.625e-06,
+ "loss": 0.6386,
+ "step": 9
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 6.25e-06,
+ "loss": 0.4549,
+ "step": 10
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 6.875e-06,
+ "loss": 0.4718,
+ "step": 11
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 7.500000000000001e-06,
+ "loss": 0.4106,
+ "step": 12
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 8.125000000000001e-06,
+ "loss": 0.3442,
+ "step": 13
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 8.750000000000001e-06,
+ "loss": 0.3178,
+ "step": 14
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 9.375000000000001e-06,
+ "loss": 0.2579,
+ "step": 15
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 1e-05,
+ "loss": 0.3623,
+ "step": 16
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 1.0625e-05,
+ "loss": 0.3338,
+ "step": 17
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 1.125e-05,
+ "loss": 0.3265,
+ "step": 18
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.1875e-05,
+ "loss": 0.3804,
+ "step": 19
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.25e-05,
+ "loss": 0.2543,
+ "step": 20
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.3125e-05,
+ "loss": 0.3619,
+ "step": 21
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.375e-05,
+ "loss": 0.3095,
+ "step": 22
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.4375e-05,
+ "loss": 0.2835,
+ "step": 23
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.5000000000000002e-05,
+ "loss": 0.2666,
+ "step": 24
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.5625e-05,
+ "loss": 0.3076,
+ "step": 25
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.6250000000000002e-05,
+ "loss": 0.2789,
+ "step": 26
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.6875e-05,
+ "loss": 0.3008,
+ "step": 27
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.7500000000000002e-05,
+ "loss": 0.2998,
+ "step": 28
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.8125e-05,
+ "loss": 0.272,
+ "step": 29
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.8750000000000002e-05,
+ "loss": 0.371,
+ "step": 30
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.9375e-05,
+ "loss": 0.3543,
+ "step": 31
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 2e-05,
+ "loss": 0.2793,
+ "step": 32
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.9999951432210905e-05,
+ "loss": 0.278,
+ "step": 33
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.9999805729315383e-05,
+ "loss": 0.3078,
+ "step": 34
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.999956289272873e-05,
+ "loss": 0.3137,
+ "step": 35
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.999922292480975e-05,
+ "loss": 0.2804,
+ "step": 36
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.9998785828860744e-05,
+ "loss": 0.2322,
+ "step": 37
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.9998251609127465e-05,
+ "loss": 0.2754,
+ "step": 38
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.999762027079909e-05,
+ "loss": 0.3334,
+ "step": 39
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9996891820008165e-05,
+ "loss": 0.3721,
+ "step": 40
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9996066263830533e-05,
+ "loss": 0.3324,
+ "step": 41
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9995143610285275e-05,
+ "loss": 0.2972,
+ "step": 42
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9994123868334655e-05,
+ "loss": 0.3039,
+ "step": 43
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9993007047883988e-05,
+ "loss": 0.2436,
+ "step": 44
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.999179315978157e-05,
+ "loss": 0.3353,
+ "step": 45
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.999048221581858e-05,
+ "loss": 0.2819,
+ "step": 46
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.9989074228728942e-05,
+ "loss": 0.2769,
+ "step": 47
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.9987569212189224e-05,
+ "loss": 0.253,
+ "step": 48
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.9985967180818493e-05,
+ "loss": 0.2614,
+ "step": 49
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.998426815017817e-05,
+ "loss": 0.3297,
+ "step": 50
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.998247213677188e-05,
+ "loss": 0.3112,
+ "step": 51
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.9980579158045322e-05,
+ "loss": 0.2611,
+ "step": 52
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.9978589232386036e-05,
+ "loss": 0.3002,
+ "step": 53
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.997650237912329e-05,
+ "loss": 0.2485,
+ "step": 54
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.997431861852785e-05,
+ "loss": 0.287,
+ "step": 55
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.9972037971811802e-05,
+ "loss": 0.331,
+ "step": 56
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.996966046112834e-05,
+ "loss": 0.2785,
+ "step": 57
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.996718610957155e-05,
+ "loss": 0.3088,
+ "step": 58
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.9964614941176194e-05,
+ "loss": 0.2558,
+ "step": 59
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.9961946980917457e-05,
+ "loss": 0.3732,
+ "step": 60
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.995918225471073e-05,
+ "loss": 0.2935,
+ "step": 61
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.9956320789411338e-05,
+ "loss": 0.2946,
+ "step": 62
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.9953362612814294e-05,
+ "loss": 0.2806,
+ "step": 63
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.9950307753654016e-05,
+ "loss": 0.2934,
+ "step": 64
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.994715624160405e-05,
+ "loss": 0.3366,
+ "step": 65
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.99439081072768e-05,
+ "loss": 0.2754,
+ "step": 66
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.9940563382223196e-05,
+ "loss": 0.3244,
+ "step": 67
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.9937122098932428e-05,
+ "loss": 0.3061,
+ "step": 68
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.9933584290831593e-05,
+ "loss": 0.4533,
+ "step": 69
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.9929949992285397e-05,
+ "loss": 1.2644,
+ "step": 70
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.992621923859581e-05,
+ "loss": 0.3184,
+ "step": 71
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.9922392066001724e-05,
+ "loss": 0.2933,
+ "step": 72
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.99184685116786e-05,
+ "loss": 0.3077,
+ "step": 73
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.9914448613738107e-05,
+ "loss": 0.3038,
+ "step": 74
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.991033241122776e-05,
+ "loss": 0.3063,
+ "step": 75
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9906119944130527e-05,
+ "loss": 0.311,
+ "step": 76
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9901811253364458e-05,
+ "loss": 0.3081,
+ "step": 77
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9897406380782262e-05,
+ "loss": 0.261,
+ "step": 78
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.989290536917093e-05,
+ "loss": 0.3248,
+ "step": 79
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9888308262251286e-05,
+ "loss": 0.2814,
+ "step": 80
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.988361510467761e-05,
+ "loss": 0.2683,
+ "step": 81
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.9878825942037147e-05,
+ "loss": 0.2755,
+ "step": 82
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.9873940820849714e-05,
+ "loss": 0.3489,
+ "step": 83
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.9868959788567213e-05,
+ "loss": 0.3311,
+ "step": 84
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.9863882893573188e-05,
+ "loss": 0.3237,
+ "step": 85
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.985871018518236e-05,
+ "loss": 0.3344,
+ "step": 86
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.9853441713640123e-05,
+ "loss": 0.3195,
+ "step": 87
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.9848077530122083e-05,
+ "loss": 0.3099,
+ "step": 88
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.9842617686733546e-05,
+ "loss": 0.271,
+ "step": 89
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.9837062236509013e-05,
+ "loss": 0.2612,
+ "step": 90
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.983141123341168e-05,
+ "loss": 0.3401,
+ "step": 91
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.9825664732332886e-05,
+ "loss": 0.2479,
+ "step": 92
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.9819822789091597e-05,
+ "loss": 0.2462,
+ "step": 93
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.981388546043388e-05,
+ "loss": 0.3064,
+ "step": 94
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.9807852804032306e-05,
+ "loss": 0.3066,
+ "step": 95
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.9801724878485438e-05,
+ "loss": 0.3402,
+ "step": 96
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.979550174331724e-05,
+ "loss": 0.2497,
+ "step": 97
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.9789183458976485e-05,
+ "loss": 0.2513,
+ "step": 98
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.97827700868362e-05,
+ "loss": 0.3157,
+ "step": 99
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.977626168919305e-05,
+ "loss": 0.2776,
+ "step": 100
+ }
+ ],
+ "logging_steps": 1.0,
+ "max_steps": 1040,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 2,
+ "save_steps": 100,
+ "total_flos": 5216186105856.0,
+ "train_batch_size": 16,
+ "trial_name": null,
+ "trial_params": null
+}
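trainer_state.json records one log_history entry per optimization step (epoch, learning_rate, loss, step) plus the run-level settings: 1040 max steps, train batch size 16, a checkpoint every 100 steps. A small standard-library sketch for pulling the loss curve out of it (the path is an assumption; adjust to the local copy):

    import json

    with open("llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/trainer_state.json") as f:
        state = json.load(f)

    entries = [e for e in state["log_history"] if "loss" in e]
    steps = [e["step"] for e in entries]
    losses = [e["loss"] for e in entries]
    print(f"{len(steps)} logged steps, final loss {losses[-1]:.4f}")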
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/training_args.bin b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..1db02bc253be0ccb38137b90117e8cf432939218
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:573a026c745bf25ae971945f26077f5567afd7eec66a80e3591209bf82e13ca8
+size 6712
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/zero_to_fp32.py b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/zero_to_fp32.py
new file mode 100644
index 0000000000000000000000000000000000000000..c98caae31534368be22b67fc4ae906836c992a8d
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-100/zero_to_fp32.py
@@ -0,0 +1,587 @@
+#!/usr/bin/env python
+
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+# This script extracts fp32 consolidated weights from ZeRO 1, 2 and 3 DeepSpeed checkpoints. It gets
+# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+# application.
+#
+# example: python zero_to_fp32.py . pytorch_model.bin
+
+import argparse
+import torch
+import glob
+import math
+import os
+import re
+from collections import OrderedDict
+from dataclasses import dataclass
+
+# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
+# DeepSpeed data structures it has to be available in the current python environment.
+from deepspeed.utils import logger
+from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
+@dataclass
+class zero_model_state:
+ buffers: dict()
+ param_shapes: dict()
+ shared_params: list
+ ds_version: int
+ frozen_param_shapes: dict()
+ frozen_param_fragments: dict()
+
+
+debug = 0
+
+# load to cpu
+device = torch.device('cpu')
+
+
+def atoi(text):
+ return int(text) if text.isdigit() else text
+
+
+def natural_keys(text):
+ '''
+ alist.sort(key=natural_keys) sorts in human order
+ http://nedbatchelder.com/blog/200712/human_sorting.html
+ (See Toothy's implementation in the comments)
+ '''
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
+
+
+def get_model_state_file(checkpoint_dir, zero_stage):
+ if not os.path.isdir(checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
+
+ # there should be only one file
+ if zero_stage <= 2:
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
+ elif zero_stage == 3:
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
+
+ if not os.path.exists(file):
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
+
+ return file
+
+
+def get_checkpoint_files(checkpoint_dir, glob_pattern):
+ # XXX: need to test that this simple glob rule works for multi-node setup too
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
+
+ if len(ckpt_files) == 0:
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
+
+ return ckpt_files
+
+
+def get_optim_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
+
+
+def get_model_state_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
+
+
+def parse_model_states(files):
+ zero_model_states = []
+ for file in files:
+ state_dict = torch.load(file, map_location=device)
+
+ if BUFFER_NAMES not in state_dict:
+ raise ValueError(f"{file} is not a model state checkpoint")
+ buffer_names = state_dict[BUFFER_NAMES]
+ if debug:
+ print("Found buffers:", buffer_names)
+
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
+ param_shapes = state_dict[PARAM_SHAPES]
+
+ # collect parameters that are included in param_shapes
+ param_names = []
+ for s in param_shapes:
+ for name in s.keys():
+ param_names.append(name)
+
+ # update with frozen parameters
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
+ if frozen_param_shapes is not None:
+ if debug:
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
+ param_names += list(frozen_param_shapes.keys())
+
+ # handle shared params
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
+
+ ds_version = state_dict.get(DS_VERSION, None)
+
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
+
+ z_model_state = zero_model_state(buffers=buffers,
+ param_shapes=param_shapes,
+ shared_params=shared_params,
+ ds_version=ds_version,
+ frozen_param_shapes=frozen_param_shapes,
+ frozen_param_fragments=frozen_param_fragments)
+ zero_model_states.append(z_model_state)
+
+ return zero_model_states
+
+
+def parse_optim_states(files, ds_checkpoint_dir):
+
+ total_files = len(files)
+ state_dicts = []
+ for f in files:
+ state_dict = torch.load(f, map_location=device)
+ # immediately discard the two potentially huge optimizer state tensors, since we only care about the fp32 master weights
+ # and also handle the case where it was already removed by another helper script
+ state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
+ state_dicts.append(state_dict)
+
+ if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
+
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
+ # use the max of the partition_count to get the dp world_size.
+
+ if type(world_size) is list:
+ world_size = max(world_size)
+
+ if world_size != total_files:
+ raise ValueError(
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
+ )
+
+ # the groups are named differently in each stage
+ if zero_stage <= 2:
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
+ elif zero_stage == 3:
+ fp32_groups_key = FP32_FLAT_GROUPS
+ else:
+ raise ValueError(f"unknown zero stage {zero_stage}")
+
+ if zero_stage <= 2:
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
+ elif zero_stage == 3:
+ # if there is more than one param group, there will be multiple flattened tensors - one
+ # flattened tensor per group - for simplicity merge them into a single tensor
+ #
+ # XXX: could make the script more memory efficient for when there are multiple groups - it
+ # will require matching the sub-lists of param_shapes for each param group flattened tensor
+
+ fp32_flat_groups = [
+ torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
+ ]
+
+ return zero_stage, world_size, fp32_flat_groups
+
+
+def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir):
+ """
+ Returns fp32 state_dict reconstructed from ds checkpoint
+
+ Args:
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
+
+ """
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
+
+ optim_files = get_optim_files(ds_checkpoint_dir)
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
+
+ model_files = get_model_state_files(ds_checkpoint_dir)
+
+ zero_model_states = parse_model_states(model_files)
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
+
+ if zero_stage <= 2:
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states)
+ elif zero_stage == 3:
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states)
+
+
+def _zero2_merge_frozen_params(state_dict, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
+
+ if debug:
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ state_dict[name] = frozen_param_fragments[name]
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+
+ # Reconstruction protocol:
+ #
+ # XXX: document this
+
+ if debug:
+ for i in range(world_size):
+ for j in range(len(fp32_flat_groups[0])):
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
+
+ # XXX: memory usage doubles here (zero2)
+ num_param_groups = len(fp32_flat_groups[0])
+ merged_single_partition_of_fp32_groups = []
+ for i in range(num_param_groups):
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
+ avail_numel = sum(
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
+
+ if debug:
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
+ # not asserting if there is a mismatch due to possible padding
+ print(f"Have {avail_numel} numels to process.")
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # out-of-core computing solution
+ total_numel = 0
+ total_params = 0
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
+ offset = 0
+ avail_numel = full_single_fp32_vector.numel()
+ for name, shape in shapes.items():
+
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+ total_params += 1
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
+ offset += unpartitioned_numel
+
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
+ # live optimizer object, so we are checking that the numbers are within the right range
+ align_to = 2 * world_size
+
+ def zero2_align(x):
+ return align_to * math.ceil(x / align_to)
+
+ if debug:
+ print(f"original offset={offset}, avail_numel={avail_numel}")
+
+ offset = zero2_align(offset)
+ avail_numel = zero2_align(avail_numel)
+
+ if debug:
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
+
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def zero3_partitioned_param_info(unpartitioned_numel, world_size):
+ remainder = unpartitioned_numel % world_size
+ padding_numel = (world_size - remainder) if remainder else 0
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
+ return partitioned_numel, padding_numel
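+# Worked example of the partitioning arithmetic above (illustrative note, not
+# part of the upstream DeepSpeed script): for unpartitioned_numel=10 and
+# world_size=4, remainder=2, so each rank holds ceil(10/4)=3 elements and the
+# flat partitions carry 4-2=2 padding elements, i.e. the call returns (3, 2).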
+
+
+def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ if debug:
+ for i in range(world_size):
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
+
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+ avail_numel = fp32_flat_groups[0].numel() * world_size
+ # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
+ # param, re-consolidating each param, while dealing with padding if any
+
+ # merge list of dicts, preserving order
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
+
+ if debug:
+ for i in range(world_size):
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
+
+ wanted_params = len(param_shapes)
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
+ # not asserting if there is a mismatch due to possible padding
+ avail_numel = fp32_flat_groups[0].numel() * world_size
+ print(f"Trainable params: Have {avail_numel} numels to process.")
+ print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # out-of-core computing solution
+ offset = 0
+ total_numel = 0
+ total_params = 0
+ for name, shape in param_shapes.items():
+
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+ total_params += 1
+
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ # XXX: memory usage doubles here
+ state_dict[name] = torch.cat(
+ tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
+ 0).narrow(0, 0, unpartitioned_numel).view(shape)
+ offset += partitioned_numel
+
+ offset *= world_size
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
+
+ _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
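+ # each ``pair`` is (target_name, source_name): the target entry re-uses the tensor already reconstructed under the source name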
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+ ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+ via a model hub.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+ Returns:
+ - pytorch ``state_dict``
+
+ Note: this approach may not work if your application doesn't have sufficient free CPU memory, in
+ which case you may need to use the offline approach via the ``zero_to_fp32.py`` script that is saved
+ with the checkpoint.
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+ # do the training and checkpoint saving
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
+ model = model.cpu() # move to cpu
+ model.load_state_dict(state_dict)
+ # submit to model hub or save the model to share with others
+
+ In this example, the ``model`` will no longer be usable in the deepspeed context of the same
+ application, i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+ """
+ if tag is None:
+ latest_path = os.path.join(checkpoint_dir, 'latest')
+ if os.path.isfile(latest_path):
+ with open(latest_path, 'r') as fd:
+ tag = fd.read().strip()
+ else:
+ raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+ ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+ if not os.path.isdir(ds_checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+ return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir)
+
+
+def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+ loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
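+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict
+ # consolidate a sharded ZeRO checkpoint into a single fp32 ``pytorch_model.bin``
+ convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, "pytorch_model.bin")
+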
+ """
+
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+ print(f"Saving fp32 state dict to {output_file}")
+ torch.save(state_dict, output_file)
+
+
+def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+ """
+ 1. Put the provided model to cpu
+ 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+ 3. Load it into the provided model
+
+ Args:
+ - ``model``: the model object to update
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+ Returns:
+ - ``model``: modified model
+
+ Make sure you have plenty of CPU memory available before you call this function. If you don't
+ have enough, use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
+ conveniently placed for you in the checkpoint folder.
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+ model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+ # submit to model hub or save the model to share with others
+
+ Note that once this has been run, the ``model`` will no longer be usable in the deepspeed context
+ of the same application, i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ """
+ logger.info(f"Extracting fp32 weights")
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+ logger.info(f"Overwriting model with fp32 weights")
+ model = model.cpu()
+ model.load_state_dict(state_dict, strict=False)
+
+ return model
+
+
+if __name__ == "__main__":
+
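+ # example invocation (paths are illustrative): python zero_to_fp32.py path/checkpoint-12 path/checkpoint-12/pytorch_model.bin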
+ parser = argparse.ArgumentParser()
+ parser.add_argument("checkpoint_dir",
+ type=str,
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+ parser.add_argument(
+ "output_file",
+ type=str,
+ help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
+ parser.add_argument("-t",
+ "--tag",
+ type=str,
+ default=None,
+ help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+ args = parser.parse_args()
+
+ debug = args.debug
+
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file, tag=args.tag)
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-1000/config.json b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-1000/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..b6d238b44bac3c6fa409ed9a9075907037571058
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-1000/config.json
@@ -0,0 +1,74 @@
+{
+ "_name_or_path": "../pretrained-models/llava-v1.6-vicuna-13b",
+ "architectures": [
+ "LlavaLlamaForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "freeze_mm_mlp_adapter": false,
+ "freeze_mm_vision_resampler": false,
+ "hidden_act": "silu",
+ "hidden_size": 5120,
+ "image_aspect_ratio": "pad",
+ "image_crop_resolution": 224,
+ "image_grid_pinpoints": [
+ [
+ 336,
+ 672
+ ],
+ [
+ 672,
+ 336
+ ],
+ [
+ 672,
+ 672
+ ],
+ [
+ 1008,
+ 336
+ ],
+ [
+ 336,
+ 1008
+ ]
+ ],
+ "image_split_resolution": 224,
+ "initializer_range": 0.02,
+ "intermediate_size": 13824,
+ "max_length": 4096,
+ "max_position_embeddings": 4096,
+ "mm_hidden_size": 1024,
+ "mm_patch_merge_type": "flat",
+ "mm_projector_lr": null,
+ "mm_projector_type": "mlp2x_gelu",
+ "mm_resampler_type": null,
+ "mm_use_im_patch_token": false,
+ "mm_use_im_start_end": false,
+ "mm_vision_select_feature": "patch",
+ "mm_vision_select_layer": -2,
+ "mm_vision_tower": "openai/clip-vit-large-patch14-336",
+ "mm_vision_tower_lr": 2e-06,
+ "model_type": "llava_llama",
+ "num_attention_heads": 40,
+ "num_hidden_layers": 40,
+ "num_key_value_heads": 40,
+ "pad_token_id": 0,
+ "pretraining_tp": 1,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 10000.0,
+ "tie_word_embeddings": false,
+ "tokenizer_model_max_length": 2048,
+ "tokenizer_padding_side": "right",
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.37.2",
+ "tune_mm_mlp_adapter": false,
+ "tune_mm_vision_resampler": false,
+ "unfreeze_mm_vision_tower": true,
+ "use_cache": false,
+ "use_mm_proj": true,
+ "vocab_size": 32000
+}
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/config.json b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..b6d238b44bac3c6fa409ed9a9075907037571058
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/config.json
@@ -0,0 +1,74 @@
+{
+ "_name_or_path": "../pretrained-models/llava-v1.6-vicuna-13b",
+ "architectures": [
+ "LlavaLlamaForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "freeze_mm_mlp_adapter": false,
+ "freeze_mm_vision_resampler": false,
+ "hidden_act": "silu",
+ "hidden_size": 5120,
+ "image_aspect_ratio": "pad",
+ "image_crop_resolution": 224,
+ "image_grid_pinpoints": [
+ [
+ 336,
+ 672
+ ],
+ [
+ 672,
+ 336
+ ],
+ [
+ 672,
+ 672
+ ],
+ [
+ 1008,
+ 336
+ ],
+ [
+ 336,
+ 1008
+ ]
+ ],
+ "image_split_resolution": 224,
+ "initializer_range": 0.02,
+ "intermediate_size": 13824,
+ "max_length": 4096,
+ "max_position_embeddings": 4096,
+ "mm_hidden_size": 1024,
+ "mm_patch_merge_type": "flat",
+ "mm_projector_lr": null,
+ "mm_projector_type": "mlp2x_gelu",
+ "mm_resampler_type": null,
+ "mm_use_im_patch_token": false,
+ "mm_use_im_start_end": false,
+ "mm_vision_select_feature": "patch",
+ "mm_vision_select_layer": -2,
+ "mm_vision_tower": "openai/clip-vit-large-patch14-336",
+ "mm_vision_tower_lr": 2e-06,
+ "model_type": "llava_llama",
+ "num_attention_heads": 40,
+ "num_hidden_layers": 40,
+ "num_key_value_heads": 40,
+ "pad_token_id": 0,
+ "pretraining_tp": 1,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 10000.0,
+ "tie_word_embeddings": false,
+ "tokenizer_model_max_length": 2048,
+ "tokenizer_padding_side": "right",
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.37.2",
+ "tune_mm_mlp_adapter": false,
+ "tune_mm_vision_resampler": false,
+ "unfreeze_mm_vision_tower": true,
+ "use_cache": false,
+ "use_mm_proj": true,
+ "vocab_size": 32000
+}
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/generation_config.json b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..70ffa4e13b28eca9f452207a778bb73c036f3d03
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/generation_config.json
@@ -0,0 +1,8 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "max_length": 4096,
+ "pad_token_id": 0,
+ "transformers_version": "4.37.2"
+}
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/latest b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/latest
new file mode 100644
index 0000000000000000000000000000000000000000..6761b575fffac7f1984044dcb6446b3a51da04c8
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/latest
@@ -0,0 +1 @@
+global_step300
\ No newline at end of file
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/model.safetensors.index.json b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..5cb0534e5af0581cc99cf491b62264df614dd647
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/model.safetensors.index.json
@@ -0,0 +1,766 @@
+{
+ "metadata": {
+ "total_size": 26701688832
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00006-of-00006.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00006.safetensors",
+ "model.image_newline": "model-00001-of-00006.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00006.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00006.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00006.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00006.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00006.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00006.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.28.input_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.28.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.28.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.28.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.28.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.28.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.28.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.28.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.28.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.29.input_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.29.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.29.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.29.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.29.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.29.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.29.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.29.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.29.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00006.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00006.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.30.input_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.30.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.30.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.30.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.30.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.30.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.30.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.30.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.30.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.31.input_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.31.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.31.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.31.mlp.up_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.31.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.31.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.31.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.31.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.31.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.32.input_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.32.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.32.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.32.mlp.up_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.32.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.32.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.32.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.32.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.32.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.33.input_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.33.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.33.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.33.mlp.up_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.33.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.33.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.33.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.33.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.33.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.34.input_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.34.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.34.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.34.mlp.up_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.34.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.34.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.34.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.34.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.34.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.35.input_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.35.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.35.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.35.mlp.up_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.35.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.35.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.35.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.35.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.35.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.36.input_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.36.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.36.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.36.mlp.up_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.36.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.36.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.36.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.36.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.36.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.37.input_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.37.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.37.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.37.mlp.up_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.37.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.37.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.37.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.37.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.37.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.38.input_layernorm.weight": "model-00006-of-00006.safetensors",
+ "model.layers.38.mlp.down_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.38.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.38.mlp.up_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.38.post_attention_layernorm.weight": "model-00006-of-00006.safetensors",
+ "model.layers.38.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.38.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.38.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.38.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.39.input_layernorm.weight": "model-00006-of-00006.safetensors",
+ "model.layers.39.mlp.down_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.39.mlp.gate_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.39.mlp.up_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.39.post_attention_layernorm.weight": "model-00006-of-00006.safetensors",
+ "model.layers.39.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.39.self_attn.o_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.39.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.39.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00006.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00006.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00006.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00006.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00006.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00006.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
+ "model.mm_projector.0.bias": "model-00006-of-00006.safetensors",
+ "model.mm_projector.0.weight": "model-00006-of-00006.safetensors",
+ "model.mm_projector.2.bias": "model-00006-of-00006.safetensors",
+ "model.mm_projector.2.weight": "model-00006-of-00006.safetensors",
+ "model.norm.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.embeddings.class_embedding": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.embeddings.patch_embedding.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.embeddings.position_embedding.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc1.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc1.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc2.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc2.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.post_layernorm.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.post_layernorm.weight": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.pre_layrnorm.bias": "model-00006-of-00006.safetensors",
+ "model.vision_tower.vision_tower.vision_model.pre_layrnorm.weight": "model-00006-of-00006.safetensors"
+ }
+}
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/rng_state_0.pth b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..b6473612e41c5cfd6973c2e71fa5f3ad2b2bcad1
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:575119a228f98110923ffa2dedcb50e3317251b26054355d015e0b2240d566f2
+size 15984
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/rng_state_1.pth b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..8506e00431b6ac7067699c0ea4f59adb6fa0ba20
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0728b56dab7abb5ef8a0d4bae3519c5767c97467bdd886d26bf19cc8599d0312
+size 15984
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/rng_state_2.pth b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..ea499e285c97cca07fedd34662c3d4ab44ff6f47
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f4e481d4ef1546694da7337f6bb6c658b866dcb79b85deeb477da0d27ebe851e
+size 15984
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/rng_state_3.pth b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..aeb38f92f106ac3f08bae4f82179a8a12243bccb
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:353c60be37ea56fc992fca446598ceca5d1fd002aa3bd6dbb9ad740e6f47ebb3
+size 15984
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/rng_state_4.pth b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/rng_state_4.pth
new file mode 100644
index 0000000000000000000000000000000000000000..9d5856cb7a3f15092fa5593507022316916f648e
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/rng_state_4.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e9107fe964ba7205e354084b85210e5a5ea1c98cfd4d38adb9cd3926945dcae4
+size 15984
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/rng_state_5.pth b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/rng_state_5.pth
new file mode 100644
index 0000000000000000000000000000000000000000..b824ee24d256695aad4a69a62d8e7125f51a17f2
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/rng_state_5.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:69d1bb1abee38b92e53f3f23549b642ce0f1edcdccf7b6129847ac61636e96d5
+size 15984
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/rng_state_6.pth b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/rng_state_6.pth
new file mode 100644
index 0000000000000000000000000000000000000000..a9fd0364bb8f1a8e91eca45be5e1b6672b4d9afd
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/rng_state_6.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:afd5516048e20f36959601574e29e40106085a7d3cdc7bf425ce5e84633490e6
+size 15984
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/rng_state_7.pth b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/rng_state_7.pth
new file mode 100644
index 0000000000000000000000000000000000000000..4e80125fd18efcb1097384319888b699f4dce7e7
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/rng_state_7.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8e2c46927fc06939b4c976a01e4b95dec1f8b98ceaea86d31a5d756fc30ff006
+size 15984
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/scheduler.pt b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..07c142e4fa627f224d9f4f0e1b661f274a96532f
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d8e0c47defe64b52adb69462bbb40710426836b1ff0a9bd9ee95694e9751adbc
+size 1064
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/special_tokens_map.json b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..14761dcf1466dc232bd41de9c21d4c617b15755e
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/tokenizer.model b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/tokenizer_config.json b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..2d53c0f8edb049fa98763ee75652fafa68bf7f42
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/tokenizer_config.json
@@ -0,0 +1,42 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "legacy": false,
+ "model_max_length": 2048,
+ "pad_token": "",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "",
+ "use_default_system_prompt": false
+}
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/trainer_state.json b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..640719cf80aeeb80fa024e6e806a0ce615f5e248
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/trainer_state.json
@@ -0,0 +1,1821 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.5769230769230769,
+ "eval_steps": 500,
+ "global_step": 300,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.0,
+ "learning_rate": 6.25e-07,
+ "loss": 3.1205,
+ "step": 1
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 1.25e-06,
+ "loss": 3.2041,
+ "step": 2
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 1.8750000000000003e-06,
+ "loss": 3.0927,
+ "step": 3
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 2.5e-06,
+ "loss": 3.1433,
+ "step": 4
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 3.125e-06,
+ "loss": 3.036,
+ "step": 5
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 3.7500000000000005e-06,
+ "loss": 2.4568,
+ "step": 6
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 4.3750000000000005e-06,
+ "loss": 1.2898,
+ "step": 7
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 5e-06,
+ "loss": 0.97,
+ "step": 8
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 5.625e-06,
+ "loss": 0.6386,
+ "step": 9
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 6.25e-06,
+ "loss": 0.4549,
+ "step": 10
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 6.875e-06,
+ "loss": 0.4718,
+ "step": 11
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 7.500000000000001e-06,
+ "loss": 0.4106,
+ "step": 12
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 8.125000000000001e-06,
+ "loss": 0.3442,
+ "step": 13
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 8.750000000000001e-06,
+ "loss": 0.3178,
+ "step": 14
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 9.375000000000001e-06,
+ "loss": 0.2579,
+ "step": 15
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 1e-05,
+ "loss": 0.3623,
+ "step": 16
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 1.0625e-05,
+ "loss": 0.3338,
+ "step": 17
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 1.125e-05,
+ "loss": 0.3265,
+ "step": 18
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.1875e-05,
+ "loss": 0.3804,
+ "step": 19
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.25e-05,
+ "loss": 0.2543,
+ "step": 20
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.3125e-05,
+ "loss": 0.3619,
+ "step": 21
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.375e-05,
+ "loss": 0.3095,
+ "step": 22
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.4375e-05,
+ "loss": 0.2835,
+ "step": 23
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.5000000000000002e-05,
+ "loss": 0.2666,
+ "step": 24
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.5625e-05,
+ "loss": 0.3076,
+ "step": 25
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.6250000000000002e-05,
+ "loss": 0.2789,
+ "step": 26
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.6875e-05,
+ "loss": 0.3008,
+ "step": 27
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.7500000000000002e-05,
+ "loss": 0.2998,
+ "step": 28
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.8125e-05,
+ "loss": 0.272,
+ "step": 29
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.8750000000000002e-05,
+ "loss": 0.371,
+ "step": 30
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.9375e-05,
+ "loss": 0.3543,
+ "step": 31
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 2e-05,
+ "loss": 0.2793,
+ "step": 32
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.9999951432210905e-05,
+ "loss": 0.278,
+ "step": 33
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.9999805729315383e-05,
+ "loss": 0.3078,
+ "step": 34
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.999956289272873e-05,
+ "loss": 0.3137,
+ "step": 35
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.999922292480975e-05,
+ "loss": 0.2804,
+ "step": 36
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.9998785828860744e-05,
+ "loss": 0.2322,
+ "step": 37
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.9998251609127465e-05,
+ "loss": 0.2754,
+ "step": 38
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.999762027079909e-05,
+ "loss": 0.3334,
+ "step": 39
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9996891820008165e-05,
+ "loss": 0.3721,
+ "step": 40
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9996066263830533e-05,
+ "loss": 0.3324,
+ "step": 41
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9995143610285275e-05,
+ "loss": 0.2972,
+ "step": 42
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9994123868334655e-05,
+ "loss": 0.3039,
+ "step": 43
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9993007047883988e-05,
+ "loss": 0.2436,
+ "step": 44
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.999179315978157e-05,
+ "loss": 0.3353,
+ "step": 45
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.999048221581858e-05,
+ "loss": 0.2819,
+ "step": 46
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.9989074228728942e-05,
+ "loss": 0.2769,
+ "step": 47
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.9987569212189224e-05,
+ "loss": 0.253,
+ "step": 48
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.9985967180818493e-05,
+ "loss": 0.2614,
+ "step": 49
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.998426815017817e-05,
+ "loss": 0.3297,
+ "step": 50
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.998247213677188e-05,
+ "loss": 0.3112,
+ "step": 51
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.9980579158045322e-05,
+ "loss": 0.2611,
+ "step": 52
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.9978589232386036e-05,
+ "loss": 0.3002,
+ "step": 53
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.997650237912329e-05,
+ "loss": 0.2485,
+ "step": 54
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.997431861852785e-05,
+ "loss": 0.287,
+ "step": 55
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.9972037971811802e-05,
+ "loss": 0.331,
+ "step": 56
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.996966046112834e-05,
+ "loss": 0.2785,
+ "step": 57
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.996718610957155e-05,
+ "loss": 0.3088,
+ "step": 58
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.9964614941176194e-05,
+ "loss": 0.2558,
+ "step": 59
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.9961946980917457e-05,
+ "loss": 0.3732,
+ "step": 60
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.995918225471073e-05,
+ "loss": 0.2935,
+ "step": 61
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.9956320789411338e-05,
+ "loss": 0.2946,
+ "step": 62
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.9953362612814294e-05,
+ "loss": 0.2806,
+ "step": 63
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.9950307753654016e-05,
+ "loss": 0.2934,
+ "step": 64
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.994715624160405e-05,
+ "loss": 0.3366,
+ "step": 65
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.99439081072768e-05,
+ "loss": 0.2754,
+ "step": 66
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.9940563382223196e-05,
+ "loss": 0.3244,
+ "step": 67
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.9937122098932428e-05,
+ "loss": 0.3061,
+ "step": 68
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.9933584290831593e-05,
+ "loss": 0.4533,
+ "step": 69
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.9929949992285397e-05,
+ "loss": 1.2644,
+ "step": 70
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.992621923859581e-05,
+ "loss": 0.3184,
+ "step": 71
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.9922392066001724e-05,
+ "loss": 0.2933,
+ "step": 72
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.99184685116786e-05,
+ "loss": 0.3077,
+ "step": 73
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.9914448613738107e-05,
+ "loss": 0.3038,
+ "step": 74
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.991033241122776e-05,
+ "loss": 0.3063,
+ "step": 75
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9906119944130527e-05,
+ "loss": 0.311,
+ "step": 76
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9901811253364458e-05,
+ "loss": 0.3081,
+ "step": 77
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9897406380782262e-05,
+ "loss": 0.261,
+ "step": 78
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.989290536917093e-05,
+ "loss": 0.3248,
+ "step": 79
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9888308262251286e-05,
+ "loss": 0.2814,
+ "step": 80
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.988361510467761e-05,
+ "loss": 0.2683,
+ "step": 81
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.9878825942037147e-05,
+ "loss": 0.2755,
+ "step": 82
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.9873940820849714e-05,
+ "loss": 0.3489,
+ "step": 83
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.9868959788567213e-05,
+ "loss": 0.3311,
+ "step": 84
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.9863882893573188e-05,
+ "loss": 0.3237,
+ "step": 85
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.985871018518236e-05,
+ "loss": 0.3344,
+ "step": 86
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.9853441713640123e-05,
+ "loss": 0.3195,
+ "step": 87
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.9848077530122083e-05,
+ "loss": 0.3099,
+ "step": 88
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.9842617686733546e-05,
+ "loss": 0.271,
+ "step": 89
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.9837062236509013e-05,
+ "loss": 0.2612,
+ "step": 90
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.983141123341168e-05,
+ "loss": 0.3401,
+ "step": 91
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.9825664732332886e-05,
+ "loss": 0.2479,
+ "step": 92
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.9819822789091597e-05,
+ "loss": 0.2462,
+ "step": 93
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.981388546043388e-05,
+ "loss": 0.3064,
+ "step": 94
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.9807852804032306e-05,
+ "loss": 0.3066,
+ "step": 95
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.9801724878485438e-05,
+ "loss": 0.3402,
+ "step": 96
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.979550174331724e-05,
+ "loss": 0.2497,
+ "step": 97
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.9789183458976485e-05,
+ "loss": 0.2513,
+ "step": 98
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.97827700868362e-05,
+ "loss": 0.3157,
+ "step": 99
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.977626168919305e-05,
+ "loss": 0.2776,
+ "step": 100
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.9769658329266718e-05,
+ "loss": 0.3394,
+ "step": 101
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9762960071199334e-05,
+ "loss": 0.2657,
+ "step": 102
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9756166980054812e-05,
+ "loss": 0.2507,
+ "step": 103
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9749279121818235e-05,
+ "loss": 0.3201,
+ "step": 104
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9742296563395218e-05,
+ "loss": 0.2509,
+ "step": 105
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9735219372611232e-05,
+ "loss": 0.3257,
+ "step": 106
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.9728047618210995e-05,
+ "loss": 0.3234,
+ "step": 107
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.9720781369857747e-05,
+ "loss": 0.3496,
+ "step": 108
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.9713420698132614e-05,
+ "loss": 0.2802,
+ "step": 109
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.970596567453391e-05,
+ "loss": 0.2662,
+ "step": 110
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.9698416371476434e-05,
+ "loss": 0.2971,
+ "step": 111
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.969077286229078e-05,
+ "loss": 0.271,
+ "step": 112
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.9683035221222617e-05,
+ "loss": 0.3354,
+ "step": 113
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.9675203523431964e-05,
+ "loss": 0.3134,
+ "step": 114
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.9667277844992476e-05,
+ "loss": 0.2734,
+ "step": 115
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.9659258262890683e-05,
+ "loss": 0.273,
+ "step": 116
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.9651144855025265e-05,
+ "loss": 0.2422,
+ "step": 117
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.964293770020628e-05,
+ "loss": 0.3048,
+ "step": 118
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.9634636878154393e-05,
+ "loss": 0.3284,
+ "step": 119
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.962624246950012e-05,
+ "loss": 0.289,
+ "step": 120
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.9617754555783045e-05,
+ "loss": 0.2867,
+ "step": 121
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.9609173219450998e-05,
+ "loss": 0.2646,
+ "step": 122
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.960049854385929e-05,
+ "loss": 0.3302,
+ "step": 123
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.9591730613269878e-05,
+ "loss": 0.3099,
+ "step": 124
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.9582869512850576e-05,
+ "loss": 0.2953,
+ "step": 125
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.957391532867418e-05,
+ "loss": 0.3058,
+ "step": 126
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.956486814771769e-05,
+ "loss": 0.2787,
+ "step": 127
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.955572805786141e-05,
+ "loss": 0.2357,
+ "step": 128
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.9546495147888134e-05,
+ "loss": 0.3695,
+ "step": 129
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.953716950748227e-05,
+ "loss": 0.2825,
+ "step": 130
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.9527751227228964e-05,
+ "loss": 0.279,
+ "step": 131
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.9518240398613226e-05,
+ "loss": 0.2977,
+ "step": 132
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.9508637114019037e-05,
+ "loss": 0.3032,
+ "step": 133
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.9498941466728462e-05,
+ "loss": 0.2568,
+ "step": 134
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.9489153550920726e-05,
+ "loss": 0.3435,
+ "step": 135
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.947927346167132e-05,
+ "loss": 0.2819,
+ "step": 136
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.946930129495106e-05,
+ "loss": 0.2415,
+ "step": 137
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.945923714762516e-05,
+ "loss": 0.3235,
+ "step": 138
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.9449081117452304e-05,
+ "loss": 0.2474,
+ "step": 139
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.9438833303083677e-05,
+ "loss": 0.2894,
+ "step": 140
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.9428493804062013e-05,
+ "loss": 0.3165,
+ "step": 141
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.9418062720820636e-05,
+ "loss": 0.2488,
+ "step": 142
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9407540154682473e-05,
+ "loss": 0.2624,
+ "step": 143
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9396926207859085e-05,
+ "loss": 0.3221,
+ "step": 144
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9386220983449652e-05,
+ "loss": 0.2695,
+ "step": 145
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9375424585439994e-05,
+ "loss": 0.2303,
+ "step": 146
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9364537118701542e-05,
+ "loss": 0.2722,
+ "step": 147
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.935355868899034e-05,
+ "loss": 0.3543,
+ "step": 148
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.9342489402945997e-05,
+ "loss": 0.2498,
+ "step": 149
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.9331329368090664e-05,
+ "loss": 0.2593,
+ "step": 150
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.932007869282799e-05,
+ "loss": 0.3057,
+ "step": 151
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.9308737486442045e-05,
+ "loss": 0.2861,
+ "step": 152
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.9297305859096305e-05,
+ "loss": 0.2738,
+ "step": 153
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.9285783921832537e-05,
+ "loss": 0.2727,
+ "step": 154
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.927417178656975e-05,
+ "loss": 0.2787,
+ "step": 155
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.926246956610309e-05,
+ "loss": 0.3542,
+ "step": 156
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.9250677374102752e-05,
+ "loss": 0.2699,
+ "step": 157
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.9238795325112867e-05,
+ "loss": 0.3191,
+ "step": 158
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.9226823534550418e-05,
+ "loss": 0.3212,
+ "step": 159
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.921476211870408e-05,
+ "loss": 0.3175,
+ "step": 160
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.9202611194733107e-05,
+ "loss": 0.3035,
+ "step": 161
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.9190370880666206e-05,
+ "loss": 0.2824,
+ "step": 162
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.9178041295400383e-05,
+ "loss": 0.3072,
+ "step": 163
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.9165622558699763e-05,
+ "loss": 0.222,
+ "step": 164
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.9153114791194475e-05,
+ "loss": 0.2342,
+ "step": 165
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.9140518114379433e-05,
+ "loss": 0.2743,
+ "step": 166
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.912783265061319e-05,
+ "loss": 0.3544,
+ "step": 167
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.9115058523116734e-05,
+ "loss": 0.2701,
+ "step": 168
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.9102195855972287e-05,
+ "loss": 0.2766,
+ "step": 169
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.908924477412211e-05,
+ "loss": 0.2867,
+ "step": 170
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.9076205403367287e-05,
+ "loss": 0.2272,
+ "step": 171
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.9063077870366504e-05,
+ "loss": 0.2459,
+ "step": 172
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.90498623026348e-05,
+ "loss": 0.319,
+ "step": 173
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.903655882854237e-05,
+ "loss": 0.258,
+ "step": 174
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.9023167577313267e-05,
+ "loss": 0.307,
+ "step": 175
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.900968867902419e-05,
+ "loss": 0.3004,
+ "step": 176
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.8996122264603202e-05,
+ "loss": 0.2587,
+ "step": 177
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.898246846582844e-05,
+ "loss": 0.2956,
+ "step": 178
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.8968727415326885e-05,
+ "loss": 0.3749,
+ "step": 179
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.895489924657301e-05,
+ "loss": 0.3463,
+ "step": 180
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.894098409388754e-05,
+ "loss": 0.3514,
+ "step": 181
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.8926982092436117e-05,
+ "loss": 0.2839,
+ "step": 182
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.8912893378227984e-05,
+ "loss": 0.2168,
+ "step": 183
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.8898718088114688e-05,
+ "loss": 0.2748,
+ "step": 184
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.8884456359788725e-05,
+ "loss": 0.2885,
+ "step": 185
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.887010833178222e-05,
+ "loss": 0.2699,
+ "step": 186
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.8855674143465567e-05,
+ "loss": 0.3133,
+ "step": 187
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.8841153935046098e-05,
+ "loss": 0.2773,
+ "step": 188
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.8826547847566692e-05,
+ "loss": 0.3472,
+ "step": 189
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.8811856022904423e-05,
+ "loss": 0.3342,
+ "step": 190
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.8797078603769184e-05,
+ "loss": 0.2864,
+ "step": 191
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.8782215733702286e-05,
+ "loss": 0.3115,
+ "step": 192
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.876726755707508e-05,
+ "loss": 0.2726,
+ "step": 193
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.8752234219087538e-05,
+ "loss": 0.3033,
+ "step": 194
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8737115865766865e-05,
+ "loss": 0.2785,
+ "step": 195
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8721912643966055e-05,
+ "loss": 0.3222,
+ "step": 196
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8706624701362485e-05,
+ "loss": 0.2268,
+ "step": 197
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8691252186456465e-05,
+ "loss": 0.2259,
+ "step": 198
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8675795248569816e-05,
+ "loss": 0.2777,
+ "step": 199
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.866025403784439e-05,
+ "loss": 0.2765,
+ "step": 200
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 1.8644628705240636e-05,
+ "loss": 0.3088,
+ "step": 201
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 1.862891940253613e-05,
+ "loss": 0.2766,
+ "step": 202
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 1.8613126282324092e-05,
+ "loss": 0.2699,
+ "step": 203
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 1.8597249498011906e-05,
+ "loss": 0.2973,
+ "step": 204
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 1.858128920381963e-05,
+ "loss": 0.3202,
+ "step": 205
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 1.8565245554778516e-05,
+ "loss": 0.2645,
+ "step": 206
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 1.854911870672947e-05,
+ "loss": 0.2514,
+ "step": 207
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 1.8532908816321557e-05,
+ "loss": 0.2983,
+ "step": 208
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 1.8516616041010495e-05,
+ "loss": 0.2516,
+ "step": 209
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 1.8500240539057093e-05,
+ "loss": 0.2891,
+ "step": 210
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 1.848378246952574e-05,
+ "loss": 0.2445,
+ "step": 211
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 1.8467241992282842e-05,
+ "loss": 0.2506,
+ "step": 212
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 1.8450619267995283e-05,
+ "loss": 0.317,
+ "step": 213
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 1.843391445812886e-05,
+ "loss": 0.2946,
+ "step": 214
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 1.84171277249467e-05,
+ "loss": 0.3034,
+ "step": 215
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.8400259231507716e-05,
+ "loss": 0.3061,
+ "step": 216
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.8383309141664992e-05,
+ "loss": 0.2333,
+ "step": 217
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.83662776200642e-05,
+ "loss": 0.318,
+ "step": 218
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.8349164832142015e-05,
+ "loss": 0.2176,
+ "step": 219
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.833197094412449e-05,
+ "loss": 0.2698,
+ "step": 220
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.8314696123025456e-05,
+ "loss": 0.2831,
+ "step": 221
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 1.8297340536644877e-05,
+ "loss": 0.2773,
+ "step": 222
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 1.827990435356725e-05,
+ "loss": 0.2892,
+ "step": 223
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 1.826238774315995e-05,
+ "loss": 0.3196,
+ "step": 224
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 1.8244790875571582e-05,
+ "loss": 0.2989,
+ "step": 225
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 1.8227113921730336e-05,
+ "loss": 0.2624,
+ "step": 226
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 1.8209357053342325e-05,
+ "loss": 0.2856,
+ "step": 227
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 1.819152044288992e-05,
+ "loss": 0.3125,
+ "step": 228
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 1.8173604263630066e-05,
+ "loss": 0.2491,
+ "step": 229
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 1.8155608689592604e-05,
+ "loss": 0.2719,
+ "step": 230
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 1.8137533895578585e-05,
+ "loss": 0.2805,
+ "step": 231
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 1.811938005715857e-05,
+ "loss": 0.2564,
+ "step": 232
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 1.8101147350670905e-05,
+ "loss": 0.2422,
+ "step": 233
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 1.8082835953220055e-05,
+ "loss": 0.259,
+ "step": 234
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 1.806444604267483e-05,
+ "loss": 0.2272,
+ "step": 235
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 1.8045977797666685e-05,
+ "loss": 0.266,
+ "step": 236
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 1.8027431397587993e-05,
+ "loss": 0.2909,
+ "step": 237
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 1.8008807022590283e-05,
+ "loss": 0.2731,
+ "step": 238
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 1.7990104853582494e-05,
+ "loss": 0.2517,
+ "step": 239
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 1.7971325072229227e-05,
+ "loss": 0.2572,
+ "step": 240
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 1.7952467860948975e-05,
+ "loss": 0.2328,
+ "step": 241
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.7933533402912354e-05,
+ "loss": 0.241,
+ "step": 242
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.791452188204031e-05,
+ "loss": 0.3629,
+ "step": 243
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.7895433483002356e-05,
+ "loss": 0.3116,
+ "step": 244
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.7876268391214756e-05,
+ "loss": 0.2936,
+ "step": 245
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.785702679283874e-05,
+ "loss": 0.2804,
+ "step": 246
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.7837708874778683e-05,
+ "loss": 0.2711,
+ "step": 247
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 1.78183148246803e-05,
+ "loss": 0.3023,
+ "step": 248
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 1.7798844830928818e-05,
+ "loss": 0.251,
+ "step": 249
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 1.777929908264715e-05,
+ "loss": 0.2831,
+ "step": 250
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 1.775967776969405e-05,
+ "loss": 0.2369,
+ "step": 251
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 1.7739981082662275e-05,
+ "loss": 0.2613,
+ "step": 252
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 1.772020921287674e-05,
+ "loss": 0.2944,
+ "step": 253
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 1.7700362352392632e-05,
+ "loss": 0.2577,
+ "step": 254
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 1.7680440693993586e-05,
+ "loss": 0.277,
+ "step": 255
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 1.766044443118978e-05,
+ "loss": 0.256,
+ "step": 256
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 1.7640373758216075e-05,
+ "loss": 0.3175,
+ "step": 257
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 1.762022887003011e-05,
+ "loss": 0.3031,
+ "step": 258
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 1.7600009962310417e-05,
+ "loss": 0.262,
+ "step": 259
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 1.757971723145453e-05,
+ "loss": 0.3123,
+ "step": 260
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 1.7559350874577066e-05,
+ "loss": 0.2985,
+ "step": 261
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 1.75389110895078e-05,
+ "loss": 0.2605,
+ "step": 262
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 1.7518398074789776e-05,
+ "loss": 0.3238,
+ "step": 263
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 1.7497812029677344e-05,
+ "loss": 0.3072,
+ "step": 264
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 1.7477153154134244e-05,
+ "loss": 0.2944,
+ "step": 265
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 1.7456421648831658e-05,
+ "loss": 0.2557,
+ "step": 266
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 1.743561771514626e-05,
+ "loss": 0.2991,
+ "step": 267
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 1.741474155515827e-05,
+ "loss": 0.3116,
+ "step": 268
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 1.739379337164946e-05,
+ "loss": 0.2911,
+ "step": 269
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 1.737277336810124e-05,
+ "loss": 0.3144,
+ "step": 270
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 1.7351681748692622e-05,
+ "loss": 0.3243,
+ "step": 271
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 1.7330518718298263e-05,
+ "loss": 0.2743,
+ "step": 272
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7309284482486494e-05,
+ "loss": 0.2532,
+ "step": 273
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7287979247517285e-05,
+ "loss": 0.2777,
+ "step": 274
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7266603220340273e-05,
+ "loss": 0.3015,
+ "step": 275
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7245156608592727e-05,
+ "loss": 0.3001,
+ "step": 276
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7223639620597556e-05,
+ "loss": 0.3408,
+ "step": 277
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7202052465361268e-05,
+ "loss": 0.2568,
+ "step": 278
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 1.718039535257194e-05,
+ "loss": 0.3001,
+ "step": 279
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 1.7158668492597186e-05,
+ "loss": 0.231,
+ "step": 280
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 1.7136872096482123e-05,
+ "loss": 0.2371,
+ "step": 281
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 1.7115006375947304e-05,
+ "loss": 0.3373,
+ "step": 282
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 1.7093071543386667e-05,
+ "loss": 0.3651,
+ "step": 283
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 1.7071067811865477e-05,
+ "loss": 0.307,
+ "step": 284
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 1.7048995395118253e-05,
+ "loss": 0.3449,
+ "step": 285
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 1.7026854507546694e-05,
+ "loss": 0.3196,
+ "step": 286
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 1.7004645364217584e-05,
+ "loss": 0.272,
+ "step": 287
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 1.698236818086073e-05,
+ "loss": 0.2899,
+ "step": 288
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 1.6960023173866834e-05,
+ "loss": 0.2488,
+ "step": 289
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 1.693761056028542e-05,
+ "loss": 0.2224,
+ "step": 290
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 1.6915130557822698e-05,
+ "loss": 0.3136,
+ "step": 291
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 1.689258338483947e-05,
+ "loss": 0.2671,
+ "step": 292
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 1.686996926034902e-05,
+ "loss": 0.2923,
+ "step": 293
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.6847288404014937e-05,
+ "loss": 0.3313,
+ "step": 294
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.682454103614904e-05,
+ "loss": 0.2638,
+ "step": 295
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.6801727377709195e-05,
+ "loss": 0.3061,
+ "step": 296
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.67788476502972e-05,
+ "loss": 0.2966,
+ "step": 297
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.6755902076156606e-05,
+ "loss": 0.251,
+ "step": 298
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.6732890878170573e-05,
+ "loss": 0.2769,
+ "step": 299
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 1.67098142798597e-05,
+ "loss": 0.2796,
+ "step": 300
+ }
+ ],
+ "logging_steps": 1.0,
+ "max_steps": 1040,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 2,
+ "save_steps": 100,
+ "total_flos": 15645950115840.0,
+ "train_batch_size": 16,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/training_args.bin b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..1db02bc253be0ccb38137b90117e8cf432939218
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:573a026c745bf25ae971945f26077f5567afd7eec66a80e3591209bf82e13ca8
+size 6712
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/zero_to_fp32.py b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/zero_to_fp32.py
new file mode 100644
index 0000000000000000000000000000000000000000..c98caae31534368be22b67fc4ae906836c992a8d
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-300/zero_to_fp32.py
@@ -0,0 +1,587 @@
+#!/usr/bin/env python
+
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+# This script extracts fp32 consolidated weights from ZeRO stage 1, 2 and 3 DeepSpeed checkpoints. It gets
+# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+# application.
+#
+# example: python zero_to_fp32.py . pytorch_model.bin
+
+import argparse
+import torch
+import glob
+import math
+import os
+import re
+from collections import OrderedDict
+from dataclasses import dataclass
+
+# While this script doesn't use DeepSpeed to recover data, the checkpoints are pickled with
+# DeepSpeed data structures, so DeepSpeed has to be available in the current Python environment.
+from deepspeed.utils import logger
+from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
+@dataclass
+class zero_model_state:
+ buffers: dict()
+ param_shapes: dict()
+ shared_params: list
+ ds_version: int
+ frozen_param_shapes: dict()
+ frozen_param_fragments: dict()
+
+
+debug = 0
+
+# load to cpu
+device = torch.device('cpu')
+
+
+def atoi(text):
+ return int(text) if text.isdigit() else text
+
+
+def natural_keys(text):
+ '''
+ alist.sort(key=natural_keys) sorts in human order
+ http://nedbatchelder.com/blog/200712/human_sorting.html
+ (See Toothy's implementation in the comments)
+ '''
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
+
+
+def get_model_state_file(checkpoint_dir, zero_stage):
+ if not os.path.isdir(checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
+
+ # there should be only one file
+ if zero_stage <= 2:
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
+ elif zero_stage == 3:
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
+
+ if not os.path.exists(file):
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
+
+ return file
+
+
+def get_checkpoint_files(checkpoint_dir, glob_pattern):
+ # XXX: need to test that this simple glob rule works for multi-node setup too
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
+
+ if len(ckpt_files) == 0:
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
+
+ return ckpt_files
+
+
+def get_optim_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
+
+
+def get_model_state_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
+
+
+def parse_model_states(files):
+ zero_model_states = []
+ for file in files:
+ state_dict = torch.load(file, map_location=device)
+
+ if BUFFER_NAMES not in state_dict:
+ raise ValueError(f"{file} is not a model state checkpoint")
+ buffer_names = state_dict[BUFFER_NAMES]
+ if debug:
+ print("Found buffers:", buffer_names)
+
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
+ param_shapes = state_dict[PARAM_SHAPES]
+
+ # collect parameters that are included in param_shapes
+ param_names = []
+ for s in param_shapes:
+ for name in s.keys():
+ param_names.append(name)
+
+ # update with frozen parameters
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
+ if frozen_param_shapes is not None:
+ if debug:
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
+ param_names += list(frozen_param_shapes.keys())
+
+ # handle shared params
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
+
+ ds_version = state_dict.get(DS_VERSION, None)
+
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
+
+ z_model_state = zero_model_state(buffers=buffers,
+ param_shapes=param_shapes,
+ shared_params=shared_params,
+ ds_version=ds_version,
+ frozen_param_shapes=frozen_param_shapes,
+ frozen_param_fragments=frozen_param_fragments)
+ zero_model_states.append(z_model_state)
+
+ return zero_model_states
+
+
+def parse_optim_states(files, ds_checkpoint_dir):
+
+ total_files = len(files)
+ state_dicts = []
+ for f in files:
+ state_dict = torch.load(f, map_location=device)
+        # immediately discard the potentially huge optimizer states, as we only care about the fp32 master weights,
+        # and also handle the case where they were already removed by another helper script
+ state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
+ state_dicts.append(state_dict)
+
+    if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
+
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
+ # use the max of the partition_count to get the dp world_size.
+
+ if type(world_size) is list:
+ world_size = max(world_size)
+
+ if world_size != total_files:
+ raise ValueError(
+            f"Expected {world_size} '*_optim_states.pt' files under '{ds_checkpoint_dir}' but found {total_files} files. "
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
+ )
+
+ # the groups are named differently in each stage
+ if zero_stage <= 2:
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
+ elif zero_stage == 3:
+ fp32_groups_key = FP32_FLAT_GROUPS
+ else:
+ raise ValueError(f"unknown zero stage {zero_stage}")
+
+ if zero_stage <= 2:
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
+ elif zero_stage == 3:
+ # if there is more than one param group, there will be multiple flattened tensors - one
+ # flattened tensor per group - for simplicity merge them into a single tensor
+ #
+ # XXX: could make the script more memory efficient for when there are multiple groups - it
+ # will require matching the sub-lists of param_shapes for each param group flattened tensor
+
+ fp32_flat_groups = [
+ torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
+ ]
+
+ return zero_stage, world_size, fp32_flat_groups
+
+
+def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir):
+ """
+ Returns fp32 state_dict reconstructed from ds checkpoint
+
+ Args:
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
+
+ """
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
+
+ optim_files = get_optim_files(ds_checkpoint_dir)
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
+
+ model_files = get_model_state_files(ds_checkpoint_dir)
+
+ zero_model_states = parse_model_states(model_files)
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
+
+ if zero_stage <= 2:
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states)
+ elif zero_stage == 3:
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states)
+
+
+def _zero2_merge_frozen_params(state_dict, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
+
+ if debug:
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ state_dict[name] = frozen_param_fragments[name]
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+
+ # Reconstruction protocol:
+ #
+ # XXX: document this
+
+ if debug:
+ for i in range(world_size):
+ for j in range(len(fp32_flat_groups[0])):
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
+
+ # XXX: memory usage doubles here (zero2)
+ num_param_groups = len(fp32_flat_groups[0])
+ merged_single_partition_of_fp32_groups = []
+ for i in range(num_param_groups):
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
+ avail_numel = sum(
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
+
+ if debug:
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
+ # not asserting if there is a mismatch due to possible padding
+ print(f"Have {avail_numel} numels to process.")
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+    # an out-of-core computing solution
+ total_numel = 0
+ total_params = 0
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
+ offset = 0
+ avail_numel = full_single_fp32_vector.numel()
+ for name, shape in shapes.items():
+
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+ total_params += 1
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
+ offset += unpartitioned_numel
+
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
+ # live optimizer object, so we are checking that the numbers are within the right range
+ align_to = 2 * world_size
+
+ def zero2_align(x):
+ return align_to * math.ceil(x / align_to)
+
+ if debug:
+ print(f"original offset={offset}, avail_numel={avail_numel}")
+
+ offset = zero2_align(offset)
+ avail_numel = zero2_align(avail_numel)
+
+ if debug:
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
+
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def zero3_partitioned_param_info(unpartitioned_numel, world_size):
+ remainder = unpartitioned_numel % world_size
+ padding_numel = (world_size - remainder) if remainder else 0
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
+ return partitioned_numel, padding_numel
+
+
+def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ if debug:
+ for i in range(world_size):
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
+
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+ avail_numel = fp32_flat_groups[0].numel() * world_size
+ # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
+ # param, re-consolidating each param, while dealing with padding if any
+
+ # merge list of dicts, preserving order
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
+
+ if debug:
+ for i in range(world_size):
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
+
+ wanted_params = len(param_shapes)
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
+ # not asserting if there is a mismatch due to possible padding
+ avail_numel = fp32_flat_groups[0].numel() * world_size
+ print(f"Trainable params: Have {avail_numel} numels to process.")
+ print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+    # an out-of-core computing solution
+ offset = 0
+ total_numel = 0
+ total_params = 0
+ for name, shape in param_shapes.items():
+
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+ total_params += 1
+
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ # XXX: memory usage doubles here
+ state_dict[name] = torch.cat(
+ tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
+ 0).narrow(0, 0, unpartitioned_numel).view(shape)
+ offset += partitioned_numel
+
+ offset *= world_size
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
+
+ _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+ ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+ via a model hub.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder
+    - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will attempt to load the tag from the file named 'latest' in the checkpoint folder, e.g., ``global_step14``
+
+ Returns:
+ - pytorch ``state_dict``
+
+    Note: this approach may not work if your application doesn't have sufficient free CPU memory;
+    in that case you may need to use the offline approach via the ``zero_to_fp32.py`` script that is
+    saved with the checkpoint.
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+ # do the training and checkpoint saving
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
+ model = model.cpu() # move to cpu
+ model.load_state_dict(state_dict)
+ # submit to model hub or save the model to share with others
+
+    In this example the ``model`` will no longer be usable in the DeepSpeed context of the same
+    application, i.e. you will need to re-initialize the DeepSpeed engine, since
+    ``model.load_state_dict(state_dict)`` will remove all the DeepSpeed magic from it.
+
+ If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+ """
+ if tag is None:
+ latest_path = os.path.join(checkpoint_dir, 'latest')
+ if os.path.isfile(latest_path):
+ with open(latest_path, 'r') as fd:
+ tag = fd.read().strip()
+ else:
+ raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+ ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+ if not os.path.isdir(ds_checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+ return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir)
+
+
+def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+ loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+ Args:
+    - ``checkpoint_dir``: path to the desired checkpoint folder (one that contains the tag-folder, like ``global_step14``)
+ - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
+    - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
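+
+    A typical usage might be (a minimal sketch; the checkpoint and output paths are illustrative) ::
+
+        # assuming zero_to_fp32.py is importable from the current working directory
+        from zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict
+        convert_zero_checkpoint_to_fp32_state_dict("path/checkpoint-12", "path/checkpoint-12/pytorch_model.bin")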
+ """
+
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+ print(f"Saving fp32 state dict to {output_file}")
+ torch.save(state_dict, output_file)
+
+
+def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+ """
+ 1. Put the provided model to cpu
+ 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+ 3. Load it into the provided model
+
+ Args:
+ - ``model``: the model object to update
+    - ``checkpoint_dir``: path to the desired checkpoint folder (one that contains the tag-folder, like ``global_step14``)
+    - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+ Returns:
+    - ``model``: the modified model
+
+    Make sure you have plenty of CPU memory available before you call this function. If you don't
+    have enough, use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
+    conveniently placed for you in the checkpoint folder.
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+ model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+ # submit to model hub or save the model to share with others
+
+    Note that once this has run, the ``model`` will no longer be usable in the DeepSpeed context
+    of the same application, i.e. you will need to re-initialize the DeepSpeed engine, since
+    ``model.load_state_dict(state_dict)`` will remove all the DeepSpeed magic from it.
+
+ """
+    logger.info("Extracting fp32 weights")
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+    logger.info("Overwriting model with fp32 weights")
+ model = model.cpu()
+ model.load_state_dict(state_dict, strict=False)
+
+ return model
+
+
+if __name__ == "__main__":
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("checkpoint_dir",
+ type=str,
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+ parser.add_argument(
+ "output_file",
+ type=str,
+ help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
+ parser.add_argument("-t",
+ "--tag",
+ type=str,
+ default=None,
+ help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+ args = parser.parse_args()
+
+ debug = args.debug
+
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file, tag=args.tag)
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-800/generation_config.json b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-800/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..70ffa4e13b28eca9f452207a778bb73c036f3d03
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-800/generation_config.json
@@ -0,0 +1,8 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "max_length": 4096,
+ "pad_token_id": 0,
+ "transformers_version": "4.37.2"
+}
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-800/rng_state_0.pth b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-800/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..b346349ce12dd5a17d4b91ed2a5722bb52550950
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-800/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ad8a35afd8967cbb748405387e44426e43ad127028e826eddc9b67d2ca873c85
+size 15984
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-800/rng_state_1.pth b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-800/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..68f3c6994456cb8d0592a5375d99503c8924b1c4
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-800/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f338ce80d7c441076bfc8c53b84067a0181f5a14e80c13d5acb8150b659f4d73
+size 15984
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-800/rng_state_2.pth b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-800/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..be044f6ceeed587d30e80c2f72d5aa19fdc9947b
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-800/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c9fbc9fa428939be10b46779f0eb5cd833e0da426b1cbdee77b3a55b6952235b
+size 15984
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-800/rng_state_3.pth b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-800/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..fc825249656a9b858782542bd3f4386250f1dfe0
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-800/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac55dba0b79d5fa4699d239da2f966d52040d576d31234ac8d4632e6956481bc
+size 15984
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-800/rng_state_4.pth b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-800/rng_state_4.pth
new file mode 100644
index 0000000000000000000000000000000000000000..d30f52a44be563c152ae09db6ae934da6da0d3ed
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-800/rng_state_4.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af2d0c015100768ffa23faf3b6c2d54ea89eb045603e30e55cd211e06ff34972
+size 15984
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-800/rng_state_5.pth b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-800/rng_state_5.pth
new file mode 100644
index 0000000000000000000000000000000000000000..c8715d27ab23ae545d58039cf949cc44ecc1da5e
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-800/rng_state_5.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c60a1b40608e34bc801c8231f97b81c53b5290dfaed1b9cd0ccbeca29574a991
+size 15984
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-800/rng_state_6.pth b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-800/rng_state_6.pth
new file mode 100644
index 0000000000000000000000000000000000000000..1ed791b6ef76eadf0b0c55a5733411771e2ae027
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-800/rng_state_6.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3ad6a142a403eb9aafc4a3a9a856bca648fe31fd22d796867baca31fb13656aa
+size 15984
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-800/rng_state_7.pth b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-800/rng_state_7.pth
new file mode 100644
index 0000000000000000000000000000000000000000..800c3bbbc5edf7db01a8316069d439c5fb8d8c30
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-800/rng_state_7.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:38bc23a138cc800b22881742c0f3f9a71731a9a7111c6058a0077e6274d21773
+size 15984
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-800/scheduler.pt b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-800/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..522b5e783e41d1fdf39b58aa3a02b807ebc04907
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-800/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6f69074328a85426f71aa98590d9b0521e0c534d0df8d3a277a9e97971cc12fd
+size 1064
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-800/tokenizer.model b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-800/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-800/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-800/tokenizer_config.json b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-800/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..2d53c0f8edb049fa98763ee75652fafa68bf7f42
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-800/tokenizer_config.json
@@ -0,0 +1,42 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<s>",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "legacy": false,
+ "model_max_length": 2048,
+ "pad_token": "<unk>",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": false
+}
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-800/trainer_state.json b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-800/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..19bb75e1c91f6c6017d3eaa1edae838fa275c937
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-800/trainer_state.json
@@ -0,0 +1,4821 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.5384615384615383,
+ "eval_steps": 500,
+ "global_step": 800,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.0,
+ "learning_rate": 6.25e-07,
+ "loss": 3.1205,
+ "step": 1
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 1.25e-06,
+ "loss": 3.2041,
+ "step": 2
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 1.8750000000000003e-06,
+ "loss": 3.0927,
+ "step": 3
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 2.5e-06,
+ "loss": 3.1433,
+ "step": 4
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 3.125e-06,
+ "loss": 3.036,
+ "step": 5
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 3.7500000000000005e-06,
+ "loss": 2.4568,
+ "step": 6
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 4.3750000000000005e-06,
+ "loss": 1.2898,
+ "step": 7
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 5e-06,
+ "loss": 0.97,
+ "step": 8
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 5.625e-06,
+ "loss": 0.6386,
+ "step": 9
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 6.25e-06,
+ "loss": 0.4549,
+ "step": 10
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 6.875e-06,
+ "loss": 0.4718,
+ "step": 11
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 7.500000000000001e-06,
+ "loss": 0.4106,
+ "step": 12
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 8.125000000000001e-06,
+ "loss": 0.3442,
+ "step": 13
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 8.750000000000001e-06,
+ "loss": 0.3178,
+ "step": 14
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 9.375000000000001e-06,
+ "loss": 0.2579,
+ "step": 15
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 1e-05,
+ "loss": 0.3623,
+ "step": 16
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 1.0625e-05,
+ "loss": 0.3338,
+ "step": 17
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 1.125e-05,
+ "loss": 0.3265,
+ "step": 18
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.1875e-05,
+ "loss": 0.3804,
+ "step": 19
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.25e-05,
+ "loss": 0.2543,
+ "step": 20
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.3125e-05,
+ "loss": 0.3619,
+ "step": 21
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.375e-05,
+ "loss": 0.3095,
+ "step": 22
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.4375e-05,
+ "loss": 0.2835,
+ "step": 23
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.5000000000000002e-05,
+ "loss": 0.2666,
+ "step": 24
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.5625e-05,
+ "loss": 0.3076,
+ "step": 25
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.6250000000000002e-05,
+ "loss": 0.2789,
+ "step": 26
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.6875e-05,
+ "loss": 0.3008,
+ "step": 27
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.7500000000000002e-05,
+ "loss": 0.2998,
+ "step": 28
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.8125e-05,
+ "loss": 0.272,
+ "step": 29
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.8750000000000002e-05,
+ "loss": 0.371,
+ "step": 30
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.9375e-05,
+ "loss": 0.3543,
+ "step": 31
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 2e-05,
+ "loss": 0.2793,
+ "step": 32
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.9999951432210905e-05,
+ "loss": 0.278,
+ "step": 33
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.9999805729315383e-05,
+ "loss": 0.3078,
+ "step": 34
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.999956289272873e-05,
+ "loss": 0.3137,
+ "step": 35
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.999922292480975e-05,
+ "loss": 0.2804,
+ "step": 36
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.9998785828860744e-05,
+ "loss": 0.2322,
+ "step": 37
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.9998251609127465e-05,
+ "loss": 0.2754,
+ "step": 38
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.999762027079909e-05,
+ "loss": 0.3334,
+ "step": 39
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9996891820008165e-05,
+ "loss": 0.3721,
+ "step": 40
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9996066263830533e-05,
+ "loss": 0.3324,
+ "step": 41
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9995143610285275e-05,
+ "loss": 0.2972,
+ "step": 42
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9994123868334655e-05,
+ "loss": 0.3039,
+ "step": 43
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9993007047883988e-05,
+ "loss": 0.2436,
+ "step": 44
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.999179315978157e-05,
+ "loss": 0.3353,
+ "step": 45
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.999048221581858e-05,
+ "loss": 0.2819,
+ "step": 46
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.9989074228728942e-05,
+ "loss": 0.2769,
+ "step": 47
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.9987569212189224e-05,
+ "loss": 0.253,
+ "step": 48
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.9985967180818493e-05,
+ "loss": 0.2614,
+ "step": 49
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.998426815017817e-05,
+ "loss": 0.3297,
+ "step": 50
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.998247213677188e-05,
+ "loss": 0.3112,
+ "step": 51
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.9980579158045322e-05,
+ "loss": 0.2611,
+ "step": 52
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.9978589232386036e-05,
+ "loss": 0.3002,
+ "step": 53
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.997650237912329e-05,
+ "loss": 0.2485,
+ "step": 54
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.997431861852785e-05,
+ "loss": 0.287,
+ "step": 55
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.9972037971811802e-05,
+ "loss": 0.331,
+ "step": 56
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.996966046112834e-05,
+ "loss": 0.2785,
+ "step": 57
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.996718610957155e-05,
+ "loss": 0.3088,
+ "step": 58
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.9964614941176194e-05,
+ "loss": 0.2558,
+ "step": 59
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.9961946980917457e-05,
+ "loss": 0.3732,
+ "step": 60
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.995918225471073e-05,
+ "loss": 0.2935,
+ "step": 61
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.9956320789411338e-05,
+ "loss": 0.2946,
+ "step": 62
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.9953362612814294e-05,
+ "loss": 0.2806,
+ "step": 63
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.9950307753654016e-05,
+ "loss": 0.2934,
+ "step": 64
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.994715624160405e-05,
+ "loss": 0.3366,
+ "step": 65
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.99439081072768e-05,
+ "loss": 0.2754,
+ "step": 66
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.9940563382223196e-05,
+ "loss": 0.3244,
+ "step": 67
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.9937122098932428e-05,
+ "loss": 0.3061,
+ "step": 68
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.9933584290831593e-05,
+ "loss": 0.4533,
+ "step": 69
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.9929949992285397e-05,
+ "loss": 1.2644,
+ "step": 70
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.992621923859581e-05,
+ "loss": 0.3184,
+ "step": 71
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.9922392066001724e-05,
+ "loss": 0.2933,
+ "step": 72
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.99184685116786e-05,
+ "loss": 0.3077,
+ "step": 73
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.9914448613738107e-05,
+ "loss": 0.3038,
+ "step": 74
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.991033241122776e-05,
+ "loss": 0.3063,
+ "step": 75
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9906119944130527e-05,
+ "loss": 0.311,
+ "step": 76
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9901811253364458e-05,
+ "loss": 0.3081,
+ "step": 77
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9897406380782262e-05,
+ "loss": 0.261,
+ "step": 78
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.989290536917093e-05,
+ "loss": 0.3248,
+ "step": 79
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9888308262251286e-05,
+ "loss": 0.2814,
+ "step": 80
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.988361510467761e-05,
+ "loss": 0.2683,
+ "step": 81
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.9878825942037147e-05,
+ "loss": 0.2755,
+ "step": 82
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.9873940820849714e-05,
+ "loss": 0.3489,
+ "step": 83
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.9868959788567213e-05,
+ "loss": 0.3311,
+ "step": 84
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.9863882893573188e-05,
+ "loss": 0.3237,
+ "step": 85
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.985871018518236e-05,
+ "loss": 0.3344,
+ "step": 86
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.9853441713640123e-05,
+ "loss": 0.3195,
+ "step": 87
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.9848077530122083e-05,
+ "loss": 0.3099,
+ "step": 88
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.9842617686733546e-05,
+ "loss": 0.271,
+ "step": 89
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.9837062236509013e-05,
+ "loss": 0.2612,
+ "step": 90
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.983141123341168e-05,
+ "loss": 0.3401,
+ "step": 91
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.9825664732332886e-05,
+ "loss": 0.2479,
+ "step": 92
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.9819822789091597e-05,
+ "loss": 0.2462,
+ "step": 93
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.981388546043388e-05,
+ "loss": 0.3064,
+ "step": 94
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.9807852804032306e-05,
+ "loss": 0.3066,
+ "step": 95
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.9801724878485438e-05,
+ "loss": 0.3402,
+ "step": 96
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.979550174331724e-05,
+ "loss": 0.2497,
+ "step": 97
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.9789183458976485e-05,
+ "loss": 0.2513,
+ "step": 98
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.97827700868362e-05,
+ "loss": 0.3157,
+ "step": 99
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.977626168919305e-05,
+ "loss": 0.2776,
+ "step": 100
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.9769658329266718e-05,
+ "loss": 0.3394,
+ "step": 101
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9762960071199334e-05,
+ "loss": 0.2657,
+ "step": 102
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9756166980054812e-05,
+ "loss": 0.2507,
+ "step": 103
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9749279121818235e-05,
+ "loss": 0.3201,
+ "step": 104
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9742296563395218e-05,
+ "loss": 0.2509,
+ "step": 105
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9735219372611232e-05,
+ "loss": 0.3257,
+ "step": 106
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.9728047618210995e-05,
+ "loss": 0.3234,
+ "step": 107
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.9720781369857747e-05,
+ "loss": 0.3496,
+ "step": 108
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.9713420698132614e-05,
+ "loss": 0.2802,
+ "step": 109
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.970596567453391e-05,
+ "loss": 0.2662,
+ "step": 110
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.9698416371476434e-05,
+ "loss": 0.2971,
+ "step": 111
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.969077286229078e-05,
+ "loss": 0.271,
+ "step": 112
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.9683035221222617e-05,
+ "loss": 0.3354,
+ "step": 113
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.9675203523431964e-05,
+ "loss": 0.3134,
+ "step": 114
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.9667277844992476e-05,
+ "loss": 0.2734,
+ "step": 115
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.9659258262890683e-05,
+ "loss": 0.273,
+ "step": 116
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.9651144855025265e-05,
+ "loss": 0.2422,
+ "step": 117
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.964293770020628e-05,
+ "loss": 0.3048,
+ "step": 118
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.9634636878154393e-05,
+ "loss": 0.3284,
+ "step": 119
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.962624246950012e-05,
+ "loss": 0.289,
+ "step": 120
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.9617754555783045e-05,
+ "loss": 0.2867,
+ "step": 121
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.9609173219450998e-05,
+ "loss": 0.2646,
+ "step": 122
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.960049854385929e-05,
+ "loss": 0.3302,
+ "step": 123
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.9591730613269878e-05,
+ "loss": 0.3099,
+ "step": 124
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.9582869512850576e-05,
+ "loss": 0.2953,
+ "step": 125
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.957391532867418e-05,
+ "loss": 0.3058,
+ "step": 126
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.956486814771769e-05,
+ "loss": 0.2787,
+ "step": 127
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.955572805786141e-05,
+ "loss": 0.2357,
+ "step": 128
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.9546495147888134e-05,
+ "loss": 0.3695,
+ "step": 129
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.953716950748227e-05,
+ "loss": 0.2825,
+ "step": 130
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.9527751227228964e-05,
+ "loss": 0.279,
+ "step": 131
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.9518240398613226e-05,
+ "loss": 0.2977,
+ "step": 132
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.9508637114019037e-05,
+ "loss": 0.3032,
+ "step": 133
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.9498941466728462e-05,
+ "loss": 0.2568,
+ "step": 134
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.9489153550920726e-05,
+ "loss": 0.3435,
+ "step": 135
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.947927346167132e-05,
+ "loss": 0.2819,
+ "step": 136
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.946930129495106e-05,
+ "loss": 0.2415,
+ "step": 137
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.945923714762516e-05,
+ "loss": 0.3235,
+ "step": 138
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.9449081117452304e-05,
+ "loss": 0.2474,
+ "step": 139
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.9438833303083677e-05,
+ "loss": 0.2894,
+ "step": 140
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.9428493804062013e-05,
+ "loss": 0.3165,
+ "step": 141
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.9418062720820636e-05,
+ "loss": 0.2488,
+ "step": 142
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9407540154682473e-05,
+ "loss": 0.2624,
+ "step": 143
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9396926207859085e-05,
+ "loss": 0.3221,
+ "step": 144
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9386220983449652e-05,
+ "loss": 0.2695,
+ "step": 145
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9375424585439994e-05,
+ "loss": 0.2303,
+ "step": 146
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9364537118701542e-05,
+ "loss": 0.2722,
+ "step": 147
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.935355868899034e-05,
+ "loss": 0.3543,
+ "step": 148
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.9342489402945997e-05,
+ "loss": 0.2498,
+ "step": 149
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.9331329368090664e-05,
+ "loss": 0.2593,
+ "step": 150
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.932007869282799e-05,
+ "loss": 0.3057,
+ "step": 151
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.9308737486442045e-05,
+ "loss": 0.2861,
+ "step": 152
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.9297305859096305e-05,
+ "loss": 0.2738,
+ "step": 153
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.9285783921832537e-05,
+ "loss": 0.2727,
+ "step": 154
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.927417178656975e-05,
+ "loss": 0.2787,
+ "step": 155
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.926246956610309e-05,
+ "loss": 0.3542,
+ "step": 156
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.9250677374102752e-05,
+ "loss": 0.2699,
+ "step": 157
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.9238795325112867e-05,
+ "loss": 0.3191,
+ "step": 158
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.9226823534550418e-05,
+ "loss": 0.3212,
+ "step": 159
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.921476211870408e-05,
+ "loss": 0.3175,
+ "step": 160
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.9202611194733107e-05,
+ "loss": 0.3035,
+ "step": 161
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.9190370880666206e-05,
+ "loss": 0.2824,
+ "step": 162
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.9178041295400383e-05,
+ "loss": 0.3072,
+ "step": 163
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.9165622558699763e-05,
+ "loss": 0.222,
+ "step": 164
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.9153114791194475e-05,
+ "loss": 0.2342,
+ "step": 165
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.9140518114379433e-05,
+ "loss": 0.2743,
+ "step": 166
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.912783265061319e-05,
+ "loss": 0.3544,
+ "step": 167
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.9115058523116734e-05,
+ "loss": 0.2701,
+ "step": 168
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.9102195855972287e-05,
+ "loss": 0.2766,
+ "step": 169
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.908924477412211e-05,
+ "loss": 0.2867,
+ "step": 170
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.9076205403367287e-05,
+ "loss": 0.2272,
+ "step": 171
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.9063077870366504e-05,
+ "loss": 0.2459,
+ "step": 172
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.90498623026348e-05,
+ "loss": 0.319,
+ "step": 173
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.903655882854237e-05,
+ "loss": 0.258,
+ "step": 174
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.9023167577313267e-05,
+ "loss": 0.307,
+ "step": 175
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.900968867902419e-05,
+ "loss": 0.3004,
+ "step": 176
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.8996122264603202e-05,
+ "loss": 0.2587,
+ "step": 177
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.898246846582844e-05,
+ "loss": 0.2956,
+ "step": 178
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.8968727415326885e-05,
+ "loss": 0.3749,
+ "step": 179
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.895489924657301e-05,
+ "loss": 0.3463,
+ "step": 180
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.894098409388754e-05,
+ "loss": 0.3514,
+ "step": 181
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.8926982092436117e-05,
+ "loss": 0.2839,
+ "step": 182
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.8912893378227984e-05,
+ "loss": 0.2168,
+ "step": 183
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.8898718088114688e-05,
+ "loss": 0.2748,
+ "step": 184
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.8884456359788725e-05,
+ "loss": 0.2885,
+ "step": 185
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.887010833178222e-05,
+ "loss": 0.2699,
+ "step": 186
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.8855674143465567e-05,
+ "loss": 0.3133,
+ "step": 187
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.8841153935046098e-05,
+ "loss": 0.2773,
+ "step": 188
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.8826547847566692e-05,
+ "loss": 0.3472,
+ "step": 189
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.8811856022904423e-05,
+ "loss": 0.3342,
+ "step": 190
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.8797078603769184e-05,
+ "loss": 0.2864,
+ "step": 191
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.8782215733702286e-05,
+ "loss": 0.3115,
+ "step": 192
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.876726755707508e-05,
+ "loss": 0.2726,
+ "step": 193
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.8752234219087538e-05,
+ "loss": 0.3033,
+ "step": 194
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8737115865766865e-05,
+ "loss": 0.2785,
+ "step": 195
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8721912643966055e-05,
+ "loss": 0.3222,
+ "step": 196
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8706624701362485e-05,
+ "loss": 0.2268,
+ "step": 197
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8691252186456465e-05,
+ "loss": 0.2259,
+ "step": 198
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8675795248569816e-05,
+ "loss": 0.2777,
+ "step": 199
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.866025403784439e-05,
+ "loss": 0.2765,
+ "step": 200
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 1.8644628705240636e-05,
+ "loss": 0.3088,
+ "step": 201
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 1.862891940253613e-05,
+ "loss": 0.2766,
+ "step": 202
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 1.8613126282324092e-05,
+ "loss": 0.2699,
+ "step": 203
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 1.8597249498011906e-05,
+ "loss": 0.2973,
+ "step": 204
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 1.858128920381963e-05,
+ "loss": 0.3202,
+ "step": 205
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 1.8565245554778516e-05,
+ "loss": 0.2645,
+ "step": 206
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 1.854911870672947e-05,
+ "loss": 0.2514,
+ "step": 207
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 1.8532908816321557e-05,
+ "loss": 0.2983,
+ "step": 208
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 1.8516616041010495e-05,
+ "loss": 0.2516,
+ "step": 209
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 1.8500240539057093e-05,
+ "loss": 0.2891,
+ "step": 210
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 1.848378246952574e-05,
+ "loss": 0.2445,
+ "step": 211
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 1.8467241992282842e-05,
+ "loss": 0.2506,
+ "step": 212
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 1.8450619267995283e-05,
+ "loss": 0.317,
+ "step": 213
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 1.843391445812886e-05,
+ "loss": 0.2946,
+ "step": 214
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 1.84171277249467e-05,
+ "loss": 0.3034,
+ "step": 215
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.8400259231507716e-05,
+ "loss": 0.3061,
+ "step": 216
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.8383309141664992e-05,
+ "loss": 0.2333,
+ "step": 217
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.83662776200642e-05,
+ "loss": 0.318,
+ "step": 218
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.8349164832142015e-05,
+ "loss": 0.2176,
+ "step": 219
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.833197094412449e-05,
+ "loss": 0.2698,
+ "step": 220
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.8314696123025456e-05,
+ "loss": 0.2831,
+ "step": 221
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 1.8297340536644877e-05,
+ "loss": 0.2773,
+ "step": 222
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 1.827990435356725e-05,
+ "loss": 0.2892,
+ "step": 223
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 1.826238774315995e-05,
+ "loss": 0.3196,
+ "step": 224
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 1.8244790875571582e-05,
+ "loss": 0.2989,
+ "step": 225
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 1.8227113921730336e-05,
+ "loss": 0.2624,
+ "step": 226
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 1.8209357053342325e-05,
+ "loss": 0.2856,
+ "step": 227
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 1.819152044288992e-05,
+ "loss": 0.3125,
+ "step": 228
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 1.8173604263630066e-05,
+ "loss": 0.2491,
+ "step": 229
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 1.8155608689592604e-05,
+ "loss": 0.2719,
+ "step": 230
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 1.8137533895578585e-05,
+ "loss": 0.2805,
+ "step": 231
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 1.811938005715857e-05,
+ "loss": 0.2564,
+ "step": 232
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 1.8101147350670905e-05,
+ "loss": 0.2422,
+ "step": 233
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 1.8082835953220055e-05,
+ "loss": 0.259,
+ "step": 234
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 1.806444604267483e-05,
+ "loss": 0.2272,
+ "step": 235
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 1.8045977797666685e-05,
+ "loss": 0.266,
+ "step": 236
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 1.8027431397587993e-05,
+ "loss": 0.2909,
+ "step": 237
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 1.8008807022590283e-05,
+ "loss": 0.2731,
+ "step": 238
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 1.7990104853582494e-05,
+ "loss": 0.2517,
+ "step": 239
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 1.7971325072229227e-05,
+ "loss": 0.2572,
+ "step": 240
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 1.7952467860948975e-05,
+ "loss": 0.2328,
+ "step": 241
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.7933533402912354e-05,
+ "loss": 0.241,
+ "step": 242
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.791452188204031e-05,
+ "loss": 0.3629,
+ "step": 243
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.7895433483002356e-05,
+ "loss": 0.3116,
+ "step": 244
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.7876268391214756e-05,
+ "loss": 0.2936,
+ "step": 245
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.785702679283874e-05,
+ "loss": 0.2804,
+ "step": 246
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.7837708874778683e-05,
+ "loss": 0.2711,
+ "step": 247
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 1.78183148246803e-05,
+ "loss": 0.3023,
+ "step": 248
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 1.7798844830928818e-05,
+ "loss": 0.251,
+ "step": 249
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 1.777929908264715e-05,
+ "loss": 0.2831,
+ "step": 250
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 1.775967776969405e-05,
+ "loss": 0.2369,
+ "step": 251
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 1.7739981082662275e-05,
+ "loss": 0.2613,
+ "step": 252
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 1.772020921287674e-05,
+ "loss": 0.2944,
+ "step": 253
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 1.7700362352392632e-05,
+ "loss": 0.2577,
+ "step": 254
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 1.7680440693993586e-05,
+ "loss": 0.277,
+ "step": 255
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 1.766044443118978e-05,
+ "loss": 0.256,
+ "step": 256
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 1.7640373758216075e-05,
+ "loss": 0.3175,
+ "step": 257
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 1.762022887003011e-05,
+ "loss": 0.3031,
+ "step": 258
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 1.7600009962310417e-05,
+ "loss": 0.262,
+ "step": 259
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 1.757971723145453e-05,
+ "loss": 0.3123,
+ "step": 260
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 1.7559350874577066e-05,
+ "loss": 0.2985,
+ "step": 261
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 1.75389110895078e-05,
+ "loss": 0.2605,
+ "step": 262
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 1.7518398074789776e-05,
+ "loss": 0.3238,
+ "step": 263
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 1.7497812029677344e-05,
+ "loss": 0.3072,
+ "step": 264
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 1.7477153154134244e-05,
+ "loss": 0.2944,
+ "step": 265
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 1.7456421648831658e-05,
+ "loss": 0.2557,
+ "step": 266
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 1.743561771514626e-05,
+ "loss": 0.2991,
+ "step": 267
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 1.741474155515827e-05,
+ "loss": 0.3116,
+ "step": 268
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 1.739379337164946e-05,
+ "loss": 0.2911,
+ "step": 269
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 1.737277336810124e-05,
+ "loss": 0.3144,
+ "step": 270
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 1.7351681748692622e-05,
+ "loss": 0.3243,
+ "step": 271
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 1.7330518718298263e-05,
+ "loss": 0.2743,
+ "step": 272
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7309284482486494e-05,
+ "loss": 0.2532,
+ "step": 273
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7287979247517285e-05,
+ "loss": 0.2777,
+ "step": 274
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7266603220340273e-05,
+ "loss": 0.3015,
+ "step": 275
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7245156608592727e-05,
+ "loss": 0.3001,
+ "step": 276
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7223639620597556e-05,
+ "loss": 0.3408,
+ "step": 277
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7202052465361268e-05,
+ "loss": 0.2568,
+ "step": 278
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 1.718039535257194e-05,
+ "loss": 0.3001,
+ "step": 279
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 1.7158668492597186e-05,
+ "loss": 0.231,
+ "step": 280
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 1.7136872096482123e-05,
+ "loss": 0.2371,
+ "step": 281
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 1.7115006375947304e-05,
+ "loss": 0.3373,
+ "step": 282
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 1.7093071543386667e-05,
+ "loss": 0.3651,
+ "step": 283
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 1.7071067811865477e-05,
+ "loss": 0.307,
+ "step": 284
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 1.7048995395118253e-05,
+ "loss": 0.3449,
+ "step": 285
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 1.7026854507546694e-05,
+ "loss": 0.3196,
+ "step": 286
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 1.7004645364217584e-05,
+ "loss": 0.272,
+ "step": 287
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 1.698236818086073e-05,
+ "loss": 0.2899,
+ "step": 288
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 1.6960023173866834e-05,
+ "loss": 0.2488,
+ "step": 289
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 1.693761056028542e-05,
+ "loss": 0.2224,
+ "step": 290
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 1.6915130557822698e-05,
+ "loss": 0.3136,
+ "step": 291
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 1.689258338483947e-05,
+ "loss": 0.2671,
+ "step": 292
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 1.686996926034902e-05,
+ "loss": 0.2923,
+ "step": 293
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.6847288404014937e-05,
+ "loss": 0.3313,
+ "step": 294
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.682454103614904e-05,
+ "loss": 0.2638,
+ "step": 295
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.6801727377709195e-05,
+ "loss": 0.3061,
+ "step": 296
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.67788476502972e-05,
+ "loss": 0.2966,
+ "step": 297
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.6755902076156606e-05,
+ "loss": 0.251,
+ "step": 298
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.6732890878170573e-05,
+ "loss": 0.2769,
+ "step": 299
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 1.67098142798597e-05,
+ "loss": 0.2796,
+ "step": 300
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 1.668667250537987e-05,
+ "loss": 0.3202,
+ "step": 301
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 1.6663465779520042e-05,
+ "loss": 0.2775,
+ "step": 302
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 1.6640194327700087e-05,
+ "loss": 0.3842,
+ "step": 303
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 1.6616858375968596e-05,
+ "loss": 0.2959,
+ "step": 304
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 1.659345815100069e-05,
+ "loss": 0.2942,
+ "step": 305
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 1.6569993880095807e-05,
+ "loss": 0.2933,
+ "step": 306
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 1.6546465791175498e-05,
+ "loss": 0.2575,
+ "step": 307
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 1.6522874112781213e-05,
+ "loss": 0.2374,
+ "step": 308
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 1.6499219074072087e-05,
+ "loss": 0.297,
+ "step": 309
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 1.6475500904822707e-05,
+ "loss": 0.2665,
+ "step": 310
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 1.645171983542088e-05,
+ "loss": 0.3204,
+ "step": 311
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 1.6427876096865394e-05,
+ "loss": 0.3059,
+ "step": 312
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 1.640396992076379e-05,
+ "loss": 0.2196,
+ "step": 313
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 1.6380001539330088e-05,
+ "loss": 0.2718,
+ "step": 314
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 1.6355971185382547e-05,
+ "loss": 0.2303,
+ "step": 315
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 1.6331879092341402e-05,
+ "loss": 0.3033,
+ "step": 316
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 1.6307725494226586e-05,
+ "loss": 0.2913,
+ "step": 317
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 1.6283510625655474e-05,
+ "loss": 0.2674,
+ "step": 318
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 1.6259234721840595e-05,
+ "loss": 0.2809,
+ "step": 319
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 1.6234898018587336e-05,
+ "loss": 0.2553,
+ "step": 320
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 1.6210500752291682e-05,
+ "loss": 0.3065,
+ "step": 321
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 1.6186043159937884e-05,
+ "loss": 0.3345,
+ "step": 322
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 1.616152547909618e-05,
+ "loss": 0.3098,
+ "step": 323
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 1.6136947947920477e-05,
+ "loss": 0.2784,
+ "step": 324
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 1.611231080514605e-05,
+ "loss": 0.2735,
+ "step": 325
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 1.608761429008721e-05,
+ "loss": 0.2737,
+ "step": 326
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 1.606285864263498e-05,
+ "loss": 0.2493,
+ "step": 327
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 1.6038044103254775e-05,
+ "loss": 0.3252,
+ "step": 328
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 1.601317091298406e-05,
+ "loss": 0.3153,
+ "step": 329
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 1.5988239313430004e-05,
+ "loss": 0.3649,
+ "step": 330
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 1.5963249546767144e-05,
+ "loss": 0.2791,
+ "step": 331
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 1.5938201855735017e-05,
+ "loss": 0.2858,
+ "step": 332
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 1.5913096483635827e-05,
+ "loss": 0.2663,
+ "step": 333
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 1.5887933674332048e-05,
+ "loss": 0.3026,
+ "step": 334
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 1.5862713672244092e-05,
+ "loss": 0.2936,
+ "step": 335
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 1.5837436722347902e-05,
+ "loss": 0.3158,
+ "step": 336
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 1.5812103070172592e-05,
+ "loss": 0.2877,
+ "step": 337
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 1.578671296179806e-05,
+ "loss": 0.322,
+ "step": 338
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 1.5761266643852587e-05,
+ "loss": 0.2558,
+ "step": 339
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 1.573576436351046e-05,
+ "loss": 0.2491,
+ "step": 340
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 1.5710206368489555e-05,
+ "loss": 0.2728,
+ "step": 341
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 1.5684592907048925e-05,
+ "loss": 0.3044,
+ "step": 342
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 1.5658924227986415e-05,
+ "loss": 0.2578,
+ "step": 343
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 1.563320058063622e-05,
+ "loss": 0.2324,
+ "step": 344
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 1.560742221486648e-05,
+ "loss": 0.2622,
+ "step": 345
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 1.5581589381076843e-05,
+ "loss": 0.2837,
+ "step": 346
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 1.5555702330196024e-05,
+ "loss": 0.2865,
+ "step": 347
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 1.5529761313679396e-05,
+ "loss": 0.242,
+ "step": 348
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 1.5503766583506522e-05,
+ "loss": 0.3048,
+ "step": 349
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 1.5477718392178716e-05,
+ "loss": 0.2945,
+ "step": 350
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 1.545161699271659e-05,
+ "loss": 0.2965,
+ "step": 351
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 1.5425462638657597e-05,
+ "loss": 0.2797,
+ "step": 352
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 1.5399255584053568e-05,
+ "loss": 0.2599,
+ "step": 353
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 1.5372996083468242e-05,
+ "loss": 0.2638,
+ "step": 354
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 1.5346684391974792e-05,
+ "loss": 0.2961,
+ "step": 355
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 1.5320320765153367e-05,
+ "loss": 0.3614,
+ "step": 356
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 1.529390545908857e-05,
+ "loss": 0.2709,
+ "step": 357
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 1.526743873036701e-05,
+ "loss": 0.3059,
+ "step": 358
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 1.5240920836074777e-05,
+ "loss": 0.2747,
+ "step": 359
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 1.5214352033794981e-05,
+ "loss": 0.2479,
+ "step": 360
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 1.5187732581605217e-05,
+ "loss": 0.2541,
+ "step": 361
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 1.5161062738075068e-05,
+ "loss": 0.2887,
+ "step": 362
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 1.5134342762263606e-05,
+ "loss": 0.32,
+ "step": 363
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 1.5107572913716859e-05,
+ "loss": 0.2669,
+ "step": 364
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 1.5080753452465296e-05,
+ "loss": 0.2927,
+ "step": 365
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 1.505388463902131e-05,
+ "loss": 0.352,
+ "step": 366
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 1.502696673437667e-05,
+ "loss": 0.243,
+ "step": 367
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 1.5000000000000002e-05,
+ "loss": 0.3295,
+ "step": 368
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 1.4972984697834238e-05,
+ "loss": 0.2579,
+ "step": 369
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 1.4945921090294076e-05,
+ "loss": 0.2393,
+ "step": 370
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 1.4918809440263435e-05,
+ "loss": 0.2985,
+ "step": 371
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 1.4891650011092896e-05,
+ "loss": 0.3277,
+ "step": 372
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 1.486444306659714e-05,
+ "loss": 0.2591,
+ "step": 373
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 1.4837188871052399e-05,
+ "loss": 0.2722,
+ "step": 374
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 1.4809887689193878e-05,
+ "loss": 0.2754,
+ "step": 375
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 1.4782539786213184e-05,
+ "loss": 0.2422,
+ "step": 376
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 1.4755145427755755e-05,
+ "loss": 0.2508,
+ "step": 377
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 1.4727704879918272e-05,
+ "loss": 0.2739,
+ "step": 378
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 1.4700218409246087e-05,
+ "loss": 0.3071,
+ "step": 379
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 1.4672686282730622e-05,
+ "loss": 0.2606,
+ "step": 380
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 1.4645108767806778e-05,
+ "loss": 0.3498,
+ "step": 381
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 1.4617486132350343e-05,
+ "loss": 0.3105,
+ "step": 382
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 1.4589818644675378e-05,
+ "loss": 0.2651,
+ "step": 383
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 1.4562106573531632e-05,
+ "loss": 0.2546,
+ "step": 384
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 1.4534350188101905e-05,
+ "loss": 0.3083,
+ "step": 385
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 1.4506549757999456e-05,
+ "loss": 0.2503,
+ "step": 386
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 1.4478705553265363e-05,
+ "loss": 0.2802,
+ "step": 387
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 1.4450817844365924e-05,
+ "loss": 0.2667,
+ "step": 388
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 1.4422886902190014e-05,
+ "loss": 0.2697,
+ "step": 389
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 1.4394912998046451e-05,
+ "loss": 0.2197,
+ "step": 390
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 1.436689640366137e-05,
+ "loss": 0.2745,
+ "step": 391
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 1.4338837391175582e-05,
+ "loss": 0.2911,
+ "step": 392
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 1.4310736233141926e-05,
+ "loss": 0.3044,
+ "step": 393
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 1.4282593202522627e-05,
+ "loss": 0.2614,
+ "step": 394
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 1.4254408572686642e-05,
+ "loss": 0.2115,
+ "step": 395
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 1.4226182617406996e-05,
+ "loss": 0.2747,
+ "step": 396
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 1.4197915610858143e-05,
+ "loss": 0.2757,
+ "step": 397
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 1.4169607827613284e-05,
+ "loss": 0.319,
+ "step": 398
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 1.4141259542641706e-05,
+ "loss": 0.2993,
+ "step": 399
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 1.4112871031306118e-05,
+ "loss": 0.3396,
+ "step": 400
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 1.4084442569359964e-05,
+ "loss": 0.3013,
+ "step": 401
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 1.4055974432944753e-05,
+ "loss": 0.3251,
+ "step": 402
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 1.4027466898587375e-05,
+ "loss": 0.2337,
+ "step": 403
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 1.3998920243197408e-05,
+ "loss": 0.265,
+ "step": 404
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 1.3970334744064451e-05,
+ "loss": 0.2818,
+ "step": 405
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 1.3941710678855396e-05,
+ "loss": 0.2539,
+ "step": 406
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 1.391304832561175e-05,
+ "loss": 0.2297,
+ "step": 407
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 1.3884347962746949e-05,
+ "loss": 0.4147,
+ "step": 408
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 1.3855609869043618e-05,
+ "loss": 0.2954,
+ "step": 409
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 1.3826834323650899e-05,
+ "loss": 0.3311,
+ "step": 410
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 1.3798021606081713e-05,
+ "loss": 0.2711,
+ "step": 411
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 1.3769171996210053e-05,
+ "loss": 0.3318,
+ "step": 412
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 1.3740285774268282e-05,
+ "loss": 0.2981,
+ "step": 413
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 1.371136322084438e-05,
+ "loss": 0.232,
+ "step": 414
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 1.3682404616879246e-05,
+ "loss": 0.2655,
+ "step": 415
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 1.3653410243663953e-05,
+ "loss": 0.2999,
+ "step": 416
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 1.3624380382837017e-05,
+ "loss": 0.3296,
+ "step": 417
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 1.3595315316381676e-05,
+ "loss": 0.2932,
+ "step": 418
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 1.3566215326623131e-05,
+ "loss": 0.3093,
+ "step": 419
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 1.3537080696225815e-05,
+ "loss": 0.2761,
+ "step": 420
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 1.3507911708190646e-05,
+ "loss": 0.2225,
+ "step": 421
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 1.3478708645852272e-05,
+ "loss": 0.2524,
+ "step": 422
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 1.3449471792876333e-05,
+ "loss": 0.329,
+ "step": 423
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 1.342020143325669e-05,
+ "loss": 0.226,
+ "step": 424
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 1.3390897851312667e-05,
+ "loss": 0.2471,
+ "step": 425
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 1.336156133168631e-05,
+ "loss": 0.3506,
+ "step": 426
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 1.3332192159339595e-05,
+ "loss": 0.3431,
+ "step": 427
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 1.3302790619551673e-05,
+ "loss": 0.3355,
+ "step": 428
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 1.3273356997916106e-05,
+ "loss": 0.2985,
+ "step": 429
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 1.3243891580338074e-05,
+ "loss": 0.2887,
+ "step": 430
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 1.3214394653031616e-05,
+ "loss": 0.2807,
+ "step": 431
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 1.3184866502516846e-05,
+ "loss": 0.2298,
+ "step": 432
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 1.3155307415617156e-05,
+ "loss": 0.2633,
+ "step": 433
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 1.3125717679456447e-05,
+ "loss": 0.2659,
+ "step": 434
+ },
+ {
+ "epoch": 0.84,
+ "learning_rate": 1.309609758145633e-05,
+ "loss": 0.2968,
+ "step": 435
+ },
+ {
+ "epoch": 0.84,
+ "learning_rate": 1.3066447409333345e-05,
+ "loss": 0.2782,
+ "step": 436
+ },
+ {
+ "epoch": 0.84,
+ "learning_rate": 1.3036767451096148e-05,
+ "loss": 0.2766,
+ "step": 437
+ },
+ {
+ "epoch": 0.84,
+ "learning_rate": 1.300705799504273e-05,
+ "loss": 0.2436,
+ "step": 438
+ },
+ {
+ "epoch": 0.84,
+ "learning_rate": 1.2977319329757616e-05,
+ "loss": 0.3231,
+ "step": 439
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 1.2947551744109044e-05,
+ "loss": 0.272,
+ "step": 440
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 1.2917755527246179e-05,
+ "loss": 0.2204,
+ "step": 441
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 1.28879309685963e-05,
+ "loss": 0.294,
+ "step": 442
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 1.2858078357861979e-05,
+ "loss": 0.2625,
+ "step": 443
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 1.2828197985018276e-05,
+ "loss": 0.2652,
+ "step": 444
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 1.2798290140309924e-05,
+ "loss": 0.297,
+ "step": 445
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 1.2768355114248493e-05,
+ "loss": 0.2919,
+ "step": 446
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 1.2738393197609602e-05,
+ "loss": 0.3062,
+ "step": 447
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 1.2708404681430054e-05,
+ "loss": 0.2893,
+ "step": 448
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 1.2678389857005033e-05,
+ "loss": 0.2747,
+ "step": 449
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 1.2648349015885272e-05,
+ "loss": 0.2863,
+ "step": 450
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 1.2618282449874221e-05,
+ "loss": 0.3022,
+ "step": 451
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 1.2588190451025209e-05,
+ "loss": 0.2665,
+ "step": 452
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 1.2558073311638604e-05,
+ "loss": 0.2627,
+ "step": 453
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 1.2527931324258975e-05,
+ "loss": 0.2748,
+ "step": 454
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 1.249776478167227e-05,
+ "loss": 0.2935,
+ "step": 455
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 1.2467573976902936e-05,
+ "loss": 0.3557,
+ "step": 456
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 1.2437359203211109e-05,
+ "loss": 0.269,
+ "step": 457
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 1.2407120754089733e-05,
+ "loss": 0.3202,
+ "step": 458
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 1.2376858923261732e-05,
+ "loss": 0.284,
+ "step": 459
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 1.2346574004677154e-05,
+ "loss": 0.2671,
+ "step": 460
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 1.2316266292510305e-05,
+ "loss": 0.2731,
+ "step": 461
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 1.2285936081156897e-05,
+ "loss": 0.3232,
+ "step": 462
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 1.2255583665231196e-05,
+ "loss": 0.2675,
+ "step": 463
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 1.2225209339563144e-05,
+ "loss": 0.3438,
+ "step": 464
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 1.2194813399195518e-05,
+ "loss": 0.269,
+ "step": 465
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 1.2164396139381029e-05,
+ "loss": 0.3115,
+ "step": 466
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 1.2133957855579501e-05,
+ "loss": 0.2436,
+ "step": 467
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 1.210349884345496e-05,
+ "loss": 0.3131,
+ "step": 468
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 1.2073019398872778e-05,
+ "loss": 0.3019,
+ "step": 469
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 1.2042519817896805e-05,
+ "loss": 0.2505,
+ "step": 470
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 1.2012000396786485e-05,
+ "loss": 0.2825,
+ "step": 471
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 1.1981461431993978e-05,
+ "loss": 0.2789,
+ "step": 472
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 1.1950903220161286e-05,
+ "loss": 0.2276,
+ "step": 473
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 1.1920326058117364e-05,
+ "loss": 0.2777,
+ "step": 474
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 1.1889730242875243e-05,
+ "loss": 0.2774,
+ "step": 475
+ },
+ {
+ "epoch": 0.92,
+ "learning_rate": 1.1859116071629148e-05,
+ "loss": 0.2911,
+ "step": 476
+ },
+ {
+ "epoch": 0.92,
+ "learning_rate": 1.1828483841751597e-05,
+ "loss": 0.3153,
+ "step": 477
+ },
+ {
+ "epoch": 0.92,
+ "learning_rate": 1.1797833850790527e-05,
+ "loss": 0.3061,
+ "step": 478
+ },
+ {
+ "epoch": 0.92,
+ "learning_rate": 1.1767166396466404e-05,
+ "loss": 0.2931,
+ "step": 479
+ },
+ {
+ "epoch": 0.92,
+ "learning_rate": 1.1736481776669307e-05,
+ "loss": 0.2509,
+ "step": 480
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 1.1705780289456069e-05,
+ "loss": 0.2861,
+ "step": 481
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 1.1675062233047365e-05,
+ "loss": 0.2847,
+ "step": 482
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 1.1644327905824808e-05,
+ "loss": 0.2579,
+ "step": 483
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 1.1613577606328068e-05,
+ "loss": 0.3258,
+ "step": 484
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 1.1582811633251949e-05,
+ "loss": 0.2762,
+ "step": 485
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 1.1552030285443516e-05,
+ "loss": 0.2241,
+ "step": 486
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 1.1521233861899168e-05,
+ "loss": 0.2401,
+ "step": 487
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 1.1490422661761744e-05,
+ "loss": 0.2195,
+ "step": 488
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 1.1459596984317622e-05,
+ "loss": 0.2947,
+ "step": 489
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 1.1428757128993801e-05,
+ "loss": 0.257,
+ "step": 490
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 1.1397903395354996e-05,
+ "loss": 0.2639,
+ "step": 491
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 1.1367036083100735e-05,
+ "loss": 0.2851,
+ "step": 492
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 1.1336155492062439e-05,
+ "loss": 0.2933,
+ "step": 493
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 1.130526192220052e-05,
+ "loss": 0.2513,
+ "step": 494
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 1.1274355673601446e-05,
+ "loss": 0.2302,
+ "step": 495
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 1.1243437046474854e-05,
+ "loss": 0.2771,
+ "step": 496
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 1.1212506341150615e-05,
+ "loss": 0.3137,
+ "step": 497
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 1.118156385807593e-05,
+ "loss": 0.3084,
+ "step": 498
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 1.1150609897812387e-05,
+ "loss": 0.2404,
+ "step": 499
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 1.1119644761033079e-05,
+ "loss": 0.2192,
+ "step": 500
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 1.1088668748519646e-05,
+ "loss": 0.2899,
+ "step": 501
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 1.105768216115938e-05,
+ "loss": 0.2206,
+ "step": 502
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 1.1026685299942286e-05,
+ "loss": 0.2641,
+ "step": 503
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 1.0995678465958168e-05,
+ "loss": 0.2539,
+ "step": 504
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 1.0964661960393703e-05,
+ "loss": 0.2721,
+ "step": 505
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 1.0933636084529507e-05,
+ "loss": 0.2854,
+ "step": 506
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 1.0902601139737225e-05,
+ "loss": 0.2651,
+ "step": 507
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 1.0871557427476585e-05,
+ "loss": 0.2654,
+ "step": 508
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 1.0840505249292477e-05,
+ "loss": 0.2759,
+ "step": 509
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 1.0809444906812034e-05,
+ "loss": 0.246,
+ "step": 510
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 1.0778376701741688e-05,
+ "loss": 0.2517,
+ "step": 511
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 1.0747300935864245e-05,
+ "loss": 0.2957,
+ "step": 512
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 1.0716217911035952e-05,
+ "loss": 0.2462,
+ "step": 513
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 1.0685127929183567e-05,
+ "loss": 0.3094,
+ "step": 514
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 1.0654031292301432e-05,
+ "loss": 0.3021,
+ "step": 515
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 1.0622928302448523e-05,
+ "loss": 0.2938,
+ "step": 516
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 1.0591819261745528e-05,
+ "loss": 0.3073,
+ "step": 517
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 1.0560704472371919e-05,
+ "loss": 0.2348,
+ "step": 518
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 1.0529584236562995e-05,
+ "loss": 0.2721,
+ "step": 519
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 1.0498458856606972e-05,
+ "loss": 0.2808,
+ "step": 520
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 1.0467328634842024e-05,
+ "loss": 0.2259,
+ "step": 521
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 1.0436193873653362e-05,
+ "loss": 0.2134,
+ "step": 522
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 1.0405054875470287e-05,
+ "loss": 0.2164,
+ "step": 523
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 1.037391194276326e-05,
+ "loss": 0.2579,
+ "step": 524
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 1.0342765378040953e-05,
+ "loss": 0.2045,
+ "step": 525
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 1.0311615483847333e-05,
+ "loss": 0.1862,
+ "step": 526
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 1.028046256275869e-05,
+ "loss": 0.2848,
+ "step": 527
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 1.0249306917380731e-05,
+ "loss": 0.2049,
+ "step": 528
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 1.0218148850345613e-05,
+ "loss": 0.2282,
+ "step": 529
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 1.0186988664309023e-05,
+ "loss": 0.2015,
+ "step": 530
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 1.0155826661947232e-05,
+ "loss": 0.1778,
+ "step": 531
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 1.0124663145954152e-05,
+ "loss": 0.2184,
+ "step": 532
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 1.0093498419038394e-05,
+ "loss": 0.2326,
+ "step": 533
+ },
+ {
+ "epoch": 1.03,
+ "learning_rate": 1.0062332783920337e-05,
+ "loss": 0.2122,
+ "step": 534
+ },
+ {
+ "epoch": 1.03,
+ "learning_rate": 1.0031166543329179e-05,
+ "loss": 0.2254,
+ "step": 535
+ },
+ {
+ "epoch": 1.03,
+ "learning_rate": 1e-05,
+ "loss": 0.2029,
+ "step": 536
+ },
+ {
+ "epoch": 1.03,
+ "learning_rate": 9.968833456670824e-06,
+ "loss": 0.2226,
+ "step": 537
+ },
+ {
+ "epoch": 1.03,
+ "learning_rate": 9.937667216079665e-06,
+ "loss": 0.2244,
+ "step": 538
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 9.90650158096161e-06,
+ "loss": 0.1925,
+ "step": 539
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 9.87533685404585e-06,
+ "loss": 0.177,
+ "step": 540
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 9.844173338052771e-06,
+ "loss": 0.2026,
+ "step": 541
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 9.81301133569098e-06,
+ "loss": 0.21,
+ "step": 542
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 9.78185114965439e-06,
+ "loss": 0.2318,
+ "step": 543
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 9.750693082619274e-06,
+ "loss": 0.1783,
+ "step": 544
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 9.719537437241311e-06,
+ "loss": 0.1722,
+ "step": 545
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 9.68838451615267e-06,
+ "loss": 0.1958,
+ "step": 546
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 9.65723462195905e-06,
+ "loss": 0.209,
+ "step": 547
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 9.626088057236745e-06,
+ "loss": 0.2057,
+ "step": 548
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 9.594945124529718e-06,
+ "loss": 0.2427,
+ "step": 549
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 9.563806126346643e-06,
+ "loss": 0.2325,
+ "step": 550
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 9.532671365157979e-06,
+ "loss": 0.218,
+ "step": 551
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 9.501541143393028e-06,
+ "loss": 0.1853,
+ "step": 552
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 9.470415763437003e-06,
+ "loss": 0.2607,
+ "step": 553
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 9.439295527628083e-06,
+ "loss": 0.2413,
+ "step": 554
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 9.408180738254472e-06,
+ "loss": 0.1771,
+ "step": 555
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 9.377071697551479e-06,
+ "loss": 0.1908,
+ "step": 556
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 9.34596870769857e-06,
+ "loss": 0.1837,
+ "step": 557
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 9.314872070816435e-06,
+ "loss": 0.1998,
+ "step": 558
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 9.28378208896405e-06,
+ "loss": 0.1774,
+ "step": 559
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 9.252699064135759e-06,
+ "loss": 0.2101,
+ "step": 560
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 9.221623298258315e-06,
+ "loss": 0.2664,
+ "step": 561
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 9.190555093187968e-06,
+ "loss": 0.2086,
+ "step": 562
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 9.159494750707527e-06,
+ "loss": 0.2233,
+ "step": 563
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 9.128442572523418e-06,
+ "loss": 0.183,
+ "step": 564
+ },
+ {
+ "epoch": 1.09,
+ "learning_rate": 9.097398860262777e-06,
+ "loss": 0.2087,
+ "step": 565
+ },
+ {
+ "epoch": 1.09,
+ "learning_rate": 9.066363915470494e-06,
+ "loss": 0.2189,
+ "step": 566
+ },
+ {
+ "epoch": 1.09,
+ "learning_rate": 9.0353380396063e-06,
+ "loss": 0.2335,
+ "step": 567
+ },
+ {
+ "epoch": 1.09,
+ "learning_rate": 9.004321534041836e-06,
+ "loss": 0.174,
+ "step": 568
+ },
+ {
+ "epoch": 1.09,
+ "learning_rate": 8.973314700057717e-06,
+ "loss": 0.1857,
+ "step": 569
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 8.942317838840625e-06,
+ "loss": 0.1739,
+ "step": 570
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 8.911331251480357e-06,
+ "loss": 0.2238,
+ "step": 571
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 8.880355238966923e-06,
+ "loss": 0.181,
+ "step": 572
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 8.849390102187615e-06,
+ "loss": 0.2084,
+ "step": 573
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 8.818436141924072e-06,
+ "loss": 0.1913,
+ "step": 574
+ },
+ {
+ "epoch": 1.11,
+ "learning_rate": 8.787493658849387e-06,
+ "loss": 0.2048,
+ "step": 575
+ },
+ {
+ "epoch": 1.11,
+ "learning_rate": 8.756562953525151e-06,
+ "loss": 0.1866,
+ "step": 576
+ },
+ {
+ "epoch": 1.11,
+ "learning_rate": 8.72564432639856e-06,
+ "loss": 0.206,
+ "step": 577
+ },
+ {
+ "epoch": 1.11,
+ "learning_rate": 8.694738077799487e-06,
+ "loss": 0.2072,
+ "step": 578
+ },
+ {
+ "epoch": 1.11,
+ "learning_rate": 8.663844507937563e-06,
+ "loss": 0.2051,
+ "step": 579
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 8.632963916899268e-06,
+ "loss": 0.1961,
+ "step": 580
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 8.602096604645009e-06,
+ "loss": 0.2125,
+ "step": 581
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 8.571242871006202e-06,
+ "loss": 0.2137,
+ "step": 582
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 8.540403015682382e-06,
+ "loss": 0.2458,
+ "step": 583
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 8.509577338238255e-06,
+ "loss": 0.1986,
+ "step": 584
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 8.478766138100834e-06,
+ "loss": 0.243,
+ "step": 585
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 8.447969714556484e-06,
+ "loss": 0.1845,
+ "step": 586
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 8.417188366748051e-06,
+ "loss": 0.1862,
+ "step": 587
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 8.386422393671934e-06,
+ "loss": 0.2013,
+ "step": 588
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 8.355672094175192e-06,
+ "loss": 0.2136,
+ "step": 589
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 8.324937766952638e-06,
+ "loss": 0.2283,
+ "step": 590
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 8.294219710543931e-06,
+ "loss": 0.156,
+ "step": 591
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 8.263518223330698e-06,
+ "loss": 0.2428,
+ "step": 592
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 8.232833603533601e-06,
+ "loss": 0.1803,
+ "step": 593
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 8.202166149209475e-06,
+ "loss": 0.1704,
+ "step": 594
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 8.171516158248406e-06,
+ "loss": 0.204,
+ "step": 595
+ },
+ {
+ "epoch": 1.15,
+ "learning_rate": 8.140883928370855e-06,
+ "loss": 0.2087,
+ "step": 596
+ },
+ {
+ "epoch": 1.15,
+ "learning_rate": 8.11026975712476e-06,
+ "loss": 0.2008,
+ "step": 597
+ },
+ {
+ "epoch": 1.15,
+ "learning_rate": 8.079673941882639e-06,
+ "loss": 0.206,
+ "step": 598
+ },
+ {
+ "epoch": 1.15,
+ "learning_rate": 8.04909677983872e-06,
+ "loss": 0.1813,
+ "step": 599
+ },
+ {
+ "epoch": 1.15,
+ "learning_rate": 8.018538568006027e-06,
+ "loss": 0.1867,
+ "step": 600
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 7.987999603213518e-06,
+ "loss": 0.1704,
+ "step": 601
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 7.957480182103198e-06,
+ "loss": 0.2217,
+ "step": 602
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 7.926980601127225e-06,
+ "loss": 0.1829,
+ "step": 603
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 7.896501156545044e-06,
+ "loss": 0.1965,
+ "step": 604
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 7.866042144420502e-06,
+ "loss": 0.1398,
+ "step": 605
+ },
+ {
+ "epoch": 1.17,
+ "learning_rate": 7.835603860618973e-06,
+ "loss": 0.2116,
+ "step": 606
+ },
+ {
+ "epoch": 1.17,
+ "learning_rate": 7.805186600804489e-06,
+ "loss": 0.2355,
+ "step": 607
+ },
+ {
+ "epoch": 1.17,
+ "learning_rate": 7.774790660436857e-06,
+ "loss": 0.1545,
+ "step": 608
+ },
+ {
+ "epoch": 1.17,
+ "learning_rate": 7.744416334768809e-06,
+ "loss": 0.2143,
+ "step": 609
+ },
+ {
+ "epoch": 1.17,
+ "learning_rate": 7.714063918843106e-06,
+ "loss": 0.1995,
+ "step": 610
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 7.6837337074897e-06,
+ "loss": 0.1734,
+ "step": 611
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 7.653425995322852e-06,
+ "loss": 0.2309,
+ "step": 612
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 7.623141076738271e-06,
+ "loss": 0.1931,
+ "step": 613
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 7.592879245910273e-06,
+ "loss": 0.1763,
+ "step": 614
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 7.562640796788893e-06,
+ "loss": 0.1522,
+ "step": 615
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 7.532426023097063e-06,
+ "loss": 0.2333,
+ "step": 616
+ },
+ {
+ "epoch": 1.19,
+ "learning_rate": 7.50223521832773e-06,
+ "loss": 0.2096,
+ "step": 617
+ },
+ {
+ "epoch": 1.19,
+ "learning_rate": 7.472068675741024e-06,
+ "loss": 0.2204,
+ "step": 618
+ },
+ {
+ "epoch": 1.19,
+ "learning_rate": 7.4419266883614e-06,
+ "loss": 0.1771,
+ "step": 619
+ },
+ {
+ "epoch": 1.19,
+ "learning_rate": 7.411809548974792e-06,
+ "loss": 0.2191,
+ "step": 620
+ },
+ {
+ "epoch": 1.19,
+ "learning_rate": 7.38171755012578e-06,
+ "loss": 0.1693,
+ "step": 621
+ },
+ {
+ "epoch": 1.2,
+ "learning_rate": 7.3516509841147276e-06,
+ "loss": 0.1972,
+ "step": 622
+ },
+ {
+ "epoch": 1.2,
+ "learning_rate": 7.321610142994971e-06,
+ "loss": 0.2129,
+ "step": 623
+ },
+ {
+ "epoch": 1.2,
+ "learning_rate": 7.291595318569951e-06,
+ "loss": 0.2208,
+ "step": 624
+ },
+ {
+ "epoch": 1.2,
+ "learning_rate": 7.2616068023904e-06,
+ "loss": 0.2697,
+ "step": 625
+ },
+ {
+ "epoch": 1.2,
+ "learning_rate": 7.2316448857515076e-06,
+ "loss": 0.193,
+ "step": 626
+ },
+ {
+ "epoch": 1.21,
+ "learning_rate": 7.201709859690081e-06,
+ "loss": 0.1959,
+ "step": 627
+ },
+ {
+ "epoch": 1.21,
+ "learning_rate": 7.171802014981726e-06,
+ "loss": 0.2187,
+ "step": 628
+ },
+ {
+ "epoch": 1.21,
+ "learning_rate": 7.141921642138025e-06,
+ "loss": 0.2032,
+ "step": 629
+ },
+ {
+ "epoch": 1.21,
+ "learning_rate": 7.112069031403704e-06,
+ "loss": 0.2083,
+ "step": 630
+ },
+ {
+ "epoch": 1.21,
+ "learning_rate": 7.082244472753823e-06,
+ "loss": 0.2207,
+ "step": 631
+ },
+ {
+ "epoch": 1.22,
+ "learning_rate": 7.052448255890958e-06,
+ "loss": 0.2002,
+ "step": 632
+ },
+ {
+ "epoch": 1.22,
+ "learning_rate": 7.022680670242387e-06,
+ "loss": 0.1906,
+ "step": 633
+ },
+ {
+ "epoch": 1.22,
+ "learning_rate": 6.992942004957271e-06,
+ "loss": 0.1959,
+ "step": 634
+ },
+ {
+ "epoch": 1.22,
+ "learning_rate": 6.963232548903853e-06,
+ "loss": 0.1622,
+ "step": 635
+ },
+ {
+ "epoch": 1.22,
+ "learning_rate": 6.933552590666659e-06,
+ "loss": 0.1788,
+ "step": 636
+ },
+ {
+ "epoch": 1.23,
+ "learning_rate": 6.903902418543671e-06,
+ "loss": 0.1988,
+ "step": 637
+ },
+ {
+ "epoch": 1.23,
+ "learning_rate": 6.874282320543557e-06,
+ "loss": 0.202,
+ "step": 638
+ },
+ {
+ "epoch": 1.23,
+ "learning_rate": 6.844692584382848e-06,
+ "loss": 0.2255,
+ "step": 639
+ },
+ {
+ "epoch": 1.23,
+ "learning_rate": 6.815133497483157e-06,
+ "loss": 0.171,
+ "step": 640
+ },
+ {
+ "epoch": 1.23,
+ "learning_rate": 6.785605346968387e-06,
+ "loss": 0.1838,
+ "step": 641
+ },
+ {
+ "epoch": 1.23,
+ "learning_rate": 6.7561084196619306e-06,
+ "loss": 0.2332,
+ "step": 642
+ },
+ {
+ "epoch": 1.24,
+ "learning_rate": 6.7266430020839e-06,
+ "loss": 0.2092,
+ "step": 643
+ },
+ {
+ "epoch": 1.24,
+ "learning_rate": 6.697209380448333e-06,
+ "loss": 0.2098,
+ "step": 644
+ },
+ {
+ "epoch": 1.24,
+ "learning_rate": 6.66780784066041e-06,
+ "loss": 0.2083,
+ "step": 645
+ },
+ {
+ "epoch": 1.24,
+ "learning_rate": 6.638438668313695e-06,
+ "loss": 0.1681,
+ "step": 646
+ },
+ {
+ "epoch": 1.24,
+ "learning_rate": 6.609102148687333e-06,
+ "loss": 0.2084,
+ "step": 647
+ },
+ {
+ "epoch": 1.25,
+ "learning_rate": 6.579798566743314e-06,
+ "loss": 0.1673,
+ "step": 648
+ },
+ {
+ "epoch": 1.25,
+ "learning_rate": 6.550528207123667e-06,
+ "loss": 0.2185,
+ "step": 649
+ },
+ {
+ "epoch": 1.25,
+ "learning_rate": 6.521291354147727e-06,
+ "loss": 0.2223,
+ "step": 650
+ },
+ {
+ "epoch": 1.25,
+ "learning_rate": 6.492088291809355e-06,
+ "loss": 0.2381,
+ "step": 651
+ },
+ {
+ "epoch": 1.25,
+ "learning_rate": 6.462919303774186e-06,
+ "loss": 0.2142,
+ "step": 652
+ },
+ {
+ "epoch": 1.26,
+ "learning_rate": 6.43378467337687e-06,
+ "loss": 0.2025,
+ "step": 653
+ },
+ {
+ "epoch": 1.26,
+ "learning_rate": 6.404684683618325e-06,
+ "loss": 0.1884,
+ "step": 654
+ },
+ {
+ "epoch": 1.26,
+ "learning_rate": 6.375619617162985e-06,
+ "loss": 0.203,
+ "step": 655
+ },
+ {
+ "epoch": 1.26,
+ "learning_rate": 6.34658975633605e-06,
+ "loss": 0.1916,
+ "step": 656
+ },
+ {
+ "epoch": 1.26,
+ "learning_rate": 6.317595383120756e-06,
+ "loss": 0.2355,
+ "step": 657
+ },
+ {
+ "epoch": 1.27,
+ "learning_rate": 6.288636779155621e-06,
+ "loss": 0.2231,
+ "step": 658
+ },
+ {
+ "epoch": 1.27,
+ "learning_rate": 6.2597142257317185e-06,
+ "loss": 0.1941,
+ "step": 659
+ },
+ {
+ "epoch": 1.27,
+ "learning_rate": 6.230828003789949e-06,
+ "loss": 0.2253,
+ "step": 660
+ },
+ {
+ "epoch": 1.27,
+ "learning_rate": 6.201978393918291e-06,
+ "loss": 0.1972,
+ "step": 661
+ },
+ {
+ "epoch": 1.27,
+ "learning_rate": 6.173165676349103e-06,
+ "loss": 0.1977,
+ "step": 662
+ },
+ {
+ "epoch": 1.27,
+ "learning_rate": 6.144390130956384e-06,
+ "loss": 0.2046,
+ "step": 663
+ },
+ {
+ "epoch": 1.28,
+ "learning_rate": 6.115652037253054e-06,
+ "loss": 0.2065,
+ "step": 664
+ },
+ {
+ "epoch": 1.28,
+ "learning_rate": 6.086951674388252e-06,
+ "loss": 0.2271,
+ "step": 665
+ },
+ {
+ "epoch": 1.28,
+ "learning_rate": 6.058289321144608e-06,
+ "loss": 0.194,
+ "step": 666
+ },
+ {
+ "epoch": 1.28,
+ "learning_rate": 6.02966525593555e-06,
+ "loss": 0.1689,
+ "step": 667
+ },
+ {
+ "epoch": 1.28,
+ "learning_rate": 6.001079756802592e-06,
+ "loss": 0.2036,
+ "step": 668
+ },
+ {
+ "epoch": 1.29,
+ "learning_rate": 5.97253310141263e-06,
+ "loss": 0.217,
+ "step": 669
+ },
+ {
+ "epoch": 1.29,
+ "learning_rate": 5.944025567055251e-06,
+ "loss": 0.2404,
+ "step": 670
+ },
+ {
+ "epoch": 1.29,
+ "learning_rate": 5.91555743064004e-06,
+ "loss": 0.1747,
+ "step": 671
+ },
+ {
+ "epoch": 1.29,
+ "learning_rate": 5.887128968693887e-06,
+ "loss": 0.2309,
+ "step": 672
+ },
+ {
+ "epoch": 1.29,
+ "learning_rate": 5.858740457358298e-06,
+ "loss": 0.1857,
+ "step": 673
+ },
+ {
+ "epoch": 1.3,
+ "learning_rate": 5.830392172386723e-06,
+ "loss": 0.2371,
+ "step": 674
+ },
+ {
+ "epoch": 1.3,
+ "learning_rate": 5.802084389141862e-06,
+ "loss": 0.227,
+ "step": 675
+ },
+ {
+ "epoch": 1.3,
+ "learning_rate": 5.773817382593008e-06,
+ "loss": 0.1936,
+ "step": 676
+ },
+ {
+ "epoch": 1.3,
+ "learning_rate": 5.745591427313365e-06,
+ "loss": 0.1955,
+ "step": 677
+ },
+ {
+ "epoch": 1.3,
+ "learning_rate": 5.717406797477371e-06,
+ "loss": 0.1936,
+ "step": 678
+ },
+ {
+ "epoch": 1.31,
+ "learning_rate": 5.689263766858072e-06,
+ "loss": 0.1452,
+ "step": 679
+ },
+ {
+ "epoch": 1.31,
+ "learning_rate": 5.66116260882442e-06,
+ "loss": 0.2363,
+ "step": 680
+ },
+ {
+ "epoch": 1.31,
+ "learning_rate": 5.633103596338631e-06,
+ "loss": 0.1828,
+ "step": 681
+ },
+ {
+ "epoch": 1.31,
+ "learning_rate": 5.6050870019535496e-06,
+ "loss": 0.2113,
+ "step": 682
+ },
+ {
+ "epoch": 1.31,
+ "learning_rate": 5.5771130978099896e-06,
+ "loss": 0.2595,
+ "step": 683
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 5.549182155634076e-06,
+ "loss": 0.2211,
+ "step": 684
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 5.521294446734637e-06,
+ "loss": 0.2158,
+ "step": 685
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 5.493450242000546e-06,
+ "loss": 0.2128,
+ "step": 686
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 5.465649811898098e-06,
+ "loss": 0.1775,
+ "step": 687
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 5.43789342646837e-06,
+ "loss": 0.1736,
+ "step": 688
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 5.410181355324622e-06,
+ "loss": 0.2185,
+ "step": 689
+ },
+ {
+ "epoch": 1.33,
+ "learning_rate": 5.382513867649663e-06,
+ "loss": 0.1752,
+ "step": 690
+ },
+ {
+ "epoch": 1.33,
+ "learning_rate": 5.354891232193225e-06,
+ "loss": 0.1928,
+ "step": 691
+ },
+ {
+ "epoch": 1.33,
+ "learning_rate": 5.32731371726938e-06,
+ "loss": 0.1714,
+ "step": 692
+ },
+ {
+ "epoch": 1.33,
+ "learning_rate": 5.299781590753916e-06,
+ "loss": 0.1689,
+ "step": 693
+ },
+ {
+ "epoch": 1.33,
+ "learning_rate": 5.2722951200817315e-06,
+ "loss": 0.2085,
+ "step": 694
+ },
+ {
+ "epoch": 1.34,
+ "learning_rate": 5.244854572244249e-06,
+ "loss": 0.2364,
+ "step": 695
+ },
+ {
+ "epoch": 1.34,
+ "learning_rate": 5.217460213786822e-06,
+ "loss": 0.2148,
+ "step": 696
+ },
+ {
+ "epoch": 1.34,
+ "learning_rate": 5.190112310806126e-06,
+ "loss": 0.1739,
+ "step": 697
+ },
+ {
+ "epoch": 1.34,
+ "learning_rate": 5.1628111289476025e-06,
+ "loss": 0.2347,
+ "step": 698
+ },
+ {
+ "epoch": 1.34,
+ "learning_rate": 5.135556933402862e-06,
+ "loss": 0.1907,
+ "step": 699
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 5.108349988907111e-06,
+ "loss": 0.1803,
+ "step": 700
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 5.081190559736569e-06,
+ "loss": 0.1942,
+ "step": 701
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 5.054078909705926e-06,
+ "loss": 0.1947,
+ "step": 702
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 5.027015302165768e-06,
+ "loss": 0.1577,
+ "step": 703
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 5.000000000000003e-06,
+ "loss": 0.189,
+ "step": 704
+ },
+ {
+ "epoch": 1.36,
+ "learning_rate": 4.973033265623333e-06,
+ "loss": 0.2052,
+ "step": 705
+ },
+ {
+ "epoch": 1.36,
+ "learning_rate": 4.946115360978696e-06,
+ "loss": 0.2366,
+ "step": 706
+ },
+ {
+ "epoch": 1.36,
+ "learning_rate": 4.919246547534709e-06,
+ "loss": 0.2132,
+ "step": 707
+ },
+ {
+ "epoch": 1.36,
+ "learning_rate": 4.892427086283147e-06,
+ "loss": 0.2123,
+ "step": 708
+ },
+ {
+ "epoch": 1.36,
+ "learning_rate": 4.865657237736397e-06,
+ "loss": 0.1999,
+ "step": 709
+ },
+ {
+ "epoch": 1.37,
+ "learning_rate": 4.838937261924933e-06,
+ "loss": 0.2001,
+ "step": 710
+ },
+ {
+ "epoch": 1.37,
+ "learning_rate": 4.812267418394784e-06,
+ "loss": 0.1651,
+ "step": 711
+ },
+ {
+ "epoch": 1.37,
+ "learning_rate": 4.78564796620502e-06,
+ "loss": 0.1869,
+ "step": 712
+ },
+ {
+ "epoch": 1.37,
+ "learning_rate": 4.759079163925223e-06,
+ "loss": 0.2353,
+ "step": 713
+ },
+ {
+ "epoch": 1.37,
+ "learning_rate": 4.732561269632992e-06,
+ "loss": 0.2414,
+ "step": 714
+ },
+ {
+ "epoch": 1.38,
+ "learning_rate": 4.706094540911429e-06,
+ "loss": 0.1988,
+ "step": 715
+ },
+ {
+ "epoch": 1.38,
+ "learning_rate": 4.679679234846636e-06,
+ "loss": 0.2301,
+ "step": 716
+ },
+ {
+ "epoch": 1.38,
+ "learning_rate": 4.6533156080252076e-06,
+ "loss": 0.1993,
+ "step": 717
+ },
+ {
+ "epoch": 1.38,
+ "learning_rate": 4.627003916531761e-06,
+ "loss": 0.2173,
+ "step": 718
+ },
+ {
+ "epoch": 1.38,
+ "learning_rate": 4.600744415946438e-06,
+ "loss": 0.1742,
+ "step": 719
+ },
+ {
+ "epoch": 1.38,
+ "learning_rate": 4.5745373613424075e-06,
+ "loss": 0.2356,
+ "step": 720
+ },
+ {
+ "epoch": 1.39,
+ "learning_rate": 4.548383007283412e-06,
+ "loss": 0.1649,
+ "step": 721
+ },
+ {
+ "epoch": 1.39,
+ "learning_rate": 4.522281607821288e-06,
+ "loss": 0.1899,
+ "step": 722
+ },
+ {
+ "epoch": 1.39,
+ "learning_rate": 4.496233416493481e-06,
+ "loss": 0.2329,
+ "step": 723
+ },
+ {
+ "epoch": 1.39,
+ "learning_rate": 4.470238686320606e-06,
+ "loss": 0.2132,
+ "step": 724
+ },
+ {
+ "epoch": 1.39,
+ "learning_rate": 4.444297669803981e-06,
+ "loss": 0.2018,
+ "step": 725
+ },
+ {
+ "epoch": 1.4,
+ "learning_rate": 4.418410618923163e-06,
+ "loss": 0.1757,
+ "step": 726
+ },
+ {
+ "epoch": 1.4,
+ "learning_rate": 4.392577785133521e-06,
+ "loss": 0.2443,
+ "step": 727
+ },
+ {
+ "epoch": 1.4,
+ "learning_rate": 4.3667994193637794e-06,
+ "loss": 0.2331,
+ "step": 728
+ },
+ {
+ "epoch": 1.4,
+ "learning_rate": 4.3410757720135886e-06,
+ "loss": 0.1758,
+ "step": 729
+ },
+ {
+ "epoch": 1.4,
+ "learning_rate": 4.315407092951078e-06,
+ "loss": 0.1929,
+ "step": 730
+ },
+ {
+ "epoch": 1.41,
+ "learning_rate": 4.289793631510449e-06,
+ "loss": 0.1916,
+ "step": 731
+ },
+ {
+ "epoch": 1.41,
+ "learning_rate": 4.264235636489542e-06,
+ "loss": 0.1892,
+ "step": 732
+ },
+ {
+ "epoch": 1.41,
+ "learning_rate": 4.238733356147414e-06,
+ "loss": 0.2,
+ "step": 733
+ },
+ {
+ "epoch": 1.41,
+ "learning_rate": 4.213287038201943e-06,
+ "loss": 0.1583,
+ "step": 734
+ },
+ {
+ "epoch": 1.41,
+ "learning_rate": 4.187896929827414e-06,
+ "loss": 0.205,
+ "step": 735
+ },
+ {
+ "epoch": 1.42,
+ "learning_rate": 4.162563277652104e-06,
+ "loss": 0.2001,
+ "step": 736
+ },
+ {
+ "epoch": 1.42,
+ "learning_rate": 4.137286327755913e-06,
+ "loss": 0.2072,
+ "step": 737
+ },
+ {
+ "epoch": 1.42,
+ "learning_rate": 4.112066325667954e-06,
+ "loss": 0.2074,
+ "step": 738
+ },
+ {
+ "epoch": 1.42,
+ "learning_rate": 4.086903516364179e-06,
+ "loss": 0.2027,
+ "step": 739
+ },
+ {
+ "epoch": 1.42,
+ "learning_rate": 4.061798144264986e-06,
+ "loss": 0.1431,
+ "step": 740
+ },
+ {
+ "epoch": 1.43,
+ "learning_rate": 4.03675045323286e-06,
+ "loss": 0.227,
+ "step": 741
+ },
+ {
+ "epoch": 1.43,
+ "learning_rate": 4.0117606865699975e-06,
+ "loss": 0.216,
+ "step": 742
+ },
+ {
+ "epoch": 1.43,
+ "learning_rate": 3.986829087015941e-06,
+ "loss": 0.1839,
+ "step": 743
+ },
+ {
+ "epoch": 1.43,
+ "learning_rate": 3.961955896745224e-06,
+ "loss": 0.1985,
+ "step": 744
+ },
+ {
+ "epoch": 1.43,
+ "learning_rate": 3.937141357365023e-06,
+ "loss": 0.1422,
+ "step": 745
+ },
+ {
+ "epoch": 1.43,
+ "learning_rate": 3.912385709912794e-06,
+ "loss": 0.1922,
+ "step": 746
+ },
+ {
+ "epoch": 1.44,
+ "learning_rate": 3.887689194853951e-06,
+ "loss": 0.2179,
+ "step": 747
+ },
+ {
+ "epoch": 1.44,
+ "learning_rate": 3.8630520520795275e-06,
+ "loss": 0.2293,
+ "step": 748
+ },
+ {
+ "epoch": 1.44,
+ "learning_rate": 3.838474520903825e-06,
+ "loss": 0.2173,
+ "step": 749
+ },
+ {
+ "epoch": 1.44,
+ "learning_rate": 3.8139568400621184e-06,
+ "loss": 0.1848,
+ "step": 750
+ },
+ {
+ "epoch": 1.44,
+ "learning_rate": 3.7894992477083226e-06,
+ "loss": 0.2151,
+ "step": 751
+ },
+ {
+ "epoch": 1.45,
+ "learning_rate": 3.7651019814126656e-06,
+ "loss": 0.1771,
+ "step": 752
+ },
+ {
+ "epoch": 1.45,
+ "learning_rate": 3.7407652781594094e-06,
+ "loss": 0.1714,
+ "step": 753
+ },
+ {
+ "epoch": 1.45,
+ "learning_rate": 3.7164893743445274e-06,
+ "loss": 0.1896,
+ "step": 754
+ },
+ {
+ "epoch": 1.45,
+ "learning_rate": 3.692274505773419e-06,
+ "loss": 0.1457,
+ "step": 755
+ },
+ {
+ "epoch": 1.45,
+ "learning_rate": 3.6681209076586035e-06,
+ "loss": 0.1821,
+ "step": 756
+ },
+ {
+ "epoch": 1.46,
+ "learning_rate": 3.644028814617454e-06,
+ "loss": 0.191,
+ "step": 757
+ },
+ {
+ "epoch": 1.46,
+ "learning_rate": 3.619998460669916e-06,
+ "loss": 0.1967,
+ "step": 758
+ },
+ {
+ "epoch": 1.46,
+ "learning_rate": 3.5960300792362124e-06,
+ "loss": 0.2058,
+ "step": 759
+ },
+ {
+ "epoch": 1.46,
+ "learning_rate": 3.5721239031346067e-06,
+ "loss": 0.1895,
+ "step": 760
+ },
+ {
+ "epoch": 1.46,
+ "learning_rate": 3.5482801645791266e-06,
+ "loss": 0.1761,
+ "step": 761
+ },
+ {
+ "epoch": 1.47,
+ "learning_rate": 3.5244990951772972e-06,
+ "loss": 0.2294,
+ "step": 762
+ },
+ {
+ "epoch": 1.47,
+ "learning_rate": 3.5007809259279146e-06,
+ "loss": 0.1868,
+ "step": 763
+ },
+ {
+ "epoch": 1.47,
+ "learning_rate": 3.4771258872187917e-06,
+ "loss": 0.1913,
+ "step": 764
+ },
+ {
+ "epoch": 1.47,
+ "learning_rate": 3.453534208824507e-06,
+ "loss": 0.1957,
+ "step": 765
+ },
+ {
+ "epoch": 1.47,
+ "learning_rate": 3.4300061199041967e-06,
+ "loss": 0.1747,
+ "step": 766
+ },
+ {
+ "epoch": 1.48,
+ "learning_rate": 3.4065418489993118e-06,
+ "loss": 0.1912,
+ "step": 767
+ },
+ {
+ "epoch": 1.48,
+ "learning_rate": 3.3831416240314085e-06,
+ "loss": 0.1802,
+ "step": 768
+ },
+ {
+ "epoch": 1.48,
+ "learning_rate": 3.3598056722999185e-06,
+ "loss": 0.2157,
+ "step": 769
+ },
+ {
+ "epoch": 1.48,
+ "learning_rate": 3.3365342204799613e-06,
+ "loss": 0.1837,
+ "step": 770
+ },
+ {
+ "epoch": 1.48,
+ "learning_rate": 3.3133274946201333e-06,
+ "loss": 0.213,
+ "step": 771
+ },
+ {
+ "epoch": 1.48,
+ "learning_rate": 3.290185720140301e-06,
+ "loss": 0.1603,
+ "step": 772
+ },
+ {
+ "epoch": 1.49,
+ "learning_rate": 3.267109121829428e-06,
+ "loss": 0.2029,
+ "step": 773
+ },
+ {
+ "epoch": 1.49,
+ "learning_rate": 3.2440979238433977e-06,
+ "loss": 0.1911,
+ "step": 774
+ },
+ {
+ "epoch": 1.49,
+ "learning_rate": 3.221152349702802e-06,
+ "loss": 0.2118,
+ "step": 775
+ },
+ {
+ "epoch": 1.49,
+ "learning_rate": 3.1982726222908046e-06,
+ "loss": 0.1601,
+ "step": 776
+ },
+ {
+ "epoch": 1.49,
+ "learning_rate": 3.1754589638509647e-06,
+ "loss": 0.2012,
+ "step": 777
+ },
+ {
+ "epoch": 1.5,
+ "learning_rate": 3.152711595985065e-06,
+ "loss": 0.2384,
+ "step": 778
+ },
+ {
+ "epoch": 1.5,
+ "learning_rate": 3.1300307396509833e-06,
+ "loss": 0.1736,
+ "step": 779
+ },
+ {
+ "epoch": 1.5,
+ "learning_rate": 3.10741661516053e-06,
+ "loss": 0.193,
+ "step": 780
+ },
+ {
+ "epoch": 1.5,
+ "learning_rate": 3.0848694421773075e-06,
+ "loss": 0.1988,
+ "step": 781
+ },
+ {
+ "epoch": 1.5,
+ "learning_rate": 3.0623894397145837e-06,
+ "loss": 0.2081,
+ "step": 782
+ },
+ {
+ "epoch": 1.51,
+ "learning_rate": 3.0399768261331664e-06,
+ "loss": 0.2049,
+ "step": 783
+ },
+ {
+ "epoch": 1.51,
+ "learning_rate": 3.017631819139273e-06,
+ "loss": 0.1893,
+ "step": 784
+ },
+ {
+ "epoch": 1.51,
+ "learning_rate": 2.995354635782417e-06,
+ "loss": 0.1898,
+ "step": 785
+ },
+ {
+ "epoch": 1.51,
+ "learning_rate": 2.9731454924533086e-06,
+ "loss": 0.2051,
+ "step": 786
+ },
+ {
+ "epoch": 1.51,
+ "learning_rate": 2.95100460488175e-06,
+ "loss": 0.1529,
+ "step": 787
+ },
+ {
+ "epoch": 1.52,
+ "learning_rate": 2.9289321881345257e-06,
+ "loss": 0.2096,
+ "step": 788
+ },
+ {
+ "epoch": 1.52,
+ "learning_rate": 2.906928456613336e-06,
+ "loss": 0.2139,
+ "step": 789
+ },
+ {
+ "epoch": 1.52,
+ "learning_rate": 2.884993624052701e-06,
+ "loss": 0.2012,
+ "step": 790
+ },
+ {
+ "epoch": 1.52,
+ "learning_rate": 2.8631279035178796e-06,
+ "loss": 0.178,
+ "step": 791
+ },
+ {
+ "epoch": 1.52,
+ "learning_rate": 2.8413315074028157e-06,
+ "loss": 0.199,
+ "step": 792
+ },
+ {
+ "epoch": 1.52,
+ "learning_rate": 2.819604647428067e-06,
+ "loss": 0.208,
+ "step": 793
+ },
+ {
+ "epoch": 1.53,
+ "learning_rate": 2.7979475346387363e-06,
+ "loss": 0.193,
+ "step": 794
+ },
+ {
+ "epoch": 1.53,
+ "learning_rate": 2.776360379402445e-06,
+ "loss": 0.2211,
+ "step": 795
+ },
+ {
+ "epoch": 1.53,
+ "learning_rate": 2.7548433914072736e-06,
+ "loss": 0.1784,
+ "step": 796
+ },
+ {
+ "epoch": 1.53,
+ "learning_rate": 2.7333967796597317e-06,
+ "loss": 0.1604,
+ "step": 797
+ },
+ {
+ "epoch": 1.53,
+ "learning_rate": 2.712020752482717e-06,
+ "loss": 0.211,
+ "step": 798
+ },
+ {
+ "epoch": 1.54,
+ "learning_rate": 2.690715517513508e-06,
+ "loss": 0.1897,
+ "step": 799
+ },
+ {
+ "epoch": 1.54,
+ "learning_rate": 2.669481281701739e-06,
+ "loss": 0.1731,
+ "step": 800
+ }
+ ],
+ "logging_steps": 1.0,
+ "max_steps": 1040,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 2,
+ "save_steps": 100,
+ "total_flos": 41743906406400.0,
+ "train_batch_size": 16,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-800/training_args.bin b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-800/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..1db02bc253be0ccb38137b90117e8cf432939218
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-800/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:573a026c745bf25ae971945f26077f5567afd7eec66a80e3591209bf82e13ca8
+size 6712
diff --git a/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-900/rng_state_0.pth b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-900/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..b346349ce12dd5a17d4b91ed2a5722bb52550950
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.1/checkpoint-900/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ad8a35afd8967cbb748405387e44426e43ad127028e826eddc9b67d2ca873c85
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-400/model-00002-of-00004.safetensors b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-400/model-00002-of-00004.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..705d80df3ac8181a9e255832fd07c2401b0b506b
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-400/model-00002-of-00004.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6398fc95ae0219aceb85a40b2701b9272033c0d62b32ef5ff2c7a9c88539e300
+size 4999819336
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-400/model-00003-of-00004.safetensors b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-400/model-00003-of-00004.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..98cd79162338c9199c74f790cdf8c904637b295a
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-400/model-00003-of-00004.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:883aded4f659a43984be926e61a2ad0f49a4019dc46b5c1802bb208c2e7da54a
+size 4927408360
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/model-00002-of-00004.safetensors b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/model-00002-of-00004.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..705d80df3ac8181a9e255832fd07c2401b0b506b
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/model-00002-of-00004.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6398fc95ae0219aceb85a40b2701b9272033c0d62b32ef5ff2c7a9c88539e300
+size 4999819336
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/model-00003-of-00004.safetensors b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/model-00003-of-00004.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..98cd79162338c9199c74f790cdf8c904637b295a
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/model-00003-of-00004.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:883aded4f659a43984be926e61a2ad0f49a4019dc46b5c1802bb208c2e7da54a
+size 4927408360