diff --git a/llava-v1.6-13b-unk-vqa-v1.0/config.json b/llava-v1.6-13b-unk-vqa-v1.0/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..88130fe3488dc2d74a11bd28b53c2962f8467c80
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.0/config.json
@@ -0,0 +1,74 @@
+{
+ "_name_or_path": "../pretrained-models/llava-v1.6-vicuna-13b",
+ "architectures": [
+ "LlavaLlamaForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "freeze_mm_mlp_adapter": false,
+ "freeze_mm_vision_resampler": false,
+ "hidden_act": "silu",
+ "hidden_size": 5120,
+ "image_aspect_ratio": "pad",
+ "image_crop_resolution": 224,
+ "image_grid_pinpoints": [
+ [
+ 336,
+ 672
+ ],
+ [
+ 672,
+ 336
+ ],
+ [
+ 672,
+ 672
+ ],
+ [
+ 1008,
+ 336
+ ],
+ [
+ 336,
+ 1008
+ ]
+ ],
+ "image_split_resolution": 224,
+ "initializer_range": 0.02,
+ "intermediate_size": 13824,
+ "max_length": 4096,
+ "max_position_embeddings": 4096,
+ "mm_hidden_size": 1024,
+ "mm_patch_merge_type": "flat",
+ "mm_projector_lr": null,
+ "mm_projector_type": "mlp2x_gelu",
+ "mm_resampler_type": null,
+ "mm_use_im_patch_token": false,
+ "mm_use_im_start_end": false,
+ "mm_vision_select_feature": "patch",
+ "mm_vision_select_layer": -2,
+ "mm_vision_tower": "openai/clip-vit-large-patch14-336",
+ "mm_vision_tower_lr": 2e-06,
+ "model_type": "llava_llama",
+ "num_attention_heads": 40,
+ "num_hidden_layers": 40,
+ "num_key_value_heads": 40,
+ "pad_token_id": 0,
+ "pretraining_tp": 1,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 10000.0,
+ "tie_word_embeddings": false,
+ "tokenizer_model_max_length": 2048,
+ "tokenizer_padding_side": "right",
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.37.2",
+ "tune_mm_mlp_adapter": false,
+ "tune_mm_vision_resampler": false,
+ "unfreeze_mm_vision_tower": true,
+ "use_cache": true,
+ "use_mm_proj": true,
+ "vocab_size": 32000
+}
diff --git a/llava-v1.6-13b-unk-vqa-v1.0/special_tokens_map.json b/llava-v1.6-13b-unk-vqa-v1.0/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..14761dcf1466dc232bd41de9c21d4c617b15755e
--- /dev/null
+++ b/llava-v1.6-13b-unk-vqa-v1.0/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/config.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..44e6d4e17930a42d0aa68dcd3790bd5f32ba4ec4
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/config.json
@@ -0,0 +1,73 @@
+{
+ "_name_or_path": "../pretrained-models/llava-v1.6-mistral-7b",
+ "architectures": [
+ "LlavaLlamaForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "freeze_mm_mlp_adapter": false,
+ "freeze_mm_vision_resampler": false,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "image_aspect_ratio": "pad",
+ "image_crop_resolution": 224,
+ "image_grid_pinpoints": [
+ [
+ 336,
+ 672
+ ],
+ [
+ 672,
+ 336
+ ],
+ [
+ 672,
+ 672
+ ],
+ [
+ 1008,
+ 336
+ ],
+ [
+ 336,
+ 1008
+ ]
+ ],
+ "image_split_resolution": 224,
+ "initializer_range": 0.02,
+ "intermediate_size": 14336,
+ "max_position_embeddings": 32768,
+ "mm_hidden_size": 1024,
+ "mm_patch_merge_type": "flat",
+ "mm_projector_lr": null,
+ "mm_projector_type": "mlp2x_gelu",
+ "mm_resampler_type": null,
+ "mm_use_im_patch_token": false,
+ "mm_use_im_start_end": false,
+ "mm_vision_select_feature": "patch",
+ "mm_vision_select_layer": -2,
+ "mm_vision_tower": "openai/clip-vit-large-patch14-336",
+ "mm_vision_tower_lr": 2e-06,
+ "model_type": "llava_llama",
+ "num_attention_heads": 32,
+ "num_hidden_layers": 32,
+ "num_key_value_heads": 8,
+ "pretraining_tp": 1,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "tokenizer_model_max_length": 2048,
+ "tokenizer_padding_side": "right",
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.37.2",
+ "tune_mm_mlp_adapter": false,
+ "tune_mm_vision_resampler": false,
+ "unfreeze_mm_vision_tower": true,
+ "use_cache": false,
+ "use_mm_proj": true,
+ "vocab_size": 32000
+}
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/generation_config.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..69b7806611a4865cd48c3e991dbd7d8312e0c5d3
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/generation_config.json
@@ -0,0 +1,6 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "transformers_version": "4.37.2"
+}
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/latest b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/latest
new file mode 100644
index 0000000000000000000000000000000000000000..744ae7dbad571b6f37ec6c7066549494261bb59e
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/latest
@@ -0,0 +1 @@
+global_step100
\ No newline at end of file
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/model.safetensors.index.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..927da5be7e6e3ec29d3a967a09ba6a421d7a2191
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/model.safetensors.index.json
@@ -0,0 +1,694 @@
+{
+ "metadata": {
+ "total_size": 15132446720
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00004-of-00004.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
+ "model.image_newline": "model-00001-of-00004.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.30.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.mm_projector.0.bias": "model-00003-of-00004.safetensors",
+ "model.mm_projector.0.weight": "model-00003-of-00004.safetensors",
+ "model.mm_projector.2.bias": "model-00003-of-00004.safetensors",
+ "model.mm_projector.2.weight": "model-00003-of-00004.safetensors",
+ "model.norm.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.embeddings.class_embedding": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.embeddings.patch_embedding.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.embeddings.position_embedding.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.post_layernorm.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.post_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.pre_layrnorm.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.pre_layrnorm.weight": "model-00003-of-00004.safetensors"
+ }
+}
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/rng_state_0.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..b6473612e41c5cfd6973c2e71fa5f3ad2b2bcad1
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:575119a228f98110923ffa2dedcb50e3317251b26054355d015e0b2240d566f2
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/rng_state_1.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..8506e00431b6ac7067699c0ea4f59adb6fa0ba20
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0728b56dab7abb5ef8a0d4bae3519c5767c97467bdd886d26bf19cc8599d0312
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/rng_state_2.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..ea499e285c97cca07fedd34662c3d4ab44ff6f47
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f4e481d4ef1546694da7337f6bb6c658b866dcb79b85deeb477da0d27ebe851e
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/rng_state_3.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..aeb38f92f106ac3f08bae4f82179a8a12243bccb
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:353c60be37ea56fc992fca446598ceca5d1fd002aa3bd6dbb9ad740e6f47ebb3
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/rng_state_4.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/rng_state_4.pth
new file mode 100644
index 0000000000000000000000000000000000000000..9d5856cb7a3f15092fa5593507022316916f648e
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/rng_state_4.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e9107fe964ba7205e354084b85210e5a5ea1c98cfd4d38adb9cd3926945dcae4
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/rng_state_5.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/rng_state_5.pth
new file mode 100644
index 0000000000000000000000000000000000000000..b824ee24d256695aad4a69a62d8e7125f51a17f2
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/rng_state_5.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:69d1bb1abee38b92e53f3f23549b642ce0f1edcdccf7b6129847ac61636e96d5
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/rng_state_6.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/rng_state_6.pth
new file mode 100644
index 0000000000000000000000000000000000000000..a9fd0364bb8f1a8e91eca45be5e1b6672b4d9afd
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/rng_state_6.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:afd5516048e20f36959601574e29e40106085a7d3cdc7bf425ce5e84633490e6
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/rng_state_7.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/rng_state_7.pth
new file mode 100644
index 0000000000000000000000000000000000000000..4e80125fd18efcb1097384319888b699f4dce7e7
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/rng_state_7.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8e2c46927fc06939b4c976a01e4b95dec1f8b98ceaea86d31a5d756fc30ff006
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/scheduler.pt b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..fb6372408918017849d562bdfead314b0390dd30
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a4357adac8296985cb4b98d4cc54cbe1338e42459aa4150e5eb8b32da703ed47
+size 1064
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/special_tokens_map.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..14761dcf1466dc232bd41de9c21d4c617b15755e
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/tokenizer.model b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..8b443ef19c2a19acc3ac64fb9c3db4a72921dff6
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+size 493443
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/tokenizer_config.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..23dcf70e8cfc9b16310b6ff3dc98fdbc5adc11f8
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/tokenizer_config.json
@@ -0,0 +1,44 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [],
+ "bos_token": "",
+ "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "legacy": true,
+ "model_max_length": 2048,
+ "pad_token": "",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "",
+ "use_default_system_prompt": false
+}
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/trainer_state.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..c20d476681dde7c51f92015a7f1821f819eb5721
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/trainer_state.json
@@ -0,0 +1,621 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.19230769230769232,
+ "eval_steps": 500,
+ "global_step": 100,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.0,
+ "learning_rate": 6.25e-07,
+ "loss": 3.7473,
+ "step": 1
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 1.25e-06,
+ "loss": 0.0,
+ "step": 2
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 1.8750000000000003e-06,
+ "loss": 0.0,
+ "step": 3
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 2.5e-06,
+ "loss": 0.0,
+ "step": 4
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 3.125e-06,
+ "loss": 0.0,
+ "step": 5
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 3.7500000000000005e-06,
+ "loss": 0.0,
+ "step": 6
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 4.3750000000000005e-06,
+ "loss": 0.0,
+ "step": 7
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 5e-06,
+ "loss": 0.0,
+ "step": 8
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 5.625e-06,
+ "loss": 0.0,
+ "step": 9
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 6.25e-06,
+ "loss": 0.0,
+ "step": 10
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 6.875e-06,
+ "loss": 0.0,
+ "step": 11
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 7.500000000000001e-06,
+ "loss": 0.0,
+ "step": 12
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 8.125000000000001e-06,
+ "loss": 0.0,
+ "step": 13
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 8.750000000000001e-06,
+ "loss": 0.0,
+ "step": 14
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 9.375000000000001e-06,
+ "loss": 0.0,
+ "step": 15
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 1e-05,
+ "loss": 0.0,
+ "step": 16
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 1.0625e-05,
+ "loss": 0.0,
+ "step": 17
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 1.125e-05,
+ "loss": 0.0,
+ "step": 18
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.1875e-05,
+ "loss": 0.0,
+ "step": 19
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.25e-05,
+ "loss": 0.0,
+ "step": 20
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.3125e-05,
+ "loss": 0.0,
+ "step": 21
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.375e-05,
+ "loss": 0.0,
+ "step": 22
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.4375e-05,
+ "loss": 0.0,
+ "step": 23
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.5000000000000002e-05,
+ "loss": 0.0,
+ "step": 24
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.5625e-05,
+ "loss": 0.0,
+ "step": 25
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.6250000000000002e-05,
+ "loss": 0.0,
+ "step": 26
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.6875e-05,
+ "loss": 0.0,
+ "step": 27
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.7500000000000002e-05,
+ "loss": 0.0,
+ "step": 28
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.8125e-05,
+ "loss": 0.0,
+ "step": 29
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.8750000000000002e-05,
+ "loss": 0.0,
+ "step": 30
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.9375e-05,
+ "loss": 0.0,
+ "step": 31
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 2e-05,
+ "loss": 0.0,
+ "step": 32
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.9999951432210905e-05,
+ "loss": 0.0,
+ "step": 33
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.9999805729315383e-05,
+ "loss": 0.0,
+ "step": 34
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.999956289272873e-05,
+ "loss": 0.0,
+ "step": 35
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.999922292480975e-05,
+ "loss": 0.0,
+ "step": 36
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.9998785828860744e-05,
+ "loss": 0.0,
+ "step": 37
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.9998251609127465e-05,
+ "loss": 0.0,
+ "step": 38
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.999762027079909e-05,
+ "loss": 0.0,
+ "step": 39
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9996891820008165e-05,
+ "loss": 0.0,
+ "step": 40
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9996066263830533e-05,
+ "loss": 0.0,
+ "step": 41
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9995143610285275e-05,
+ "loss": 0.0,
+ "step": 42
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9994123868334655e-05,
+ "loss": 0.0,
+ "step": 43
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9993007047883988e-05,
+ "loss": 0.0,
+ "step": 44
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.999179315978157e-05,
+ "loss": 0.0,
+ "step": 45
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.999048221581858e-05,
+ "loss": 0.0,
+ "step": 46
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.9989074228728942e-05,
+ "loss": 0.0,
+ "step": 47
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.9987569212189224e-05,
+ "loss": 0.0,
+ "step": 48
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.9985967180818493e-05,
+ "loss": 0.0,
+ "step": 49
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.998426815017817e-05,
+ "loss": 0.0,
+ "step": 50
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.998247213677188e-05,
+ "loss": 0.0,
+ "step": 51
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.9980579158045322e-05,
+ "loss": 0.0,
+ "step": 52
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.9978589232386036e-05,
+ "loss": 0.0,
+ "step": 53
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.997650237912329e-05,
+ "loss": 0.0,
+ "step": 54
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.997431861852785e-05,
+ "loss": 0.0,
+ "step": 55
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.9972037971811802e-05,
+ "loss": 0.0,
+ "step": 56
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.996966046112834e-05,
+ "loss": 0.0,
+ "step": 57
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.996718610957155e-05,
+ "loss": 0.0,
+ "step": 58
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.9964614941176194e-05,
+ "loss": 0.0,
+ "step": 59
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.9961946980917457e-05,
+ "loss": 0.0,
+ "step": 60
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.995918225471073e-05,
+ "loss": 0.0,
+ "step": 61
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.9956320789411338e-05,
+ "loss": 0.0,
+ "step": 62
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.9953362612814294e-05,
+ "loss": 0.0,
+ "step": 63
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.9950307753654016e-05,
+ "loss": 0.0,
+ "step": 64
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.994715624160405e-05,
+ "loss": 0.0,
+ "step": 65
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.99439081072768e-05,
+ "loss": 0.0,
+ "step": 66
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.9940563382223196e-05,
+ "loss": 0.0,
+ "step": 67
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.9937122098932428e-05,
+ "loss": 0.0,
+ "step": 68
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.9933584290831593e-05,
+ "loss": 0.0,
+ "step": 69
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.9929949992285397e-05,
+ "loss": 0.0,
+ "step": 70
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.992621923859581e-05,
+ "loss": 0.0,
+ "step": 71
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.9922392066001724e-05,
+ "loss": 0.0,
+ "step": 72
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.99184685116786e-05,
+ "loss": 0.0,
+ "step": 73
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.9914448613738107e-05,
+ "loss": 0.0,
+ "step": 74
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.991033241122776e-05,
+ "loss": 0.0,
+ "step": 75
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9906119944130527e-05,
+ "loss": 0.0,
+ "step": 76
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9901811253364458e-05,
+ "loss": 0.0,
+ "step": 77
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9897406380782262e-05,
+ "loss": 0.0,
+ "step": 78
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.989290536917093e-05,
+ "loss": 0.0,
+ "step": 79
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9888308262251286e-05,
+ "loss": 0.0,
+ "step": 80
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.988361510467761e-05,
+ "loss": 0.0,
+ "step": 81
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.9878825942037147e-05,
+ "loss": 0.0,
+ "step": 82
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.9873940820849714e-05,
+ "loss": 0.0,
+ "step": 83
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.9868959788567213e-05,
+ "loss": 0.0,
+ "step": 84
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.9863882893573188e-05,
+ "loss": 0.0,
+ "step": 85
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.985871018518236e-05,
+ "loss": 0.0,
+ "step": 86
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.9853441713640123e-05,
+ "loss": 0.0,
+ "step": 87
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.9848077530122083e-05,
+ "loss": 0.0,
+ "step": 88
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.9842617686733546e-05,
+ "loss": 0.0,
+ "step": 89
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.9837062236509013e-05,
+ "loss": 0.0,
+ "step": 90
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.983141123341168e-05,
+ "loss": 0.0,
+ "step": 91
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.9825664732332886e-05,
+ "loss": 0.0,
+ "step": 92
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.9819822789091597e-05,
+ "loss": 0.0,
+ "step": 93
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.981388546043388e-05,
+ "loss": 0.0,
+ "step": 94
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.9807852804032306e-05,
+ "loss": 0.0,
+ "step": 95
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.9801724878485438e-05,
+ "loss": 0.0,
+ "step": 96
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.979550174331724e-05,
+ "loss": 0.0,
+ "step": 97
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.9789183458976485e-05,
+ "loss": 0.0,
+ "step": 98
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.97827700868362e-05,
+ "loss": 0.0,
+ "step": 99
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.977626168919305e-05,
+ "loss": 0.0,
+ "step": 100
+ }
+ ],
+ "logging_steps": 1.0,
+ "max_steps": 1040,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 2,
+ "save_steps": 100,
+ "total_flos": 4.989387064056218e+16,
+ "train_batch_size": 16,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/training_args.bin b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..2ca4d892afdd453b26723a9aa94e432cb44cc953
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:63da3a2d0bf1dde543b68e123590fcd7c42f45ec7eb68e86c6eadd439321f902
+size 6264
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/zero_to_fp32.py b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/zero_to_fp32.py
new file mode 100644
index 0000000000000000000000000000000000000000..c98caae31534368be22b67fc4ae906836c992a8d
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100/zero_to_fp32.py
@@ -0,0 +1,587 @@
+#!/usr/bin/env python
+
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+# This script extracts fp32 consolidated weights from ZeRO stage 1, 2 and 3 DeepSpeed checkpoints. It gets
+# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+# application.
+#
+# example: python zero_to_fp32.py . pytorch_model.bin
+
+import argparse
+import torch
+import glob
+import math
+import os
+import re
+from collections import OrderedDict
+from dataclasses import dataclass
+
+# While this script doesn't use DeepSpeed to recover data, the checkpoints are pickled with
+# DeepSpeed data structures, so DeepSpeed has to be available in the current Python environment.
+from deepspeed.utils import logger
+from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
+@dataclass
+class zero_model_state:
+ buffers: dict()
+ param_shapes: dict()
+ shared_params: list
+ ds_version: int
+ frozen_param_shapes: dict()
+ frozen_param_fragments: dict()
+
+
+debug = 0
+
+# load to cpu
+device = torch.device('cpu')
+
+
+def atoi(text):
+ return int(text) if text.isdigit() else text
+
+
+def natural_keys(text):
+ '''
+ alist.sort(key=natural_keys) sorts in human order
+ http://nedbatchelder.com/blog/200712/human_sorting.html
+ (See Toothy's implementation in the comments)
+ '''
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
+
+
+def get_model_state_file(checkpoint_dir, zero_stage):
+ if not os.path.isdir(checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
+
+ # there should be only one file
+ if zero_stage <= 2:
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
+ elif zero_stage == 3:
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
+
+ if not os.path.exists(file):
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
+
+ return file
+
+
+def get_checkpoint_files(checkpoint_dir, glob_pattern):
+ # XXX: need to test that this simple glob rule works for multi-node setup too
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
+
+ if len(ckpt_files) == 0:
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
+
+ return ckpt_files
+
+
+def get_optim_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
+
+
+def get_model_state_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
+
+
+def parse_model_states(files):
+ zero_model_states = []
+ for file in files:
+ state_dict = torch.load(file, map_location=device)
+
+ if BUFFER_NAMES not in state_dict:
+ raise ValueError(f"{file} is not a model state checkpoint")
+ buffer_names = state_dict[BUFFER_NAMES]
+ if debug:
+ print("Found buffers:", buffer_names)
+
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
+ param_shapes = state_dict[PARAM_SHAPES]
+
+ # collect parameters that are included in param_shapes
+ param_names = []
+ for s in param_shapes:
+ for name in s.keys():
+ param_names.append(name)
+
+ # update with frozen parameters
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
+ if frozen_param_shapes is not None:
+ if debug:
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
+ param_names += list(frozen_param_shapes.keys())
+
+ # handle shared params
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
+
+ ds_version = state_dict.get(DS_VERSION, None)
+
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
+
+ z_model_state = zero_model_state(buffers=buffers,
+ param_shapes=param_shapes,
+ shared_params=shared_params,
+ ds_version=ds_version,
+ frozen_param_shapes=frozen_param_shapes,
+ frozen_param_fragments=frozen_param_fragments)
+ zero_model_states.append(z_model_state)
+
+ return zero_model_states
+
+
+def parse_optim_states(files, ds_checkpoint_dir):
+
+ total_files = len(files)
+ state_dicts = []
+ for f in files:
+ state_dict = torch.load(f, map_location=device)
+ # immediately discard the two potentially huge optimizer states, as we only care about the fp32 master weights
+ # and also handle the case where it was already removed by another helper script
+ state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
+ state_dicts.append(state_dict)
+
+ if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
+
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
+ # use the max of the partition_count to get the dp world_size.
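+ # e.g. (illustrative numbers, not taken from this checkpoint): PARTITION_COUNT could be a
+ # per-param-group list such as [8, 4] when expert params use a smaller dp group, in which
+ # case max([8, 4]) == 8 is the data-parallel world size used below.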
+
+ if type(world_size) is list:
+ world_size = max(world_size)
+
+ if world_size != total_files:
+ raise ValueError(
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
+ )
+
+ # the groups are named differently in each stage
+ if zero_stage <= 2:
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
+ elif zero_stage == 3:
+ fp32_groups_key = FP32_FLAT_GROUPS
+ else:
+ raise ValueError(f"unknown zero stage {zero_stage}")
+
+ if zero_stage <= 2:
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
+ elif zero_stage == 3:
+ # if there is more than one param group, there will be multiple flattened tensors - one
+ # flattened tensor per group - for simplicity merge them into a single tensor
+ #
+ # XXX: could make the script more memory efficient for when there are multiple groups - it
+ # will require matching the sub-lists of param_shapes for each param group flattened tensor
+
+ fp32_flat_groups = [
+ torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
+ ]
+
+ return zero_stage, world_size, fp32_flat_groups
+
+
+def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir):
+ """
+ Returns fp32 state_dict reconstructed from ds checkpoint
+
+ Args:
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
+
+ """
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
+
+ optim_files = get_optim_files(ds_checkpoint_dir)
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
+
+ model_files = get_model_state_files(ds_checkpoint_dir)
+
+ zero_model_states = parse_model_states(model_files)
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
+
+ if zero_stage <= 2:
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states)
+ elif zero_stage == 3:
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states)
+
+
+def _zero2_merge_frozen_params(state_dict, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
+
+ if debug:
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ state_dict[name] = frozen_param_fragments[name]
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+
+ # Reconstruction protocol:
+ #
+ # XXX: document this
+
+ if debug:
+ for i in range(world_size):
+ for j in range(len(fp32_flat_groups[0])):
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
+
+ # XXX: memory usage doubles here (zero2)
+ num_param_groups = len(fp32_flat_groups[0])
+ merged_single_partition_of_fp32_groups = []
+ for i in range(num_param_groups):
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
+ avail_numel = sum(
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
+
+ if debug:
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
+ # not asserting if there is a mismatch due to possible padding
+ print(f"Have {avail_numel} numels to process.")
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # out-of-core computing solution
+ total_numel = 0
+ total_params = 0
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
+ offset = 0
+ avail_numel = full_single_fp32_vector.numel()
+ for name, shape in shapes.items():
+
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+ total_params += 1
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
+ offset += unpartitioned_numel
+
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
+ # live optimizer object, so we are checking that the numbers are within the right range
+ align_to = 2 * world_size
+
+ def zero2_align(x):
+ return align_to * math.ceil(x / align_to)
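+ # Illustrative example (hypothetical numbers): with world_size=8, align_to == 16, so an
+ # offset of 1000 is rounded up to zero2_align(1000) == 16 * math.ceil(1000 / 16) == 1008.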
+
+ if debug:
+ print(f"original offset={offset}, avail_numel={avail_numel}")
+
+ offset = zero2_align(offset)
+ avail_numel = zero2_align(avail_numel)
+
+ if debug:
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
+
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def zero3_partitioned_param_info(unpartitioned_numel, world_size):
+ remainder = unpartitioned_numel % world_size
+ padding_numel = (world_size - remainder) if remainder else 0
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
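+ # Illustrative example (hypothetical numbers): unpartitioned_numel=10 and world_size=4 give
+ # remainder=2, hence padding_numel=2 and partitioned_numel=math.ceil(10 / 4)=3,
+ # i.e. 4 ranks * 3 elements == 12 == 10 real elements + 2 padding elements.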
+ return partitioned_numel, padding_numel
+
+
+def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ if debug:
+ for i in range(world_size):
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
+
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+ avail_numel = fp32_flat_groups[0].numel() * world_size
+ # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
+ # param, re-consolidating each param, while dealing with padding if any
+
+ # merge list of dicts, preserving order
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
+
+ if debug:
+ for i in range(world_size):
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
+
+ wanted_params = len(param_shapes)
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
+ # not asserting if there is a mismatch due to possible padding
+ avail_numel = fp32_flat_groups[0].numel() * world_size
+ print(f"Trainable params: Have {avail_numel} numels to process.")
+ print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # out-of-core computing solution
+ offset = 0
+ total_numel = 0
+ total_params = 0
+ for name, shape in param_shapes.items():
+
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+ total_params += 1
+
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ # XXX: memory usage doubles here
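+ # For example (hypothetical numbers): a (4096, 4096) fp32 param has 16,777,216 elements; with
+ # world_size=8 each rank contributes a 2,097,152-element slice starting at this offset, and
+ # concatenating the 8 slices, narrowing to 16,777,216 and calling .view(shape) recovers the
+ # original tensor (narrow() trims any tail padding when the numel isn't divisible by world_size).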
+ state_dict[name] = torch.cat(
+ tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
+ 0).narrow(0, 0, unpartitioned_numel).view(shape)
+ offset += partitioned_numel
+
+ offset *= world_size
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
+
+ _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None):
+ """
+ Convert a ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+ ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+ via a model hub.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder
+ - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, the tag is read from the 'latest' file in the checkpoint folder, e.g., ``global_step14``
+
+ Returns:
+ - pytorch ``state_dict``
+
+ Note: this approach may not work if your application doesn't have sufficient free CPU memory, in
+ which case you may need to use the offline approach via the ``zero_to_fp32.py`` script that is
+ saved with the checkpoint.
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+ # do the training and checkpoint saving
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
+ model = model.cpu() # move to cpu
+ model.load_state_dict(state_dict)
+ # submit to model hub or save the model to share with others
+
+ In this example the ``model`` will no longer be usable in the deepspeed context of the same
+ application, i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+ """
+ if tag is None:
+ latest_path = os.path.join(checkpoint_dir, 'latest')
+ if os.path.isfile(latest_path):
+ with open(latest_path, 'r') as fd:
+ tag = fd.read().strip()
+ else:
+ raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+ ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+ if not os.path.isdir(ds_checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+ return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir)
+
+
+def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+ loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
+ - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
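+
+ A typical usage (illustrative, mirroring the examples above) might be ::
+
+ from deepspeed.utils.zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict
+ convert_zero_checkpoint_to_fp32_state_dict('path/checkpoint-12', 'path/checkpoint-12/pytorch_model.bin')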
+ """
+
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+ print(f"Saving fp32 state dict to {output_file}")
+ torch.save(state_dict, output_file)
+
+
+def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+ """
+ 1. Put the provided model on the CPU
+ 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+ 3. Load it into the provided model
+
+ Args:
+ - ``model``: the model object to update
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+ Returns:
+ - ``model``: the modified model
+
+ Make sure you have plenty of CPU memory available before you call this function. If you don't
+ have enough, use the ``zero_to_fp32.py`` utility to do the conversion offline. You will find it
+ conveniently placed for you in the checkpoint folder.
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+ model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+ # submit to model hub or save the model to share with others
+
+ Note that once this has run, the ``model`` will no longer be usable in the deepspeed context
+ of the same application, i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ """
+ logger.info(f"Extracting fp32 weights")
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+ logger.info(f"Overwriting model with fp32 weights")
+ model = model.cpu()
+ model.load_state_dict(state_dict, strict=False)
+
+ return model
+
+
+if __name__ == "__main__":
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("checkpoint_dir",
+ type=str,
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+ parser.add_argument(
+ "output_file",
+ type=str,
+ help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
+ parser.add_argument("-t",
+ "--tag",
+ type=str,
+ default=None,
+ help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+ args = parser.parse_args()
+
+ debug = args.debug
+
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file, tag=args.tag)
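+
+ # Example invocation (illustrative), using the paths from the argument help above:
+ #   python zero_to_fp32.py path/checkpoint-12 path/checkpoint-12/pytorch_model.bin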
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/config.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..44e6d4e17930a42d0aa68dcd3790bd5f32ba4ec4
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/config.json
@@ -0,0 +1,73 @@
+{
+ "_name_or_path": "../pretrained-models/llava-v1.6-mistral-7b",
+ "architectures": [
+ "LlavaLlamaForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "freeze_mm_mlp_adapter": false,
+ "freeze_mm_vision_resampler": false,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "image_aspect_ratio": "pad",
+ "image_crop_resolution": 224,
+ "image_grid_pinpoints": [
+ [
+ 336,
+ 672
+ ],
+ [
+ 672,
+ 336
+ ],
+ [
+ 672,
+ 672
+ ],
+ [
+ 1008,
+ 336
+ ],
+ [
+ 336,
+ 1008
+ ]
+ ],
+ "image_split_resolution": 224,
+ "initializer_range": 0.02,
+ "intermediate_size": 14336,
+ "max_position_embeddings": 32768,
+ "mm_hidden_size": 1024,
+ "mm_patch_merge_type": "flat",
+ "mm_projector_lr": null,
+ "mm_projector_type": "mlp2x_gelu",
+ "mm_resampler_type": null,
+ "mm_use_im_patch_token": false,
+ "mm_use_im_start_end": false,
+ "mm_vision_select_feature": "patch",
+ "mm_vision_select_layer": -2,
+ "mm_vision_tower": "openai/clip-vit-large-patch14-336",
+ "mm_vision_tower_lr": 2e-06,
+ "model_type": "llava_llama",
+ "num_attention_heads": 32,
+ "num_hidden_layers": 32,
+ "num_key_value_heads": 8,
+ "pretraining_tp": 1,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "tokenizer_model_max_length": 2048,
+ "tokenizer_padding_side": "right",
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.37.2",
+ "tune_mm_mlp_adapter": false,
+ "tune_mm_vision_resampler": false,
+ "unfreeze_mm_vision_tower": true,
+ "use_cache": false,
+ "use_mm_proj": true,
+ "vocab_size": 32000
+}
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/generation_config.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..69b7806611a4865cd48c3e991dbd7d8312e0c5d3
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/generation_config.json
@@ -0,0 +1,6 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "transformers_version": "4.37.2"
+}
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/latest b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/latest
new file mode 100644
index 0000000000000000000000000000000000000000..753e24e10f3a2489150f458205cf759fd8b6081f
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/latest
@@ -0,0 +1 @@
+global_step200
\ No newline at end of file
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/model.safetensors.index.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..927da5be7e6e3ec29d3a967a09ba6a421d7a2191
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/model.safetensors.index.json
@@ -0,0 +1,694 @@
+{
+ "metadata": {
+ "total_size": 15132446720
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00004-of-00004.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
+ "model.image_newline": "model-00001-of-00004.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.30.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.mm_projector.0.bias": "model-00003-of-00004.safetensors",
+ "model.mm_projector.0.weight": "model-00003-of-00004.safetensors",
+ "model.mm_projector.2.bias": "model-00003-of-00004.safetensors",
+ "model.mm_projector.2.weight": "model-00003-of-00004.safetensors",
+ "model.norm.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.embeddings.class_embedding": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.embeddings.patch_embedding.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.embeddings.position_embedding.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.post_layernorm.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.post_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.pre_layrnorm.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.pre_layrnorm.weight": "model-00003-of-00004.safetensors"
+ }
+}
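The weight_map that closes above is the standard sharded-safetensors index: each parameter name is keyed to the shard file that stores it, and the whole CLIP vision tower lands in model-00003-of-00004.safetensors (the pre_layrnorm spelling is the actual parameter name used by the upstream CLIP implementation, not a typo in this index). Below is a minimal sketch of how one entry is resolved, assuming the usual model.safetensors.index.json layout next to the shards; the checkpoint path is illustrative.

import json
import os

from safetensors import safe_open

ckpt_dir = "llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100"  # illustrative path
with open(os.path.join(ckpt_dir, "model.safetensors.index.json")) as f:
    index = json.load(f)

name = "model.vision_tower.vision_tower.vision_model.post_layernorm.weight"
shard = index["weight_map"][name]  # "model-00003-of-00004.safetensors"
with safe_open(os.path.join(ckpt_dir, shard), framework="pt", device="cpu") as fh:
    tensor = fh.get_tensor(name)   # reads just this tensor from the shard
print(tensor.shape)                # torch.Size([1024]) for a CLIP ViT-L/14 layer norm

transformers performs the same lookup for every key when from_pretrained loads a sharded checkpoint, which is why the shards must stay alongside the index file.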
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/rng_state_2.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..ea499e285c97cca07fedd34662c3d4ab44ff6f47
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f4e481d4ef1546694da7337f6bb6c658b866dcb79b85deeb477da0d27ebe851e
+size 15984
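The three lines above are not the tensor payload itself but a Git LFS pointer: version names the pointer spec, oid is the SHA-256 of the real file, and size is its byte count (the remaining rng_state_*.pth, scheduler.pt, tokenizer.model and training_args.bin entries below use the same format). The binaries are materialized with `git lfs pull`. A small sketch of reading such a pointer, with an illustrative path:

def parse_lfs_pointer(path):
    """Return (spec_version, oid, size) parsed from a Git LFS pointer file."""
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields["version"], fields["oid"], int(fields["size"])

# parse_lfs_pointer("checkpoint-200/rng_state_2.pth")
# -> ("https://git-lfs.github.com/spec/v1", "sha256:f4e481d4...", 15984)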
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/rng_state_3.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..aeb38f92f106ac3f08bae4f82179a8a12243bccb
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:353c60be37ea56fc992fca446598ceca5d1fd002aa3bd6dbb9ad740e6f47ebb3
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/rng_state_4.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/rng_state_4.pth
new file mode 100644
index 0000000000000000000000000000000000000000..9d5856cb7a3f15092fa5593507022316916f648e
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/rng_state_4.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e9107fe964ba7205e354084b85210e5a5ea1c98cfd4d38adb9cd3926945dcae4
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/rng_state_5.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/rng_state_5.pth
new file mode 100644
index 0000000000000000000000000000000000000000..b824ee24d256695aad4a69a62d8e7125f51a17f2
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/rng_state_5.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:69d1bb1abee38b92e53f3f23549b642ce0f1edcdccf7b6129847ac61636e96d5
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/rng_state_6.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/rng_state_6.pth
new file mode 100644
index 0000000000000000000000000000000000000000..a9fd0364bb8f1a8e91eca45be5e1b6672b4d9afd
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/rng_state_6.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:afd5516048e20f36959601574e29e40106085a7d3cdc7bf425ce5e84633490e6
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/rng_state_7.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/rng_state_7.pth
new file mode 100644
index 0000000000000000000000000000000000000000..4e80125fd18efcb1097384319888b699f4dce7e7
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/rng_state_7.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8e2c46927fc06939b4c976a01e4b95dec1f8b98ceaea86d31a5d756fc30ff006
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/scheduler.pt b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..3832bd7d37c4d3f00a384304110b47fa49ad2648
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:004f6458d0b8aee88b5696089ea7003c7a93a0c86986070575b2c66175ae173b
+size 1064
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/special_tokens_map.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..14761dcf1466dc232bd41de9c21d4c617b15755e
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/tokenizer.model b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..8b443ef19c2a19acc3ac64fb9c3db4a72921dff6
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+size 493443
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/tokenizer_config.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..23dcf70e8cfc9b16310b6ff3dc98fdbc5adc11f8
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/tokenizer_config.json
@@ -0,0 +1,44 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [],
+ "bos_token": "",
+ "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "legacy": true,
+ "model_max_length": 2048,
+ "pad_token": "",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "",
+ "use_default_system_prompt": false
+}
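The chat_template above encodes the Mistral-instruct turn format: the rendered string starts with the BOS token, wraps each user turn in [INST] ... [/INST], appends the EOS token after each assistant turn, and raises an error if roles do not strictly alternate. A minimal sketch of rendering a conversation with this config, assuming the checkpoint directory loads with transformers' AutoTokenizer after `git lfs pull` has fetched tokenizer.model (the path and example messages are illustrative):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200")

messages = [
    {"role": "user", "content": "What brand is the laptop in the image?"},
    {"role": "assistant", "content": "I cannot answer the question."},
]
text = tok.apply_chat_template(messages, tokenize=False)
# -> "<s>[INST] What brand is the laptop in the image? [/INST]I cannot answer the question.</s>"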
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/trainer_state.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3a6b3edf200cb503a0ba77061d9ef715763ed26
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/trainer_state.json
@@ -0,0 +1,1221 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.38461538461538464,
+ "eval_steps": 500,
+ "global_step": 200,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.0,
+ "learning_rate": 6.25e-07,
+ "loss": 3.7473,
+ "step": 1
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 1.25e-06,
+ "loss": 0.0,
+ "step": 2
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 1.8750000000000003e-06,
+ "loss": 0.0,
+ "step": 3
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 2.5e-06,
+ "loss": 0.0,
+ "step": 4
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 3.125e-06,
+ "loss": 0.0,
+ "step": 5
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 3.7500000000000005e-06,
+ "loss": 0.0,
+ "step": 6
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 4.3750000000000005e-06,
+ "loss": 0.0,
+ "step": 7
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 5e-06,
+ "loss": 0.0,
+ "step": 8
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 5.625e-06,
+ "loss": 0.0,
+ "step": 9
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 6.25e-06,
+ "loss": 0.0,
+ "step": 10
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 6.875e-06,
+ "loss": 0.0,
+ "step": 11
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 7.500000000000001e-06,
+ "loss": 0.0,
+ "step": 12
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 8.125000000000001e-06,
+ "loss": 0.0,
+ "step": 13
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 8.750000000000001e-06,
+ "loss": 0.0,
+ "step": 14
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 9.375000000000001e-06,
+ "loss": 0.0,
+ "step": 15
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 1e-05,
+ "loss": 0.0,
+ "step": 16
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 1.0625e-05,
+ "loss": 0.0,
+ "step": 17
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 1.125e-05,
+ "loss": 0.0,
+ "step": 18
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.1875e-05,
+ "loss": 0.0,
+ "step": 19
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.25e-05,
+ "loss": 0.0,
+ "step": 20
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.3125e-05,
+ "loss": 0.0,
+ "step": 21
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.375e-05,
+ "loss": 0.0,
+ "step": 22
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.4375e-05,
+ "loss": 0.0,
+ "step": 23
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.5000000000000002e-05,
+ "loss": 0.0,
+ "step": 24
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.5625e-05,
+ "loss": 0.0,
+ "step": 25
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.6250000000000002e-05,
+ "loss": 0.0,
+ "step": 26
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.6875e-05,
+ "loss": 0.0,
+ "step": 27
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.7500000000000002e-05,
+ "loss": 0.0,
+ "step": 28
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.8125e-05,
+ "loss": 0.0,
+ "step": 29
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.8750000000000002e-05,
+ "loss": 0.0,
+ "step": 30
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.9375e-05,
+ "loss": 0.0,
+ "step": 31
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 2e-05,
+ "loss": 0.0,
+ "step": 32
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.9999951432210905e-05,
+ "loss": 0.0,
+ "step": 33
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.9999805729315383e-05,
+ "loss": 0.0,
+ "step": 34
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.999956289272873e-05,
+ "loss": 0.0,
+ "step": 35
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.999922292480975e-05,
+ "loss": 0.0,
+ "step": 36
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.9998785828860744e-05,
+ "loss": 0.0,
+ "step": 37
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.9998251609127465e-05,
+ "loss": 0.0,
+ "step": 38
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.999762027079909e-05,
+ "loss": 0.0,
+ "step": 39
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9996891820008165e-05,
+ "loss": 0.0,
+ "step": 40
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9996066263830533e-05,
+ "loss": 0.0,
+ "step": 41
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9995143610285275e-05,
+ "loss": 0.0,
+ "step": 42
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9994123868334655e-05,
+ "loss": 0.0,
+ "step": 43
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9993007047883988e-05,
+ "loss": 0.0,
+ "step": 44
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.999179315978157e-05,
+ "loss": 0.0,
+ "step": 45
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.999048221581858e-05,
+ "loss": 0.0,
+ "step": 46
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.9989074228728942e-05,
+ "loss": 0.0,
+ "step": 47
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.9987569212189224e-05,
+ "loss": 0.0,
+ "step": 48
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.9985967180818493e-05,
+ "loss": 0.0,
+ "step": 49
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.998426815017817e-05,
+ "loss": 0.0,
+ "step": 50
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.998247213677188e-05,
+ "loss": 0.0,
+ "step": 51
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.9980579158045322e-05,
+ "loss": 0.0,
+ "step": 52
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.9978589232386036e-05,
+ "loss": 0.0,
+ "step": 53
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.997650237912329e-05,
+ "loss": 0.0,
+ "step": 54
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.997431861852785e-05,
+ "loss": 0.0,
+ "step": 55
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.9972037971811802e-05,
+ "loss": 0.0,
+ "step": 56
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.996966046112834e-05,
+ "loss": 0.0,
+ "step": 57
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.996718610957155e-05,
+ "loss": 0.0,
+ "step": 58
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.9964614941176194e-05,
+ "loss": 0.0,
+ "step": 59
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.9961946980917457e-05,
+ "loss": 0.0,
+ "step": 60
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.995918225471073e-05,
+ "loss": 0.0,
+ "step": 61
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.9956320789411338e-05,
+ "loss": 0.0,
+ "step": 62
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.9953362612814294e-05,
+ "loss": 0.0,
+ "step": 63
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.9950307753654016e-05,
+ "loss": 0.0,
+ "step": 64
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.994715624160405e-05,
+ "loss": 0.0,
+ "step": 65
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.99439081072768e-05,
+ "loss": 0.0,
+ "step": 66
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.9940563382223196e-05,
+ "loss": 0.0,
+ "step": 67
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.9937122098932428e-05,
+ "loss": 0.0,
+ "step": 68
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.9933584290831593e-05,
+ "loss": 0.0,
+ "step": 69
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.9929949992285397e-05,
+ "loss": 0.0,
+ "step": 70
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.992621923859581e-05,
+ "loss": 0.0,
+ "step": 71
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.9922392066001724e-05,
+ "loss": 0.0,
+ "step": 72
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.99184685116786e-05,
+ "loss": 0.0,
+ "step": 73
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.9914448613738107e-05,
+ "loss": 0.0,
+ "step": 74
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.991033241122776e-05,
+ "loss": 0.0,
+ "step": 75
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9906119944130527e-05,
+ "loss": 0.0,
+ "step": 76
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9901811253364458e-05,
+ "loss": 0.0,
+ "step": 77
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9897406380782262e-05,
+ "loss": 0.0,
+ "step": 78
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.989290536917093e-05,
+ "loss": 0.0,
+ "step": 79
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9888308262251286e-05,
+ "loss": 0.0,
+ "step": 80
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.988361510467761e-05,
+ "loss": 0.0,
+ "step": 81
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.9878825942037147e-05,
+ "loss": 0.0,
+ "step": 82
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.9873940820849714e-05,
+ "loss": 0.0,
+ "step": 83
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.9868959788567213e-05,
+ "loss": 0.0,
+ "step": 84
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.9863882893573188e-05,
+ "loss": 0.0,
+ "step": 85
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.985871018518236e-05,
+ "loss": 0.0,
+ "step": 86
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.9853441713640123e-05,
+ "loss": 0.0,
+ "step": 87
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.9848077530122083e-05,
+ "loss": 0.0,
+ "step": 88
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.9842617686733546e-05,
+ "loss": 0.0,
+ "step": 89
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.9837062236509013e-05,
+ "loss": 0.0,
+ "step": 90
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.983141123341168e-05,
+ "loss": 0.0,
+ "step": 91
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.9825664732332886e-05,
+ "loss": 0.0,
+ "step": 92
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.9819822789091597e-05,
+ "loss": 0.0,
+ "step": 93
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.981388546043388e-05,
+ "loss": 0.0,
+ "step": 94
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.9807852804032306e-05,
+ "loss": 0.0,
+ "step": 95
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.9801724878485438e-05,
+ "loss": 0.0,
+ "step": 96
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.979550174331724e-05,
+ "loss": 0.0,
+ "step": 97
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.9789183458976485e-05,
+ "loss": 0.0,
+ "step": 98
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.97827700868362e-05,
+ "loss": 0.0,
+ "step": 99
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.977626168919305e-05,
+ "loss": 0.0,
+ "step": 100
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.9769658329266718e-05,
+ "loss": 0.0,
+ "step": 101
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9762960071199334e-05,
+ "loss": 0.0,
+ "step": 102
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9756166980054812e-05,
+ "loss": 0.0,
+ "step": 103
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9749279121818235e-05,
+ "loss": 0.0,
+ "step": 104
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9742296563395218e-05,
+ "loss": 0.0,
+ "step": 105
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9735219372611232e-05,
+ "loss": 0.0,
+ "step": 106
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.9728047618210995e-05,
+ "loss": 0.0,
+ "step": 107
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.9720781369857747e-05,
+ "loss": 0.0,
+ "step": 108
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.9713420698132614e-05,
+ "loss": 0.0,
+ "step": 109
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.970596567453391e-05,
+ "loss": 0.0,
+ "step": 110
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.9698416371476434e-05,
+ "loss": 0.0,
+ "step": 111
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.969077286229078e-05,
+ "loss": 0.0,
+ "step": 112
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.9683035221222617e-05,
+ "loss": 0.0,
+ "step": 113
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.9675203523431964e-05,
+ "loss": 0.0,
+ "step": 114
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.9667277844992476e-05,
+ "loss": 0.0,
+ "step": 115
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.9659258262890683e-05,
+ "loss": 0.0,
+ "step": 116
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.9651144855025265e-05,
+ "loss": 0.0,
+ "step": 117
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.964293770020628e-05,
+ "loss": 0.0,
+ "step": 118
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.9634636878154393e-05,
+ "loss": 0.0,
+ "step": 119
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.962624246950012e-05,
+ "loss": 0.0,
+ "step": 120
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.9617754555783045e-05,
+ "loss": 0.0,
+ "step": 121
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.9609173219450998e-05,
+ "loss": 0.0,
+ "step": 122
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.960049854385929e-05,
+ "loss": 0.0,
+ "step": 123
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.9591730613269878e-05,
+ "loss": 0.0,
+ "step": 124
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.9582869512850576e-05,
+ "loss": 0.0,
+ "step": 125
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.957391532867418e-05,
+ "loss": 0.0,
+ "step": 126
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.956486814771769e-05,
+ "loss": 0.0,
+ "step": 127
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.955572805786141e-05,
+ "loss": 0.0,
+ "step": 128
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.9546495147888134e-05,
+ "loss": 0.0,
+ "step": 129
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.953716950748227e-05,
+ "loss": 0.0,
+ "step": 130
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.9527751227228964e-05,
+ "loss": 0.0,
+ "step": 131
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.9518240398613226e-05,
+ "loss": 0.0,
+ "step": 132
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.9508637114019037e-05,
+ "loss": 0.0,
+ "step": 133
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.9498941466728462e-05,
+ "loss": 0.0,
+ "step": 134
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.9489153550920726e-05,
+ "loss": 0.0,
+ "step": 135
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.947927346167132e-05,
+ "loss": 0.0,
+ "step": 136
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.946930129495106e-05,
+ "loss": 0.0,
+ "step": 137
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.945923714762516e-05,
+ "loss": 0.0,
+ "step": 138
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.9449081117452304e-05,
+ "loss": 0.0,
+ "step": 139
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.9438833303083677e-05,
+ "loss": 0.0,
+ "step": 140
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.9428493804062013e-05,
+ "loss": 0.0,
+ "step": 141
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.9418062720820636e-05,
+ "loss": 0.0,
+ "step": 142
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9407540154682473e-05,
+ "loss": 0.0,
+ "step": 143
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9396926207859085e-05,
+ "loss": 0.0,
+ "step": 144
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9386220983449652e-05,
+ "loss": 0.0,
+ "step": 145
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9375424585439994e-05,
+ "loss": 0.0,
+ "step": 146
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9364537118701542e-05,
+ "loss": 0.0,
+ "step": 147
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.935355868899034e-05,
+ "loss": 0.0,
+ "step": 148
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.9342489402945997e-05,
+ "loss": 0.0,
+ "step": 149
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.9331329368090664e-05,
+ "loss": 0.0,
+ "step": 150
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.932007869282799e-05,
+ "loss": 0.0,
+ "step": 151
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.9308737486442045e-05,
+ "loss": 0.0,
+ "step": 152
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.9297305859096305e-05,
+ "loss": 0.0,
+ "step": 153
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.9285783921832537e-05,
+ "loss": 0.0,
+ "step": 154
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.927417178656975e-05,
+ "loss": 0.0,
+ "step": 155
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.926246956610309e-05,
+ "loss": 0.0,
+ "step": 156
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.9250677374102752e-05,
+ "loss": 0.0,
+ "step": 157
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.9238795325112867e-05,
+ "loss": 0.0,
+ "step": 158
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.9226823534550418e-05,
+ "loss": 0.0,
+ "step": 159
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.921476211870408e-05,
+ "loss": 0.0,
+ "step": 160
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.9202611194733107e-05,
+ "loss": 0.0,
+ "step": 161
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.9190370880666206e-05,
+ "loss": 0.0,
+ "step": 162
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.9178041295400383e-05,
+ "loss": 0.0,
+ "step": 163
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.9165622558699763e-05,
+ "loss": 0.0,
+ "step": 164
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.9153114791194475e-05,
+ "loss": 0.0,
+ "step": 165
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.9140518114379433e-05,
+ "loss": 0.0,
+ "step": 166
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.912783265061319e-05,
+ "loss": 0.0,
+ "step": 167
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.9115058523116734e-05,
+ "loss": 0.0,
+ "step": 168
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.9102195855972287e-05,
+ "loss": 0.0,
+ "step": 169
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.908924477412211e-05,
+ "loss": 0.0,
+ "step": 170
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.9076205403367287e-05,
+ "loss": 0.0,
+ "step": 171
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.9063077870366504e-05,
+ "loss": 0.0,
+ "step": 172
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.90498623026348e-05,
+ "loss": 0.0,
+ "step": 173
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.903655882854237e-05,
+ "loss": 0.0,
+ "step": 174
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.9023167577313267e-05,
+ "loss": 0.0,
+ "step": 175
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.900968867902419e-05,
+ "loss": 0.0,
+ "step": 176
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.8996122264603202e-05,
+ "loss": 0.0,
+ "step": 177
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.898246846582844e-05,
+ "loss": 0.0,
+ "step": 178
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.8968727415326885e-05,
+ "loss": 0.0,
+ "step": 179
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.895489924657301e-05,
+ "loss": 0.0,
+ "step": 180
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.894098409388754e-05,
+ "loss": 0.0,
+ "step": 181
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.8926982092436117e-05,
+ "loss": 0.0,
+ "step": 182
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.8912893378227984e-05,
+ "loss": 0.0,
+ "step": 183
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.8898718088114688e-05,
+ "loss": 0.0,
+ "step": 184
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.8884456359788725e-05,
+ "loss": 0.0,
+ "step": 185
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.887010833178222e-05,
+ "loss": 0.0,
+ "step": 186
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.8855674143465567e-05,
+ "loss": 0.0,
+ "step": 187
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.8841153935046098e-05,
+ "loss": 0.0,
+ "step": 188
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.8826547847566692e-05,
+ "loss": 0.0,
+ "step": 189
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.8811856022904423e-05,
+ "loss": 0.0,
+ "step": 190
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.8797078603769184e-05,
+ "loss": 0.0,
+ "step": 191
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.8782215733702286e-05,
+ "loss": 0.0,
+ "step": 192
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.876726755707508e-05,
+ "loss": 0.0,
+ "step": 193
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.8752234219087538e-05,
+ "loss": 0.0,
+ "step": 194
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8737115865766865e-05,
+ "loss": 0.0,
+ "step": 195
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8721912643966055e-05,
+ "loss": 0.0,
+ "step": 196
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8706624701362485e-05,
+ "loss": 0.0,
+ "step": 197
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8691252186456465e-05,
+ "loss": 0.0,
+ "step": 198
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8675795248569816e-05,
+ "loss": 0.0,
+ "step": 199
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.866025403784439e-05,
+ "loss": 0.0,
+ "step": 200
+ }
+ ],
+ "logging_steps": 1.0,
+ "max_steps": 1040,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 2,
+ "save_steps": 100,
+ "total_flos": 9.983413310193664e+16,
+ "train_batch_size": 16,
+ "trial_name": null,
+ "trial_params": null
+}
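The log_history above shows a 32-step linear warmup from 6.25e-07 up to the 2e-05 peak, after which the values match a cosine decay toward max_steps = 1040 (for example, the step-200 value 1.866025403784439e-05 equals 2e-05 * 0.5 * (1 + cos(pi/6))). The scheduler type itself lives in training_args.bin, so the sketch below treats the warmup and total-step constants as read off this log rather than as confirmed settings.

import math

def lr_at(step, peak=2e-05, warmup_steps=32, total_steps=1040):
    """Linear warmup then cosine decay, matching the learning rates in log_history."""
    if step < warmup_steps:
        return peak * step / warmup_steps
    progress = (step - warmup_steps) / (total_steps - warmup_steps)
    return peak * 0.5 * (1.0 + math.cos(math.pi * progress))

print(lr_at(1))    # 6.25e-07, as logged at step 1
print(lr_at(33))   # ~1.9999951e-05, as logged at step 33
print(lr_at(200))  # ~1.8660254e-05, as logged at step 200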
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/training_args.bin b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..2ca4d892afdd453b26723a9aa94e432cb44cc953
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:63da3a2d0bf1dde543b68e123590fcd7c42f45ec7eb68e86c6eadd439321f902
+size 6264
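training_args.bin is a pickled transformers TrainingArguments object rather than a tensor file, so once the LFS payload is pulled it can be inspected directly. A minimal sketch, assuming a reasonably recent PyTorch and that transformers (plus deepspeed, if the run referenced a DeepSpeed config) is importable; the path is illustrative and the printed fields are standard TrainingArguments attributes:

import torch

args = torch.load(
    "llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/training_args.bin",
    map_location="cpu",
    weights_only=False,  # it is a pickled object, not a plain tensor payload
)
print(args.learning_rate, args.per_device_train_batch_size, args.save_steps)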
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/zero_to_fp32.py b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/zero_to_fp32.py
new file mode 100644
index 0000000000000000000000000000000000000000..c98caae31534368be22b67fc4ae906836c992a8d
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200/zero_to_fp32.py
@@ -0,0 +1,587 @@
+#!/usr/bin/env python
+
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+# This script extracts fp32 consolidated weights from ZeRO stage 1, 2 and 3 DeepSpeed checkpoints. It gets
+# copied into the top-level checkpoint dir, so the user can easily do the conversion at any point in
+# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+# application.
+#
+# example: python zero_to_fp32.py . pytorch_model.bin
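+#
+# in this repository the same conversion for the checkpoint directory shown above
+# would look like this (paths are illustrative):
+#   cd llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-200
+#   python zero_to_fp32.py . pytorch_model.bin
+# which consolidates the partitioned ZeRO states saved by each rank into a single
+# fp32 pytorch_model.bin that loads without DeepSpeed.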
+
+import argparse
+import torch
+import glob
+import math
+import os
+import re
+from collections import OrderedDict
+from dataclasses import dataclass
+
+# While this script doesn't use deepspeed to recover data, the checkpoints are pickled with
+# DeepSpeed data structures, so deepspeed has to be importable in the current Python environment.
+from deepspeed.utils import logger
+from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
+@dataclass
+class zero_model_state:
+ buffers: dict()
+ param_shapes: dict()
+ shared_params: list
+ ds_version: int
+ frozen_param_shapes: dict()
+ frozen_param_fragments: dict()
+
+
+debug = 0
+
+# load to cpu
+device = torch.device('cpu')
+
+
+def atoi(text):
+ return int(text) if text.isdigit() else text
+
+
+def natural_keys(text):
+ '''
+ alist.sort(key=natural_keys) sorts in human order
+ http://nedbatchelder.com/blog/200712/human_sorting.html
+ (See Toothy's implementation in the comments)
+ '''
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
+
+
+def get_model_state_file(checkpoint_dir, zero_stage):
+ if not os.path.isdir(checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
+
+ # there should be only one file
+ if zero_stage <= 2:
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
+ elif zero_stage == 3:
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
+
+ if not os.path.exists(file):
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
+
+ return file
+
+
+def get_checkpoint_files(checkpoint_dir, glob_pattern):
+ # XXX: need to test that this simple glob rule works for multi-node setup too
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
+
+ if len(ckpt_files) == 0:
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
+
+ return ckpt_files
+
+
+def get_optim_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
+
+
+def get_model_state_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
+
+
+def parse_model_states(files):
+ zero_model_states = []
+ for file in files:
+ state_dict = torch.load(file, map_location=device)
+
+ if BUFFER_NAMES not in state_dict:
+ raise ValueError(f"{file} is not a model state checkpoint")
+ buffer_names = state_dict[BUFFER_NAMES]
+ if debug:
+ print("Found buffers:", buffer_names)
+
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
+ param_shapes = state_dict[PARAM_SHAPES]
+
+ # collect parameters that are included in param_shapes
+ param_names = []
+ for s in param_shapes:
+ for name in s.keys():
+ param_names.append(name)
+
+ # update with frozen parameters
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
+ if frozen_param_shapes is not None:
+ if debug:
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
+ param_names += list(frozen_param_shapes.keys())
+
+ # handle shared params
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
+
+ ds_version = state_dict.get(DS_VERSION, None)
+
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
+
+ z_model_state = zero_model_state(buffers=buffers,
+ param_shapes=param_shapes,
+ shared_params=shared_params,
+ ds_version=ds_version,
+ frozen_param_shapes=frozen_param_shapes,
+ frozen_param_fragments=frozen_param_fragments)
+ zero_model_states.append(z_model_state)
+
+ return zero_model_states
+
+
+def parse_optim_states(files, ds_checkpoint_dir):
+
+ total_files = len(files)
+ state_dicts = []
+ for f in files:
+ state_dict = torch.load(f, map_location=device)
+ # immediately discard the two potentially huge optimizer states, as we only care about the fp32 master weights,
+ # and also handle the case where they were already removed by another helper script
+ state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
+ state_dicts.append(state_dict)
+
+ if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
+
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
+ # use the max of the partition_count to get the dp world_size.
+
+ if type(world_size) is list:
+ world_size = max(world_size)
+
+ if world_size != total_files:
+ raise ValueError(
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
+ )
+
+ # the groups are named differently in each stage
+ if zero_stage <= 2:
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
+ elif zero_stage == 3:
+ fp32_groups_key = FP32_FLAT_GROUPS
+ else:
+ raise ValueError(f"unknown zero stage {zero_stage}")
+
+ if zero_stage <= 2:
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
+ elif zero_stage == 3:
+ # if there is more than one param group, there will be multiple flattened tensors - one
+ # flattened tensor per group - for simplicity merge them into a single tensor
+ #
+ # XXX: could make the script more memory efficient for when there are multiple groups - it
+ # will require matching the sub-lists of param_shapes for each param group flattened tensor
+
+ fp32_flat_groups = [
+ torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
+ ]
+
+ return zero_stage, world_size, fp32_flat_groups
+
+
+def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir):
+ """
+ Returns fp32 state_dict reconstructed from ds checkpoint
+
+ Args:
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
+
+ """
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
+
+ optim_files = get_optim_files(ds_checkpoint_dir)
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
+
+ model_files = get_model_state_files(ds_checkpoint_dir)
+
+ zero_model_states = parse_model_states(model_files)
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
+
+ if zero_stage <= 2:
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states)
+ elif zero_stage == 3:
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states)
+
+
+def _zero2_merge_frozen_params(state_dict, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
+
+ if debug:
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ state_dict[name] = frozen_param_fragments[name]
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+
+ # Reconstruction protocol:
+ #
+ # XXX: document this
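+ # (sketch, not an authoritative spec) in ZeRO-2 every rank saves, per param group, its own
+ # contiguous slice of that group's flattened fp32 master weights; below we concatenate the
+ # per-rank slices in rank order to rebuild each group's full flat vector, then carve out the
+ # individual params by walking param_shapes with a running offset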
+
+ if debug:
+ for i in range(world_size):
+ for j in range(len(fp32_flat_groups[0])):
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
+
+ # XXX: memory usage doubles here (zero2)
+ num_param_groups = len(fp32_flat_groups[0])
+ merged_single_partition_of_fp32_groups = []
+ for i in range(num_param_groups):
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
+ avail_numel = sum(
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
+
+ if debug:
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
+ # not asserting if there is a mismatch due to possible padding
+ print(f"Have {avail_numel} numels to process.")
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # an out-of-core computing solution
+ total_numel = 0
+ total_params = 0
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
+ offset = 0
+ avail_numel = full_single_fp32_vector.numel()
+ for name, shape in shapes.items():
+
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+ total_params += 1
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
+ offset += unpartitioned_numel
+
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
+ # live optimizer object, so we are checking that the numbers are within the right range
+ align_to = 2 * world_size
+
+ def zero2_align(x):
+ return align_to * math.ceil(x / align_to)
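+ # e.g. (illustrative) with world_size=4: align_to=8, so zero2_align(10) == 16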
+
+ if debug:
+ print(f"original offset={offset}, avail_numel={avail_numel}")
+
+ offset = zero2_align(offset)
+ avail_numel = zero2_align(avail_numel)
+
+ if debug:
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
+
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def zero3_partitioned_param_info(unpartitioned_numel, world_size):
+ remainder = unpartitioned_numel % world_size
+ padding_numel = (world_size - remainder) if remainder else 0
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
+ return partitioned_numel, padding_numel
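+ # e.g. (illustrative numbers) unpartitioned_numel=10, world_size=4:
+ # remainder=2 -> padding_numel=2, partitioned_numel=ceil(10/4)=3, and 4*3 == 10+2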
+
+
+def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ if debug:
+ for i in range(world_size):
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
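+ # each rank stored its own fragment of every frozen param; concatenating the fragments in rank
+ # order and trimming to unpartitioned_numel drops the padding that was added for even partitioning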
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
+
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+ avail_numel = fp32_flat_groups[0].numel() * world_size
+ # Reconstruction protocol: for ZeRO-3 we need to zip the partitions together at the boundary of
+ # each param, re-consolidating each param while dealing with padding, if any
+
+ # merge list of dicts, preserving order
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
+
+ if debug:
+ for i in range(world_size):
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
+
+ wanted_params = len(param_shapes)
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
+ # not asserting if there is a mismatch due to possible padding
+ avail_numel = fp32_flat_groups[0].numel() * world_size
+ print(f"Trainable params: Have {avail_numel} numels to process.")
+ print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # an out-of-core computing solution
+ offset = 0
+ total_numel = 0
+ total_params = 0
+ for name, shape in param_shapes.items():
+
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+ total_params += 1
+
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ # XXX: memory usage doubles here
+ state_dict[name] = torch.cat(
+ tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
+ 0).narrow(0, 0, unpartitioned_numel).view(shape)
+ offset += partitioned_numel
+
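+ # each param consumed partitioned_numel elements from every rank's flat group, so the total
+ # consumed across ranks is offset * world_size; this should match avail_numel exactly since the
+ # per-param padding is already folded into partitioned_numel (ceil division)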
+ offset *= world_size
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
+
+ _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+ ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+ via a model hub.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder
+ - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+ Returns:
+ - pytorch ``state_dict``
+
+ Note: this approach may not work if your application doesn't have sufficient free CPU memory, in
+ which case you may need to use the offline approach via the ``zero_to_fp32.py`` script that is
+ saved with the checkpoint.
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+ # do the training and checkpoint saving
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
+ model = model.cpu() # move to cpu
+ model.load_state_dict(state_dict)
+ # submit to model hub or save the model to share with others
+
+ In this example the ``model`` will no longer be usable in the deepspeed context of the same
+ application, i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+ """
+ if tag is None:
+ latest_path = os.path.join(checkpoint_dir, 'latest')
+ if os.path.isfile(latest_path):
+ with open(latest_path, 'r') as fd:
+ tag = fd.read().strip()
+ else:
+ raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+ ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+ if not os.path.isdir(ds_checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+ return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir)
+
+
+def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+ loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
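+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict
+ # the paths below are just illustrative
+ convert_zero_checkpoint_to_fp32_state_dict("path/checkpoint-12", "path/checkpoint-12/pytorch_model.bin")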
+ """
+
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+ print(f"Saving fp32 state dict to {output_file}")
+ torch.save(state_dict, output_file)
+
+
+def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+ """
+ 1. Put the provided model to cpu
+ 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+ 3. Load it into the provided model
+
+ Args:
+ - ``model``: the model object to update
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+ Returns:
+ - ``model``: modified model
+
+ Make sure you have plenty of CPU memory available before you call this function. If you don't
+ have enough, use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
+ conveniently placed for you in the checkpoint folder.
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+ model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+ # submit to model hub or save the model to share with others
+
+ Note that once this has been run, the ``model`` will no longer be usable in the deepspeed context
+ of the same application, i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ """
+ logger.info(f"Extracting fp32 weights")
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+ logger.info(f"Overwriting model with fp32 weights")
+ model = model.cpu()
+ model.load_state_dict(state_dict, strict=False)
+
+ return model
+
+
+if __name__ == "__main__":
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("checkpoint_dir",
+ type=str,
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+ parser.add_argument(
+ "output_file",
+ type=str,
+ help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
+ parser.add_argument("-t",
+ "--tag",
+ type=str,
+ default=None,
+ help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+ args = parser.parse_args()
+
+ debug = args.debug
+
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file, tag=args.tag)
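+
+ # example invocation (illustrative paths; -t/--tag may be omitted to use the tag recorded in the 'latest' file):
+ # python zero_to_fp32.py path/checkpoint-12 path/checkpoint-12/pytorch_model.bin -t global_step1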
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/config.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..44e6d4e17930a42d0aa68dcd3790bd5f32ba4ec4
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/config.json
@@ -0,0 +1,73 @@
+{
+ "_name_or_path": "../pretrained-models/llava-v1.6-mistral-7b",
+ "architectures": [
+ "LlavaLlamaForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "freeze_mm_mlp_adapter": false,
+ "freeze_mm_vision_resampler": false,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "image_aspect_ratio": "pad",
+ "image_crop_resolution": 224,
+ "image_grid_pinpoints": [
+ [
+ 336,
+ 672
+ ],
+ [
+ 672,
+ 336
+ ],
+ [
+ 672,
+ 672
+ ],
+ [
+ 1008,
+ 336
+ ],
+ [
+ 336,
+ 1008
+ ]
+ ],
+ "image_split_resolution": 224,
+ "initializer_range": 0.02,
+ "intermediate_size": 14336,
+ "max_position_embeddings": 32768,
+ "mm_hidden_size": 1024,
+ "mm_patch_merge_type": "flat",
+ "mm_projector_lr": null,
+ "mm_projector_type": "mlp2x_gelu",
+ "mm_resampler_type": null,
+ "mm_use_im_patch_token": false,
+ "mm_use_im_start_end": false,
+ "mm_vision_select_feature": "patch",
+ "mm_vision_select_layer": -2,
+ "mm_vision_tower": "openai/clip-vit-large-patch14-336",
+ "mm_vision_tower_lr": 2e-06,
+ "model_type": "llava_llama",
+ "num_attention_heads": 32,
+ "num_hidden_layers": 32,
+ "num_key_value_heads": 8,
+ "pretraining_tp": 1,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "tokenizer_model_max_length": 2048,
+ "tokenizer_padding_side": "right",
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.37.2",
+ "tune_mm_mlp_adapter": false,
+ "tune_mm_vision_resampler": false,
+ "unfreeze_mm_vision_tower": true,
+ "use_cache": false,
+ "use_mm_proj": true,
+ "vocab_size": 32000
+}
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/generation_config.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..69b7806611a4865cd48c3e991dbd7d8312e0c5d3
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/generation_config.json
@@ -0,0 +1,6 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "transformers_version": "4.37.2"
+}
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/latest b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/latest
new file mode 100644
index 0000000000000000000000000000000000000000..6761b575fffac7f1984044dcb6446b3a51da04c8
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/latest
@@ -0,0 +1 @@
+global_step300
\ No newline at end of file
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/model.safetensors.index.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..927da5be7e6e3ec29d3a967a09ba6a421d7a2191
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/model.safetensors.index.json
@@ -0,0 +1,694 @@
+{
+ "metadata": {
+ "total_size": 15132446720
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00004-of-00004.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
+ "model.image_newline": "model-00001-of-00004.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.30.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.mm_projector.0.bias": "model-00003-of-00004.safetensors",
+ "model.mm_projector.0.weight": "model-00003-of-00004.safetensors",
+ "model.mm_projector.2.bias": "model-00003-of-00004.safetensors",
+ "model.mm_projector.2.weight": "model-00003-of-00004.safetensors",
+ "model.norm.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.embeddings.class_embedding": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.embeddings.patch_embedding.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.embeddings.position_embedding.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.post_layernorm.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.post_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.pre_layrnorm.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.pre_layrnorm.weight": "model-00003-of-00004.safetensors"
+ }
+}
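The weight_map closed above is the tail of a sharded model.safetensors.index.json: it routes every parameter name, including the CLIP vision-tower tensors listed here, to the shard file that stores it (e.g. model-00003-of-00004.safetensors). A minimal sketch of resolving one of these tensors through that index follows; the checkpoint path is hypothetical, and only the standard `json` module plus the `safetensors` package are assumed.

```python
# Minimal sketch (hypothetical local checkpoint path): look up a tensor's shard
# in model.safetensors.index.json, then read it with safetensors.safe_open.
import json, os
from safetensors import safe_open

ckpt_dir = "llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300"  # assumption, adjust to your copy
with open(os.path.join(ckpt_dir, "model.safetensors.index.json")) as f:
    index = json.load(f)

name = "model.vision_tower.vision_tower.vision_model.post_layernorm.weight"
shard = index["weight_map"][name]  # e.g. "model-00003-of-00004.safetensors"
with safe_open(os.path.join(ckpt_dir, shard), framework="pt") as f:
    tensor = f.get_tensor(name)
print(name, tuple(tensor.shape))
```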
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/rng_state_0.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..b6473612e41c5cfd6973c2e71fa5f3ad2b2bcad1
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:575119a228f98110923ffa2dedcb50e3317251b26054355d015e0b2240d566f2
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/rng_state_1.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..8506e00431b6ac7067699c0ea4f59adb6fa0ba20
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0728b56dab7abb5ef8a0d4bae3519c5767c97467bdd886d26bf19cc8599d0312
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/rng_state_2.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..ea499e285c97cca07fedd34662c3d4ab44ff6f47
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f4e481d4ef1546694da7337f6bb6c658b866dcb79b85deeb477da0d27ebe851e
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/rng_state_3.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..aeb38f92f106ac3f08bae4f82179a8a12243bccb
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:353c60be37ea56fc992fca446598ceca5d1fd002aa3bd6dbb9ad740e6f47ebb3
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/rng_state_4.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/rng_state_4.pth
new file mode 100644
index 0000000000000000000000000000000000000000..9d5856cb7a3f15092fa5593507022316916f648e
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/rng_state_4.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e9107fe964ba7205e354084b85210e5a5ea1c98cfd4d38adb9cd3926945dcae4
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/rng_state_5.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/rng_state_5.pth
new file mode 100644
index 0000000000000000000000000000000000000000..b824ee24d256695aad4a69a62d8e7125f51a17f2
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/rng_state_5.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:69d1bb1abee38b92e53f3f23549b642ce0f1edcdccf7b6129847ac61636e96d5
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/rng_state_6.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/rng_state_6.pth
new file mode 100644
index 0000000000000000000000000000000000000000..a9fd0364bb8f1a8e91eca45be5e1b6672b4d9afd
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/rng_state_6.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:afd5516048e20f36959601574e29e40106085a7d3cdc7bf425ce5e84633490e6
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/rng_state_7.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/rng_state_7.pth
new file mode 100644
index 0000000000000000000000000000000000000000..4e80125fd18efcb1097384319888b699f4dce7e7
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/rng_state_7.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8e2c46927fc06939b4c976a01e4b95dec1f8b98ceaea86d31a5d756fc30ff006
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/scheduler.pt b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..07c142e4fa627f224d9f4f0e1b661f274a96532f
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d8e0c47defe64b52adb69462bbb40710426836b1ff0a9bd9ee95694e9751adbc
+size 1064
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/special_tokens_map.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..14761dcf1466dc232bd41de9c21d4c617b15755e
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/tokenizer.model b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..8b443ef19c2a19acc3ac64fb9c3db4a72921dff6
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+size 493443
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/tokenizer_config.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..23dcf70e8cfc9b16310b6ff3dc98fdbc5adc11f8
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/tokenizer_config.json
@@ -0,0 +1,44 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [],
+ "bos_token": "",
+ "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "legacy": true,
+ "model_max_length": 2048,
+ "pad_token": "",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "",
+ "use_default_system_prompt": false
+}
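The tokenizer_config.json just added saves the slow LlamaTokenizer with a 2048-token model_max_length, right-side padding, and a Mistral-style `[INST] ... [/INST]` chat_template. A minimal sketch of loading it and rendering that template is below; it assumes a local copy of this checkpoint directory and a transformers release with apply_chat_template support (the checkpoint was written with 4.37.2).

```python
# Minimal sketch (hypothetical local path): load the tokenizer files from this
# checkpoint and render one user turn through its chat_template.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(
    "llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300",  # assumption, adjust to your copy
    use_fast=False,  # these files are for the slow LlamaTokenizer
)
messages = [{"role": "user", "content": "Describe the image."}]
prompt = tok.apply_chat_template(messages, tokenize=False)
print(prompt)  # "<s>[INST] Describe the image. [/INST]"
```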
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/trainer_state.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..4df39cdb9767c5ef34b8152b4b12212c3aad2357
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/trainer_state.json
@@ -0,0 +1,1821 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.5769230769230769,
+ "eval_steps": 500,
+ "global_step": 300,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.0,
+ "learning_rate": 6.25e-07,
+ "loss": 3.7473,
+ "step": 1
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 1.25e-06,
+ "loss": 0.0,
+ "step": 2
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 1.8750000000000003e-06,
+ "loss": 0.0,
+ "step": 3
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 2.5e-06,
+ "loss": 0.0,
+ "step": 4
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 3.125e-06,
+ "loss": 0.0,
+ "step": 5
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 3.7500000000000005e-06,
+ "loss": 0.0,
+ "step": 6
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 4.3750000000000005e-06,
+ "loss": 0.0,
+ "step": 7
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 5e-06,
+ "loss": 0.0,
+ "step": 8
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 5.625e-06,
+ "loss": 0.0,
+ "step": 9
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 6.25e-06,
+ "loss": 0.0,
+ "step": 10
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 6.875e-06,
+ "loss": 0.0,
+ "step": 11
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 7.500000000000001e-06,
+ "loss": 0.0,
+ "step": 12
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 8.125000000000001e-06,
+ "loss": 0.0,
+ "step": 13
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 8.750000000000001e-06,
+ "loss": 0.0,
+ "step": 14
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 9.375000000000001e-06,
+ "loss": 0.0,
+ "step": 15
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 1e-05,
+ "loss": 0.0,
+ "step": 16
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 1.0625e-05,
+ "loss": 0.0,
+ "step": 17
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 1.125e-05,
+ "loss": 0.0,
+ "step": 18
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.1875e-05,
+ "loss": 0.0,
+ "step": 19
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.25e-05,
+ "loss": 0.0,
+ "step": 20
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.3125e-05,
+ "loss": 0.0,
+ "step": 21
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.375e-05,
+ "loss": 0.0,
+ "step": 22
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.4375e-05,
+ "loss": 0.0,
+ "step": 23
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.5000000000000002e-05,
+ "loss": 0.0,
+ "step": 24
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.5625e-05,
+ "loss": 0.0,
+ "step": 25
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.6250000000000002e-05,
+ "loss": 0.0,
+ "step": 26
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.6875e-05,
+ "loss": 0.0,
+ "step": 27
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.7500000000000002e-05,
+ "loss": 0.0,
+ "step": 28
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.8125e-05,
+ "loss": 0.0,
+ "step": 29
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.8750000000000002e-05,
+ "loss": 0.0,
+ "step": 30
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.9375e-05,
+ "loss": 0.0,
+ "step": 31
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 2e-05,
+ "loss": 0.0,
+ "step": 32
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.9999951432210905e-05,
+ "loss": 0.0,
+ "step": 33
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.9999805729315383e-05,
+ "loss": 0.0,
+ "step": 34
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.999956289272873e-05,
+ "loss": 0.0,
+ "step": 35
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.999922292480975e-05,
+ "loss": 0.0,
+ "step": 36
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.9998785828860744e-05,
+ "loss": 0.0,
+ "step": 37
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.9998251609127465e-05,
+ "loss": 0.0,
+ "step": 38
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.999762027079909e-05,
+ "loss": 0.0,
+ "step": 39
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9996891820008165e-05,
+ "loss": 0.0,
+ "step": 40
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9996066263830533e-05,
+ "loss": 0.0,
+ "step": 41
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9995143610285275e-05,
+ "loss": 0.0,
+ "step": 42
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9994123868334655e-05,
+ "loss": 0.0,
+ "step": 43
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9993007047883988e-05,
+ "loss": 0.0,
+ "step": 44
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.999179315978157e-05,
+ "loss": 0.0,
+ "step": 45
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.999048221581858e-05,
+ "loss": 0.0,
+ "step": 46
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.9989074228728942e-05,
+ "loss": 0.0,
+ "step": 47
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.9987569212189224e-05,
+ "loss": 0.0,
+ "step": 48
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.9985967180818493e-05,
+ "loss": 0.0,
+ "step": 49
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.998426815017817e-05,
+ "loss": 0.0,
+ "step": 50
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.998247213677188e-05,
+ "loss": 0.0,
+ "step": 51
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.9980579158045322e-05,
+ "loss": 0.0,
+ "step": 52
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.9978589232386036e-05,
+ "loss": 0.0,
+ "step": 53
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.997650237912329e-05,
+ "loss": 0.0,
+ "step": 54
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.997431861852785e-05,
+ "loss": 0.0,
+ "step": 55
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.9972037971811802e-05,
+ "loss": 0.0,
+ "step": 56
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.996966046112834e-05,
+ "loss": 0.0,
+ "step": 57
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.996718610957155e-05,
+ "loss": 0.0,
+ "step": 58
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.9964614941176194e-05,
+ "loss": 0.0,
+ "step": 59
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.9961946980917457e-05,
+ "loss": 0.0,
+ "step": 60
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.995918225471073e-05,
+ "loss": 0.0,
+ "step": 61
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.9956320789411338e-05,
+ "loss": 0.0,
+ "step": 62
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.9953362612814294e-05,
+ "loss": 0.0,
+ "step": 63
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.9950307753654016e-05,
+ "loss": 0.0,
+ "step": 64
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.994715624160405e-05,
+ "loss": 0.0,
+ "step": 65
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.99439081072768e-05,
+ "loss": 0.0,
+ "step": 66
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.9940563382223196e-05,
+ "loss": 0.0,
+ "step": 67
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.9937122098932428e-05,
+ "loss": 0.0,
+ "step": 68
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.9933584290831593e-05,
+ "loss": 0.0,
+ "step": 69
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.9929949992285397e-05,
+ "loss": 0.0,
+ "step": 70
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.992621923859581e-05,
+ "loss": 0.0,
+ "step": 71
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.9922392066001724e-05,
+ "loss": 0.0,
+ "step": 72
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.99184685116786e-05,
+ "loss": 0.0,
+ "step": 73
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.9914448613738107e-05,
+ "loss": 0.0,
+ "step": 74
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.991033241122776e-05,
+ "loss": 0.0,
+ "step": 75
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9906119944130527e-05,
+ "loss": 0.0,
+ "step": 76
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9901811253364458e-05,
+ "loss": 0.0,
+ "step": 77
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9897406380782262e-05,
+ "loss": 0.0,
+ "step": 78
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.989290536917093e-05,
+ "loss": 0.0,
+ "step": 79
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9888308262251286e-05,
+ "loss": 0.0,
+ "step": 80
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.988361510467761e-05,
+ "loss": 0.0,
+ "step": 81
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.9878825942037147e-05,
+ "loss": 0.0,
+ "step": 82
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.9873940820849714e-05,
+ "loss": 0.0,
+ "step": 83
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.9868959788567213e-05,
+ "loss": 0.0,
+ "step": 84
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.9863882893573188e-05,
+ "loss": 0.0,
+ "step": 85
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.985871018518236e-05,
+ "loss": 0.0,
+ "step": 86
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.9853441713640123e-05,
+ "loss": 0.0,
+ "step": 87
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.9848077530122083e-05,
+ "loss": 0.0,
+ "step": 88
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.9842617686733546e-05,
+ "loss": 0.0,
+ "step": 89
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.9837062236509013e-05,
+ "loss": 0.0,
+ "step": 90
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.983141123341168e-05,
+ "loss": 0.0,
+ "step": 91
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.9825664732332886e-05,
+ "loss": 0.0,
+ "step": 92
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.9819822789091597e-05,
+ "loss": 0.0,
+ "step": 93
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.981388546043388e-05,
+ "loss": 0.0,
+ "step": 94
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.9807852804032306e-05,
+ "loss": 0.0,
+ "step": 95
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.9801724878485438e-05,
+ "loss": 0.0,
+ "step": 96
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.979550174331724e-05,
+ "loss": 0.0,
+ "step": 97
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.9789183458976485e-05,
+ "loss": 0.0,
+ "step": 98
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.97827700868362e-05,
+ "loss": 0.0,
+ "step": 99
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.977626168919305e-05,
+ "loss": 0.0,
+ "step": 100
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.9769658329266718e-05,
+ "loss": 0.0,
+ "step": 101
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9762960071199334e-05,
+ "loss": 0.0,
+ "step": 102
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9756166980054812e-05,
+ "loss": 0.0,
+ "step": 103
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9749279121818235e-05,
+ "loss": 0.0,
+ "step": 104
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9742296563395218e-05,
+ "loss": 0.0,
+ "step": 105
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9735219372611232e-05,
+ "loss": 0.0,
+ "step": 106
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.9728047618210995e-05,
+ "loss": 0.0,
+ "step": 107
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.9720781369857747e-05,
+ "loss": 0.0,
+ "step": 108
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.9713420698132614e-05,
+ "loss": 0.0,
+ "step": 109
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.970596567453391e-05,
+ "loss": 0.0,
+ "step": 110
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.9698416371476434e-05,
+ "loss": 0.0,
+ "step": 111
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.969077286229078e-05,
+ "loss": 0.0,
+ "step": 112
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.9683035221222617e-05,
+ "loss": 0.0,
+ "step": 113
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.9675203523431964e-05,
+ "loss": 0.0,
+ "step": 114
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.9667277844992476e-05,
+ "loss": 0.0,
+ "step": 115
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.9659258262890683e-05,
+ "loss": 0.0,
+ "step": 116
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.9651144855025265e-05,
+ "loss": 0.0,
+ "step": 117
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.964293770020628e-05,
+ "loss": 0.0,
+ "step": 118
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.9634636878154393e-05,
+ "loss": 0.0,
+ "step": 119
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.962624246950012e-05,
+ "loss": 0.0,
+ "step": 120
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.9617754555783045e-05,
+ "loss": 0.0,
+ "step": 121
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.9609173219450998e-05,
+ "loss": 0.0,
+ "step": 122
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.960049854385929e-05,
+ "loss": 0.0,
+ "step": 123
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.9591730613269878e-05,
+ "loss": 0.0,
+ "step": 124
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.9582869512850576e-05,
+ "loss": 0.0,
+ "step": 125
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.957391532867418e-05,
+ "loss": 0.0,
+ "step": 126
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.956486814771769e-05,
+ "loss": 0.0,
+ "step": 127
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.955572805786141e-05,
+ "loss": 0.0,
+ "step": 128
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.9546495147888134e-05,
+ "loss": 0.0,
+ "step": 129
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.953716950748227e-05,
+ "loss": 0.0,
+ "step": 130
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.9527751227228964e-05,
+ "loss": 0.0,
+ "step": 131
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.9518240398613226e-05,
+ "loss": 0.0,
+ "step": 132
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.9508637114019037e-05,
+ "loss": 0.0,
+ "step": 133
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.9498941466728462e-05,
+ "loss": 0.0,
+ "step": 134
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.9489153550920726e-05,
+ "loss": 0.0,
+ "step": 135
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.947927346167132e-05,
+ "loss": 0.0,
+ "step": 136
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.946930129495106e-05,
+ "loss": 0.0,
+ "step": 137
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.945923714762516e-05,
+ "loss": 0.0,
+ "step": 138
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.9449081117452304e-05,
+ "loss": 0.0,
+ "step": 139
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.9438833303083677e-05,
+ "loss": 0.0,
+ "step": 140
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.9428493804062013e-05,
+ "loss": 0.0,
+ "step": 141
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.9418062720820636e-05,
+ "loss": 0.0,
+ "step": 142
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9407540154682473e-05,
+ "loss": 0.0,
+ "step": 143
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9396926207859085e-05,
+ "loss": 0.0,
+ "step": 144
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9386220983449652e-05,
+ "loss": 0.0,
+ "step": 145
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9375424585439994e-05,
+ "loss": 0.0,
+ "step": 146
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9364537118701542e-05,
+ "loss": 0.0,
+ "step": 147
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.935355868899034e-05,
+ "loss": 0.0,
+ "step": 148
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.9342489402945997e-05,
+ "loss": 0.0,
+ "step": 149
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.9331329368090664e-05,
+ "loss": 0.0,
+ "step": 150
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.932007869282799e-05,
+ "loss": 0.0,
+ "step": 151
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.9308737486442045e-05,
+ "loss": 0.0,
+ "step": 152
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.9297305859096305e-05,
+ "loss": 0.0,
+ "step": 153
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.9285783921832537e-05,
+ "loss": 0.0,
+ "step": 154
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.927417178656975e-05,
+ "loss": 0.0,
+ "step": 155
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.926246956610309e-05,
+ "loss": 0.0,
+ "step": 156
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.9250677374102752e-05,
+ "loss": 0.0,
+ "step": 157
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.9238795325112867e-05,
+ "loss": 0.0,
+ "step": 158
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.9226823534550418e-05,
+ "loss": 0.0,
+ "step": 159
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.921476211870408e-05,
+ "loss": 0.0,
+ "step": 160
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.9202611194733107e-05,
+ "loss": 0.0,
+ "step": 161
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.9190370880666206e-05,
+ "loss": 0.0,
+ "step": 162
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.9178041295400383e-05,
+ "loss": 0.0,
+ "step": 163
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.9165622558699763e-05,
+ "loss": 0.0,
+ "step": 164
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.9153114791194475e-05,
+ "loss": 0.0,
+ "step": 165
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.9140518114379433e-05,
+ "loss": 0.0,
+ "step": 166
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.912783265061319e-05,
+ "loss": 0.0,
+ "step": 167
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.9115058523116734e-05,
+ "loss": 0.0,
+ "step": 168
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.9102195855972287e-05,
+ "loss": 0.0,
+ "step": 169
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.908924477412211e-05,
+ "loss": 0.0,
+ "step": 170
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.9076205403367287e-05,
+ "loss": 0.0,
+ "step": 171
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.9063077870366504e-05,
+ "loss": 0.0,
+ "step": 172
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.90498623026348e-05,
+ "loss": 0.0,
+ "step": 173
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.903655882854237e-05,
+ "loss": 0.0,
+ "step": 174
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.9023167577313267e-05,
+ "loss": 0.0,
+ "step": 175
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.900968867902419e-05,
+ "loss": 0.0,
+ "step": 176
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.8996122264603202e-05,
+ "loss": 0.0,
+ "step": 177
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.898246846582844e-05,
+ "loss": 0.0,
+ "step": 178
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.8968727415326885e-05,
+ "loss": 0.0,
+ "step": 179
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.895489924657301e-05,
+ "loss": 0.0,
+ "step": 180
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.894098409388754e-05,
+ "loss": 0.0,
+ "step": 181
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.8926982092436117e-05,
+ "loss": 0.0,
+ "step": 182
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.8912893378227984e-05,
+ "loss": 0.0,
+ "step": 183
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.8898718088114688e-05,
+ "loss": 0.0,
+ "step": 184
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.8884456359788725e-05,
+ "loss": 0.0,
+ "step": 185
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.887010833178222e-05,
+ "loss": 0.0,
+ "step": 186
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.8855674143465567e-05,
+ "loss": 0.0,
+ "step": 187
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.8841153935046098e-05,
+ "loss": 0.0,
+ "step": 188
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.8826547847566692e-05,
+ "loss": 0.0,
+ "step": 189
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.8811856022904423e-05,
+ "loss": 0.0,
+ "step": 190
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.8797078603769184e-05,
+ "loss": 0.0,
+ "step": 191
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.8782215733702286e-05,
+ "loss": 0.0,
+ "step": 192
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.876726755707508e-05,
+ "loss": 0.0,
+ "step": 193
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.8752234219087538e-05,
+ "loss": 0.0,
+ "step": 194
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8737115865766865e-05,
+ "loss": 0.0,
+ "step": 195
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8721912643966055e-05,
+ "loss": 0.0,
+ "step": 196
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8706624701362485e-05,
+ "loss": 0.0,
+ "step": 197
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8691252186456465e-05,
+ "loss": 0.0,
+ "step": 198
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8675795248569816e-05,
+ "loss": 0.0,
+ "step": 199
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.866025403784439e-05,
+ "loss": 0.0,
+ "step": 200
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 1.8644628705240636e-05,
+ "loss": 0.0,
+ "step": 201
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 1.862891940253613e-05,
+ "loss": 0.0,
+ "step": 202
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 1.8613126282324092e-05,
+ "loss": 0.0,
+ "step": 203
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 1.8597249498011906e-05,
+ "loss": 0.0,
+ "step": 204
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 1.858128920381963e-05,
+ "loss": 0.0,
+ "step": 205
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 1.8565245554778516e-05,
+ "loss": 0.0,
+ "step": 206
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 1.854911870672947e-05,
+ "loss": 0.0,
+ "step": 207
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 1.8532908816321557e-05,
+ "loss": 0.0,
+ "step": 208
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 1.8516616041010495e-05,
+ "loss": 0.0,
+ "step": 209
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 1.8500240539057093e-05,
+ "loss": 0.0,
+ "step": 210
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 1.848378246952574e-05,
+ "loss": 0.0,
+ "step": 211
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 1.8467241992282842e-05,
+ "loss": 0.0,
+ "step": 212
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 1.8450619267995283e-05,
+ "loss": 0.0,
+ "step": 213
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 1.843391445812886e-05,
+ "loss": 0.0,
+ "step": 214
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 1.84171277249467e-05,
+ "loss": 0.0,
+ "step": 215
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.8400259231507716e-05,
+ "loss": 0.0,
+ "step": 216
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.8383309141664992e-05,
+ "loss": 0.0,
+ "step": 217
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.83662776200642e-05,
+ "loss": 0.0,
+ "step": 218
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.8349164832142015e-05,
+ "loss": 0.0,
+ "step": 219
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.833197094412449e-05,
+ "loss": 0.0,
+ "step": 220
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.8314696123025456e-05,
+ "loss": 0.0,
+ "step": 221
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 1.8297340536644877e-05,
+ "loss": 0.0,
+ "step": 222
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 1.827990435356725e-05,
+ "loss": 0.0,
+ "step": 223
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 1.826238774315995e-05,
+ "loss": 0.0,
+ "step": 224
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 1.8244790875571582e-05,
+ "loss": 0.0,
+ "step": 225
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 1.8227113921730336e-05,
+ "loss": 0.0,
+ "step": 226
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 1.8209357053342325e-05,
+ "loss": 0.0,
+ "step": 227
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 1.819152044288992e-05,
+ "loss": 0.0,
+ "step": 228
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 1.8173604263630066e-05,
+ "loss": 0.0,
+ "step": 229
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 1.8155608689592604e-05,
+ "loss": 0.0,
+ "step": 230
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 1.8137533895578585e-05,
+ "loss": 0.0,
+ "step": 231
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 1.811938005715857e-05,
+ "loss": 0.0,
+ "step": 232
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 1.8101147350670905e-05,
+ "loss": 0.0,
+ "step": 233
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 1.8082835953220055e-05,
+ "loss": 0.0,
+ "step": 234
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 1.806444604267483e-05,
+ "loss": 0.0,
+ "step": 235
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 1.8045977797666685e-05,
+ "loss": 0.0,
+ "step": 236
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 1.8027431397587993e-05,
+ "loss": 0.0,
+ "step": 237
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 1.8008807022590283e-05,
+ "loss": 0.0,
+ "step": 238
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 1.7990104853582494e-05,
+ "loss": 0.0,
+ "step": 239
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 1.7971325072229227e-05,
+ "loss": 0.0,
+ "step": 240
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 1.7952467860948975e-05,
+ "loss": 0.0,
+ "step": 241
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.7933533402912354e-05,
+ "loss": 0.0,
+ "step": 242
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.791452188204031e-05,
+ "loss": 0.0,
+ "step": 243
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.7895433483002356e-05,
+ "loss": 0.0,
+ "step": 244
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.7876268391214756e-05,
+ "loss": 0.0,
+ "step": 245
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.785702679283874e-05,
+ "loss": 0.0,
+ "step": 246
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.7837708874778683e-05,
+ "loss": 0.0,
+ "step": 247
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 1.78183148246803e-05,
+ "loss": 0.0,
+ "step": 248
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 1.7798844830928818e-05,
+ "loss": 0.0,
+ "step": 249
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 1.777929908264715e-05,
+ "loss": 0.0,
+ "step": 250
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 1.775967776969405e-05,
+ "loss": 0.0,
+ "step": 251
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 1.7739981082662275e-05,
+ "loss": 0.0,
+ "step": 252
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 1.772020921287674e-05,
+ "loss": 0.0,
+ "step": 253
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 1.7700362352392632e-05,
+ "loss": 0.0,
+ "step": 254
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 1.7680440693993586e-05,
+ "loss": 0.0,
+ "step": 255
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 1.766044443118978e-05,
+ "loss": 0.0,
+ "step": 256
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 1.7640373758216075e-05,
+ "loss": 0.0,
+ "step": 257
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 1.762022887003011e-05,
+ "loss": 0.0,
+ "step": 258
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 1.7600009962310417e-05,
+ "loss": 0.0,
+ "step": 259
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 1.757971723145453e-05,
+ "loss": 0.0,
+ "step": 260
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 1.7559350874577066e-05,
+ "loss": 0.0,
+ "step": 261
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 1.75389110895078e-05,
+ "loss": 0.0,
+ "step": 262
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 1.7518398074789776e-05,
+ "loss": 0.0,
+ "step": 263
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 1.7497812029677344e-05,
+ "loss": 0.0,
+ "step": 264
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 1.7477153154134244e-05,
+ "loss": 0.0,
+ "step": 265
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 1.7456421648831658e-05,
+ "loss": 0.0,
+ "step": 266
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 1.743561771514626e-05,
+ "loss": 0.0,
+ "step": 267
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 1.741474155515827e-05,
+ "loss": 0.0,
+ "step": 268
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 1.739379337164946e-05,
+ "loss": 0.0,
+ "step": 269
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 1.737277336810124e-05,
+ "loss": 0.0,
+ "step": 270
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 1.7351681748692622e-05,
+ "loss": 0.0,
+ "step": 271
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 1.7330518718298263e-05,
+ "loss": 0.0,
+ "step": 272
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7309284482486494e-05,
+ "loss": 0.0,
+ "step": 273
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7287979247517285e-05,
+ "loss": 0.0,
+ "step": 274
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7266603220340273e-05,
+ "loss": 0.0,
+ "step": 275
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7245156608592727e-05,
+ "loss": 0.0,
+ "step": 276
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7223639620597556e-05,
+ "loss": 0.0,
+ "step": 277
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7202052465361268e-05,
+ "loss": 0.0,
+ "step": 278
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 1.718039535257194e-05,
+ "loss": 0.0,
+ "step": 279
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 1.7158668492597186e-05,
+ "loss": 0.0,
+ "step": 280
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 1.7136872096482123e-05,
+ "loss": 0.0,
+ "step": 281
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 1.7115006375947304e-05,
+ "loss": 0.0,
+ "step": 282
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 1.7093071543386667e-05,
+ "loss": 0.0,
+ "step": 283
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 1.7071067811865477e-05,
+ "loss": 0.0,
+ "step": 284
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 1.7048995395118253e-05,
+ "loss": 0.0,
+ "step": 285
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 1.7026854507546694e-05,
+ "loss": 0.0,
+ "step": 286
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 1.7004645364217584e-05,
+ "loss": 0.0,
+ "step": 287
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 1.698236818086073e-05,
+ "loss": 0.0,
+ "step": 288
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 1.6960023173866834e-05,
+ "loss": 0.0,
+ "step": 289
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 1.693761056028542e-05,
+ "loss": 0.0,
+ "step": 290
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 1.6915130557822698e-05,
+ "loss": 0.0,
+ "step": 291
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 1.689258338483947e-05,
+ "loss": 0.0,
+ "step": 292
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 1.686996926034902e-05,
+ "loss": 0.0,
+ "step": 293
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.6847288404014937e-05,
+ "loss": 0.0,
+ "step": 294
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.682454103614904e-05,
+ "loss": 0.0,
+ "step": 295
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.6801727377709195e-05,
+ "loss": 0.0,
+ "step": 296
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.67788476502972e-05,
+ "loss": 0.0,
+ "step": 297
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.6755902076156606e-05,
+ "loss": 0.0,
+ "step": 298
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.6732890878170573e-05,
+ "loss": 0.0,
+ "step": 299
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 1.67098142798597e-05,
+ "loss": 0.0,
+ "step": 300
+ }
+ ],
+ "logging_steps": 1.0,
+ "max_steps": 1040,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 2,
+ "save_steps": 100,
+ "total_flos": 1.4961737722966835e+17,
+ "train_batch_size": 16,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/training_args.bin b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..2ca4d892afdd453b26723a9aa94e432cb44cc953
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:63da3a2d0bf1dde543b68e123590fcd7c42f45ec7eb68e86c6eadd439321f902
+size 6264
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/zero_to_fp32.py b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/zero_to_fp32.py
new file mode 100644
index 0000000000000000000000000000000000000000..c98caae31534368be22b67fc4ae906836c992a8d
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-300/zero_to_fp32.py
@@ -0,0 +1,587 @@
+#!/usr/bin/env python
+
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+# This script extracts fp32 consolidated weights from ZeRO stage 1, 2 and 3 DeepSpeed checkpoints. It gets
+# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+# application.
+#
+# example: python zero_to_fp32.py . pytorch_model.bin
+
+import argparse
+import torch
+import glob
+import math
+import os
+import re
+from collections import OrderedDict
+from dataclasses import dataclass
+
+# While this script doesn't use DeepSpeed to recover data, the checkpoints are pickled with
+# DeepSpeed data structures, so the package has to be available in the current Python environment.
+from deepspeed.utils import logger
+from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
+@dataclass
+class zero_model_state:
+ buffers: dict()
+ param_shapes: dict()
+ shared_params: list
+ ds_version: int
+ frozen_param_shapes: dict()
+ frozen_param_fragments: dict()
+
+
+debug = 0
+
+# load to cpu
+device = torch.device('cpu')
+
+
+def atoi(text):
+ return int(text) if text.isdigit() else text
+
+
+def natural_keys(text):
+ '''
+ alist.sort(key=natural_keys) sorts in human order
+ http://nedbatchelder.com/blog/200712/human_sorting.html
+ (See Toothy's implementation in the comments)
+ '''
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
+
+
+def get_model_state_file(checkpoint_dir, zero_stage):
+ if not os.path.isdir(checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
+
+ # there should be only one file
+ if zero_stage <= 2:
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
+ elif zero_stage == 3:
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
+
+ if not os.path.exists(file):
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
+
+ return file
+
+
+def get_checkpoint_files(checkpoint_dir, glob_pattern):
+ # XXX: need to test that this simple glob rule works for multi-node setup too
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
+
+ if len(ckpt_files) == 0:
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
+
+ return ckpt_files
+
+
+def get_optim_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
+
+
+def get_model_state_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
+
+
+def parse_model_states(files):
+ zero_model_states = []
+ for file in files:
+ state_dict = torch.load(file, map_location=device)
+
+ if BUFFER_NAMES not in state_dict:
+ raise ValueError(f"{file} is not a model state checkpoint")
+ buffer_names = state_dict[BUFFER_NAMES]
+ if debug:
+ print("Found buffers:", buffer_names)
+
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
+ param_shapes = state_dict[PARAM_SHAPES]
+
+ # collect parameters that are included in param_shapes
+ param_names = []
+ for s in param_shapes:
+ for name in s.keys():
+ param_names.append(name)
+
+ # update with frozen parameters
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
+ if frozen_param_shapes is not None:
+ if debug:
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
+ param_names += list(frozen_param_shapes.keys())
+
+ # handle shared params
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
+
+ ds_version = state_dict.get(DS_VERSION, None)
+
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
+
+ z_model_state = zero_model_state(buffers=buffers,
+ param_shapes=param_shapes,
+ shared_params=shared_params,
+ ds_version=ds_version,
+ frozen_param_shapes=frozen_param_shapes,
+ frozen_param_fragments=frozen_param_fragments)
+ zero_model_states.append(z_model_state)
+
+ return zero_model_states
+
+
+def parse_optim_states(files, ds_checkpoint_dir):
+
+ total_files = len(files)
+ state_dicts = []
+ for f in files:
+ state_dict = torch.load(f, map_location=device)
+        # immediately discard the two potentially huge optimizer states, as we only care about the fp32 master weights,
+        # and also handle the case where they were already removed by another helper script
+ state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
+ state_dicts.append(state_dict)
+
+    if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
+
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
+ # use the max of the partition_count to get the dp world_size.
+
+ if type(world_size) is list:
+ world_size = max(world_size)
+
+ if world_size != total_files:
+ raise ValueError(
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
+ )
+
+ # the groups are named differently in each stage
+ if zero_stage <= 2:
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
+ elif zero_stage == 3:
+ fp32_groups_key = FP32_FLAT_GROUPS
+ else:
+ raise ValueError(f"unknown zero stage {zero_stage}")
+
+ if zero_stage <= 2:
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
+ elif zero_stage == 3:
+ # if there is more than one param group, there will be multiple flattened tensors - one
+ # flattened tensor per group - for simplicity merge them into a single tensor
+ #
+ # XXX: could make the script more memory efficient for when there are multiple groups - it
+ # will require matching the sub-lists of param_shapes for each param group flattened tensor
+
+ fp32_flat_groups = [
+ torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
+ ]
+
+ return zero_stage, world_size, fp32_flat_groups
+
+
+def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir):
+ """
+ Returns fp32 state_dict reconstructed from ds checkpoint
+
+ Args:
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
+
+ """
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
+
+ optim_files = get_optim_files(ds_checkpoint_dir)
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
+
+ model_files = get_model_state_files(ds_checkpoint_dir)
+
+ zero_model_states = parse_model_states(model_files)
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
+
+ if zero_stage <= 2:
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states)
+ elif zero_stage == 3:
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states)
+
+
+def _zero2_merge_frozen_params(state_dict, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
+
+ if debug:
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ state_dict[name] = frozen_param_fragments[name]
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+
+ # Reconstruction protocol:
+ #
+ # XXX: document this
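+    # (Sketch of what the code below does: for ZeRO-2 each rank stores one flat partition of the
+    # fp32 weights per param group. We concatenate the partitions from all ranks for each group
+    # into a single flat vector, then recover every parameter by narrowing that vector at a
+    # running offset and reshaping to the recorded entry in ``param_shapes``.)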
+
+ if debug:
+ for i in range(world_size):
+ for j in range(len(fp32_flat_groups[0])):
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
+
+ # XXX: memory usage doubles here (zero2)
+ num_param_groups = len(fp32_flat_groups[0])
+ merged_single_partition_of_fp32_groups = []
+ for i in range(num_param_groups):
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
+ avail_numel = sum(
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
+
+ if debug:
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
+ # not asserting if there is a mismatch due to possible padding
+ print(f"Have {avail_numel} numels to process.")
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # out-of-core computing solution
+ total_numel = 0
+ total_params = 0
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
+ offset = 0
+ avail_numel = full_single_fp32_vector.numel()
+ for name, shape in shapes.items():
+
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+ total_params += 1
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
+ offset += unpartitioned_numel
+
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
+ # live optimizer object, so we are checking that the numbers are within the right range
+ align_to = 2 * world_size
+
+ def zero2_align(x):
+ return align_to * math.ceil(x / align_to)
+
+ if debug:
+ print(f"original offset={offset}, avail_numel={avail_numel}")
+
+ offset = zero2_align(offset)
+ avail_numel = zero2_align(avail_numel)
+
+ if debug:
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
+
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def zero3_partitioned_param_info(unpartitioned_numel, world_size):
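+    # e.g. unpartitioned_numel=10, world_size=4 -> remainder=2, padding_numel=2, partitioned_numel=ceil(10/4)=3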
+ remainder = unpartitioned_numel % world_size
+ padding_numel = (world_size - remainder) if remainder else 0
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
+ return partitioned_numel, padding_numel
+
+
+def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ if debug:
+ for i in range(world_size):
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
+
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+ avail_numel = fp32_flat_groups[0].numel() * world_size
+ # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
+ # param, re-consolidating each param, while dealing with padding if any
+
+ # merge list of dicts, preserving order
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
+
+ if debug:
+ for i in range(world_size):
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
+
+ wanted_params = len(param_shapes)
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
+ # not asserting if there is a mismatch due to possible padding
+ avail_numel = fp32_flat_groups[0].numel() * world_size
+ print(f"Trainable params: Have {avail_numel} numels to process.")
+ print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # out-of-core computing solution
+ offset = 0
+ total_numel = 0
+ total_params = 0
+ for name, shape in param_shapes.items():
+
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+ total_params += 1
+
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ # XXX: memory usage doubles here
+ state_dict[name] = torch.cat(
+ tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
+ 0).narrow(0, 0, unpartitioned_numel).view(shape)
+ offset += partitioned_numel
+
+ offset *= world_size
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
+
+ _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+ ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+ via a model hub.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder
+        - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will attempt to load the tag from the 'latest' file, e.g., ``global_step14``
+
+ Returns:
+ - pytorch ``state_dict``
+
+    Note: this approach may not work if your application doesn't have sufficient free CPU memory; in
+    that case you may need to use the offline approach via the ``zero_to_fp32.py`` script that is
+    saved with the checkpoint.
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+ # do the training and checkpoint saving
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
+ model = model.cpu() # move to cpu
+ model.load_state_dict(state_dict)
+ # submit to model hub or save the model to share with others
+
+    In this example the ``model`` will no longer be usable in the DeepSpeed context of the same
+    application, i.e. you will need to re-initialize the DeepSpeed engine, since
+    ``model.load_state_dict(state_dict)`` will remove all the DeepSpeed magic from it.
+
+ If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+ """
+ if tag is None:
+ latest_path = os.path.join(checkpoint_dir, 'latest')
+ if os.path.isfile(latest_path):
+ with open(latest_path, 'r') as fd:
+ tag = fd.read().strip()
+ else:
+ raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+ ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+ if not os.path.isdir(ds_checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+ return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir)
+
+
+def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+ loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
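+
+    A typical usage might be ::
+
+        # paths are illustrative, mirroring the CLI example at the bottom of this script
+        convert_zero_checkpoint_to_fp32_state_dict("path/checkpoint-12", "path/checkpoint-12/pytorch_model.bin")
+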
+ """
+
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+ print(f"Saving fp32 state dict to {output_file}")
+ torch.save(state_dict, output_file)
+
+
+def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+ """
+    1. Put the provided model on the CPU
+ 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+ 3. Load it into the provided model
+
+ Args:
+ - ``model``: the model object to update
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+ Returns:
+        - ``model``: the modified model
+
+    Make sure you have plenty of CPU memory available before you call this function. If you don't
+    have enough, use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
+    conveniently placed for you in the checkpoint folder.
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+ model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+ # submit to model hub or save the model to share with others
+
+    Note that once this has been run, the ``model`` will no longer be usable in the DeepSpeed context
+    of the same application, i.e. you will need to re-initialize the DeepSpeed engine, since
+    ``model.load_state_dict(state_dict)`` will remove all the DeepSpeed magic from it.
+
+ """
+    logger.info("Extracting fp32 weights")
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+    logger.info("Overwriting model with fp32 weights")
+ model = model.cpu()
+ model.load_state_dict(state_dict, strict=False)
+
+ return model
+
+
+if __name__ == "__main__":
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("checkpoint_dir",
+ type=str,
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+ parser.add_argument(
+ "output_file",
+ type=str,
+ help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
+ parser.add_argument("-t",
+ "--tag",
+ type=str,
+ default=None,
+ help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+ args = parser.parse_args()
+
+ debug = args.debug
+
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file, tag=args.tag)
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-400/config.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-400/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..44e6d4e17930a42d0aa68dcd3790bd5f32ba4ec4
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-400/config.json
@@ -0,0 +1,73 @@
+{
+ "_name_or_path": "../pretrained-models/llava-v1.6-mistral-7b",
+ "architectures": [
+ "LlavaLlamaForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "freeze_mm_mlp_adapter": false,
+ "freeze_mm_vision_resampler": false,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "image_aspect_ratio": "pad",
+ "image_crop_resolution": 224,
+ "image_grid_pinpoints": [
+ [
+ 336,
+ 672
+ ],
+ [
+ 672,
+ 336
+ ],
+ [
+ 672,
+ 672
+ ],
+ [
+ 1008,
+ 336
+ ],
+ [
+ 336,
+ 1008
+ ]
+ ],
+ "image_split_resolution": 224,
+ "initializer_range": 0.02,
+ "intermediate_size": 14336,
+ "max_position_embeddings": 32768,
+ "mm_hidden_size": 1024,
+ "mm_patch_merge_type": "flat",
+ "mm_projector_lr": null,
+ "mm_projector_type": "mlp2x_gelu",
+ "mm_resampler_type": null,
+ "mm_use_im_patch_token": false,
+ "mm_use_im_start_end": false,
+ "mm_vision_select_feature": "patch",
+ "mm_vision_select_layer": -2,
+ "mm_vision_tower": "openai/clip-vit-large-patch14-336",
+ "mm_vision_tower_lr": 2e-06,
+ "model_type": "llava_llama",
+ "num_attention_heads": 32,
+ "num_hidden_layers": 32,
+ "num_key_value_heads": 8,
+ "pretraining_tp": 1,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "tokenizer_model_max_length": 2048,
+ "tokenizer_padding_side": "right",
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.37.2",
+ "tune_mm_mlp_adapter": false,
+ "tune_mm_vision_resampler": false,
+ "unfreeze_mm_vision_tower": true,
+ "use_cache": false,
+ "use_mm_proj": true,
+ "vocab_size": 32000
+}
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-400/generation_config.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-400/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..69b7806611a4865cd48c3e991dbd7d8312e0c5d3
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-400/generation_config.json
@@ -0,0 +1,6 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "transformers_version": "4.37.2"
+}
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-400/latest b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-400/latest
new file mode 100644
index 0000000000000000000000000000000000000000..e5bdf58d4f29d34e909da25905fad376f73e7c29
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-400/latest
@@ -0,0 +1 @@
+global_step400
\ No newline at end of file
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-400/model.safetensors.index.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-400/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..927da5be7e6e3ec29d3a967a09ba6a421d7a2191
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-400/model.safetensors.index.json
@@ -0,0 +1,694 @@
+{
+ "metadata": {
+ "total_size": 15132446720
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00004-of-00004.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
+ "model.image_newline": "model-00001-of-00004.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.30.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.mm_projector.0.bias": "model-00003-of-00004.safetensors",
+ "model.mm_projector.0.weight": "model-00003-of-00004.safetensors",
+ "model.mm_projector.2.bias": "model-00003-of-00004.safetensors",
+ "model.mm_projector.2.weight": "model-00003-of-00004.safetensors",
+ "model.norm.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.embeddings.class_embedding": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.embeddings.patch_embedding.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.embeddings.position_embedding.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.post_layernorm.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.post_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.pre_layrnorm.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.pre_layrnorm.weight": "model-00003-of-00004.safetensors"
+ }
+}
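Note: the weight map that ends above is the tail of a sharded safetensors index, where every parameter name points at the shard file that stores it. As an illustrative sketch only (not part of the checkpoint), assuming the index is saved under the conventional name model.safetensors.index.json and that the safetensors and torch Python packages are installed, a single tensor can be resolved and read like this:

import json
from pathlib import Path

from safetensors import safe_open

def load_tensor(checkpoint_dir: str, tensor_name: str):
    # Look up which shard holds the tensor, then read only that tensor from it.
    index_path = Path(checkpoint_dir) / "model.safetensors.index.json"
    index = json.loads(index_path.read_text())
    shard = index["weight_map"][tensor_name]  # e.g. "model-00003-of-00004.safetensors"
    with safe_open(str(Path(checkpoint_dir) / shard), framework="pt") as f:
        return f.get_tensor(tensor_name)

# Hypothetical usage with one of the keys listed above (path is a placeholder):
# norm_weight = load_tensor("path/to/checkpoint", "model.norm.weight")

In practice, transformers' from_pretrained performs this shard resolution automatically; the sketch only makes the index structure explicit.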
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-400/special_tokens_map.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-400/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..14761dcf1466dc232bd41de9c21d4c617b15755e
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-400/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
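Note: this special-token map is what AutoTokenizer reads alongside the tokenizer model when the checkpoint directory is loaded. A minimal check, given as a sketch under the assumption that the directory layout matches the diff header above and that the transformers library is installed:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(
    "llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-400",
    use_fast=False,  # matches tokenizer_class: LlamaTokenizer in the tokenizer config below
)
# Expected to mirror the map above: <s> </s> <unk> <unk>
print(tok.bos_token, tok.eos_token, tok.unk_token, tok.pad_token)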
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-400/tokenizer_config.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-400/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..23dcf70e8cfc9b16310b6ff3dc98fdbc5adc11f8
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-400/tokenizer_config.json
@@ -0,0 +1,44 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [],
+ "bos_token": "",
+ "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "legacy": true,
+ "model_max_length": 2048,
+ "pad_token": "",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "",
+ "use_default_system_prompt": false
+}
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-400/trainer_state.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-400/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..69a7fe721a444c4237dd0c9a70ed2cffc710e495
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-400/trainer_state.json
@@ -0,0 +1,2421 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.7692307692307693,
+ "eval_steps": 500,
+ "global_step": 400,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.0,
+ "learning_rate": 6.25e-07,
+ "loss": 3.7473,
+ "step": 1
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 1.25e-06,
+ "loss": 0.0,
+ "step": 2
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 1.8750000000000003e-06,
+ "loss": 0.0,
+ "step": 3
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 2.5e-06,
+ "loss": 0.0,
+ "step": 4
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 3.125e-06,
+ "loss": 0.0,
+ "step": 5
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 3.7500000000000005e-06,
+ "loss": 0.0,
+ "step": 6
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 4.3750000000000005e-06,
+ "loss": 0.0,
+ "step": 7
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 5e-06,
+ "loss": 0.0,
+ "step": 8
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 5.625e-06,
+ "loss": 0.0,
+ "step": 9
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 6.25e-06,
+ "loss": 0.0,
+ "step": 10
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 6.875e-06,
+ "loss": 0.0,
+ "step": 11
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 7.500000000000001e-06,
+ "loss": 0.0,
+ "step": 12
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 8.125000000000001e-06,
+ "loss": 0.0,
+ "step": 13
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 8.750000000000001e-06,
+ "loss": 0.0,
+ "step": 14
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 9.375000000000001e-06,
+ "loss": 0.0,
+ "step": 15
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 1e-05,
+ "loss": 0.0,
+ "step": 16
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 1.0625e-05,
+ "loss": 0.0,
+ "step": 17
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 1.125e-05,
+ "loss": 0.0,
+ "step": 18
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.1875e-05,
+ "loss": 0.0,
+ "step": 19
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.25e-05,
+ "loss": 0.0,
+ "step": 20
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.3125e-05,
+ "loss": 0.0,
+ "step": 21
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.375e-05,
+ "loss": 0.0,
+ "step": 22
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.4375e-05,
+ "loss": 0.0,
+ "step": 23
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.5000000000000002e-05,
+ "loss": 0.0,
+ "step": 24
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.5625e-05,
+ "loss": 0.0,
+ "step": 25
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.6250000000000002e-05,
+ "loss": 0.0,
+ "step": 26
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.6875e-05,
+ "loss": 0.0,
+ "step": 27
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.7500000000000002e-05,
+ "loss": 0.0,
+ "step": 28
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.8125e-05,
+ "loss": 0.0,
+ "step": 29
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.8750000000000002e-05,
+ "loss": 0.0,
+ "step": 30
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.9375e-05,
+ "loss": 0.0,
+ "step": 31
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 2e-05,
+ "loss": 0.0,
+ "step": 32
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.9999951432210905e-05,
+ "loss": 0.0,
+ "step": 33
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.9999805729315383e-05,
+ "loss": 0.0,
+ "step": 34
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.999956289272873e-05,
+ "loss": 0.0,
+ "step": 35
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.999922292480975e-05,
+ "loss": 0.0,
+ "step": 36
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.9998785828860744e-05,
+ "loss": 0.0,
+ "step": 37
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.9998251609127465e-05,
+ "loss": 0.0,
+ "step": 38
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.999762027079909e-05,
+ "loss": 0.0,
+ "step": 39
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9996891820008165e-05,
+ "loss": 0.0,
+ "step": 40
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9996066263830533e-05,
+ "loss": 0.0,
+ "step": 41
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9995143610285275e-05,
+ "loss": 0.0,
+ "step": 42
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9994123868334655e-05,
+ "loss": 0.0,
+ "step": 43
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9993007047883988e-05,
+ "loss": 0.0,
+ "step": 44
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.999179315978157e-05,
+ "loss": 0.0,
+ "step": 45
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.999048221581858e-05,
+ "loss": 0.0,
+ "step": 46
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.9989074228728942e-05,
+ "loss": 0.0,
+ "step": 47
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.9987569212189224e-05,
+ "loss": 0.0,
+ "step": 48
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.9985967180818493e-05,
+ "loss": 0.0,
+ "step": 49
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.998426815017817e-05,
+ "loss": 0.0,
+ "step": 50
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.998247213677188e-05,
+ "loss": 0.0,
+ "step": 51
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.9980579158045322e-05,
+ "loss": 0.0,
+ "step": 52
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.9978589232386036e-05,
+ "loss": 0.0,
+ "step": 53
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.997650237912329e-05,
+ "loss": 0.0,
+ "step": 54
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.997431861852785e-05,
+ "loss": 0.0,
+ "step": 55
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.9972037971811802e-05,
+ "loss": 0.0,
+ "step": 56
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.996966046112834e-05,
+ "loss": 0.0,
+ "step": 57
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.996718610957155e-05,
+ "loss": 0.0,
+ "step": 58
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.9964614941176194e-05,
+ "loss": 0.0,
+ "step": 59
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.9961946980917457e-05,
+ "loss": 0.0,
+ "step": 60
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.995918225471073e-05,
+ "loss": 0.0,
+ "step": 61
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.9956320789411338e-05,
+ "loss": 0.0,
+ "step": 62
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.9953362612814294e-05,
+ "loss": 0.0,
+ "step": 63
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.9950307753654016e-05,
+ "loss": 0.0,
+ "step": 64
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.994715624160405e-05,
+ "loss": 0.0,
+ "step": 65
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.99439081072768e-05,
+ "loss": 0.0,
+ "step": 66
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.9940563382223196e-05,
+ "loss": 0.0,
+ "step": 67
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.9937122098932428e-05,
+ "loss": 0.0,
+ "step": 68
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.9933584290831593e-05,
+ "loss": 0.0,
+ "step": 69
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.9929949992285397e-05,
+ "loss": 0.0,
+ "step": 70
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.992621923859581e-05,
+ "loss": 0.0,
+ "step": 71
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.9922392066001724e-05,
+ "loss": 0.0,
+ "step": 72
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.99184685116786e-05,
+ "loss": 0.0,
+ "step": 73
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.9914448613738107e-05,
+ "loss": 0.0,
+ "step": 74
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.991033241122776e-05,
+ "loss": 0.0,
+ "step": 75
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9906119944130527e-05,
+ "loss": 0.0,
+ "step": 76
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9901811253364458e-05,
+ "loss": 0.0,
+ "step": 77
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9897406380782262e-05,
+ "loss": 0.0,
+ "step": 78
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.989290536917093e-05,
+ "loss": 0.0,
+ "step": 79
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9888308262251286e-05,
+ "loss": 0.0,
+ "step": 80
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.988361510467761e-05,
+ "loss": 0.0,
+ "step": 81
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.9878825942037147e-05,
+ "loss": 0.0,
+ "step": 82
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.9873940820849714e-05,
+ "loss": 0.0,
+ "step": 83
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.9868959788567213e-05,
+ "loss": 0.0,
+ "step": 84
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.9863882893573188e-05,
+ "loss": 0.0,
+ "step": 85
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.985871018518236e-05,
+ "loss": 0.0,
+ "step": 86
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.9853441713640123e-05,
+ "loss": 0.0,
+ "step": 87
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.9848077530122083e-05,
+ "loss": 0.0,
+ "step": 88
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.9842617686733546e-05,
+ "loss": 0.0,
+ "step": 89
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.9837062236509013e-05,
+ "loss": 0.0,
+ "step": 90
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.983141123341168e-05,
+ "loss": 0.0,
+ "step": 91
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.9825664732332886e-05,
+ "loss": 0.0,
+ "step": 92
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.9819822789091597e-05,
+ "loss": 0.0,
+ "step": 93
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.981388546043388e-05,
+ "loss": 0.0,
+ "step": 94
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.9807852804032306e-05,
+ "loss": 0.0,
+ "step": 95
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.9801724878485438e-05,
+ "loss": 0.0,
+ "step": 96
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.979550174331724e-05,
+ "loss": 0.0,
+ "step": 97
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.9789183458976485e-05,
+ "loss": 0.0,
+ "step": 98
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.97827700868362e-05,
+ "loss": 0.0,
+ "step": 99
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.977626168919305e-05,
+ "loss": 0.0,
+ "step": 100
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.9769658329266718e-05,
+ "loss": 0.0,
+ "step": 101
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9762960071199334e-05,
+ "loss": 0.0,
+ "step": 102
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9756166980054812e-05,
+ "loss": 0.0,
+ "step": 103
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9749279121818235e-05,
+ "loss": 0.0,
+ "step": 104
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9742296563395218e-05,
+ "loss": 0.0,
+ "step": 105
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9735219372611232e-05,
+ "loss": 0.0,
+ "step": 106
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.9728047618210995e-05,
+ "loss": 0.0,
+ "step": 107
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.9720781369857747e-05,
+ "loss": 0.0,
+ "step": 108
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.9713420698132614e-05,
+ "loss": 0.0,
+ "step": 109
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.970596567453391e-05,
+ "loss": 0.0,
+ "step": 110
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.9698416371476434e-05,
+ "loss": 0.0,
+ "step": 111
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.969077286229078e-05,
+ "loss": 0.0,
+ "step": 112
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.9683035221222617e-05,
+ "loss": 0.0,
+ "step": 113
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.9675203523431964e-05,
+ "loss": 0.0,
+ "step": 114
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.9667277844992476e-05,
+ "loss": 0.0,
+ "step": 115
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.9659258262890683e-05,
+ "loss": 0.0,
+ "step": 116
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.9651144855025265e-05,
+ "loss": 0.0,
+ "step": 117
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.964293770020628e-05,
+ "loss": 0.0,
+ "step": 118
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.9634636878154393e-05,
+ "loss": 0.0,
+ "step": 119
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.962624246950012e-05,
+ "loss": 0.0,
+ "step": 120
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.9617754555783045e-05,
+ "loss": 0.0,
+ "step": 121
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.9609173219450998e-05,
+ "loss": 0.0,
+ "step": 122
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.960049854385929e-05,
+ "loss": 0.0,
+ "step": 123
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.9591730613269878e-05,
+ "loss": 0.0,
+ "step": 124
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.9582869512850576e-05,
+ "loss": 0.0,
+ "step": 125
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.957391532867418e-05,
+ "loss": 0.0,
+ "step": 126
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.956486814771769e-05,
+ "loss": 0.0,
+ "step": 127
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.955572805786141e-05,
+ "loss": 0.0,
+ "step": 128
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.9546495147888134e-05,
+ "loss": 0.0,
+ "step": 129
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.953716950748227e-05,
+ "loss": 0.0,
+ "step": 130
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.9527751227228964e-05,
+ "loss": 0.0,
+ "step": 131
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.9518240398613226e-05,
+ "loss": 0.0,
+ "step": 132
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.9508637114019037e-05,
+ "loss": 0.0,
+ "step": 133
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.9498941466728462e-05,
+ "loss": 0.0,
+ "step": 134
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.9489153550920726e-05,
+ "loss": 0.0,
+ "step": 135
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.947927346167132e-05,
+ "loss": 0.0,
+ "step": 136
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.946930129495106e-05,
+ "loss": 0.0,
+ "step": 137
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.945923714762516e-05,
+ "loss": 0.0,
+ "step": 138
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.9449081117452304e-05,
+ "loss": 0.0,
+ "step": 139
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.9438833303083677e-05,
+ "loss": 0.0,
+ "step": 140
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.9428493804062013e-05,
+ "loss": 0.0,
+ "step": 141
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.9418062720820636e-05,
+ "loss": 0.0,
+ "step": 142
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9407540154682473e-05,
+ "loss": 0.0,
+ "step": 143
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9396926207859085e-05,
+ "loss": 0.0,
+ "step": 144
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9386220983449652e-05,
+ "loss": 0.0,
+ "step": 145
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9375424585439994e-05,
+ "loss": 0.0,
+ "step": 146
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9364537118701542e-05,
+ "loss": 0.0,
+ "step": 147
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.935355868899034e-05,
+ "loss": 0.0,
+ "step": 148
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.9342489402945997e-05,
+ "loss": 0.0,
+ "step": 149
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.9331329368090664e-05,
+ "loss": 0.0,
+ "step": 150
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.932007869282799e-05,
+ "loss": 0.0,
+ "step": 151
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.9308737486442045e-05,
+ "loss": 0.0,
+ "step": 152
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.9297305859096305e-05,
+ "loss": 0.0,
+ "step": 153
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.9285783921832537e-05,
+ "loss": 0.0,
+ "step": 154
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.927417178656975e-05,
+ "loss": 0.0,
+ "step": 155
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.926246956610309e-05,
+ "loss": 0.0,
+ "step": 156
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.9250677374102752e-05,
+ "loss": 0.0,
+ "step": 157
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.9238795325112867e-05,
+ "loss": 0.0,
+ "step": 158
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.9226823534550418e-05,
+ "loss": 0.0,
+ "step": 159
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.921476211870408e-05,
+ "loss": 0.0,
+ "step": 160
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.9202611194733107e-05,
+ "loss": 0.0,
+ "step": 161
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.9190370880666206e-05,
+ "loss": 0.0,
+ "step": 162
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.9178041295400383e-05,
+ "loss": 0.0,
+ "step": 163
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.9165622558699763e-05,
+ "loss": 0.0,
+ "step": 164
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.9153114791194475e-05,
+ "loss": 0.0,
+ "step": 165
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.9140518114379433e-05,
+ "loss": 0.0,
+ "step": 166
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.912783265061319e-05,
+ "loss": 0.0,
+ "step": 167
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.9115058523116734e-05,
+ "loss": 0.0,
+ "step": 168
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.9102195855972287e-05,
+ "loss": 0.0,
+ "step": 169
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.908924477412211e-05,
+ "loss": 0.0,
+ "step": 170
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.9076205403367287e-05,
+ "loss": 0.0,
+ "step": 171
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.9063077870366504e-05,
+ "loss": 0.0,
+ "step": 172
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.90498623026348e-05,
+ "loss": 0.0,
+ "step": 173
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.903655882854237e-05,
+ "loss": 0.0,
+ "step": 174
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.9023167577313267e-05,
+ "loss": 0.0,
+ "step": 175
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.900968867902419e-05,
+ "loss": 0.0,
+ "step": 176
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.8996122264603202e-05,
+ "loss": 0.0,
+ "step": 177
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.898246846582844e-05,
+ "loss": 0.0,
+ "step": 178
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.8968727415326885e-05,
+ "loss": 0.0,
+ "step": 179
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.895489924657301e-05,
+ "loss": 0.0,
+ "step": 180
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.894098409388754e-05,
+ "loss": 0.0,
+ "step": 181
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.8926982092436117e-05,
+ "loss": 0.0,
+ "step": 182
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.8912893378227984e-05,
+ "loss": 0.0,
+ "step": 183
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.8898718088114688e-05,
+ "loss": 0.0,
+ "step": 184
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.8884456359788725e-05,
+ "loss": 0.0,
+ "step": 185
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.887010833178222e-05,
+ "loss": 0.0,
+ "step": 186
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.8855674143465567e-05,
+ "loss": 0.0,
+ "step": 187
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.8841153935046098e-05,
+ "loss": 0.0,
+ "step": 188
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.8826547847566692e-05,
+ "loss": 0.0,
+ "step": 189
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.8811856022904423e-05,
+ "loss": 0.0,
+ "step": 190
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.8797078603769184e-05,
+ "loss": 0.0,
+ "step": 191
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.8782215733702286e-05,
+ "loss": 0.0,
+ "step": 192
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.876726755707508e-05,
+ "loss": 0.0,
+ "step": 193
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.8752234219087538e-05,
+ "loss": 0.0,
+ "step": 194
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8737115865766865e-05,
+ "loss": 0.0,
+ "step": 195
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8721912643966055e-05,
+ "loss": 0.0,
+ "step": 196
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8706624701362485e-05,
+ "loss": 0.0,
+ "step": 197
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8691252186456465e-05,
+ "loss": 0.0,
+ "step": 198
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8675795248569816e-05,
+ "loss": 0.0,
+ "step": 199
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.866025403784439e-05,
+ "loss": 0.0,
+ "step": 200
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 1.8644628705240636e-05,
+ "loss": 0.0,
+ "step": 201
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 1.862891940253613e-05,
+ "loss": 0.0,
+ "step": 202
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 1.8613126282324092e-05,
+ "loss": 0.0,
+ "step": 203
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 1.8597249498011906e-05,
+ "loss": 0.0,
+ "step": 204
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 1.858128920381963e-05,
+ "loss": 0.0,
+ "step": 205
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 1.8565245554778516e-05,
+ "loss": 0.0,
+ "step": 206
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 1.854911870672947e-05,
+ "loss": 0.0,
+ "step": 207
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 1.8532908816321557e-05,
+ "loss": 0.0,
+ "step": 208
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 1.8516616041010495e-05,
+ "loss": 0.0,
+ "step": 209
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 1.8500240539057093e-05,
+ "loss": 0.0,
+ "step": 210
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 1.848378246952574e-05,
+ "loss": 0.0,
+ "step": 211
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 1.8467241992282842e-05,
+ "loss": 0.0,
+ "step": 212
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 1.8450619267995283e-05,
+ "loss": 0.0,
+ "step": 213
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 1.843391445812886e-05,
+ "loss": 0.0,
+ "step": 214
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 1.84171277249467e-05,
+ "loss": 0.0,
+ "step": 215
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.8400259231507716e-05,
+ "loss": 0.0,
+ "step": 216
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.8383309141664992e-05,
+ "loss": 0.0,
+ "step": 217
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.83662776200642e-05,
+ "loss": 0.0,
+ "step": 218
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.8349164832142015e-05,
+ "loss": 0.0,
+ "step": 219
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.833197094412449e-05,
+ "loss": 0.0,
+ "step": 220
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.8314696123025456e-05,
+ "loss": 0.0,
+ "step": 221
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 1.8297340536644877e-05,
+ "loss": 0.0,
+ "step": 222
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 1.827990435356725e-05,
+ "loss": 0.0,
+ "step": 223
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 1.826238774315995e-05,
+ "loss": 0.0,
+ "step": 224
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 1.8244790875571582e-05,
+ "loss": 0.0,
+ "step": 225
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 1.8227113921730336e-05,
+ "loss": 0.0,
+ "step": 226
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 1.8209357053342325e-05,
+ "loss": 0.0,
+ "step": 227
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 1.819152044288992e-05,
+ "loss": 0.0,
+ "step": 228
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 1.8173604263630066e-05,
+ "loss": 0.0,
+ "step": 229
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 1.8155608689592604e-05,
+ "loss": 0.0,
+ "step": 230
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 1.8137533895578585e-05,
+ "loss": 0.0,
+ "step": 231
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 1.811938005715857e-05,
+ "loss": 0.0,
+ "step": 232
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 1.8101147350670905e-05,
+ "loss": 0.0,
+ "step": 233
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 1.8082835953220055e-05,
+ "loss": 0.0,
+ "step": 234
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 1.806444604267483e-05,
+ "loss": 0.0,
+ "step": 235
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 1.8045977797666685e-05,
+ "loss": 0.0,
+ "step": 236
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 1.8027431397587993e-05,
+ "loss": 0.0,
+ "step": 237
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 1.8008807022590283e-05,
+ "loss": 0.0,
+ "step": 238
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 1.7990104853582494e-05,
+ "loss": 0.0,
+ "step": 239
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 1.7971325072229227e-05,
+ "loss": 0.0,
+ "step": 240
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 1.7952467860948975e-05,
+ "loss": 0.0,
+ "step": 241
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.7933533402912354e-05,
+ "loss": 0.0,
+ "step": 242
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.791452188204031e-05,
+ "loss": 0.0,
+ "step": 243
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.7895433483002356e-05,
+ "loss": 0.0,
+ "step": 244
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.7876268391214756e-05,
+ "loss": 0.0,
+ "step": 245
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.785702679283874e-05,
+ "loss": 0.0,
+ "step": 246
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.7837708874778683e-05,
+ "loss": 0.0,
+ "step": 247
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 1.78183148246803e-05,
+ "loss": 0.0,
+ "step": 248
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 1.7798844830928818e-05,
+ "loss": 0.0,
+ "step": 249
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 1.777929908264715e-05,
+ "loss": 0.0,
+ "step": 250
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 1.775967776969405e-05,
+ "loss": 0.0,
+ "step": 251
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 1.7739981082662275e-05,
+ "loss": 0.0,
+ "step": 252
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 1.772020921287674e-05,
+ "loss": 0.0,
+ "step": 253
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 1.7700362352392632e-05,
+ "loss": 0.0,
+ "step": 254
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 1.7680440693993586e-05,
+ "loss": 0.0,
+ "step": 255
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 1.766044443118978e-05,
+ "loss": 0.0,
+ "step": 256
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 1.7640373758216075e-05,
+ "loss": 0.0,
+ "step": 257
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 1.762022887003011e-05,
+ "loss": 0.0,
+ "step": 258
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 1.7600009962310417e-05,
+ "loss": 0.0,
+ "step": 259
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 1.757971723145453e-05,
+ "loss": 0.0,
+ "step": 260
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 1.7559350874577066e-05,
+ "loss": 0.0,
+ "step": 261
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 1.75389110895078e-05,
+ "loss": 0.0,
+ "step": 262
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 1.7518398074789776e-05,
+ "loss": 0.0,
+ "step": 263
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 1.7497812029677344e-05,
+ "loss": 0.0,
+ "step": 264
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 1.7477153154134244e-05,
+ "loss": 0.0,
+ "step": 265
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 1.7456421648831658e-05,
+ "loss": 0.0,
+ "step": 266
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 1.743561771514626e-05,
+ "loss": 0.0,
+ "step": 267
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 1.741474155515827e-05,
+ "loss": 0.0,
+ "step": 268
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 1.739379337164946e-05,
+ "loss": 0.0,
+ "step": 269
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 1.737277336810124e-05,
+ "loss": 0.0,
+ "step": 270
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 1.7351681748692622e-05,
+ "loss": 0.0,
+ "step": 271
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 1.7330518718298263e-05,
+ "loss": 0.0,
+ "step": 272
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7309284482486494e-05,
+ "loss": 0.0,
+ "step": 273
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7287979247517285e-05,
+ "loss": 0.0,
+ "step": 274
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7266603220340273e-05,
+ "loss": 0.0,
+ "step": 275
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7245156608592727e-05,
+ "loss": 0.0,
+ "step": 276
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7223639620597556e-05,
+ "loss": 0.0,
+ "step": 277
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7202052465361268e-05,
+ "loss": 0.0,
+ "step": 278
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 1.718039535257194e-05,
+ "loss": 0.0,
+ "step": 279
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 1.7158668492597186e-05,
+ "loss": 0.0,
+ "step": 280
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 1.7136872096482123e-05,
+ "loss": 0.0,
+ "step": 281
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 1.7115006375947304e-05,
+ "loss": 0.0,
+ "step": 282
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 1.7093071543386667e-05,
+ "loss": 0.0,
+ "step": 283
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 1.7071067811865477e-05,
+ "loss": 0.0,
+ "step": 284
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 1.7048995395118253e-05,
+ "loss": 0.0,
+ "step": 285
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 1.7026854507546694e-05,
+ "loss": 0.0,
+ "step": 286
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 1.7004645364217584e-05,
+ "loss": 0.0,
+ "step": 287
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 1.698236818086073e-05,
+ "loss": 0.0,
+ "step": 288
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 1.6960023173866834e-05,
+ "loss": 0.0,
+ "step": 289
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 1.693761056028542e-05,
+ "loss": 0.0,
+ "step": 290
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 1.6915130557822698e-05,
+ "loss": 0.0,
+ "step": 291
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 1.689258338483947e-05,
+ "loss": 0.0,
+ "step": 292
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 1.686996926034902e-05,
+ "loss": 0.0,
+ "step": 293
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.6847288404014937e-05,
+ "loss": 0.0,
+ "step": 294
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.682454103614904e-05,
+ "loss": 0.0,
+ "step": 295
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.6801727377709195e-05,
+ "loss": 0.0,
+ "step": 296
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.67788476502972e-05,
+ "loss": 0.0,
+ "step": 297
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.6755902076156606e-05,
+ "loss": 0.0,
+ "step": 298
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.6732890878170573e-05,
+ "loss": 0.0,
+ "step": 299
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 1.67098142798597e-05,
+ "loss": 0.0,
+ "step": 300
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 1.668667250537987e-05,
+ "loss": 0.0,
+ "step": 301
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 1.6663465779520042e-05,
+ "loss": 0.0,
+ "step": 302
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 1.6640194327700087e-05,
+ "loss": 0.0,
+ "step": 303
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 1.6616858375968596e-05,
+ "loss": 0.0,
+ "step": 304
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 1.659345815100069e-05,
+ "loss": 0.0,
+ "step": 305
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 1.6569993880095807e-05,
+ "loss": 0.0,
+ "step": 306
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 1.6546465791175498e-05,
+ "loss": 0.0,
+ "step": 307
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 1.6522874112781213e-05,
+ "loss": 0.0,
+ "step": 308
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 1.6499219074072087e-05,
+ "loss": 0.0,
+ "step": 309
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 1.6475500904822707e-05,
+ "loss": 0.0,
+ "step": 310
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 1.645171983542088e-05,
+ "loss": 0.0,
+ "step": 311
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 1.6427876096865394e-05,
+ "loss": 0.0,
+ "step": 312
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 1.640396992076379e-05,
+ "loss": 0.0,
+ "step": 313
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 1.6380001539330088e-05,
+ "loss": 0.0,
+ "step": 314
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 1.6355971185382547e-05,
+ "loss": 0.0,
+ "step": 315
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 1.6331879092341402e-05,
+ "loss": 0.0,
+ "step": 316
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 1.6307725494226586e-05,
+ "loss": 0.0,
+ "step": 317
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 1.6283510625655474e-05,
+ "loss": 0.0,
+ "step": 318
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 1.6259234721840595e-05,
+ "loss": 0.0,
+ "step": 319
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 1.6234898018587336e-05,
+ "loss": 0.0,
+ "step": 320
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 1.6210500752291682e-05,
+ "loss": 0.0,
+ "step": 321
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 1.6186043159937884e-05,
+ "loss": 0.0,
+ "step": 322
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 1.616152547909618e-05,
+ "loss": 0.0,
+ "step": 323
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 1.6136947947920477e-05,
+ "loss": 0.0,
+ "step": 324
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 1.611231080514605e-05,
+ "loss": 0.0,
+ "step": 325
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 1.608761429008721e-05,
+ "loss": 0.0,
+ "step": 326
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 1.606285864263498e-05,
+ "loss": 0.0,
+ "step": 327
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 1.6038044103254775e-05,
+ "loss": 0.0,
+ "step": 328
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 1.601317091298406e-05,
+ "loss": 0.0,
+ "step": 329
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 1.5988239313430004e-05,
+ "loss": 0.0,
+ "step": 330
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 1.5963249546767144e-05,
+ "loss": 0.0,
+ "step": 331
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 1.5938201855735017e-05,
+ "loss": 0.0,
+ "step": 332
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 1.5913096483635827e-05,
+ "loss": 0.0,
+ "step": 333
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 1.5887933674332048e-05,
+ "loss": 0.0,
+ "step": 334
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 1.5862713672244092e-05,
+ "loss": 0.0,
+ "step": 335
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 1.5837436722347902e-05,
+ "loss": 0.0,
+ "step": 336
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 1.5812103070172592e-05,
+ "loss": 0.0,
+ "step": 337
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 1.578671296179806e-05,
+ "loss": 0.0,
+ "step": 338
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 1.5761266643852587e-05,
+ "loss": 0.0,
+ "step": 339
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 1.573576436351046e-05,
+ "loss": 0.0,
+ "step": 340
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 1.5710206368489555e-05,
+ "loss": 0.0,
+ "step": 341
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 1.5684592907048925e-05,
+ "loss": 0.0,
+ "step": 342
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 1.5658924227986415e-05,
+ "loss": 0.0,
+ "step": 343
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 1.563320058063622e-05,
+ "loss": 0.0,
+ "step": 344
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 1.560742221486648e-05,
+ "loss": 0.0,
+ "step": 345
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 1.5581589381076843e-05,
+ "loss": 0.0,
+ "step": 346
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 1.5555702330196024e-05,
+ "loss": 0.0,
+ "step": 347
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 1.5529761313679396e-05,
+ "loss": 0.0,
+ "step": 348
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 1.5503766583506522e-05,
+ "loss": 0.0,
+ "step": 349
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 1.5477718392178716e-05,
+ "loss": 0.0,
+ "step": 350
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 1.545161699271659e-05,
+ "loss": 0.0,
+ "step": 351
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 1.5425462638657597e-05,
+ "loss": 0.0,
+ "step": 352
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 1.5399255584053568e-05,
+ "loss": 0.0,
+ "step": 353
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 1.5372996083468242e-05,
+ "loss": 0.0,
+ "step": 354
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 1.5346684391974792e-05,
+ "loss": 0.0,
+ "step": 355
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 1.5320320765153367e-05,
+ "loss": 0.0,
+ "step": 356
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 1.529390545908857e-05,
+ "loss": 0.0,
+ "step": 357
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 1.526743873036701e-05,
+ "loss": 0.0,
+ "step": 358
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 1.5240920836074777e-05,
+ "loss": 0.0,
+ "step": 359
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 1.5214352033794981e-05,
+ "loss": 0.0,
+ "step": 360
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 1.5187732581605217e-05,
+ "loss": 0.0,
+ "step": 361
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 1.5161062738075068e-05,
+ "loss": 0.0,
+ "step": 362
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 1.5134342762263606e-05,
+ "loss": 0.0,
+ "step": 363
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 1.5107572913716859e-05,
+ "loss": 0.0,
+ "step": 364
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 1.5080753452465296e-05,
+ "loss": 0.0,
+ "step": 365
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 1.505388463902131e-05,
+ "loss": 0.0,
+ "step": 366
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 1.502696673437667e-05,
+ "loss": 0.0,
+ "step": 367
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 1.5000000000000002e-05,
+ "loss": 0.0,
+ "step": 368
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 1.4972984697834238e-05,
+ "loss": 0.0,
+ "step": 369
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 1.4945921090294076e-05,
+ "loss": 0.0,
+ "step": 370
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 1.4918809440263435e-05,
+ "loss": 0.0,
+ "step": 371
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 1.4891650011092896e-05,
+ "loss": 0.0,
+ "step": 372
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 1.486444306659714e-05,
+ "loss": 0.0,
+ "step": 373
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 1.4837188871052399e-05,
+ "loss": 0.0,
+ "step": 374
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 1.4809887689193878e-05,
+ "loss": 0.0,
+ "step": 375
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 1.4782539786213184e-05,
+ "loss": 0.0,
+ "step": 376
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 1.4755145427755755e-05,
+ "loss": 0.0,
+ "step": 377
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 1.4727704879918272e-05,
+ "loss": 0.0,
+ "step": 378
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 1.4700218409246087e-05,
+ "loss": 0.0,
+ "step": 379
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 1.4672686282730622e-05,
+ "loss": 0.0,
+ "step": 380
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 1.4645108767806778e-05,
+ "loss": 0.0,
+ "step": 381
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 1.4617486132350343e-05,
+ "loss": 0.0,
+ "step": 382
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 1.4589818644675378e-05,
+ "loss": 0.0,
+ "step": 383
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 1.4562106573531632e-05,
+ "loss": 0.0,
+ "step": 384
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 1.4534350188101905e-05,
+ "loss": 0.0,
+ "step": 385
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 1.4506549757999456e-05,
+ "loss": 0.0,
+ "step": 386
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 1.4478705553265363e-05,
+ "loss": 0.0,
+ "step": 387
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 1.4450817844365924e-05,
+ "loss": 0.0,
+ "step": 388
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 1.4422886902190014e-05,
+ "loss": 0.0,
+ "step": 389
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 1.4394912998046451e-05,
+ "loss": 0.0,
+ "step": 390
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 1.436689640366137e-05,
+ "loss": 0.0,
+ "step": 391
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 1.4338837391175582e-05,
+ "loss": 0.0,
+ "step": 392
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 1.4310736233141926e-05,
+ "loss": 0.0,
+ "step": 393
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 1.4282593202522627e-05,
+ "loss": 0.0,
+ "step": 394
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 1.4254408572686642e-05,
+ "loss": 0.0,
+ "step": 395
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 1.4226182617406996e-05,
+ "loss": 0.0,
+ "step": 396
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 1.4197915610858143e-05,
+ "loss": 0.0,
+ "step": 397
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 1.4169607827613284e-05,
+ "loss": 0.0,
+ "step": 398
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 1.4141259542641706e-05,
+ "loss": 0.0,
+ "step": 399
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 1.4112871031306118e-05,
+ "loss": 0.0,
+ "step": 400
+ }
+ ],
+ "logging_steps": 1.0,
+ "max_steps": 1040,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 2,
+ "save_steps": 100,
+ "total_flos": 1.995362282724393e+17,
+ "train_batch_size": 16,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-400/zero_to_fp32.py b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-400/zero_to_fp32.py
new file mode 100644
index 0000000000000000000000000000000000000000..c98caae31534368be22b67fc4ae906836c992a8d
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-400/zero_to_fp32.py
@@ -0,0 +1,587 @@
+#!/usr/bin/env python
+
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+# This script extracts fp32 consolidated weights from ZeRO stage 1, 2 and 3 DeepSpeed checkpoints. It gets
+# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+# application.
+#
+# example: python zero_to_fp32.py . pytorch_model.bin
+
+import argparse
+import torch
+import glob
+import math
+import os
+import re
+from collections import OrderedDict
+from dataclasses import dataclass
+
+# While this script doesn't use deepspeed to recover data, the checkpoints are pickled with
+# DeepSpeed data structures, so deepspeed has to be available in the current python environment.
+from deepspeed.utils import logger
+from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
+@dataclass
+class zero_model_state:
+ buffers: dict()
+ param_shapes: dict()
+ shared_params: list
+ ds_version: int
+ frozen_param_shapes: dict()
+ frozen_param_fragments: dict()
+
+
+debug = 0
+
+# load to cpu
+device = torch.device('cpu')
+
+
+def atoi(text):
+ return int(text) if text.isdigit() else text
+
+
+def natural_keys(text):
+ '''
+ alist.sort(key=natural_keys) sorts in human order
+ http://nedbatchelder.com/blog/200712/human_sorting.html
+ (See Toothy's implementation in the comments)
+ '''
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
+
+
+def get_model_state_file(checkpoint_dir, zero_stage):
+ if not os.path.isdir(checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
+
+ # there should be only one file
+ if zero_stage <= 2:
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
+ elif zero_stage == 3:
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
+
+ if not os.path.exists(file):
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
+
+ return file
+
+
+def get_checkpoint_files(checkpoint_dir, glob_pattern):
+ # XXX: need to test that this simple glob rule works for multi-node setup too
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
+
+ if len(ckpt_files) == 0:
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
+
+ return ckpt_files
+
+
+def get_optim_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
+
+
+def get_model_state_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
+
+
+def parse_model_states(files):
+ zero_model_states = []
+ for file in files:
+ state_dict = torch.load(file, map_location=device)
+
+ if BUFFER_NAMES not in state_dict:
+ raise ValueError(f"{file} is not a model state checkpoint")
+ buffer_names = state_dict[BUFFER_NAMES]
+ if debug:
+ print("Found buffers:", buffer_names)
+
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
+ param_shapes = state_dict[PARAM_SHAPES]
+
+ # collect parameters that are included in param_shapes
+ param_names = []
+ for s in param_shapes:
+ for name in s.keys():
+ param_names.append(name)
+
+ # update with frozen parameters
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
+ if frozen_param_shapes is not None:
+ if debug:
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
+ param_names += list(frozen_param_shapes.keys())
+
+ # handle shared params
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
+
+ ds_version = state_dict.get(DS_VERSION, None)
+
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
+
+ z_model_state = zero_model_state(buffers=buffers,
+ param_shapes=param_shapes,
+ shared_params=shared_params,
+ ds_version=ds_version,
+ frozen_param_shapes=frozen_param_shapes,
+ frozen_param_fragments=frozen_param_fragments)
+ zero_model_states.append(z_model_state)
+
+ return zero_model_states
+
+
+def parse_optim_states(files, ds_checkpoint_dir):
+
+ total_files = len(files)
+ state_dicts = []
+ for f in files:
+ state_dict = torch.load(f, map_location=device)
+ # immediately discard the two potentially huge optimizer states, since we only care about the fp32
+ # master weights, and also handle the case where they were already removed by another helper script
+ state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
+ state_dicts.append(state_dict)
+
+ if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
+
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
+ # use the max of the partition_count to get the dp world_size.
+
+ if type(world_size) is list:
+ world_size = max(world_size)
+
+ if world_size != total_files:
+ raise ValueError(
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
+ )
+
+ # the groups are named differently in each stage
+ if zero_stage <= 2:
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
+ elif zero_stage == 3:
+ fp32_groups_key = FP32_FLAT_GROUPS
+ else:
+ raise ValueError(f"unknown zero stage {zero_stage}")
+
+ if zero_stage <= 2:
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
+ elif zero_stage == 3:
+ # if there is more than one param group, there will be multiple flattened tensors - one
+ # flattened tensor per group - for simplicity merge them into a single tensor
+ #
+ # XXX: could make the script more memory efficient for when there are multiple groups - it
+ # will require matching the sub-lists of param_shapes for each param group flattened tensor
+
+ fp32_flat_groups = [
+ torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
+ ]
+
+ return zero_stage, world_size, fp32_flat_groups
+
+
+def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir):
+ """
+ Returns fp32 state_dict reconstructed from ds checkpoint
+
+ Args:
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
+
+ """
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
+
+ optim_files = get_optim_files(ds_checkpoint_dir)
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
+
+ model_files = get_model_state_files(ds_checkpoint_dir)
+
+ zero_model_states = parse_model_states(model_files)
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
+
+ if zero_stage <= 2:
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states)
+ elif zero_stage == 3:
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states)
+
+
+def _zero2_merge_frozen_params(state_dict, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
+
+ if debug:
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ state_dict[name] = frozen_param_fragments[name]
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+
+ # Reconstruction protocol:
+ #
+ # XXX: document this
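+ #
+ # Summarising what the code below does: each rank saved one flat fp32 tensor per
+ # param group containing its slice of the flattened parameters. Concatenating the
+ # per-rank slices in rank order restores the full flat vector of a group, and the
+ # individual parameters are then carved out of that vector sequentially using the
+ # shapes recorded in param_shapes.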
+
+ if debug:
+ for i in range(world_size):
+ for j in range(len(fp32_flat_groups[0])):
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
+
+ # XXX: memory usage doubles here (zero2)
+ num_param_groups = len(fp32_flat_groups[0])
+ merged_single_partition_of_fp32_groups = []
+ for i in range(num_param_groups):
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
+ avail_numel = sum(
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
+
+ if debug:
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
+ # not asserting if there is a mismatch due to possible padding
+ print(f"Have {avail_numel} numels to process.")
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # out-of-core computing solution
+ total_numel = 0
+ total_params = 0
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
+ offset = 0
+ avail_numel = full_single_fp32_vector.numel()
+ for name, shape in shapes.items():
+
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+ total_params += 1
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
+ offset += unpartitioned_numel
+
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
+ # live optimizer object, so we are checking that the numbers are within the right range
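+ # (e.g. with world_size=4 the groups are padded to multiples of 8, so zero2_align(10) == 16)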
+ align_to = 2 * world_size
+
+ def zero2_align(x):
+ return align_to * math.ceil(x / align_to)
+
+ if debug:
+ print(f"original offset={offset}, avail_numel={avail_numel}")
+
+ offset = zero2_align(offset)
+ avail_numel = zero2_align(avail_numel)
+
+ if debug:
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
+
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def zero3_partitioned_param_info(unpartitioned_numel, world_size):
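+ # e.g. unpartitioned_numel=10, world_size=4: every rank stores ceil(10/4)=3 elements,
+ # of which 4 - (10 % 4) = 2 are padding carried by the last rank.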
+ remainder = unpartitioned_numel % world_size
+ padding_numel = (world_size - remainder) if remainder else 0
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
+ return partitioned_numel, padding_numel
+
+
+def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ if debug:
+ for i in range(world_size):
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
+
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+ avail_numel = fp32_flat_groups[0].numel() * world_size
+ # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
+ # param, re-consolidating each param, while dealing with padding if any
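+ # Concretely: every rank's flat tensor holds the same sequence of params, each padded to
+ # ceil(numel/world_size) elements, so a param is rebuilt by concatenating the per-rank
+ # slices found at its offset and trimming the tail padding before reshaping.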
+
+ # merge list of dicts, preserving order
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
+
+ if debug:
+ for i in range(world_size):
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
+
+ wanted_params = len(param_shapes)
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
+ # not asserting if there is a mismatch due to possible padding
+ avail_numel = fp32_flat_groups[0].numel() * world_size
+ print(f"Trainable params: Have {avail_numel} numels to process.")
+ print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # out-of-core computing solution
+ offset = 0
+ total_numel = 0
+ total_params = 0
+ for name, shape in param_shapes.items():
+
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+ total_params += 1
+
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ # XXX: memory usage doubles here
+ state_dict[name] = torch.cat(
+ tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
+ 0).narrow(0, 0, unpartitioned_numel).view(shape)
+ offset += partitioned_numel
+
+ offset *= world_size
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
+
+ _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
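+ # each ``pair`` is (shared param name, source param name): alias the shared name to the tensor already reconstructed above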
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+ ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+ via a model hub.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder
+ - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, the tag will be read from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+ Returns:
+ - pytorch ``state_dict``
+
+ Note: this approach may not work if your application doesn't have sufficient free CPU memory; in
+ that case you may need to use the offline approach via the ``zero_to_fp32.py`` script that is
+ saved with the checkpoint.
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+ # do the training and checkpoint saving
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
+ model = model.cpu() # move to cpu
+ model.load_state_dict(state_dict)
+ # submit to model hub or save the model to share with others
+
+ In this example the ``model`` will no longer be usable in the DeepSpeed context of the same
+ application, i.e. you will need to re-initialize the DeepSpeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the DeepSpeed magic from it.
+
+ If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+ """
+ if tag is None:
+ latest_path = os.path.join(checkpoint_dir, 'latest')
+ if os.path.isfile(latest_path):
+ with open(latest_path, 'r') as fd:
+ tag = fd.read().strip()
+ else:
+ raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+ ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+ if not os.path.isdir(ds_checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+ return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir)
+
+
+def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+ loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder (the one that contains the tag folder, like ``global_step14``)
+ - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
+ - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, the tag will be read from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
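+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict
+ # do the training and checkpoint saving
+ convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, "path/pytorch_model.bin")
+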
+ """
+
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+ print(f"Saving fp32 state dict to {output_file}")
+ torch.save(state_dict, output_file)
+
+
+def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+ """
+ 1. Put the provided model on CPU
+ 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+ 3. Load it into the provided model
+
+ Args:
+ - ``model``: the model object to update
+ - ``checkpoint_dir``: path to the desired checkpoint folder (the one that contains the tag folder, like ``global_step14``)
+ - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, the tag will be read from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+ Returns:
+ - ``model``: the modified model
+
+ Make sure you have plenty of CPU memory available before you call this function. If you don't
+ have enough, use the ``zero_to_fp32.py`` utility to do the conversion; you will find it
+ conveniently placed for you in the checkpoint folder.
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+ model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+ # submit to model hub or save the model to share with others
+
+ Note that once this has been run, the ``model`` will no longer be usable in the DeepSpeed context
+ of the same application, i.e. you will need to re-initialize the DeepSpeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the DeepSpeed magic from it.
+
+ """
+ logger.info(f"Extracting fp32 weights")
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+ logger.info(f"Overwriting model with fp32 weights")
+ model = model.cpu()
+ model.load_state_dict(state_dict, strict=False)
+
+ return model
+
+
+if __name__ == "__main__":
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("checkpoint_dir",
+ type=str,
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+ parser.add_argument(
+ "output_file",
+ type=str,
+ help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
+ parser.add_argument("-t",
+ "--tag",
+ type=str,
+ default=None,
+ help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+ args = parser.parse_args()
+
+ debug = args.debug
+
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file, tag=args.tag)
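A minimal sketch of how the ZeRO shards in a checkpoint such as ``checkpoint-500`` could be consolidated with the
functions defined above (the ``ckpt_dir`` name and output path are illustrative; since the script is saved with each
checkpoint, running ``python zero_to_fp32.py checkpoint-500 checkpoint-500/pytorch_model.bin`` achieves the same thing) ::

    from deepspeed.utils.zero_to_fp32 import (
        convert_zero_checkpoint_to_fp32_state_dict,
        load_state_dict_from_zero_checkpoint,
    )

    ckpt_dir = "llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-500"

    # write one consolidated fp32 state dict next to the DeepSpeed shards; the tag
    # defaults to the contents of the ``latest`` file (``global_step500`` for this checkpoint)
    convert_zero_checkpoint_to_fp32_state_dict(ckpt_dir, ckpt_dir + "/pytorch_model.bin")

    # or load the fp32 weights directly into an already constructed model; the model must
    # then be re-initialized with DeepSpeed if ZeRO training is to continue
    # model = load_state_dict_from_zero_checkpoint(model, ckpt_dir)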
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-500/config.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-500/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..44e6d4e17930a42d0aa68dcd3790bd5f32ba4ec4
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-500/config.json
@@ -0,0 +1,73 @@
+{
+ "_name_or_path": "../pretrained-models/llava-v1.6-mistral-7b",
+ "architectures": [
+ "LlavaLlamaForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "freeze_mm_mlp_adapter": false,
+ "freeze_mm_vision_resampler": false,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "image_aspect_ratio": "pad",
+ "image_crop_resolution": 224,
+ "image_grid_pinpoints": [
+ [
+ 336,
+ 672
+ ],
+ [
+ 672,
+ 336
+ ],
+ [
+ 672,
+ 672
+ ],
+ [
+ 1008,
+ 336
+ ],
+ [
+ 336,
+ 1008
+ ]
+ ],
+ "image_split_resolution": 224,
+ "initializer_range": 0.02,
+ "intermediate_size": 14336,
+ "max_position_embeddings": 32768,
+ "mm_hidden_size": 1024,
+ "mm_patch_merge_type": "flat",
+ "mm_projector_lr": null,
+ "mm_projector_type": "mlp2x_gelu",
+ "mm_resampler_type": null,
+ "mm_use_im_patch_token": false,
+ "mm_use_im_start_end": false,
+ "mm_vision_select_feature": "patch",
+ "mm_vision_select_layer": -2,
+ "mm_vision_tower": "openai/clip-vit-large-patch14-336",
+ "mm_vision_tower_lr": 2e-06,
+ "model_type": "llava_llama",
+ "num_attention_heads": 32,
+ "num_hidden_layers": 32,
+ "num_key_value_heads": 8,
+ "pretraining_tp": 1,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "tokenizer_model_max_length": 2048,
+ "tokenizer_padding_side": "right",
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.37.2",
+ "tune_mm_mlp_adapter": false,
+ "tune_mm_vision_resampler": false,
+ "unfreeze_mm_vision_tower": true,
+ "use_cache": false,
+ "use_mm_proj": true,
+ "vocab_size": 32000
+}
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-500/generation_config.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-500/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..69b7806611a4865cd48c3e991dbd7d8312e0c5d3
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-500/generation_config.json
@@ -0,0 +1,6 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "transformers_version": "4.37.2"
+}
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-500/latest b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-500/latest
new file mode 100644
index 0000000000000000000000000000000000000000..f0b47ce15fff9a01b2a416a473b2148085048a50
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-500/latest
@@ -0,0 +1 @@
+global_step500
\ No newline at end of file
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-500/model.safetensors.index.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-500/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..927da5be7e6e3ec29d3a967a09ba6a421d7a2191
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-500/model.safetensors.index.json
@@ -0,0 +1,694 @@
+{
+ "metadata": {
+ "total_size": 15132446720
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00004-of-00004.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
+ "model.image_newline": "model-00001-of-00004.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.30.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.mm_projector.0.bias": "model-00003-of-00004.safetensors",
+ "model.mm_projector.0.weight": "model-00003-of-00004.safetensors",
+ "model.mm_projector.2.bias": "model-00003-of-00004.safetensors",
+ "model.mm_projector.2.weight": "model-00003-of-00004.safetensors",
+ "model.norm.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.embeddings.class_embedding": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.embeddings.patch_embedding.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.embeddings.position_embedding.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.post_layernorm.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.post_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.pre_layrnorm.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.pre_layrnorm.weight": "model-00003-of-00004.safetensors"
+ }
+}
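The mapping that ends above is the tail of this checkpoint's safetensors weight index (presumably model.safetensors.index.json): each remaining CLIP vision-tower tensor, including post_layernorm and the pre_layrnorm entry whose spelling follows the transformers CLIP implementation, resolves to the third of the four shard files. As a minimal sketch, assuming the checkpoint directory from this diff is available locally, the index can be used to pull a single tensor without reading the other shards:

import json
import os

from safetensors import safe_open

# Assumed local path to the checkpoint directory added in this diff.
ckpt_dir = "llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100"

# weight_map maps each tensor name to the shard file that stores it.
with open(os.path.join(ckpt_dir, "model.safetensors.index.json")) as f:
    weight_map = json.load(f)["weight_map"]

name = "model.vision_tower.vision_tower.vision_model.post_layernorm.weight"
shard = weight_map[name]  # "model-00003-of-00004.safetensors" per the index above

# Open only that shard and read the one tensor we asked for.
with safe_open(os.path.join(ckpt_dir, shard), framework="pt", device="cpu") as f:
    tensor = f.get_tensor(name)

print(name, tuple(tensor.shape))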
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-500/special_tokens_map.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-500/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..14761dcf1466dc232bd41de9c21d4c617b15755e
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-500/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
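special_tokens_map.json carries the tokenizer's SentencePiece control tokens, and the tokenizer_config.json in the next file wires the same tokens into the slow LlamaTokenizer. A minimal sketch of checking what the loaded tokenizer reports, assuming a local copy of the checkpoint-500 directory from this diff with its tokenizer files present:

from transformers import AutoTokenizer

# Assumes the checkpoint directory from this diff exists locally.
tok = AutoTokenizer.from_pretrained(
    "llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-500", use_fast=False
)

# These values come straight from special_tokens_map.json.
print(tok.bos_token, tok.eos_token, tok.unk_token, tok.pad_token)
# Ids 1, 2 and 0 for bos/eos/unk match the ids used by this model family.
print(tok.convert_tokens_to_ids([tok.bos_token, tok.eos_token, tok.unk_token]))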
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-500/tokenizer_config.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-500/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..23dcf70e8cfc9b16310b6ff3dc98fdbc5adc11f8
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-500/tokenizer_config.json
@@ -0,0 +1,44 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [],
+ "bos_token": "",
+ "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "legacy": true,
+ "model_max_length": 2048,
+ "pad_token": "",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "",
+ "use_default_system_prompt": false
+}
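Besides the special tokens, this tokenizer_config.json ships a Mistral-style chat_template that requires strictly alternating user/assistant turns and wraps each user turn in [INST] ... [/INST]. A minimal sketch of rendering a two-turn conversation through the standard transformers API (the checkpoint path is taken from this diff; the assistant reply is an arbitrary example):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(
    "llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-500", use_fast=False
)

messages = [
    {"role": "user", "content": "What material is the laminate?"},
    {"role": "assistant", "content": "unanswerable"},  # arbitrary example reply
]

# Uses the chat_template field above; non-alternating roles raise its exception.
text = tok.apply_chat_template(messages, tokenize=False)
print(text)  # e.g. "<s>[INST] What material is the laminate? [/INST]unanswerable</s>"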
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-500/unk_vqa_test_pred_3_0.jsonl b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-500/unk_vqa_test_pred_3_0.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..6c8d6c2539d8a57b7d14db75b5c5af282f193a2b
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-500/unk_vqa_test_pred_3_0.jsonl
@@ -0,0 +1,206 @@
+{"question_id": 68236, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What material is the laminate?", "text": "", "answer_id": "Unxsh7TKS3ynFb3uwFYRB7", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68237, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Why are there wires in the black plastic?", "text": "", "answer_id": "KRVAGcpjSsiyReAyV5FL7P", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68238, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What kind of cheese is that?", "text": "", "answer_id": "ZmPLmVJG9ntFu6A9fPu4ap", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68239, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What two different birds are there?", "text": "", "answer_id": "KJxCgFDHny6Gbt6fC7BiaW", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68240, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What sport is this girl playing?", "text": "", "answer_id": "YcnmXxjqTwUbuWPzdMXDRJ", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68241, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color shirt is she wearing?", "text": "", "answer_id": "hrsnuyPCGRFYFTCBHYK2sd", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68242, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Which hand holds the backhand?", "text": "", "answer_id": "87bGnwPU22ThP2un22Anfo", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68243, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color are the boy's shorts?", "text": "", "answer_id": "EFJ2QuDCiDThtMTN6dHV7i", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68244, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Where is the woman's attention focused?", "text": "", "answer_id": "GgKSB37NgfRxrRLr5NuovK", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68245, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What branch of the military are the soldiers from?", "text": "", "answer_id": "oVsXhuPRjS4xeQE237zSno", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68246, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What does the girls kite resemble?", "text": "", "answer_id": "iBvJRDgxM5SyASevRsNSXo", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68247, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the spoon?", "text": "", "answer_id": "JM7VyU9XvtZgVDmNSHgLKe", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68249, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Where is the man riding?", "text": "", "answer_id": "FVdyLTF6BFBbZqxgFLQVNg", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68250, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is on the man's back?", "text": "", "answer_id": "dXFz7pdqk8nxQsMpXqVEZh", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68251, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Which pineapples are ripe?", "text": "", "answer_id": "QccjLUCTd9iy9HeQXQu4c2", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68252, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What type of car is in the picture?", "text": "", "answer_id": "EjZdvhXgqfAvB4etZK9aWF", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68253, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is his jacket?", "text": "", "answer_id": "BfYBVyhjmNKfGmqowSwjbx", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68254, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the boy riding?", "text": "", "answer_id": "dNVt2yXBYqjyqHFU8sZHRF", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68255, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What kind of food is this?", "text": "", "answer_id": "iiE6rNzo4AMmPsMnkuMxH4", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68256, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the door frame?", "text": "", "answer_id": "dXaJPC5aFueVCG7EcSFd5K", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68257, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What kind of room is this?", "text": "", "answer_id": "3nrvqvKndsQU5YqihpWcv6", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68258, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the wall in the bathroom?", "text": "", "answer_id": "5VF4CkjmgMMTXuu9wez78K", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68260, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What are the sofas made of?", "text": "", "answer_id": "b6bL3d2urPMNBWEaSs9LqK", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68261, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color are the sofas's cushions?", "text": "", "answer_id": "FgqNEWDYG4Mrh5aq7wLDKR", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68262, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Which pet is more likely to blend in to the surroundings?", "text": "", "answer_id": "NyNwDbCepxVX4544b5eUsF", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68263, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Are the others adults or children?", "text": "", "answer_id": "FZgs2dUE2Ai2QuGDqeTUC5", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68265, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What easter are they celebrating?", "text": "", "answer_id": "6fzx8hZKfC6vsmMqdJLWsz", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68266, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Why are the doughnuts in the gold pan?", "text": "", "answer_id": "cpgWTxjzHsiVuXky86g3sQ", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68267, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is in the containers on the table?", "text": "", "answer_id": "JcGUriDFf9i9V5EW6UQxoQ", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68269, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What type of aircraft is this?", "text": "", "answer_id": "UypzGxHntnecyWPWq6Lf9E", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68270, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the bicycle carrying?", "text": "", "answer_id": "MqGvdB4idwhvY9CnRHLa5K", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68271, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What are the bike?", "text": "", "answer_id": "UKRWYwPDs3UoQaubSAmLtG", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68272, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What's on the daughter's back?", "text": "", "answer_id": "Nh7sr8tStYLf7WnA988BY7", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68273, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the person on left's shirt?", "text": "", "answer_id": "A7R72fntc4K6mNTGRF2sXs", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68274, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Why would someone want to keep these empty containers?", "text": "", "answer_id": "4U2xeTqdBbg9ZdjkyXNzPe", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68276, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What style of art is shown?", "text": "", "answer_id": "gykzmrb2xNL4KJpMDhsEGy", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68279, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is being kicked up by the ostriches?", "text": "", "answer_id": "RUcohZHkaG9Q79mVWDcrBW", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68280, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What year is it?", "text": "", "answer_id": "BXeYSGeyeakNzwWM4gXiDL", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68281, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the person doing?", "text": "", "answer_id": "5W55EviM4dvHVwmDsMBJFc", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68283, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What type of car is this?", "text": "", "answer_id": "dbtGL3sYLY6PLUNTikebTg", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68284, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the woman holding the frisbee wearing?", "text": "", "answer_id": "kVgyryhhKHJKgAB45F52VQ", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68285, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is on his jaw?", "text": "", "answer_id": "ErpzxvXXNsn8ASfoam4Dhz", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68286, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is pulling the locomotive?", "text": "", "answer_id": "nZ4BfLDDsKBhRSEuCzgAvE", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68288, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Which is the smallest quagga?", "text": "", "answer_id": "i9a2VXgEvEbCedZZH8Qkwn", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68291, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the dog wearing on its head?", "text": "", "answer_id": "oFfjafp96aofeXrk6Fp8wT", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68294, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color are the camels?", "text": "", "answer_id": "7tZ7uTXLfUfTsMnpHFoJSV", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68295, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What position did the wrecked vehicle end up in?", "text": "", "answer_id": "iPdGT3L3sRULHAbjd7tZFL", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68297, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Is the aircraft taking off or landing?", "text": "", "answer_id": "M44Pa2oLvVVg2LNHpeDszH", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68298, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the brown thing on the bed?", "text": "", "answer_id": "Jqc8uXNFYxuXphzg93PBVU", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68299, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What kind of car is this?", "text": "", "answer_id": "KeYactoWDX5XG8UnZgL66X", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68300, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is written on the side of the truck?", "text": "", "answer_id": "7wcRPFk92ZsdqQDFxwkcYV", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68302, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What kind of food is this?", "text": "", "answer_id": "2gW4KeepjEbvJpbn5dx6XZ", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68303, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What material is the room made out of?", "text": "", "answer_id": "7PTTfMDNYtiT3aMgXkGzFH", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68304, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What device is on the left?", "text": "", "answer_id": "hNie5qpQdFk7sCvUedX29m", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68306, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Where is the barrier?", "text": "", "answer_id": "TfNAkRCo9i79jahs5pkuyh", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68308, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the yellow element called?", "text": "", "answer_id": "Z9ro4SfwgGN77afybs62br", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68309, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What colors is the f-16 jet painted?", "text": "", "answer_id": "QUo9nbggQf3sc8v4io8AFa", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68310, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the screen?", "text": "", "answer_id": "4nknfpZMH5gYDvN5kwha3s", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68312, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Who is the grizzly dressed as?", "text": "", "answer_id": "TGJAtaWsMb5hfSUYeRBLzR", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68313, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What appears on the vultures wings?", "text": "", "answer_id": "oBmoNC2CmmoXZNDU23NQNs", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68314, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the man holding?", "text": "", "answer_id": "bnrY4YxsF7a9goPA2hyrqw", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68317, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What wall is this?", "text": "", "answer_id": "VkU9x5Je6Uo2ELTygRsChq", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68319, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Are the animals on the photograph facing each other or away?", "text": "", "answer_id": "3BpsBwiV948sg9WWAEj2es", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68321, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the cat?", "text": "", "answer_id": "SqFGnX396oEeNTUrhuokmL", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68322, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the girl doing?", "text": "", "answer_id": "naxXC68zmBfYAyPKmenU92", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68325, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the girl holding?", "text": "", "answer_id": "3XA6HfwgndYoLYGAvGr47d", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68327, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Which of the birds is the largest?", "text": "", "answer_id": "bZM223YftKmNhb2LYB2Dib", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68329, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is this man doing?", "text": "", "answer_id": "46bgdoaVzpesCWqYEBtaXR", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68330, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the girl holding?", "text": "", "answer_id": "TuANLuRL7bFQ43S7y5pbci", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68335, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Who owns this vehicle?", "text": "", "answer_id": "XgzeRuFyzBYZ6ChDesMTyJ", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68337, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What kind of car is this?", "text": "", "answer_id": "DoUSN3VhcujGKHxp93RRat", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68339, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the woman on the back of the boat doing?", "text": "", "answer_id": "YtPCfbXugNCaM7PFazroTH", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68340, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What kind of car is this?", "text": "", "answer_id": "Ff7ui6hnCuNyPXPXwxRTc7", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68344, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Where are the printing plates?", "text": "", "answer_id": "MLGSCQNh7TQhqRQm48JdbG", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68346, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Where is the facility?", "text": "", "answer_id": "KTPccHR39pSLJM2kzqZJrk", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68347, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the bus primarily?", "text": "", "answer_id": "8EntC34ZBK3c6LBWtr2rQW", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68348, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the bus?", "text": "", "answer_id": "dx2ppnnLWtvQz6NXQBxZZu", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68349, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the girl holding?", "text": "", "answer_id": "GCAyAu8xrT3RELhRx73LSN", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68351, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What kind of vehicle is behind the child?", "text": "", "answer_id": "fXFxDHb3zJXNxsLLDpBZRp", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68352, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What sort of lot are these women standing in?", "text": "", "answer_id": "CJVxdWX5riPTM8CSDLJheC", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68353, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color(s) are the four ovoid next to the truck?", "text": "", "answer_id": "gT7jLPKCPUCeZHga9J52Gw", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68354, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What kind of protective gear are four soldiers wearing?", "text": "", "answer_id": "CnBQLEsb8FLXcjsxWHghZn", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68355, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is on the fuselage?", "text": "", "answer_id": "mxu5GN95zH8ehYVquLVQpB", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68356, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What brand is this wallet?", "text": "", "answer_id": "BwzpQPsCB2ompVh5oDAavJ", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68357, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is on it's front?", "text": "", "answer_id": "m62U4o29qQFnC8e3MbaLJW", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68359, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the girl holding in her right hand?", "text": "", "answer_id": "kz3cS3uQ64g4nkgN6uLD86", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68360, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is this woman dressed up as?", "text": "", "answer_id": "RD3gnMskHdgH2BVLxumdfU", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68361, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Where is the tributary?", "text": "", "answer_id": "4JwaZg3XGiWHLZSGGFoxJB", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68363, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What does the white sensor do?", "text": "", "answer_id": "b58jZuMGUG7qVHaXSS7fZZ", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68365, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color are the animals in the cylinder shaped cage?", "text": "", "answer_id": "SdoMefbka8tcEyt64jhnr4", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68367, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What does the girl have on her neck?", "text": "", "answer_id": "LbkEPY56oqF8AsVLC64yBE", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68368, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What type is the cat in the picture?", "text": "", "answer_id": "kTBbhgvTLPnJPKTWSETTWX", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68369, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is this ship used for?", "text": "", "answer_id": "KDwjRssUz3rWrLY85Fe9cn", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68370, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the second word in the name of the ship?", "text": "", "answer_id": "2wQ24Ev8h8AqJ9CA9vTZ22", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68372, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: The judo is green and what color all the way around?", "text": "", "answer_id": "m5pJKmHq876WAybmnC3i62", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68374, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What game are the girls playing?", "text": "", "answer_id": "S8JiVcz8PgdQhHYSnoZjtW", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68376, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the dog?", "text": "", "answer_id": "5cZQ6LhncYZyMdgzCfHKRR", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68377, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What type of shirt is she wearing?", "text": "", "answer_id": "L5aLUCazPXmjLkpB22epWu", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68378, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the kid holding?", "text": "", "answer_id": "kCveiZGukrrCycTnHaMJNP", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68380, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color are the petals?", "text": "", "answer_id": "FAcDAYUqC4hh3yNre5HvJK", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68381, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the kid trying to do?", "text": "", "answer_id": "gJ3uojaugfQ3QocK7tHcLY", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68382, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What are the snowboarders holding in their hands?", "text": "", "answer_id": "5iqb6bhMTnTsHbrF26sQ4e", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68383, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: The dog is covered by what object?", "text": "", "answer_id": "6b9Vbszo9WZrh5Dt2mvLHe", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68384, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What topping is on this pepperoni?", "text": "", "answer_id": "Vb2txg7qLTBNiVWwqs6Zia", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68385, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the boy riding?", "text": "", "answer_id": "A7X65iVvEd5XCxDwJHc3MB", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68386, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is this dog wearing?", "text": "", "answer_id": "P7q3uqAey6FXUrGYykRQkQ", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68387, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: How long has the girl been standing there?", "text": "", "answer_id": "9AjdXwn6K9MwmTV955uYGJ", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68388, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What does the agreement say?", "text": "", "answer_id": "X9QnQnp6E7MHe9d2Tbju7U", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68389, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is in the photo?", "text": "", "answer_id": "7YgKTRLgRwvXjTuqVPUqbM", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68390, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is written on the screen?", "text": "", "answer_id": "2yXb2RezF6M7QLwRgYU2mu", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68391, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Where is the car parked?", "text": "", "answer_id": "iAAt6sdisqVdCCjSba5HqW", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68392, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What type of entree is shown here?", "text": "", "answer_id": "SHrMruxf7qNPdbKscftD95", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68394, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What colors are the supplies?", "text": "", "answer_id": "KVEVFqHaJJPUEBnZ8dXKj8", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68396, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the person doing?", "text": "", "answer_id": "ZGWy9SDRxUqE9zhbGvPnfJ", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68397, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the hippopotamus doing?", "text": "", "answer_id": "mYMLDZKJWr4DKEtKwNJURe", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68398, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What would the water smell like?", "text": "", "answer_id": "hLxVr4tZiqV2FQ9zcsP5ae", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68399, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Where is the kid skating?", "text": "", "answer_id": "XK6KB9NFFfrTM2aAu92ykb", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68400, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color of t shirt is the kid wearing?", "text": "", "answer_id": "ZbfX8V7gjeKBZrdYHFxzLd", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68401, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the tub?", "text": "", "answer_id": "QiSXauhimyQAEif8m7akxi", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68403, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What are they about the ball?", "text": "", "answer_id": "PX5cNPtwYsG4qb4masn8M5", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68404, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Who is wearing red trousers?", "text": "", "answer_id": "HXHGxcdnV8Tzmoht4y7KiU", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68406, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What colors is the bus?", "text": "", "answer_id": "mnhTsM2A6kjUkPdRa8CSjj", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68409, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color are the slabs?", "text": "", "answer_id": "WdpN2WUtrzUeQTaFWv2GKB", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68410, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What kind of beverages are in the glasses?", "text": "", "answer_id": "kxdGEEaQ6kER4YtgcRQ9ij", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68412, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the large briefcase?", "text": "", "answer_id": "G2y3cvrDm9CmaPjcS55eAq", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68413, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color are the birds?", "text": "", "answer_id": "UmKf7yc6nUv6r37f4fBDJf", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68415, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the green tomato?", "text": "", "answer_id": "JBXnwbpX5qZHg5mBBFC6mi", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68416, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Does this look to be a highway or unincorporated road?", "text": "", "answer_id": "BkANDHtrbJHpvrEaxBMhdn", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68417, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What all of the rearview in this picture called?", "text": "", "answer_id": "CW4sJnPdu8FxFHQ42Hg4s6", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68418, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What are the matches made of?", "text": "", "answer_id": "WkUq8oLrxiWHxjAdyxajav", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68419, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the person doing standing on the sidewalk?", "text": "", "answer_id": "gz24PqXpmYMKkiKbWpJBFt", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68420, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What shaped mascot is on the plane?", "text": "", "answer_id": "Zj22GpW3UxPKx3AK3JYkDA", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68421, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the porcelain?", "text": "", "answer_id": "4X4ar5cjEmmMrwih99qnHP", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68422, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is on the earthenware?", "text": "", "answer_id": "PXd8Yk63pVqMbaTk8bhQPq", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68423, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the name of the type of painting on the wall behind them?", "text": "", "answer_id": "SoDLWE3gjRRem2rkp5n7ZU", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68424, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the butthole board?", "text": "", "answer_id": "WgKuhYSA9F9NQiizZtCu3e", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68426, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the girl holding?", "text": "", "answer_id": "4GYncc8xtSAWeFpatNkepk", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68427, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Why are the others wearing wetsuits?", "text": "", "answer_id": "gbzSgMqZeJerFqakwKy9do", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68428, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is this topic?", "text": "", "answer_id": "6HxjvJDUx8hvU74wY7HfgM", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68429, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the cup placed on top of the oven?", "text": "", "answer_id": "DMrhhMMPQbL8bcXbN8KW3K", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68431, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What does the refrigerator say?", "text": "", "answer_id": "GhTs6QYnTLazi4vDQia52i", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68436, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the girl holding in her hands?", "text": "", "answer_id": "6qYPW9ysZELwCz2iqUH7m9", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68440, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Which font does the boy have?", "text": "", "answer_id": "kR69WpxoEiPdkUCpeAAm5c", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68441, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What are the people loading on the train?", "text": "", "answer_id": "3Cbzme5iwGg3ozduCD9fpr", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68444, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Where are the eyeglasses?", "text": "", "answer_id": "Bbhrw2qMM2KsG8j7LjgGuV", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68446, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What did the girl just throw?", "text": "", "answer_id": "hpyQqWv2YojmjtApWHUQac", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68447, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the platter?", "text": "", "answer_id": "MxVnp4AWmyPiQf5Ry8W8WG", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68449, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the oven?", "text": "", "answer_id": "FoL7EaHpomPrNG6ZQctp6A", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68455, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the girl holding?", "text": "", "answer_id": "byohE8bbPEczoYWVDXftQt", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68457, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the man trying to do?", "text": "", "answer_id": "7ULoqEydiFD8NUsvfbtJuW", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68458, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What position is the skateboarder in?", "text": "", "answer_id": "aZWkTvKLSKpwpq9QM4rUXF", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68459, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the skateboarder?", "text": "", "answer_id": "5PzBEj8j3eEiidLsEJZj6V", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68460, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Who is holding the skateboarder?", "text": "", "answer_id": "fMnUWXgSTSk4mGb8Fa2C9H", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68463, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: How are the eyes?", "text": "", "answer_id": "dTBtctjmPLjBuN2qMmVDfX", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68464, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Who has spectacles?", "text": "", "answer_id": "FkWUKv36xwrGfqyqyPQGoo", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68465, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the badminton court?", "text": "", "answer_id": "MHe2E9kWXG5uFARPKvAeYW", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68466, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: How high are the boulders?", "text": "", "answer_id": "FpBShbT35gFtYhsW7FtuuH", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68467, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Is it day or afternoon in the image?", "text": "", "answer_id": "4NjJovCowpQhUbVjDosGRD", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68470, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What colors are on the kite feathers?", "text": "", "answer_id": "EmcuqszajgPpwV78szFiP7", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68471, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What things would be inside the briefcase?", "text": "", "answer_id": "BvmeKyA7DMDLhdhukjR6P9", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68473, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the dog moving around?", "text": "", "answer_id": "m4U5KKeHpLxP9fZVzTkPuq", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68474, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the style of the dog's mane?", "text": "", "answer_id": "gxMAYoyke37HGHeCw8Xqea", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68475, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the man riding?", "text": "", "answer_id": "meKB97ptU9MRaP8j6j78Xb", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68476, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Which hand is the boy using to hold the umbrella?", "text": "", "answer_id": "khu5qsjZNAemmMnBHyZ5nt", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68477, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What kind of flower is in the background?", "text": "", "answer_id": "dWXHgXTWgLpeQFF8B6rYPH", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68481, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is at the top of the cupola?", "text": "", "answer_id": "92x4PANxhpfQ5HMyXTGP9e", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68483, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the grounder made of?", "text": "", "answer_id": "L4DtWkrKLU6NoZufwMwzA7", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68485, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the pooch sitting on?", "text": "", "answer_id": "LfwcoU2VZPBcrDENDby2bg", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68486, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What design has the room cover?", "text": "", "answer_id": "asHRfTzDhqdoNgynNBwth5", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68487, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the food menu?", "text": "", "answer_id": "h2Ft4Xv2E7hVeTLQq293BX", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68488, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What birds are in the photo?", "text": "", "answer_id": "DqNj8JdzEosgNXL23ai5gY", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68492, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What car is that?", "text": "", "answer_id": "YZ6V5EsxM2o3uZCZehnSnt", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68493, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is in the reflection in the mirror on the right side of the photograph?", "text": "", "answer_id": "AxPHTW6Ak6mQQtfKu2Fdx7", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68494, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What brand of toothpaste is the child using?", "text": "", "answer_id": "5RzbdHChpd9VKKd8YS7zUw", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68495, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the girl holding?", "text": "", "answer_id": "FdjEjWJ224zigC8KEWaR2q", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68496, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the girl's shirt?", "text": "", "answer_id": "dk7BZALhz3cEKpCh8hs5Uo", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68497, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the baby holding on the right?", "text": "", "answer_id": "kTy9z7rkLLXouAgCnXwkWm", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68498, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Where is the girl's hand?", "text": "", "answer_id": "miVfw3e5Q6cJuYGr4FWhiw", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68500, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is this room?", "text": "", "answer_id": "6Z2qwhABtmYz5QmxMYS9jj", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68501, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color are the front kids cleats?", "text": "", "answer_id": "Lmn3gd88rFhbWhfHCaMGzH", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68502, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What room are the others in?", "text": "", "answer_id": "FVsD6EicCvtDBFJWMDTRgd", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68504, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is hanging above the girl's head?", "text": "", "answer_id": "jVsbSn2CcfSihUiPtA6ZrG", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68505, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the dog laying on?", "text": "", "answer_id": "cHp2yByWrK8Z2gFuFWJ4Ho", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68507, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Is the slalom slanted to the left or right?", "text": "", "answer_id": "8LAk4qMU72PJ2fznay3oxE", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68509, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is on the vegetables?", "text": "", "answer_id": "eZBstzHjuCfASXG4hd8jj4", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68510, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What article of clothing identifies the male?", "text": "", "answer_id": "Eaz4ppkxwXoCVZfuU2Hpkd", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68511, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the crowd holding?", "text": "", "answer_id": "H76BGfUNB6Jb2mXVaYb2uX", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68514, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is on the little girls head?", "text": "", "answer_id": "bioRJUf9hPBnhfsHfQf2cH", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68515, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What kind of emotion does the girl have?", "text": "", "answer_id": "hTiZdt6dHkfVxTk8mfgnk2", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68517, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the hallway made out of?", "text": "", "answer_id": "P2fhZqewweEbWErrofkUkh", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68518, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What kind of services is shown?", "text": "", "answer_id": "ZPaU5BjS2PMBnQn6zDoTVr", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68519, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the script called?", "text": "", "answer_id": "8NPwXym7CEHDE8zhuifUHs", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68520, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What's on his chest?", "text": "", "answer_id": "gErbTCqPzqsKqdXX4o3h7Q", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68521, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What brand is the badminton racket?", "text": "", "answer_id": "Lyv527Qp6esEcPnqi9hDiR", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68522, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Which knee does the man have a sweatband on?", "text": "", "answer_id": "7K685RwXDjyDKSuxNSnqPg", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68525, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What kind of cars are shown?", "text": "", "answer_id": "gqqiFvF6sHHXA6aABN9QqU", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68526, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color are the trains?", "text": "", "answer_id": "FQQbvXmk8eztcBJnmQ8RJB", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68527, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What colors are the bus?", "text": "", "answer_id": "7BMQr6wWzSDreJoq5fYRdT", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68528, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the bus?", "text": "", "answer_id": "DjEBzuNAn85rENJsxgVDoC", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68529, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the oven?", "text": "", "answer_id": "RHUhgRMkJyb3KRVDY8Gc4m", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68531, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the drawers made out of?", "text": "", "answer_id": "PnBVP7tDK9nX5KSRZUGz2x", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68533, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the shears?", "text": "", "answer_id": "gEND5HCcyy2t2oxWJGpewD", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68536, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the bladder?", "text": "", "answer_id": "f67xrPsBm2Wsjf6roYaSs7", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68537, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: These pastries are wrapped in what?", "text": "", "answer_id": "gD87xZsHuXFU2jL8g4asVV", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68538, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What dinner is this?", "text": "", "answer_id": "3AiMJgm7Zsa3chgFHuxouT", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 68539, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is on the French hamburgers?", "text": "", "answer_id": "BnvdDryjodJSjKhXsoAN2N", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-500/unk_vqa_test_pred_3_1.jsonl b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-500/unk_vqa_test_pred_3_1.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..523c3dd6c7d03e1f9cafff4d170988c2b6ca559d
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-500/unk_vqa_test_pred_3_1.jsonl
@@ -0,0 +1,204 @@
+{"question_id": 79657, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the dog hiding in?", "text": "", "answer_id": "k4Tj6rnAmjS8nt3gL8ER5h", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79660, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Is the sun to the photojournalist's left or right?", "text": "", "answer_id": "4GT6dk9LQDLEvL8yKsFAU4", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79661, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the incandescent?", "text": "", "answer_id": "i5RGLdtYawRoysoCH8JeCk", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79662, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Where is the incandescent?", "text": "", "answer_id": "h5X9Mi3FGvMmmc9DjmAthC", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79663, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What print is on the 3-3?", "text": "", "answer_id": "8jDBDJTvg7wqdkvhVhksCh", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79664, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the kid wearing?", "text": "", "answer_id": "TVnsmGzuucNKCq4PRqMui2", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79667, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What direction are the soldiers looking?", "text": "", "answer_id": "7Ukgp4PWbsmi7qt8UVEzzV", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79668, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Where is the barrier?", "text": "", "answer_id": "L9fKwamGq9tUcrhuQmxyKQ", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79670, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What birds are seen here?", "text": "", "answer_id": "74ucYMiXp6iYHZPZbaZhbw", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79671, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What birds are in the picture?", "text": "", "answer_id": "X5nDjecNUVtNAKKyxtZjph", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79672, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the girl holding?", "text": "", "answer_id": "b9rq3tFGAxFghUJo8wzUkK", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79673, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the woman sitting on?", "text": "", "answer_id": "d6fAQBaveuWdHtQ6z4Jds8", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79675, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What beer is in the cup?", "text": "", "answer_id": "fMWywwVf9CpoNuNG37wLLt", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79676, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the casserole?", "text": "", "answer_id": "gocYPem6KMBtPmQ3h9oj2T", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79677, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Why is the woman wearing gloves?", "text": "", "answer_id": "XBRyjqvBiE4vtnpWJyQ6fQ", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79678, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What kind of food is this?", "text": "", "answer_id": "XjKxqgwj2HUWaifsxAXd8f", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79679, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What kind of breads are in the blue bin?", "text": "", "answer_id": "LL7ZrdshMizMWFVDkeFbLe", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79680, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is on top of the train?", "text": "", "answer_id": "JRkRQxoe5jTYrTfq6YBUM6", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79681, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is on the train?", "text": "", "answer_id": "G7bDQ4nQJwncvspfbKKqbW", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79682, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the make of car?", "text": "", "answer_id": "QS6Xi3JB32bCRpCH8CXWyH", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79683, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What are the doors made of?", "text": "", "answer_id": "U2yBbzy255mJAboDXGfqgq", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79685, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the girl doing?", "text": "", "answer_id": "HEBy26y2dsP9xjLaq8Cpd6", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79687, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What screen is used in this traffic light?", "text": "", "answer_id": "9zXz9doN2fmhmp3phXPc7K", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79688, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the leadoff's jersey?", "text": "", "answer_id": "BARDVjBvdN362Tw7wwtXzq", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79689, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the pitcher's uniform?", "text": "", "answer_id": "KijRsz5cnLM2TeFD643AGm", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79690, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What many of the bananas are green?", "text": "", "answer_id": "h7g7t5mfksmz94iqTZMPNm", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79692, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Where are the ships?", "text": "", "answer_id": "3wHQ2ANhiuQa57BAJ9o3rK", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79693, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the woman doing?", "text": "", "answer_id": "H6cX2Gkh6cFFR34EzUv6Mm", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79695, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is in front of the vehicle?", "text": "", "answer_id": "K2KyLzNxMKd8MkWcuKpV2r", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79698, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Where is the dog?", "text": "", "answer_id": "Euo2j6voKwizjCXQxK484k", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79700, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is on the luggage?", "text": "", "answer_id": "oYnGCdh4ugAJRQ8f3SYsGF", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79701, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What type of basement is in the picture?", "text": "", "answer_id": "CsuWqZCY842KfrRbUBq5xA", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79702, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What does the dude have on?", "text": "", "answer_id": "38Rd8hupYBk7iRb6JubRu5", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79703, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Where is the 3-3?", "text": "", "answer_id": "6DQTDdvdBP7gHrfErV85vo", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79704, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the room made of?", "text": "", "answer_id": "R5uivRzbHAcnAQfcBYxcfC", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79705, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What kind of pots hang on the wall?", "text": "", "answer_id": "Z9ocMYKm4ijsiha6fEbErg", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79706, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What year is this?", "text": "", "answer_id": "Ki8HZPxtpbtuyR6npkWSMw", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79708, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is this cat?", "text": "", "answer_id": "b2xqA4zsqkRuKpuBCErGbt", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79709, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the room made of?", "text": "", "answer_id": "AYJKvfpnV99Kts7dYDwHfW", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79710, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is on the humans head?", "text": "", "answer_id": "dfo57Us66kzvt99YEpXsKi", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79711, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the man wearing?", "text": "", "answer_id": "j7eQidJVUz4xEYdguW6wei", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79712, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Why is the girl smiling?", "text": "", "answer_id": "Ud9suz7Sdain33JPzkkCuB", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79713, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the woman riding on?", "text": "", "answer_id": "87Zksc7pUB4c4SDoxykuRR", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79714, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the profession of the woman?", "text": "", "answer_id": "jVp4T9RTvtsfEk8Kt7sQJL", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79716, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Where is the bus?", "text": "", "answer_id": "GWdjmgWwgXmkeBqzudrBs5", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79717, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color are the zebras?", "text": "", "answer_id": "oN74gL42RYf3Uui4qhnBc3", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79720, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Where is the dogs in the photo?", "text": "", "answer_id": "EBFwgvBGig2nisNFoceoyu", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79721, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: As far as pet tails go, does this cat have a short or a long tail?", "text": "", "answer_id": "AS3gMatzpQcRzNf3VPoauJ", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79722, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What are group does this female exist in?", "text": "", "answer_id": "UVPLJfGiAR46aUGohb4XG4", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79724, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the man playing?", "text": "", "answer_id": "5boVDtKaWeWd4fFkJH83BP", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79727, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What are the giraffes doing with their trunks?", "text": "", "answer_id": "ic4hddR2b96m8eHq7hnsXH", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79728, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the orange fruit?", "text": "", "answer_id": "Jh2nZocyXALbDKoeVwPMHJ", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79729, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What kind of carrots are in the tinfoil?", "text": "", "answer_id": "6uPjhFFXp8BngCGdHTYGzE", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79731, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the color of the cloth?", "text": "", "answer_id": "duCTJZCXrNdnWakpptM5CJ", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79732, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is painted under the skateboarder?", "text": "", "answer_id": "QQCffR9HSeWDMi5w2WzCeD", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79733, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: If you were inside this screen, what country do you think you are in?", "text": "", "answer_id": "4YbLXDiZjdx7XvyqXDf7Kb", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79734, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What sport is this woman playing?", "text": "", "answer_id": "S5pjJ6isbiSvRvnWfqptfV", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79737, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color are the paper slabs?", "text": "", "answer_id": "dbapnPpgBThzipE3p4qRRy", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79738, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the dog?", "text": "", "answer_id": "7w4yRNWpQfpbDM9gTbKeeP", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79739, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Is the tree verdant or bare?", "text": "", "answer_id": "e5yQHGuTJVTJY8aYZZrXVY", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79740, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: How long is the girl's hair?", "text": "", "answer_id": "X6Q4videAf7x2Yhro2PoQd", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79742, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the porcelain?", "text": "", "answer_id": "bXhhQoYkUGNkFA8w2QZQ2e", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79743, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color are the bouquets?", "text": "", "answer_id": "35KqEbjYKgFA55t8FQPLhV", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79745, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is on the dunes?", "text": "", "answer_id": "dBNJpQ2jJAzUfq4qwzgUFg", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79747, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the human here?", "text": "", "answer_id": "NfE6sgRqbg8JRtp6LDRkt2", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79748, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What meaning is in blue?", "text": "", "answer_id": "kaHUT9ha4YHVCm6UkK5zQn", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79749, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What type of building is visible in the background?", "text": "", "answer_id": "4sHi6kBfhH9cHJNyJPWFuq", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79753, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the dog?", "text": "", "answer_id": "SRQyZeE3g5yVgGowwE9udP", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79755, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What does the man have on his eyes?", "text": "", "answer_id": "5xHLcXAYYxwWoeVp3iPttg", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79757, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color are the bouquets?", "text": "", "answer_id": "nWXYsEgXZBqh6jNj2wAoAr", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79758, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What kind of rooms do they have?", "text": "", "answer_id": "o5K3VUFo2DEgbSeQmM3ZrC", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79759, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What bus line is shown?", "text": "", "answer_id": "XogYs5Eb7TNQaTGiLiY8CX", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79760, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What are the knobs on the electronics for?", "text": "", "answer_id": "TTsSjVR88Vw3Lf9v6VhFt9", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79761, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What charitable cause is represented on #35's dress?", "text": "", "answer_id": "9RgEFTGsi7sPd6d8g4VakD", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79764, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: How long is this man's hair?", "text": "", "answer_id": "8HwD9okYUShunbwVN3x2wt", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79766, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Why does the cat have to stay inside?", "text": "", "answer_id": "bNSXVBFSYujSE9tEat6Xo6", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79767, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What kind of clouds are visible in the sky?", "text": "", "answer_id": "TV9kqetiCSdeWc96VgZmYD", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79768, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the girl doing?", "text": "", "answer_id": "4SAxvLm55qFQUnMaZwejRc", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79769, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Is the girl happy or hungry?", "text": "", "answer_id": "C7HADt28qCxJFSwyPgovKy", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79770, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the girl doing?", "text": "", "answer_id": "VhEHfwB3AYQVwvnK7bzJE7", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79772, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color shoes are on the skateboarder?", "text": "", "answer_id": "oMaRai4Qa33T6yhG9EwscG", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79773, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Where is the skateboarder?", "text": "", "answer_id": "HkZdTLCJfryQJzHRny5gwF", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79776, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is on top of the pantry?", "text": "", "answer_id": "XYrqSVXNyS5evzb3oWC8Ld", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79777, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the year?", "text": "", "answer_id": "nZDkmiovqG34hW4VezzFaa", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79779, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the teal colored construction material?", "text": "", "answer_id": "PTyR2uD3xHNJ8se2Gcvtvf", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79780, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What town is this?", "text": "", "answer_id": "4GZgbme3FfDAAFtVbqy2hH", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79781, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What kind of vehicle is this?", "text": "", "answer_id": "SjMzsQwbGMFacyRDH3w6hr", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79785, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the frock?", "text": "", "answer_id": "AV2uFsTREe2WJTZJcc88t5", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79786, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Is that a very big or a small restaurant?", "text": "", "answer_id": "ey62Z2fAeWaY8vo5QS8NPc", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79787, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the structure of the clock?", "text": "", "answer_id": "WmqffZRTeQyb989Dx8Lbnf", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79788, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is above the bus and railroad tracks?", "text": "", "answer_id": "5bqmKBa225m42dvUsA8DuB", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79789, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the dog?", "text": "", "answer_id": "dXh2SbLHYAUjzd9fy5VcFQ", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79791, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the market made of?", "text": "", "answer_id": "DcnxRcW6v2QGRU2VJ5tCPf", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79794, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What kind of herb is on the left?", "text": "", "answer_id": "kFkoPARMhLfrjEZRDsmTHX", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79795, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What brand of vehicle is parked on the right?", "text": "", "answer_id": "hvif9TpPhUw9Geoz4dtXzb", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79797, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What kind of car is this?", "text": "", "answer_id": "9WDxzsFZtA3YyyAGM3PHcX", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79798, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What kind of event are these people dressed for?", "text": "", "answer_id": "NtNXUfEEjiCaCJKU2ZTjUP", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79800, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What kind of socks is she wearing?", "text": "", "answer_id": "icxQFVM4prQRNS3FvRy9vY", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79801, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What colors are the investors?", "text": "", "answer_id": "TqZTMDoa29Gf8MgnA4WYU2", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79806, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What company sponsors this game?", "text": "", "answer_id": "jz9dkWUGn7ga7i2AxhrPpp", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79807, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the man about to do?", "text": "", "answer_id": "NGjPepWk5dncrQuxZEJnrC", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79809, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What easter would you guess is coming based on this photo?", "text": "", "answer_id": "FCYr4ETN3ERpLeNVYTqeWK", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79810, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color shirt does the kid with the bigger pumpkin have?", "text": "", "answer_id": "RgioESmBhTj65txakN7mD5", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79811, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Where are the anchovies?", "text": "", "answer_id": "chU62Ku4wb3o2EbGGQWmWX", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79813, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the guy wearing?", "text": "", "answer_id": "YeRDJ4iDo4vvc3KdnxReyu", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79817, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the cloud on the sidewalk of?", "text": "", "answer_id": "RWjJKaqNQ6eGhNkEMfSBTu", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79818, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Is the oven electric of gas?", "text": "", "answer_id": "8Dovg72W2CakWi5m2DBPyg", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79819, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the silver sensor used for?", "text": "", "answer_id": "KYMPYzPebjRY2nfxZwPS88", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79821, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What direction must the motorcycle take?", "text": "", "answer_id": "fehHi5QEbM4dp96HTBTxTF", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79822, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Who is on the ride?", "text": "", "answer_id": "WBupBsizRj6m3q93aUrQLZ", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79823, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is on her wrist?", "text": "", "answer_id": "VUqmv9CNC5YivuL4CafmKN", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79824, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is decorating the room?", "text": "", "answer_id": "Hz3XjnCCxzLPfjvfqXYrHb", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79825, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the window made of?", "text": "", "answer_id": "VWECrdQETGVvSoVRfurBDK", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79826, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What are the doors made of?", "text": "", "answer_id": "LMuYRxaeTkN949zR85TtrM", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79831, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Where are the birds looking?", "text": "", "answer_id": "QrTrv6ZXxYP62RqkDAtmSQ", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79833, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the kid eating?", "text": "", "answer_id": "RMXdoddZUKg9Pga2foANx9", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79834, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: How are the two signs attached to the vaulter?", "text": "", "answer_id": "LgrRyycr3afvcDMrvnAeCd", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79837, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the train?", "text": "", "answer_id": "7yiPKPQ3ZsJrr9RmgsfZbt", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79840, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What colors are the barbed?", "text": "", "answer_id": "RNHFM6xRMwbU5ZuGxmrnoa", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79843, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the scarf?", "text": "", "answer_id": "TASLubmvgWzRu5shbMDnkq", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79844, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What kind of bird are those?", "text": "", "answer_id": "iiZvKbUQ4KVsHMucpJT4vv", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79846, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is on the woman's back?", "text": "", "answer_id": "hMsqY7EU94iAn27LhW2nki", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79848, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What type of clothing is the woman wearing?", "text": "", "answer_id": "mvrTekSCUNS5gJoGMCyuRE", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79849, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What kind of bench is in the picture?", "text": "", "answer_id": "neRGrP3HoaJSnj7PcGEVwF", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79850, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the bus?", "text": "", "answer_id": "2SHLVNsborRrvtwzdfiWvd", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79851, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the woman standing on?", "text": "", "answer_id": "6dLdBriXuD9XmCNcFtjjmj", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79854, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the person's board?", "text": "", "answer_id": "89sfjYerzT5LNrAt4xa7Cy", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79856, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the name on the flag?", "text": "", "answer_id": "6QnLerZQdomXSRXrcLHPMe", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79857, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What snowfall has fallen from the sky?", "text": "", "answer_id": "HKXFyqbewYXZMqsux3w5hV", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79858, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the synthesizer?", "text": "", "answer_id": "8R97CGPDjFckVq3sPSoiQS", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79861, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Why is one hippopotamus's face darker than the other's?", "text": "", "answer_id": "NBYppsQjW5FmCqXVAPcTrT", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79862, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What are the zebras doing?", "text": "", "answer_id": "MLu4sz4aNKvp6BCtpUDBwx", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79864, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What do the girl in the background hold in her hands?", "text": "", "answer_id": "e3jzdCUBE8tCJGMWzZ7TLd", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79865, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is printed on the boy's shirt?", "text": "", "answer_id": "eHaoc49RvNegp3czF4fuBN", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79867, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the woman on the left holding?", "text": "", "answer_id": "BvpH4tEcM9v2kb54oaqREF", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79868, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the woman holding in her hand?", "text": "", "answer_id": "5TZHkSmfWyPNwmfrNgjaQH", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79869, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What kind of animal is this?", "text": "", "answer_id": "cHPNKoZqJ4tzV7bPXweetv", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79870, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the grizzly?", "text": "", "answer_id": "7QME7w9hbQsM4DUVi4B64H", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79871, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Is the ground made of rock or mud?", "text": "", "answer_id": "NkjG3N6k9jFdkJJMa9eKJM", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79873, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color are the hand cloths?", "text": "", "answer_id": "9ujVwbp5MoydfiVd6W3cBg", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79874, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the air made of?", "text": "", "answer_id": "eb64eXrsav93xsEievaMFR", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79875, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Which company's building is in the foreground?", "text": "", "answer_id": "5a3R4oGmEWDDGeip3DDgwp", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79878, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Was this a spring gathering or winter gathering?", "text": "", "answer_id": "msxUkSx3STST63CSvrqh3B", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79879, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the napkins under the donuts?", "text": "", "answer_id": "KgmrrEHCSpDJN3UkzRq4kf", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79880, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Is the briefcase open or closed?", "text": "", "answer_id": "g5f9n4whNSHgPzoxZfhbDi", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79882, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Why is the room white?", "text": "", "answer_id": "6KDA8fniKmUJqku6Mnfcxa", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79884, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the person's wetsuit?", "text": "", "answer_id": "9cuRaw9fFDDHCLXvYRnMym", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79885, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What other fruit is shown with the green one?", "text": "", "answer_id": "JwJV4okwtXHFJ4XFFp76tR", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79887, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the badminton court?", "text": "", "answer_id": "kmhNS7DYYVGiqsQT3jh4ka", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79888, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is in the man's right hand?", "text": "", "answer_id": "GhCwQxsyYjikhas2o8p8DM", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79889, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the girl holding?", "text": "", "answer_id": "GGoJBicN5HRniB6MedazwQ", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79893, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What was the center green dough designed to look like?", "text": "", "answer_id": "RoFMTKZePCq9WWBeTaWxzS", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79896, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is in front of the zebra?", "text": "", "answer_id": "WQBLDnVS7QyHTMnhhzFKVu", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79899, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the person's hat on the right?", "text": "", "answer_id": "9Snw67pT8y49LeB5KaRwxA", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79900, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the main color of the bus?", "text": "", "answer_id": "NySeeTtHbHZCGxUdrKehsa", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79901, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What type of carpeting is behind them?", "text": "", "answer_id": "9zKZJw3Q6u9DFfr24xZrF3", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79902, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color gloves is the woman wearing?", "text": "", "answer_id": "HfzQcWKWNBjLeQSzLEXZZK", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79906, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What electronics are on the drawers?", "text": "", "answer_id": "XXcFcGpCmcu7A2vkNoewer", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79908, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the bus?", "text": "", "answer_id": "kJHiQvWhAhGc2edUhoZ9m4", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79909, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What kind of sofas are these?", "text": "", "answer_id": "fEaho9vvRLEejTQmYUeBuH", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79911, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Why are there so many headphones?", "text": "", "answer_id": "Wje9nKr9pv7nYRJ3h8YtA6", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79912, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Where is the woman standing?", "text": "", "answer_id": "6p62yQowEVvmS2woJDCnpW", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79914, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the dog?", "text": "", "answer_id": "8iDYSqcD8PsnqPoGqo2avQ", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79915, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is in the background?", "text": "", "answer_id": "fLXj7QbscZUTNJ2fqLMkgi", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79916, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Will someone throw this toilet in the trash or keep it here?", "text": "", "answer_id": "5M7BKGpiLgtt6iyhu33faX", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79917, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the machine on the right?", "text": "", "answer_id": "JUSNL7aHJ8KjDvrhbZdUPa", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79918, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the girl about to touch?", "text": "", "answer_id": "Fxama39wE6aD7gV6VZRTg9", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79921, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What does the woman wear on her right so she will know when it's about to get dark?", "text": "", "answer_id": "JVtKUJBgkT6fygkZ8kePH7", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79922, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the fruit platter made of?", "text": "", "answer_id": "AFUtjKAeWBuNqZhHgihicw", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79923, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the berries placed in?", "text": "", "answer_id": "ZeAMKr5QsbohanxZxVU3ko", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79924, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the women's backhand touching?", "text": "", "answer_id": "TJfPUB8rGs4WwL2p4LJx9d", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79925, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the judge?", "text": "", "answer_id": "AQ9QmzssRfb7rgPnh6k3Uh", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79926, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the room?", "text": "", "answer_id": "5hyKHjg3XHPjBU2wucxj7e", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79927, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the room?", "text": "", "answer_id": "TmP7fWXyHy8JRNu2638xC4", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79928, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What kind of noodle is that?", "text": "", "answer_id": "W7BKsCLfxTfqQVY28FwKgJ", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79931, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Where is a blue lamp?", "text": "", "answer_id": "cA2S3vnYKK2LQVRepRySYk", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79932, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is in the bowl in the background?", "text": "", "answer_id": "gXi4vvYEz8SxfydQupHjzK", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79935, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color are the shears?", "text": "", "answer_id": "Sdo9NSzBfJMzpQsc4mAfXU", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79939, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Where is the bus?", "text": "", "answer_id": "29ZfRwRPTTZmnpJjtvnGxZ", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79940, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the commuter front?", "text": "", "answer_id": "NtkbeVzb8sfhryUm5hhbT7", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79943, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What type of art is this?", "text": "", "answer_id": "WJaCojRSnLrhGsDZTnDQjw", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79945, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What meat is visible?", "text": "", "answer_id": "TZYKUYBfVmLwNKq33nHRth", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79946, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What type of vehicle is in the picture?", "text": "", "answer_id": "SH8ryfwG7t2h8GSiLsqmnq", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79947, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What kind of bag is shown in this photo?", "text": "", "answer_id": "2rXiSgpyxN4etji4RukCzu", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79949, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the white element?", "text": "", "answer_id": "P8NUrCF3rno3Mb6i5kQ2rb", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79950, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is behind the throws?", "text": "", "answer_id": "mSgrjaTHHFT7L436QresSc", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79951, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is large object is visible in the foreground?", "text": "", "answer_id": "eyVGafYtFpNTPB3sKMk9De", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79952, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What screen effect was used to create this image?", "text": "", "answer_id": "YR3JczZKxzPgvrZJuyDcnd", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79953, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the woman looking at?", "text": "", "answer_id": "hJjKLnK3YXdz7kmQPwSinn", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79954, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is picking up boulders?", "text": "", "answer_id": "nBTQspPwtbteg8wQGBxg6N", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79955, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is on the back of the car?", "text": "", "answer_id": "XG9dNcejbn9JBKqAkfCtpf", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79957, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the cat?", "text": "", "answer_id": "eKXgJHA4ioDgboZP7hpUtv", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79958, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What is the dog looking at?", "text": "", "answer_id": "RpYtsdpN5YWcLPZuYh3kYC", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79959, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color are the rectangles?", "text": "", "answer_id": "7m9hJ2oYpMUfgN7oG7AKqW", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79960, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What holiday is this person most likely cooking for?", "text": "", "answer_id": "a6dQ85GojxoPyWbjNaS7LU", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79961, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What kind of hotel is shown?", "text": "", "answer_id": "PjW2UzRzVXNt6XgASJC8KR", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79963, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What type of car is there?", "text": "", "answer_id": "eYT39YEb6HQd3VuNSqKzUZ", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79964, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Where is the dog?", "text": "", "answer_id": "DX8yY4hCqAscKk5zySeEqZ", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79965, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color are the bater's jeans?", "text": "", "answer_id": "CUx3qchTwjuUL9GbydD8CE", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79966, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What's on the platter?", "text": "", "answer_id": "7Kmtkb9VLZMByCS9fw2B6w", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79969, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What color is the round deal?", "text": "", "answer_id": "46iEkwvneym3j7fVmJzoUQ", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79970, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What kind of sink is this?", "text": "", "answer_id": "kwvHcYU5VyfkFjLT8ZNGqZ", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79971, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: Where is the facility located at?", "text": "", "answer_id": "9Xa3vtwtC6nvSgfPnXEsqy", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
+{"question_id": 79973, "prompt": "I will show you an image and one question. Please try to answer it by directly generate the answer.\n\nQuestion: What side is the bandanna on?", "text": "", "answer_id": "ByTZNQRXjr2RzbKPNDR4rr", "model_id": "llava-v1.6-mistral-7b-unk-vqa-v1.0_checkpoint-500", "metadata": {}}
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-500/zero_to_fp32.py b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-500/zero_to_fp32.py
new file mode 100644
index 0000000000000000000000000000000000000000..c98caae31534368be22b67fc4ae906836c992a8d
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-500/zero_to_fp32.py
@@ -0,0 +1,587 @@
+#!/usr/bin/env python
+
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+# This script extracts fp32 consolidated weights from ZeRO stage 1, 2 and 3 DeepSpeed checkpoints. It gets
+# copied into the top-level checkpoint dir, so the user can easily do the conversion at any point in
+# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+# application.
+#
+# example: python zero_to_fp32.py . pytorch_model.bin
+
+import argparse
+import torch
+import glob
+import math
+import os
+import re
+from collections import OrderedDict
+from dataclasses import dataclass
+
+# While this script doesn't use deepspeed to recover data, the checkpoints are pickled with
+# DeepSpeed data structures, so deepspeed has to be available in the current python environment.
+from deepspeed.utils import logger
+from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
+@dataclass
+class zero_model_state:
+ buffers: dict
+ param_shapes: dict
+ shared_params: list
+ ds_version: int
+ frozen_param_shapes: dict
+ frozen_param_fragments: dict
+
+
+debug = 0
+
+# load to cpu
+device = torch.device('cpu')
+
+
+def atoi(text):
+ return int(text) if text.isdigit() else text
+
+
+def natural_keys(text):
+ '''
+ alist.sort(key=natural_keys) sorts in human order
+ http://nedbatchelder.com/blog/200712/human_sorting.html
+ (See Toothy's implementation in the comments)
+ '''
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
+
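+# illustrative example: sorted(["rank_10_optim_states.pt", "rank_2_optim_states.pt"], key=natural_keys)
+# returns ["rank_2_optim_states.pt", "rank_10_optim_states.pt"] (2 before 10), unlike a plain lexicographic sort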
+
+def get_model_state_file(checkpoint_dir, zero_stage):
+ if not os.path.isdir(checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
+
+ # there should be only one file
+ if zero_stage <= 2:
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
+ elif zero_stage == 3:
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
+
+ if not os.path.exists(file):
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
+
+ return file
+
+
+def get_checkpoint_files(checkpoint_dir, glob_pattern):
+ # XXX: need to test that this simple glob rule works for multi-node setup too
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
+
+ if len(ckpt_files) == 0:
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
+
+ return ckpt_files
+
+
+def get_optim_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
+
+
+def get_model_state_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
+
+
+def parse_model_states(files):
+ zero_model_states = []
+ for file in files:
+ state_dict = torch.load(file, map_location=device)
+
+ if BUFFER_NAMES not in state_dict:
+ raise ValueError(f"{file} is not a model state checkpoint")
+ buffer_names = state_dict[BUFFER_NAMES]
+ if debug:
+ print("Found buffers:", buffer_names)
+
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
+ param_shapes = state_dict[PARAM_SHAPES]
+
+ # collect parameters that are included in param_shapes
+ param_names = []
+ for s in param_shapes:
+ for name in s.keys():
+ param_names.append(name)
+
+ # update with frozen parameters
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
+ if frozen_param_shapes is not None:
+ if debug:
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
+ param_names += list(frozen_param_shapes.keys())
+
+ # handle shared params
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
+
+ ds_version = state_dict.get(DS_VERSION, None)
+
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
+
+ z_model_state = zero_model_state(buffers=buffers,
+ param_shapes=param_shapes,
+ shared_params=shared_params,
+ ds_version=ds_version,
+ frozen_param_shapes=frozen_param_shapes,
+ frozen_param_fragments=frozen_param_fragments)
+ zero_model_states.append(z_model_state)
+
+ return zero_model_states
+
+
+def parse_optim_states(files, ds_checkpoint_dir):
+
+ total_files = len(files)
+ state_dicts = []
+ for f in files:
+ state_dict = torch.load(f, map_location=device)
+ # immediately discard the two potentially huge optimizer states, as we only care about the fp32
+ # master weights, and also handle the case where they were already removed by another helper script
+ state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
+ state_dicts.append(state_dict)
+
+ if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
+
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
+ # use the max of the partition_count to get the dp world_size.
+
+ if type(world_size) is list:
+ world_size = max(world_size)
+
+ if world_size != total_files:
+ raise ValueError(
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
+ )
+
+ # the groups are named differently in each stage
+ if zero_stage <= 2:
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
+ elif zero_stage == 3:
+ fp32_groups_key = FP32_FLAT_GROUPS
+ else:
+ raise ValueError(f"unknown zero stage {zero_stage}")
+
+ if zero_stage <= 2:
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
+ elif zero_stage == 3:
+ # if there is more than one param group, there will be multiple flattened tensors - one
+ # flattened tensor per group - for simplicity merge them into a single tensor
+ #
+ # XXX: could make the script more memory efficient for when there are multiple groups - it
+ # will require matching the sub-lists of param_shapes for each param group flattened tensor
+
+ fp32_flat_groups = [
+ torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
+ ]
+
+ return zero_stage, world_size, fp32_flat_groups
+
+
+def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir):
+ """
+ Returns fp32 state_dict reconstructed from ds checkpoint
+
+ Args:
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
+
+ """
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
+
+ optim_files = get_optim_files(ds_checkpoint_dir)
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
+
+ model_files = get_model_state_files(ds_checkpoint_dir)
+
+ zero_model_states = parse_model_states(model_files)
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
+
+ if zero_stage <= 2:
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states)
+ elif zero_stage == 3:
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states)
+
+
+def _zero2_merge_frozen_params(state_dict, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
+
+ if debug:
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ state_dict[name] = frozen_param_fragments[name]
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+
+ # Reconstruction protocol:
+ #
+ # In ZeRO-2 the fp32 master weights are partitioned across ranks, one flat partition per
+ # optimizer param group. Each group's per-rank partitions are concatenated back into a single
+ # flat fp32 vector, and every parameter is then sliced out of that vector using the shapes
+ # recorded in param_shapes, allowing for the 2*world_size alignment padding handled below.
+
+ if debug:
+ for i in range(world_size):
+ for j in range(len(fp32_flat_groups[0])):
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
+
+ # XXX: memory usage doubles here (zero2)
+ num_param_groups = len(fp32_flat_groups[0])
+ merged_single_partition_of_fp32_groups = []
+ for i in range(num_param_groups):
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
+ avail_numel = sum(
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
+
+ if debug:
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
+ # not asserting if there is a mismatch due to possible padding
+ print(f"Have {avail_numel} numels to process.")
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # an out-of-core computing solution
+ total_numel = 0
+ total_params = 0
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
+ offset = 0
+ avail_numel = full_single_fp32_vector.numel()
+ for name, shape in shapes.items():
+
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+ total_params += 1
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
+ offset += unpartitioned_numel
+
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
+ # live optimizer object, so we are checking that the numbers are within the right range
+ align_to = 2 * world_size
+
+ def zero2_align(x):
+ return align_to * math.ceil(x / align_to)
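+ # e.g. with world_size=4, align_to == 8, so zero2_align(10) == 16 and zero2_align(16) == 16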
+
+ if debug:
+ print(f"original offset={offset}, avail_numel={avail_numel}")
+
+ offset = zero2_align(offset)
+ avail_numel = zero2_align(avail_numel)
+
+ if debug:
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
+
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
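+ # each entry is a [shared_param_name, source_param_name] pair; the shared param is restored by aliasing the source tensor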
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def zero3_partitioned_param_info(unpartitioned_numel, world_size):
+ remainder = unpartitioned_numel % world_size
+ padding_numel = (world_size - remainder) if remainder else 0
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
+ return partitioned_numel, padding_numel
+
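+# illustrative example: zero3_partitioned_param_info(10, world_size=4) returns (3, 2):
+# each of the 4 ranks holds ceil(10/4) == 3 elements, i.e. 12 slots covering 10 real elements plus 2 of padding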
+
+def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ if debug:
+ for i in range(world_size):
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
+
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+ avail_numel = fp32_flat_groups[0].numel() * world_size
+ # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
+ # param, re-consolidating each param, while dealing with padding if any
+
+ # merge list of dicts, preserving order
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
+
+ if debug:
+ for i in range(world_size):
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
+
+ wanted_params = len(param_shapes)
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
+ # not asserting if there is a mismatch due to possible padding
+ avail_numel = fp32_flat_groups[0].numel() * world_size
+ print(f"Trainable params: Have {avail_numel} numels to process.")
+ print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ an out-of-core computing solution
+ offset = 0
+ total_numel = 0
+ total_params = 0
+ for name, shape in param_shapes.items():
+
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+ total_params += 1
+
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ # XXX: memory usage doubles here
+ state_dict[name] = torch.cat(
+ tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
+ 0).narrow(0, 0, unpartitioned_numel).view(shape)
+ offset += partitioned_numel
+
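+ # offset accumulated per-rank partitioned elements, so scale it by world_size to make it
+ # comparable with avail_numel, which counts elements across all ranks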
+ offset *= world_size
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
+
+ _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+ ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+ via a model hub.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+ Returns:
+ - pytorch ``state_dict``
+
+ Note: this approach may not work if your application doesn't have sufficient free CPU memory;
+ in that case you may need to use the offline approach with the ``zero_to_fp32.py`` script that
+ is saved with the checkpoint.
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+ # do the training and checkpoint saving
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
+ model = model.cpu() # move to cpu
+ model.load_state_dict(state_dict)
+ # submit to model hub or save the model to share with others
+
+ In this example the ``model`` will no longer be usable in the deepspeed context of the same
+ application, i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+ """
+ if tag is None:
+ latest_path = os.path.join(checkpoint_dir, 'latest')
+ if os.path.isfile(latest_path):
+ with open(latest_path, 'r') as fd:
+ tag = fd.read().strip()
+ else:
+ raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+ ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+ if not os.path.isdir(ds_checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+ return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir)
+
+
+def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+ loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder (one that contains the tag-folder, like ``global_step14``)
+ - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+ """
+
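+ # A minimal usage sketch (file names and tag below are illustrative, following the layout of
+ # checkpoints in this repo, e.g. ``checkpoint-600`` whose ``latest`` file contains ``global_step600``):
+ #   convert_zero_checkpoint_to_fp32_state_dict("checkpoint-600", "pytorch_model_fp32.bin", tag="global_step600")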
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+ print(f"Saving fp32 state dict to {output_file}")
+ torch.save(state_dict, output_file)
+
+
+def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+ """
+ 1. Put the provided model to cpu
+ 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+ 3. Load it into the provided model
+
+ Args:
+ - ``model``: the model object to update
+ - ``checkpoint_dir``: path to the desired checkpoint folder (one that contains the tag-folder, like ``global_step14``)
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+ Returns:
+ - ``model``: modified model
+
+ Make sure you have plenty of CPU memory available before you call this function. If you don't
+ have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
+ conveniently placed for you in the checkpoint folder.
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+ model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+ # submit to model hub or save the model to share with others
+
+ Note that once this has been run, the ``model`` will no longer be usable in the deepspeed context
+ of the same application, i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ """
+ logger.info(f"Extracting fp32 weights")
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+ logger.info(f"Overwriting model with fp32 weights")
+ model = model.cpu()
+ model.load_state_dict(state_dict, strict=False)
+
+ return model
+
+
+if __name__ == "__main__":
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("checkpoint_dir",
+ type=str,
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+ parser.add_argument(
+ "output_file",
+ type=str,
+ help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
+ parser.add_argument("-t",
+ "--tag",
+ type=str,
+ default=None,
+ help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+ args = parser.parse_args()
+
+ debug = args.debug
+
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file, tag=args.tag)
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/config.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..44e6d4e17930a42d0aa68dcd3790bd5f32ba4ec4
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/config.json
@@ -0,0 +1,73 @@
+{
+ "_name_or_path": "../pretrained-models/llava-v1.6-mistral-7b",
+ "architectures": [
+ "LlavaLlamaForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "freeze_mm_mlp_adapter": false,
+ "freeze_mm_vision_resampler": false,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "image_aspect_ratio": "pad",
+ "image_crop_resolution": 224,
+ "image_grid_pinpoints": [
+ [
+ 336,
+ 672
+ ],
+ [
+ 672,
+ 336
+ ],
+ [
+ 672,
+ 672
+ ],
+ [
+ 1008,
+ 336
+ ],
+ [
+ 336,
+ 1008
+ ]
+ ],
+ "image_split_resolution": 224,
+ "initializer_range": 0.02,
+ "intermediate_size": 14336,
+ "max_position_embeddings": 32768,
+ "mm_hidden_size": 1024,
+ "mm_patch_merge_type": "flat",
+ "mm_projector_lr": null,
+ "mm_projector_type": "mlp2x_gelu",
+ "mm_resampler_type": null,
+ "mm_use_im_patch_token": false,
+ "mm_use_im_start_end": false,
+ "mm_vision_select_feature": "patch",
+ "mm_vision_select_layer": -2,
+ "mm_vision_tower": "openai/clip-vit-large-patch14-336",
+ "mm_vision_tower_lr": 2e-06,
+ "model_type": "llava_llama",
+ "num_attention_heads": 32,
+ "num_hidden_layers": 32,
+ "num_key_value_heads": 8,
+ "pretraining_tp": 1,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "tokenizer_model_max_length": 2048,
+ "tokenizer_padding_side": "right",
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.37.2",
+ "tune_mm_mlp_adapter": false,
+ "tune_mm_vision_resampler": false,
+ "unfreeze_mm_vision_tower": true,
+ "use_cache": false,
+ "use_mm_proj": true,
+ "vocab_size": 32000
+}
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/generation_config.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..69b7806611a4865cd48c3e991dbd7d8312e0c5d3
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/generation_config.json
@@ -0,0 +1,6 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "transformers_version": "4.37.2"
+}
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/latest b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/latest
new file mode 100644
index 0000000000000000000000000000000000000000..12cae1adf3af8546b4141c6f62261c8e99839a54
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/latest
@@ -0,0 +1 @@
+global_step600
\ No newline at end of file
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/model.safetensors.index.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..927da5be7e6e3ec29d3a967a09ba6a421d7a2191
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/model.safetensors.index.json
@@ -0,0 +1,694 @@
+{
+ "metadata": {
+ "total_size": 15132446720
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00004-of-00004.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
+ "model.image_newline": "model-00001-of-00004.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.30.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.mm_projector.0.bias": "model-00003-of-00004.safetensors",
+ "model.mm_projector.0.weight": "model-00003-of-00004.safetensors",
+ "model.mm_projector.2.bias": "model-00003-of-00004.safetensors",
+ "model.mm_projector.2.weight": "model-00003-of-00004.safetensors",
+ "model.norm.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.embeddings.class_embedding": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.embeddings.patch_embedding.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.embeddings.position_embedding.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.post_layernorm.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.post_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.pre_layrnorm.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.pre_layrnorm.weight": "model-00003-of-00004.safetensors"
+ }
+}
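
The weight_map above assigns every parameter name to the shard file that stores it; loaders use it to open only the shards they actually need. A minimal sketch of resolving one tensor through the index (assuming the shards and index sit in a local checkpoint directory; the exact paths below are illustrative, not taken from this diff):

    import json
    from safetensors import safe_open

    ckpt_dir = "llava-v1.6-mistral-7b-unk-vqa-v1.0"  # illustrative local path
    with open(f"{ckpt_dir}/model.safetensors.index.json") as f:
        weight_map = json.load(f)["weight_map"]

    name = "model.vision_tower.vision_tower.vision_model.post_layernorm.weight"
    shard = weight_map[name]  # e.g. "model-00003-of-00004.safetensors"

    # Open just that shard and read the tensor lazily instead of loading all four shards.
    with safe_open(f"{ckpt_dir}/{shard}", framework="pt", device="cpu") as f:
        tensor = f.get_tensor(name)
    print(name, tuple(tensor.shape))
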
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/rng_state_0.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..b346349ce12dd5a17d4b91ed2a5722bb52550950
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ad8a35afd8967cbb748405387e44426e43ad127028e826eddc9b67d2ca873c85
+size 15984
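
The rng_state_*.pth entries here (and the scheduler and tokenizer binaries below) are Git LFS pointer files rather than the payloads themselves: each records only the LFS spec version, a sha256 object id, and the blob size in bytes, and the real file is fetched separately (e.g. via `git lfs pull`). A minimal sketch of parsing such a pointer and checking a fetched blob against it (standard library only; the local path is illustrative):

    import hashlib

    def parse_pointer(text: str) -> dict:
        # Pointer format: "version <url>\noid sha256:<hex>\nsize <bytes>"
        fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
        return {"oid": fields["oid"].split(":", 1)[1], "size": int(fields["size"])}

    def matches(blob_path: str, pointer: dict) -> bool:
        h = hashlib.sha256()
        with open(blob_path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                h.update(chunk)
        return h.hexdigest() == pointer["oid"]

    ptr = parse_pointer(
        "version https://git-lfs.github.com/spec/v1\n"
        "oid sha256:ad8a35afd8967cbb748405387e44426e43ad127028e826eddc9b67d2ca873c85\n"
        "size 15984"
    )
    print(matches("llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/rng_state_0.pth", ptr))
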
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/rng_state_1.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..68f3c6994456cb8d0592a5375d99503c8924b1c4
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f338ce80d7c441076bfc8c53b84067a0181f5a14e80c13d5acb8150b659f4d73
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/rng_state_2.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..be044f6ceeed587d30e80c2f72d5aa19fdc9947b
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c9fbc9fa428939be10b46779f0eb5cd833e0da426b1cbdee77b3a55b6952235b
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/rng_state_3.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..fc825249656a9b858782542bd3f4386250f1dfe0
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac55dba0b79d5fa4699d239da2f966d52040d576d31234ac8d4632e6956481bc
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/rng_state_4.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/rng_state_4.pth
new file mode 100644
index 0000000000000000000000000000000000000000..d30f52a44be563c152ae09db6ae934da6da0d3ed
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/rng_state_4.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af2d0c015100768ffa23faf3b6c2d54ea89eb045603e30e55cd211e06ff34972
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/rng_state_5.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/rng_state_5.pth
new file mode 100644
index 0000000000000000000000000000000000000000..c8715d27ab23ae545d58039cf949cc44ecc1da5e
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/rng_state_5.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c60a1b40608e34bc801c8231f97b81c53b5290dfaed1b9cd0ccbeca29574a991
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/rng_state_6.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/rng_state_6.pth
new file mode 100644
index 0000000000000000000000000000000000000000..1ed791b6ef76eadf0b0c55a5733411771e2ae027
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/rng_state_6.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3ad6a142a403eb9aafc4a3a9a856bca648fe31fd22d796867baca31fb13656aa
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/rng_state_7.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/rng_state_7.pth
new file mode 100644
index 0000000000000000000000000000000000000000..800c3bbbc5edf7db01a8316069d439c5fb8d8c30
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/rng_state_7.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:38bc23a138cc800b22881742c0f3f9a71731a9a7111c6058a0077e6274d21773
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/scheduler.pt b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..bc8e09dd6529621e2bdc33ce74d3188db2d8ddae
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:74b6cace88e962cf0b702e06db892454cfafe5f879159db76fc15fe0d18d83b0
+size 1064
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/special_tokens_map.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..14761dcf1466dc232bd41de9c21d4c617b15755e
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
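
This special_tokens_map follows the standard SentencePiece/Llama convention inherited from the Mistral base model, with padding apparently mapped onto the unknown token (pad_token_id is 0 in config.json). A minimal sketch of loading the checkpoint's tokenizer and confirming that mapping (assuming transformers is installed and the directory from this diff is available locally):

    from transformers import AutoTokenizer

    ckpt = "llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600"
    tok = AutoTokenizer.from_pretrained(ckpt, use_fast=False)

    # Expected: bos=<s>, eos=</s>, unk=<unk>, pad reusing <unk> (id 0).
    print(tok.bos_token, tok.eos_token, tok.unk_token, tok.pad_token)
    print(tok.pad_token_id, tok.model_max_length)  # 0, 2048
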
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/tokenizer.model b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..8b443ef19c2a19acc3ac64fb9c3db4a72921dff6
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+size 493443
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/tokenizer_config.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..23dcf70e8cfc9b16310b6ff3dc98fdbc5adc11f8
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/tokenizer_config.json
@@ -0,0 +1,44 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [],
+ "bos_token": "",
+ "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "legacy": true,
+ "model_max_length": 2048,
+ "pad_token": "",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "",
+ "use_default_system_prompt": false
+}
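
The chat_template above is essentially the stock Mistral-instruct Jinja template: it emits the BOS token, wraps each user turn in "[INST] ... [/INST]", appends EOS after each assistant turn, and raises on anything that is not a strictly alternating user/assistant conversation. A minimal sketch of rendering a prompt with it (assuming a transformers release that supports apply_chat_template, e.g. the 4.37.2 pinned in config.json):

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600")
    messages = [
        {"role": "user", "content": "Describe the image."},
        {"role": "assistant", "content": "There is a red car parked on the street."},
        {"role": "user", "content": "What color is the car?"},
    ]
    prompt = tok.apply_chat_template(messages, tokenize=False)
    # Roughly: "<s>[INST] Describe the image. [/INST]There is a red car ...</s>[INST] What color is the car? [/INST]"
    print(prompt)
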
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/trainer_state.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..9896d385cff31e34c7d631b81f233f92d830b965
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/trainer_state.json
@@ -0,0 +1,3621 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.1538461538461537,
+ "eval_steps": 500,
+ "global_step": 600,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.0,
+ "learning_rate": 6.25e-07,
+ "loss": 3.7473,
+ "step": 1
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 1.25e-06,
+ "loss": 0.0,
+ "step": 2
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 1.8750000000000003e-06,
+ "loss": 0.0,
+ "step": 3
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 2.5e-06,
+ "loss": 0.0,
+ "step": 4
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 3.125e-06,
+ "loss": 0.0,
+ "step": 5
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 3.7500000000000005e-06,
+ "loss": 0.0,
+ "step": 6
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 4.3750000000000005e-06,
+ "loss": 0.0,
+ "step": 7
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 5e-06,
+ "loss": 0.0,
+ "step": 8
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 5.625e-06,
+ "loss": 0.0,
+ "step": 9
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 6.25e-06,
+ "loss": 0.0,
+ "step": 10
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 6.875e-06,
+ "loss": 0.0,
+ "step": 11
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 7.500000000000001e-06,
+ "loss": 0.0,
+ "step": 12
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 8.125000000000001e-06,
+ "loss": 0.0,
+ "step": 13
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 8.750000000000001e-06,
+ "loss": 0.0,
+ "step": 14
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 9.375000000000001e-06,
+ "loss": 0.0,
+ "step": 15
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 1e-05,
+ "loss": 0.0,
+ "step": 16
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 1.0625e-05,
+ "loss": 0.0,
+ "step": 17
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 1.125e-05,
+ "loss": 0.0,
+ "step": 18
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.1875e-05,
+ "loss": 0.0,
+ "step": 19
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.25e-05,
+ "loss": 0.0,
+ "step": 20
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.3125e-05,
+ "loss": 0.0,
+ "step": 21
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.375e-05,
+ "loss": 0.0,
+ "step": 22
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.4375e-05,
+ "loss": 0.0,
+ "step": 23
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.5000000000000002e-05,
+ "loss": 0.0,
+ "step": 24
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.5625e-05,
+ "loss": 0.0,
+ "step": 25
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.6250000000000002e-05,
+ "loss": 0.0,
+ "step": 26
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.6875e-05,
+ "loss": 0.0,
+ "step": 27
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.7500000000000002e-05,
+ "loss": 0.0,
+ "step": 28
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.8125e-05,
+ "loss": 0.0,
+ "step": 29
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.8750000000000002e-05,
+ "loss": 0.0,
+ "step": 30
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.9375e-05,
+ "loss": 0.0,
+ "step": 31
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 2e-05,
+ "loss": 0.0,
+ "step": 32
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.9999951432210905e-05,
+ "loss": 0.0,
+ "step": 33
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.9999805729315383e-05,
+ "loss": 0.0,
+ "step": 34
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.999956289272873e-05,
+ "loss": 0.0,
+ "step": 35
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.999922292480975e-05,
+ "loss": 0.0,
+ "step": 36
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.9998785828860744e-05,
+ "loss": 0.0,
+ "step": 37
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.9998251609127465e-05,
+ "loss": 0.0,
+ "step": 38
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.999762027079909e-05,
+ "loss": 0.0,
+ "step": 39
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9996891820008165e-05,
+ "loss": 0.0,
+ "step": 40
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9996066263830533e-05,
+ "loss": 0.0,
+ "step": 41
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9995143610285275e-05,
+ "loss": 0.0,
+ "step": 42
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9994123868334655e-05,
+ "loss": 0.0,
+ "step": 43
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9993007047883988e-05,
+ "loss": 0.0,
+ "step": 44
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.999179315978157e-05,
+ "loss": 0.0,
+ "step": 45
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.999048221581858e-05,
+ "loss": 0.0,
+ "step": 46
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.9989074228728942e-05,
+ "loss": 0.0,
+ "step": 47
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.9987569212189224e-05,
+ "loss": 0.0,
+ "step": 48
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.9985967180818493e-05,
+ "loss": 0.0,
+ "step": 49
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.998426815017817e-05,
+ "loss": 0.0,
+ "step": 50
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.998247213677188e-05,
+ "loss": 0.0,
+ "step": 51
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.9980579158045322e-05,
+ "loss": 0.0,
+ "step": 52
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.9978589232386036e-05,
+ "loss": 0.0,
+ "step": 53
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.997650237912329e-05,
+ "loss": 0.0,
+ "step": 54
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.997431861852785e-05,
+ "loss": 0.0,
+ "step": 55
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.9972037971811802e-05,
+ "loss": 0.0,
+ "step": 56
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.996966046112834e-05,
+ "loss": 0.0,
+ "step": 57
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.996718610957155e-05,
+ "loss": 0.0,
+ "step": 58
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.9964614941176194e-05,
+ "loss": 0.0,
+ "step": 59
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.9961946980917457e-05,
+ "loss": 0.0,
+ "step": 60
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.995918225471073e-05,
+ "loss": 0.0,
+ "step": 61
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.9956320789411338e-05,
+ "loss": 0.0,
+ "step": 62
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.9953362612814294e-05,
+ "loss": 0.0,
+ "step": 63
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.9950307753654016e-05,
+ "loss": 0.0,
+ "step": 64
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.994715624160405e-05,
+ "loss": 0.0,
+ "step": 65
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.99439081072768e-05,
+ "loss": 0.0,
+ "step": 66
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.9940563382223196e-05,
+ "loss": 0.0,
+ "step": 67
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.9937122098932428e-05,
+ "loss": 0.0,
+ "step": 68
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.9933584290831593e-05,
+ "loss": 0.0,
+ "step": 69
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.9929949992285397e-05,
+ "loss": 0.0,
+ "step": 70
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.992621923859581e-05,
+ "loss": 0.0,
+ "step": 71
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.9922392066001724e-05,
+ "loss": 0.0,
+ "step": 72
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.99184685116786e-05,
+ "loss": 0.0,
+ "step": 73
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.9914448613738107e-05,
+ "loss": 0.0,
+ "step": 74
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.991033241122776e-05,
+ "loss": 0.0,
+ "step": 75
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9906119944130527e-05,
+ "loss": 0.0,
+ "step": 76
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9901811253364458e-05,
+ "loss": 0.0,
+ "step": 77
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9897406380782262e-05,
+ "loss": 0.0,
+ "step": 78
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.989290536917093e-05,
+ "loss": 0.0,
+ "step": 79
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9888308262251286e-05,
+ "loss": 0.0,
+ "step": 80
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.988361510467761e-05,
+ "loss": 0.0,
+ "step": 81
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.9878825942037147e-05,
+ "loss": 0.0,
+ "step": 82
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.9873940820849714e-05,
+ "loss": 0.0,
+ "step": 83
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.9868959788567213e-05,
+ "loss": 0.0,
+ "step": 84
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.9863882893573188e-05,
+ "loss": 0.0,
+ "step": 85
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.985871018518236e-05,
+ "loss": 0.0,
+ "step": 86
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.9853441713640123e-05,
+ "loss": 0.0,
+ "step": 87
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.9848077530122083e-05,
+ "loss": 0.0,
+ "step": 88
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.9842617686733546e-05,
+ "loss": 0.0,
+ "step": 89
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.9837062236509013e-05,
+ "loss": 0.0,
+ "step": 90
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.983141123341168e-05,
+ "loss": 0.0,
+ "step": 91
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.9825664732332886e-05,
+ "loss": 0.0,
+ "step": 92
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.9819822789091597e-05,
+ "loss": 0.0,
+ "step": 93
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.981388546043388e-05,
+ "loss": 0.0,
+ "step": 94
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.9807852804032306e-05,
+ "loss": 0.0,
+ "step": 95
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.9801724878485438e-05,
+ "loss": 0.0,
+ "step": 96
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.979550174331724e-05,
+ "loss": 0.0,
+ "step": 97
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.9789183458976485e-05,
+ "loss": 0.0,
+ "step": 98
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.97827700868362e-05,
+ "loss": 0.0,
+ "step": 99
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.977626168919305e-05,
+ "loss": 0.0,
+ "step": 100
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.9769658329266718e-05,
+ "loss": 0.0,
+ "step": 101
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9762960071199334e-05,
+ "loss": 0.0,
+ "step": 102
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9756166980054812e-05,
+ "loss": 0.0,
+ "step": 103
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9749279121818235e-05,
+ "loss": 0.0,
+ "step": 104
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9742296563395218e-05,
+ "loss": 0.0,
+ "step": 105
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9735219372611232e-05,
+ "loss": 0.0,
+ "step": 106
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.9728047618210995e-05,
+ "loss": 0.0,
+ "step": 107
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.9720781369857747e-05,
+ "loss": 0.0,
+ "step": 108
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.9713420698132614e-05,
+ "loss": 0.0,
+ "step": 109
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.970596567453391e-05,
+ "loss": 0.0,
+ "step": 110
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.9698416371476434e-05,
+ "loss": 0.0,
+ "step": 111
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.969077286229078e-05,
+ "loss": 0.0,
+ "step": 112
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.9683035221222617e-05,
+ "loss": 0.0,
+ "step": 113
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.9675203523431964e-05,
+ "loss": 0.0,
+ "step": 114
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.9667277844992476e-05,
+ "loss": 0.0,
+ "step": 115
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.9659258262890683e-05,
+ "loss": 0.0,
+ "step": 116
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.9651144855025265e-05,
+ "loss": 0.0,
+ "step": 117
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.964293770020628e-05,
+ "loss": 0.0,
+ "step": 118
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.9634636878154393e-05,
+ "loss": 0.0,
+ "step": 119
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.962624246950012e-05,
+ "loss": 0.0,
+ "step": 120
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.9617754555783045e-05,
+ "loss": 0.0,
+ "step": 121
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.9609173219450998e-05,
+ "loss": 0.0,
+ "step": 122
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.960049854385929e-05,
+ "loss": 0.0,
+ "step": 123
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.9591730613269878e-05,
+ "loss": 0.0,
+ "step": 124
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.9582869512850576e-05,
+ "loss": 0.0,
+ "step": 125
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.957391532867418e-05,
+ "loss": 0.0,
+ "step": 126
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.956486814771769e-05,
+ "loss": 0.0,
+ "step": 127
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.955572805786141e-05,
+ "loss": 0.0,
+ "step": 128
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.9546495147888134e-05,
+ "loss": 0.0,
+ "step": 129
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.953716950748227e-05,
+ "loss": 0.0,
+ "step": 130
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.9527751227228964e-05,
+ "loss": 0.0,
+ "step": 131
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.9518240398613226e-05,
+ "loss": 0.0,
+ "step": 132
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.9508637114019037e-05,
+ "loss": 0.0,
+ "step": 133
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.9498941466728462e-05,
+ "loss": 0.0,
+ "step": 134
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.9489153550920726e-05,
+ "loss": 0.0,
+ "step": 135
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.947927346167132e-05,
+ "loss": 0.0,
+ "step": 136
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.946930129495106e-05,
+ "loss": 0.0,
+ "step": 137
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.945923714762516e-05,
+ "loss": 0.0,
+ "step": 138
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.9449081117452304e-05,
+ "loss": 0.0,
+ "step": 139
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.9438833303083677e-05,
+ "loss": 0.0,
+ "step": 140
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.9428493804062013e-05,
+ "loss": 0.0,
+ "step": 141
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.9418062720820636e-05,
+ "loss": 0.0,
+ "step": 142
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9407540154682473e-05,
+ "loss": 0.0,
+ "step": 143
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9396926207859085e-05,
+ "loss": 0.0,
+ "step": 144
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9386220983449652e-05,
+ "loss": 0.0,
+ "step": 145
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9375424585439994e-05,
+ "loss": 0.0,
+ "step": 146
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9364537118701542e-05,
+ "loss": 0.0,
+ "step": 147
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.935355868899034e-05,
+ "loss": 0.0,
+ "step": 148
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.9342489402945997e-05,
+ "loss": 0.0,
+ "step": 149
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.9331329368090664e-05,
+ "loss": 0.0,
+ "step": 150
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.932007869282799e-05,
+ "loss": 0.0,
+ "step": 151
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.9308737486442045e-05,
+ "loss": 0.0,
+ "step": 152
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.9297305859096305e-05,
+ "loss": 0.0,
+ "step": 153
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.9285783921832537e-05,
+ "loss": 0.0,
+ "step": 154
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.927417178656975e-05,
+ "loss": 0.0,
+ "step": 155
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.926246956610309e-05,
+ "loss": 0.0,
+ "step": 156
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.9250677374102752e-05,
+ "loss": 0.0,
+ "step": 157
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.9238795325112867e-05,
+ "loss": 0.0,
+ "step": 158
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.9226823534550418e-05,
+ "loss": 0.0,
+ "step": 159
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.921476211870408e-05,
+ "loss": 0.0,
+ "step": 160
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.9202611194733107e-05,
+ "loss": 0.0,
+ "step": 161
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.9190370880666206e-05,
+ "loss": 0.0,
+ "step": 162
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.9178041295400383e-05,
+ "loss": 0.0,
+ "step": 163
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.9165622558699763e-05,
+ "loss": 0.0,
+ "step": 164
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.9153114791194475e-05,
+ "loss": 0.0,
+ "step": 165
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.9140518114379433e-05,
+ "loss": 0.0,
+ "step": 166
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.912783265061319e-05,
+ "loss": 0.0,
+ "step": 167
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.9115058523116734e-05,
+ "loss": 0.0,
+ "step": 168
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.9102195855972287e-05,
+ "loss": 0.0,
+ "step": 169
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.908924477412211e-05,
+ "loss": 0.0,
+ "step": 170
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.9076205403367287e-05,
+ "loss": 0.0,
+ "step": 171
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.9063077870366504e-05,
+ "loss": 0.0,
+ "step": 172
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.90498623026348e-05,
+ "loss": 0.0,
+ "step": 173
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.903655882854237e-05,
+ "loss": 0.0,
+ "step": 174
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.9023167577313267e-05,
+ "loss": 0.0,
+ "step": 175
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.900968867902419e-05,
+ "loss": 0.0,
+ "step": 176
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.8996122264603202e-05,
+ "loss": 0.0,
+ "step": 177
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.898246846582844e-05,
+ "loss": 0.0,
+ "step": 178
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.8968727415326885e-05,
+ "loss": 0.0,
+ "step": 179
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.895489924657301e-05,
+ "loss": 0.0,
+ "step": 180
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.894098409388754e-05,
+ "loss": 0.0,
+ "step": 181
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.8926982092436117e-05,
+ "loss": 0.0,
+ "step": 182
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.8912893378227984e-05,
+ "loss": 0.0,
+ "step": 183
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.8898718088114688e-05,
+ "loss": 0.0,
+ "step": 184
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.8884456359788725e-05,
+ "loss": 0.0,
+ "step": 185
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.887010833178222e-05,
+ "loss": 0.0,
+ "step": 186
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.8855674143465567e-05,
+ "loss": 0.0,
+ "step": 187
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.8841153935046098e-05,
+ "loss": 0.0,
+ "step": 188
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.8826547847566692e-05,
+ "loss": 0.0,
+ "step": 189
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.8811856022904423e-05,
+ "loss": 0.0,
+ "step": 190
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.8797078603769184e-05,
+ "loss": 0.0,
+ "step": 191
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.8782215733702286e-05,
+ "loss": 0.0,
+ "step": 192
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.876726755707508e-05,
+ "loss": 0.0,
+ "step": 193
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.8752234219087538e-05,
+ "loss": 0.0,
+ "step": 194
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8737115865766865e-05,
+ "loss": 0.0,
+ "step": 195
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8721912643966055e-05,
+ "loss": 0.0,
+ "step": 196
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8706624701362485e-05,
+ "loss": 0.0,
+ "step": 197
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8691252186456465e-05,
+ "loss": 0.0,
+ "step": 198
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8675795248569816e-05,
+ "loss": 0.0,
+ "step": 199
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.866025403784439e-05,
+ "loss": 0.0,
+ "step": 200
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 1.8644628705240636e-05,
+ "loss": 0.0,
+ "step": 201
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 1.862891940253613e-05,
+ "loss": 0.0,
+ "step": 202
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 1.8613126282324092e-05,
+ "loss": 0.0,
+ "step": 203
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 1.8597249498011906e-05,
+ "loss": 0.0,
+ "step": 204
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 1.858128920381963e-05,
+ "loss": 0.0,
+ "step": 205
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 1.8565245554778516e-05,
+ "loss": 0.0,
+ "step": 206
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 1.854911870672947e-05,
+ "loss": 0.0,
+ "step": 207
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 1.8532908816321557e-05,
+ "loss": 0.0,
+ "step": 208
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 1.8516616041010495e-05,
+ "loss": 0.0,
+ "step": 209
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 1.8500240539057093e-05,
+ "loss": 0.0,
+ "step": 210
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 1.848378246952574e-05,
+ "loss": 0.0,
+ "step": 211
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 1.8467241992282842e-05,
+ "loss": 0.0,
+ "step": 212
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 1.8450619267995283e-05,
+ "loss": 0.0,
+ "step": 213
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 1.843391445812886e-05,
+ "loss": 0.0,
+ "step": 214
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 1.84171277249467e-05,
+ "loss": 0.0,
+ "step": 215
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.8400259231507716e-05,
+ "loss": 0.0,
+ "step": 216
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.8383309141664992e-05,
+ "loss": 0.0,
+ "step": 217
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.83662776200642e-05,
+ "loss": 0.0,
+ "step": 218
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.8349164832142015e-05,
+ "loss": 0.0,
+ "step": 219
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.833197094412449e-05,
+ "loss": 0.0,
+ "step": 220
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.8314696123025456e-05,
+ "loss": 0.0,
+ "step": 221
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 1.8297340536644877e-05,
+ "loss": 0.0,
+ "step": 222
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 1.827990435356725e-05,
+ "loss": 0.0,
+ "step": 223
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 1.826238774315995e-05,
+ "loss": 0.0,
+ "step": 224
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 1.8244790875571582e-05,
+ "loss": 0.0,
+ "step": 225
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 1.8227113921730336e-05,
+ "loss": 0.0,
+ "step": 226
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 1.8209357053342325e-05,
+ "loss": 0.0,
+ "step": 227
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 1.819152044288992e-05,
+ "loss": 0.0,
+ "step": 228
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 1.8173604263630066e-05,
+ "loss": 0.0,
+ "step": 229
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 1.8155608689592604e-05,
+ "loss": 0.0,
+ "step": 230
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 1.8137533895578585e-05,
+ "loss": 0.0,
+ "step": 231
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 1.811938005715857e-05,
+ "loss": 0.0,
+ "step": 232
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 1.8101147350670905e-05,
+ "loss": 0.0,
+ "step": 233
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 1.8082835953220055e-05,
+ "loss": 0.0,
+ "step": 234
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 1.806444604267483e-05,
+ "loss": 0.0,
+ "step": 235
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 1.8045977797666685e-05,
+ "loss": 0.0,
+ "step": 236
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 1.8027431397587993e-05,
+ "loss": 0.0,
+ "step": 237
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 1.8008807022590283e-05,
+ "loss": 0.0,
+ "step": 238
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 1.7990104853582494e-05,
+ "loss": 0.0,
+ "step": 239
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 1.7971325072229227e-05,
+ "loss": 0.0,
+ "step": 240
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 1.7952467860948975e-05,
+ "loss": 0.0,
+ "step": 241
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.7933533402912354e-05,
+ "loss": 0.0,
+ "step": 242
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.791452188204031e-05,
+ "loss": 0.0,
+ "step": 243
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.7895433483002356e-05,
+ "loss": 0.0,
+ "step": 244
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.7876268391214756e-05,
+ "loss": 0.0,
+ "step": 245
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.785702679283874e-05,
+ "loss": 0.0,
+ "step": 246
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.7837708874778683e-05,
+ "loss": 0.0,
+ "step": 247
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 1.78183148246803e-05,
+ "loss": 0.0,
+ "step": 248
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 1.7798844830928818e-05,
+ "loss": 0.0,
+ "step": 249
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 1.777929908264715e-05,
+ "loss": 0.0,
+ "step": 250
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 1.775967776969405e-05,
+ "loss": 0.0,
+ "step": 251
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 1.7739981082662275e-05,
+ "loss": 0.0,
+ "step": 252
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 1.772020921287674e-05,
+ "loss": 0.0,
+ "step": 253
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 1.7700362352392632e-05,
+ "loss": 0.0,
+ "step": 254
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 1.7680440693993586e-05,
+ "loss": 0.0,
+ "step": 255
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 1.766044443118978e-05,
+ "loss": 0.0,
+ "step": 256
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 1.7640373758216075e-05,
+ "loss": 0.0,
+ "step": 257
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 1.762022887003011e-05,
+ "loss": 0.0,
+ "step": 258
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 1.7600009962310417e-05,
+ "loss": 0.0,
+ "step": 259
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 1.757971723145453e-05,
+ "loss": 0.0,
+ "step": 260
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 1.7559350874577066e-05,
+ "loss": 0.0,
+ "step": 261
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 1.75389110895078e-05,
+ "loss": 0.0,
+ "step": 262
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 1.7518398074789776e-05,
+ "loss": 0.0,
+ "step": 263
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 1.7497812029677344e-05,
+ "loss": 0.0,
+ "step": 264
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 1.7477153154134244e-05,
+ "loss": 0.0,
+ "step": 265
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 1.7456421648831658e-05,
+ "loss": 0.0,
+ "step": 266
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 1.743561771514626e-05,
+ "loss": 0.0,
+ "step": 267
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 1.741474155515827e-05,
+ "loss": 0.0,
+ "step": 268
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 1.739379337164946e-05,
+ "loss": 0.0,
+ "step": 269
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 1.737277336810124e-05,
+ "loss": 0.0,
+ "step": 270
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 1.7351681748692622e-05,
+ "loss": 0.0,
+ "step": 271
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 1.7330518718298263e-05,
+ "loss": 0.0,
+ "step": 272
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7309284482486494e-05,
+ "loss": 0.0,
+ "step": 273
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7287979247517285e-05,
+ "loss": 0.0,
+ "step": 274
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7266603220340273e-05,
+ "loss": 0.0,
+ "step": 275
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7245156608592727e-05,
+ "loss": 0.0,
+ "step": 276
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7223639620597556e-05,
+ "loss": 0.0,
+ "step": 277
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7202052465361268e-05,
+ "loss": 0.0,
+ "step": 278
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 1.718039535257194e-05,
+ "loss": 0.0,
+ "step": 279
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 1.7158668492597186e-05,
+ "loss": 0.0,
+ "step": 280
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 1.7136872096482123e-05,
+ "loss": 0.0,
+ "step": 281
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 1.7115006375947304e-05,
+ "loss": 0.0,
+ "step": 282
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 1.7093071543386667e-05,
+ "loss": 0.0,
+ "step": 283
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 1.7071067811865477e-05,
+ "loss": 0.0,
+ "step": 284
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 1.7048995395118253e-05,
+ "loss": 0.0,
+ "step": 285
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 1.7026854507546694e-05,
+ "loss": 0.0,
+ "step": 286
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 1.7004645364217584e-05,
+ "loss": 0.0,
+ "step": 287
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 1.698236818086073e-05,
+ "loss": 0.0,
+ "step": 288
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 1.6960023173866834e-05,
+ "loss": 0.0,
+ "step": 289
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 1.693761056028542e-05,
+ "loss": 0.0,
+ "step": 290
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 1.6915130557822698e-05,
+ "loss": 0.0,
+ "step": 291
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 1.689258338483947e-05,
+ "loss": 0.0,
+ "step": 292
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 1.686996926034902e-05,
+ "loss": 0.0,
+ "step": 293
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.6847288404014937e-05,
+ "loss": 0.0,
+ "step": 294
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.682454103614904e-05,
+ "loss": 0.0,
+ "step": 295
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.6801727377709195e-05,
+ "loss": 0.0,
+ "step": 296
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.67788476502972e-05,
+ "loss": 0.0,
+ "step": 297
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.6755902076156606e-05,
+ "loss": 0.0,
+ "step": 298
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.6732890878170573e-05,
+ "loss": 0.0,
+ "step": 299
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 1.67098142798597e-05,
+ "loss": 0.0,
+ "step": 300
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 1.668667250537987e-05,
+ "loss": 0.0,
+ "step": 301
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 1.6663465779520042e-05,
+ "loss": 0.0,
+ "step": 302
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 1.6640194327700087e-05,
+ "loss": 0.0,
+ "step": 303
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 1.6616858375968596e-05,
+ "loss": 0.0,
+ "step": 304
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 1.659345815100069e-05,
+ "loss": 0.0,
+ "step": 305
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 1.6569993880095807e-05,
+ "loss": 0.0,
+ "step": 306
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 1.6546465791175498e-05,
+ "loss": 0.0,
+ "step": 307
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 1.6522874112781213e-05,
+ "loss": 0.0,
+ "step": 308
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 1.6499219074072087e-05,
+ "loss": 0.0,
+ "step": 309
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 1.6475500904822707e-05,
+ "loss": 0.0,
+ "step": 310
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 1.645171983542088e-05,
+ "loss": 0.0,
+ "step": 311
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 1.6427876096865394e-05,
+ "loss": 0.0,
+ "step": 312
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 1.640396992076379e-05,
+ "loss": 0.0,
+ "step": 313
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 1.6380001539330088e-05,
+ "loss": 0.0,
+ "step": 314
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 1.6355971185382547e-05,
+ "loss": 0.0,
+ "step": 315
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 1.6331879092341402e-05,
+ "loss": 0.0,
+ "step": 316
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 1.6307725494226586e-05,
+ "loss": 0.0,
+ "step": 317
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 1.6283510625655474e-05,
+ "loss": 0.0,
+ "step": 318
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 1.6259234721840595e-05,
+ "loss": 0.0,
+ "step": 319
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 1.6234898018587336e-05,
+ "loss": 0.0,
+ "step": 320
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 1.6210500752291682e-05,
+ "loss": 0.0,
+ "step": 321
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 1.6186043159937884e-05,
+ "loss": 0.0,
+ "step": 322
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 1.616152547909618e-05,
+ "loss": 0.0,
+ "step": 323
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 1.6136947947920477e-05,
+ "loss": 0.0,
+ "step": 324
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 1.611231080514605e-05,
+ "loss": 0.0,
+ "step": 325
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 1.608761429008721e-05,
+ "loss": 0.0,
+ "step": 326
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 1.606285864263498e-05,
+ "loss": 0.0,
+ "step": 327
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 1.6038044103254775e-05,
+ "loss": 0.0,
+ "step": 328
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 1.601317091298406e-05,
+ "loss": 0.0,
+ "step": 329
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 1.5988239313430004e-05,
+ "loss": 0.0,
+ "step": 330
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 1.5963249546767144e-05,
+ "loss": 0.0,
+ "step": 331
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 1.5938201855735017e-05,
+ "loss": 0.0,
+ "step": 332
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 1.5913096483635827e-05,
+ "loss": 0.0,
+ "step": 333
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 1.5887933674332048e-05,
+ "loss": 0.0,
+ "step": 334
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 1.5862713672244092e-05,
+ "loss": 0.0,
+ "step": 335
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 1.5837436722347902e-05,
+ "loss": 0.0,
+ "step": 336
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 1.5812103070172592e-05,
+ "loss": 0.0,
+ "step": 337
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 1.578671296179806e-05,
+ "loss": 0.0,
+ "step": 338
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 1.5761266643852587e-05,
+ "loss": 0.0,
+ "step": 339
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 1.573576436351046e-05,
+ "loss": 0.0,
+ "step": 340
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 1.5710206368489555e-05,
+ "loss": 0.0,
+ "step": 341
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 1.5684592907048925e-05,
+ "loss": 0.0,
+ "step": 342
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 1.5658924227986415e-05,
+ "loss": 0.0,
+ "step": 343
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 1.563320058063622e-05,
+ "loss": 0.0,
+ "step": 344
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 1.560742221486648e-05,
+ "loss": 0.0,
+ "step": 345
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 1.5581589381076843e-05,
+ "loss": 0.0,
+ "step": 346
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 1.5555702330196024e-05,
+ "loss": 0.0,
+ "step": 347
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 1.5529761313679396e-05,
+ "loss": 0.0,
+ "step": 348
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 1.5503766583506522e-05,
+ "loss": 0.0,
+ "step": 349
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 1.5477718392178716e-05,
+ "loss": 0.0,
+ "step": 350
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 1.545161699271659e-05,
+ "loss": 0.0,
+ "step": 351
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 1.5425462638657597e-05,
+ "loss": 0.0,
+ "step": 352
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 1.5399255584053568e-05,
+ "loss": 0.0,
+ "step": 353
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 1.5372996083468242e-05,
+ "loss": 0.0,
+ "step": 354
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 1.5346684391974792e-05,
+ "loss": 0.0,
+ "step": 355
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 1.5320320765153367e-05,
+ "loss": 0.0,
+ "step": 356
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 1.529390545908857e-05,
+ "loss": 0.0,
+ "step": 357
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 1.526743873036701e-05,
+ "loss": 0.0,
+ "step": 358
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 1.5240920836074777e-05,
+ "loss": 0.0,
+ "step": 359
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 1.5214352033794981e-05,
+ "loss": 0.0,
+ "step": 360
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 1.5187732581605217e-05,
+ "loss": 0.0,
+ "step": 361
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 1.5161062738075068e-05,
+ "loss": 0.0,
+ "step": 362
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 1.5134342762263606e-05,
+ "loss": 0.0,
+ "step": 363
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 1.5107572913716859e-05,
+ "loss": 0.0,
+ "step": 364
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 1.5080753452465296e-05,
+ "loss": 0.0,
+ "step": 365
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 1.505388463902131e-05,
+ "loss": 0.0,
+ "step": 366
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 1.502696673437667e-05,
+ "loss": 0.0,
+ "step": 367
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 1.5000000000000002e-05,
+ "loss": 0.0,
+ "step": 368
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 1.4972984697834238e-05,
+ "loss": 0.0,
+ "step": 369
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 1.4945921090294076e-05,
+ "loss": 0.0,
+ "step": 370
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 1.4918809440263435e-05,
+ "loss": 0.0,
+ "step": 371
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 1.4891650011092896e-05,
+ "loss": 0.0,
+ "step": 372
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 1.486444306659714e-05,
+ "loss": 0.0,
+ "step": 373
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 1.4837188871052399e-05,
+ "loss": 0.0,
+ "step": 374
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 1.4809887689193878e-05,
+ "loss": 0.0,
+ "step": 375
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 1.4782539786213184e-05,
+ "loss": 0.0,
+ "step": 376
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 1.4755145427755755e-05,
+ "loss": 0.0,
+ "step": 377
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 1.4727704879918272e-05,
+ "loss": 0.0,
+ "step": 378
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 1.4700218409246087e-05,
+ "loss": 0.0,
+ "step": 379
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 1.4672686282730622e-05,
+ "loss": 0.0,
+ "step": 380
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 1.4645108767806778e-05,
+ "loss": 0.0,
+ "step": 381
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 1.4617486132350343e-05,
+ "loss": 0.0,
+ "step": 382
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 1.4589818644675378e-05,
+ "loss": 0.0,
+ "step": 383
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 1.4562106573531632e-05,
+ "loss": 0.0,
+ "step": 384
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 1.4534350188101905e-05,
+ "loss": 0.0,
+ "step": 385
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 1.4506549757999456e-05,
+ "loss": 0.0,
+ "step": 386
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 1.4478705553265363e-05,
+ "loss": 0.0,
+ "step": 387
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 1.4450817844365924e-05,
+ "loss": 0.0,
+ "step": 388
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 1.4422886902190014e-05,
+ "loss": 0.0,
+ "step": 389
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 1.4394912998046451e-05,
+ "loss": 0.0,
+ "step": 390
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 1.436689640366137e-05,
+ "loss": 0.0,
+ "step": 391
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 1.4338837391175582e-05,
+ "loss": 0.0,
+ "step": 392
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 1.4310736233141926e-05,
+ "loss": 0.0,
+ "step": 393
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 1.4282593202522627e-05,
+ "loss": 0.0,
+ "step": 394
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 1.4254408572686642e-05,
+ "loss": 0.0,
+ "step": 395
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 1.4226182617406996e-05,
+ "loss": 0.0,
+ "step": 396
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 1.4197915610858143e-05,
+ "loss": 0.0,
+ "step": 397
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 1.4169607827613284e-05,
+ "loss": 0.0,
+ "step": 398
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 1.4141259542641706e-05,
+ "loss": 0.0,
+ "step": 399
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 1.4112871031306118e-05,
+ "loss": 0.0,
+ "step": 400
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 1.4084442569359964e-05,
+ "loss": 0.0,
+ "step": 401
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 1.4055974432944753e-05,
+ "loss": 0.0,
+ "step": 402
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 1.4027466898587375e-05,
+ "loss": 0.0,
+ "step": 403
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 1.3998920243197408e-05,
+ "loss": 0.0,
+ "step": 404
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 1.3970334744064451e-05,
+ "loss": 0.0,
+ "step": 405
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 1.3941710678855396e-05,
+ "loss": 0.0,
+ "step": 406
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 1.391304832561175e-05,
+ "loss": 0.0,
+ "step": 407
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 1.3884347962746949e-05,
+ "loss": 0.0,
+ "step": 408
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 1.3855609869043618e-05,
+ "loss": 0.0,
+ "step": 409
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 1.3826834323650899e-05,
+ "loss": 0.0,
+ "step": 410
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 1.3798021606081713e-05,
+ "loss": 0.0,
+ "step": 411
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 1.3769171996210053e-05,
+ "loss": 0.0,
+ "step": 412
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 1.3740285774268282e-05,
+ "loss": 0.0,
+ "step": 413
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 1.371136322084438e-05,
+ "loss": 0.0,
+ "step": 414
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 1.3682404616879246e-05,
+ "loss": 0.0,
+ "step": 415
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 1.3653410243663953e-05,
+ "loss": 0.0,
+ "step": 416
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 1.3624380382837017e-05,
+ "loss": 0.0,
+ "step": 417
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 1.3595315316381676e-05,
+ "loss": 0.0,
+ "step": 418
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 1.3566215326623131e-05,
+ "loss": 0.0,
+ "step": 419
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 1.3537080696225815e-05,
+ "loss": 0.0,
+ "step": 420
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 1.3507911708190646e-05,
+ "loss": 0.0,
+ "step": 421
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 1.3478708645852272e-05,
+ "loss": 0.0,
+ "step": 422
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 1.3449471792876333e-05,
+ "loss": 0.0,
+ "step": 423
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 1.342020143325669e-05,
+ "loss": 0.0,
+ "step": 424
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 1.3390897851312667e-05,
+ "loss": 0.0,
+ "step": 425
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 1.336156133168631e-05,
+ "loss": 0.0,
+ "step": 426
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 1.3332192159339595e-05,
+ "loss": 0.0,
+ "step": 427
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 1.3302790619551673e-05,
+ "loss": 0.0,
+ "step": 428
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 1.3273356997916106e-05,
+ "loss": 0.0,
+ "step": 429
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 1.3243891580338074e-05,
+ "loss": 0.0,
+ "step": 430
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 1.3214394653031616e-05,
+ "loss": 0.0,
+ "step": 431
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 1.3184866502516846e-05,
+ "loss": 0.0,
+ "step": 432
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 1.3155307415617156e-05,
+ "loss": 0.0,
+ "step": 433
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 1.3125717679456447e-05,
+ "loss": 0.0,
+ "step": 434
+ },
+ {
+ "epoch": 0.84,
+ "learning_rate": 1.309609758145633e-05,
+ "loss": 0.0,
+ "step": 435
+ },
+ {
+ "epoch": 0.84,
+ "learning_rate": 1.3066447409333345e-05,
+ "loss": 0.0,
+ "step": 436
+ },
+ {
+ "epoch": 0.84,
+ "learning_rate": 1.3036767451096148e-05,
+ "loss": 0.0,
+ "step": 437
+ },
+ {
+ "epoch": 0.84,
+ "learning_rate": 1.300705799504273e-05,
+ "loss": 0.0,
+ "step": 438
+ },
+ {
+ "epoch": 0.84,
+ "learning_rate": 1.2977319329757616e-05,
+ "loss": 0.0,
+ "step": 439
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 1.2947551744109044e-05,
+ "loss": 0.0,
+ "step": 440
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 1.2917755527246179e-05,
+ "loss": 0.0,
+ "step": 441
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 1.28879309685963e-05,
+ "loss": 0.0,
+ "step": 442
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 1.2858078357861979e-05,
+ "loss": 0.0,
+ "step": 443
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 1.2828197985018276e-05,
+ "loss": 0.0,
+ "step": 444
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 1.2798290140309924e-05,
+ "loss": 0.0,
+ "step": 445
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 1.2768355114248493e-05,
+ "loss": 0.0,
+ "step": 446
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 1.2738393197609602e-05,
+ "loss": 0.0,
+ "step": 447
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 1.2708404681430054e-05,
+ "loss": 0.0,
+ "step": 448
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 1.2678389857005033e-05,
+ "loss": 0.0,
+ "step": 449
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 1.2648349015885272e-05,
+ "loss": 0.0,
+ "step": 450
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 1.2618282449874221e-05,
+ "loss": 0.0,
+ "step": 451
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 1.2588190451025209e-05,
+ "loss": 0.0,
+ "step": 452
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 1.2558073311638604e-05,
+ "loss": 0.0,
+ "step": 453
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 1.2527931324258975e-05,
+ "loss": 0.0,
+ "step": 454
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 1.249776478167227e-05,
+ "loss": 0.0,
+ "step": 455
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 1.2467573976902936e-05,
+ "loss": 0.0,
+ "step": 456
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 1.2437359203211109e-05,
+ "loss": 0.0,
+ "step": 457
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 1.2407120754089733e-05,
+ "loss": 0.0,
+ "step": 458
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 1.2376858923261732e-05,
+ "loss": 0.0,
+ "step": 459
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 1.2346574004677154e-05,
+ "loss": 0.0,
+ "step": 460
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 1.2316266292510305e-05,
+ "loss": 0.0,
+ "step": 461
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 1.2285936081156897e-05,
+ "loss": 0.0,
+ "step": 462
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 1.2255583665231196e-05,
+ "loss": 0.0,
+ "step": 463
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 1.2225209339563144e-05,
+ "loss": 0.0,
+ "step": 464
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 1.2194813399195518e-05,
+ "loss": 0.0,
+ "step": 465
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 1.2164396139381029e-05,
+ "loss": 0.0,
+ "step": 466
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 1.2133957855579501e-05,
+ "loss": 0.0,
+ "step": 467
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 1.210349884345496e-05,
+ "loss": 0.0,
+ "step": 468
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 1.2073019398872778e-05,
+ "loss": 0.0,
+ "step": 469
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 1.2042519817896805e-05,
+ "loss": 0.0,
+ "step": 470
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 1.2012000396786485e-05,
+ "loss": 0.0,
+ "step": 471
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 1.1981461431993978e-05,
+ "loss": 0.0,
+ "step": 472
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 1.1950903220161286e-05,
+ "loss": 0.0,
+ "step": 473
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 1.1920326058117364e-05,
+ "loss": 0.0,
+ "step": 474
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 1.1889730242875243e-05,
+ "loss": 0.0,
+ "step": 475
+ },
+ {
+ "epoch": 0.92,
+ "learning_rate": 1.1859116071629148e-05,
+ "loss": 0.0,
+ "step": 476
+ },
+ {
+ "epoch": 0.92,
+ "learning_rate": 1.1828483841751597e-05,
+ "loss": 0.0,
+ "step": 477
+ },
+ {
+ "epoch": 0.92,
+ "learning_rate": 1.1797833850790527e-05,
+ "loss": 0.0,
+ "step": 478
+ },
+ {
+ "epoch": 0.92,
+ "learning_rate": 1.1767166396466404e-05,
+ "loss": 0.0,
+ "step": 479
+ },
+ {
+ "epoch": 0.92,
+ "learning_rate": 1.1736481776669307e-05,
+ "loss": 0.0,
+ "step": 480
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 1.1705780289456069e-05,
+ "loss": 0.0,
+ "step": 481
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 1.1675062233047365e-05,
+ "loss": 0.0,
+ "step": 482
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 1.1644327905824808e-05,
+ "loss": 0.0,
+ "step": 483
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 1.1613577606328068e-05,
+ "loss": 0.0,
+ "step": 484
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 1.1582811633251949e-05,
+ "loss": 0.0,
+ "step": 485
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 1.1552030285443516e-05,
+ "loss": 0.0,
+ "step": 486
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 1.1521233861899168e-05,
+ "loss": 0.0,
+ "step": 487
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 1.1490422661761744e-05,
+ "loss": 0.0,
+ "step": 488
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 1.1459596984317622e-05,
+ "loss": 0.0,
+ "step": 489
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 1.1428757128993801e-05,
+ "loss": 0.0,
+ "step": 490
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 1.1397903395354996e-05,
+ "loss": 0.0,
+ "step": 491
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 1.1367036083100735e-05,
+ "loss": 0.0,
+ "step": 492
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 1.1336155492062439e-05,
+ "loss": 0.0,
+ "step": 493
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 1.130526192220052e-05,
+ "loss": 0.0,
+ "step": 494
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 1.1274355673601446e-05,
+ "loss": 0.0,
+ "step": 495
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 1.1243437046474854e-05,
+ "loss": 0.0,
+ "step": 496
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 1.1212506341150615e-05,
+ "loss": 0.0,
+ "step": 497
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 1.118156385807593e-05,
+ "loss": 0.0,
+ "step": 498
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 1.1150609897812387e-05,
+ "loss": 0.0,
+ "step": 499
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 1.1119644761033079e-05,
+ "loss": 0.0,
+ "step": 500
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 1.1088668748519646e-05,
+ "loss": 0.0,
+ "step": 501
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 1.105768216115938e-05,
+ "loss": 0.0,
+ "step": 502
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 1.1026685299942286e-05,
+ "loss": 0.0,
+ "step": 503
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 1.0995678465958168e-05,
+ "loss": 0.0,
+ "step": 504
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 1.0964661960393703e-05,
+ "loss": 0.0,
+ "step": 505
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 1.0933636084529507e-05,
+ "loss": 0.0,
+ "step": 506
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 1.0902601139737225e-05,
+ "loss": 0.0,
+ "step": 507
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 1.0871557427476585e-05,
+ "loss": 0.0,
+ "step": 508
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 1.0840505249292477e-05,
+ "loss": 0.0,
+ "step": 509
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 1.0809444906812034e-05,
+ "loss": 0.0,
+ "step": 510
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 1.0778376701741688e-05,
+ "loss": 0.0,
+ "step": 511
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 1.0747300935864245e-05,
+ "loss": 0.0,
+ "step": 512
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 1.0716217911035952e-05,
+ "loss": 0.0,
+ "step": 513
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 1.0685127929183567e-05,
+ "loss": 0.0,
+ "step": 514
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 1.0654031292301432e-05,
+ "loss": 0.0,
+ "step": 515
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 1.0622928302448523e-05,
+ "loss": 0.0,
+ "step": 516
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 1.0591819261745528e-05,
+ "loss": 0.0,
+ "step": 517
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 1.0560704472371919e-05,
+ "loss": 0.0,
+ "step": 518
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 1.0529584236562995e-05,
+ "loss": 0.0,
+ "step": 519
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 1.0498458856606972e-05,
+ "loss": 0.0,
+ "step": 520
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 1.0467328634842024e-05,
+ "loss": 0.0,
+ "step": 521
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 1.0436193873653362e-05,
+ "loss": 0.0,
+ "step": 522
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 1.0405054875470287e-05,
+ "loss": 0.0,
+ "step": 523
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 1.037391194276326e-05,
+ "loss": 0.0,
+ "step": 524
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 1.0342765378040953e-05,
+ "loss": 0.0,
+ "step": 525
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 1.0311615483847333e-05,
+ "loss": 0.0,
+ "step": 526
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 1.028046256275869e-05,
+ "loss": 0.0,
+ "step": 527
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 1.0249306917380731e-05,
+ "loss": 0.0,
+ "step": 528
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 1.0218148850345613e-05,
+ "loss": 0.0,
+ "step": 529
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 1.0186988664309023e-05,
+ "loss": 0.0,
+ "step": 530
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 1.0155826661947232e-05,
+ "loss": 0.0,
+ "step": 531
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 1.0124663145954152e-05,
+ "loss": 0.0,
+ "step": 532
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 1.0093498419038394e-05,
+ "loss": 0.0,
+ "step": 533
+ },
+ {
+ "epoch": 1.03,
+ "learning_rate": 1.0062332783920337e-05,
+ "loss": 0.0,
+ "step": 534
+ },
+ {
+ "epoch": 1.03,
+ "learning_rate": 1.0031166543329179e-05,
+ "loss": 0.0,
+ "step": 535
+ },
+ {
+ "epoch": 1.03,
+ "learning_rate": 1e-05,
+ "loss": 0.0,
+ "step": 536
+ },
+ {
+ "epoch": 1.03,
+ "learning_rate": 9.968833456670824e-06,
+ "loss": 0.0,
+ "step": 537
+ },
+ {
+ "epoch": 1.03,
+ "learning_rate": 9.937667216079665e-06,
+ "loss": 0.0,
+ "step": 538
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 9.90650158096161e-06,
+ "loss": 0.0,
+ "step": 539
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 9.87533685404585e-06,
+ "loss": 0.0,
+ "step": 540
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 9.844173338052771e-06,
+ "loss": 0.0,
+ "step": 541
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 9.81301133569098e-06,
+ "loss": 0.0,
+ "step": 542
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 9.78185114965439e-06,
+ "loss": 0.0,
+ "step": 543
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 9.750693082619274e-06,
+ "loss": 0.0,
+ "step": 544
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 9.719537437241311e-06,
+ "loss": 0.0,
+ "step": 545
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 9.68838451615267e-06,
+ "loss": 0.0,
+ "step": 546
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 9.65723462195905e-06,
+ "loss": 0.0,
+ "step": 547
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 9.626088057236745e-06,
+ "loss": 0.0,
+ "step": 548
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 9.594945124529718e-06,
+ "loss": 0.0,
+ "step": 549
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 9.563806126346643e-06,
+ "loss": 0.0,
+ "step": 550
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 9.532671365157979e-06,
+ "loss": 0.0,
+ "step": 551
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 9.501541143393028e-06,
+ "loss": 0.0,
+ "step": 552
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 9.470415763437003e-06,
+ "loss": 0.0,
+ "step": 553
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 9.439295527628083e-06,
+ "loss": 0.0,
+ "step": 554
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 9.408180738254472e-06,
+ "loss": 0.0,
+ "step": 555
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 9.377071697551479e-06,
+ "loss": 0.0,
+ "step": 556
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 9.34596870769857e-06,
+ "loss": 0.0,
+ "step": 557
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 9.314872070816435e-06,
+ "loss": 0.0,
+ "step": 558
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 9.28378208896405e-06,
+ "loss": 0.0,
+ "step": 559
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 9.252699064135759e-06,
+ "loss": 0.0,
+ "step": 560
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 9.221623298258315e-06,
+ "loss": 0.0,
+ "step": 561
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 9.190555093187968e-06,
+ "loss": 0.0,
+ "step": 562
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 9.159494750707527e-06,
+ "loss": 0.0,
+ "step": 563
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 9.128442572523418e-06,
+ "loss": 0.0,
+ "step": 564
+ },
+ {
+ "epoch": 1.09,
+ "learning_rate": 9.097398860262777e-06,
+ "loss": 0.0,
+ "step": 565
+ },
+ {
+ "epoch": 1.09,
+ "learning_rate": 9.066363915470494e-06,
+ "loss": 0.0,
+ "step": 566
+ },
+ {
+ "epoch": 1.09,
+ "learning_rate": 9.0353380396063e-06,
+ "loss": 0.0,
+ "step": 567
+ },
+ {
+ "epoch": 1.09,
+ "learning_rate": 9.004321534041836e-06,
+ "loss": 0.0,
+ "step": 568
+ },
+ {
+ "epoch": 1.09,
+ "learning_rate": 8.973314700057717e-06,
+ "loss": 0.0,
+ "step": 569
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 8.942317838840625e-06,
+ "loss": 0.0,
+ "step": 570
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 8.911331251480357e-06,
+ "loss": 0.0,
+ "step": 571
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 8.880355238966923e-06,
+ "loss": 0.0,
+ "step": 572
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 8.849390102187615e-06,
+ "loss": 0.0,
+ "step": 573
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 8.818436141924072e-06,
+ "loss": 0.0,
+ "step": 574
+ },
+ {
+ "epoch": 1.11,
+ "learning_rate": 8.787493658849387e-06,
+ "loss": 0.0,
+ "step": 575
+ },
+ {
+ "epoch": 1.11,
+ "learning_rate": 8.756562953525151e-06,
+ "loss": 0.0,
+ "step": 576
+ },
+ {
+ "epoch": 1.11,
+ "learning_rate": 8.72564432639856e-06,
+ "loss": 0.0,
+ "step": 577
+ },
+ {
+ "epoch": 1.11,
+ "learning_rate": 8.694738077799487e-06,
+ "loss": 0.0,
+ "step": 578
+ },
+ {
+ "epoch": 1.11,
+ "learning_rate": 8.663844507937563e-06,
+ "loss": 0.0,
+ "step": 579
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 8.632963916899268e-06,
+ "loss": 0.0,
+ "step": 580
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 8.602096604645009e-06,
+ "loss": 0.0,
+ "step": 581
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 8.571242871006202e-06,
+ "loss": 0.0,
+ "step": 582
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 8.540403015682382e-06,
+ "loss": 0.0,
+ "step": 583
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 8.509577338238255e-06,
+ "loss": 0.0,
+ "step": 584
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 8.478766138100834e-06,
+ "loss": 0.0,
+ "step": 585
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 8.447969714556484e-06,
+ "loss": 0.0,
+ "step": 586
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 8.417188366748051e-06,
+ "loss": 0.0,
+ "step": 587
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 8.386422393671934e-06,
+ "loss": 0.0,
+ "step": 588
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 8.355672094175192e-06,
+ "loss": 0.0,
+ "step": 589
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 8.324937766952638e-06,
+ "loss": 0.0,
+ "step": 590
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 8.294219710543931e-06,
+ "loss": 0.0,
+ "step": 591
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 8.263518223330698e-06,
+ "loss": 0.0,
+ "step": 592
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 8.232833603533601e-06,
+ "loss": 0.0,
+ "step": 593
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 8.202166149209475e-06,
+ "loss": 0.0,
+ "step": 594
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 8.171516158248406e-06,
+ "loss": 0.0,
+ "step": 595
+ },
+ {
+ "epoch": 1.15,
+ "learning_rate": 8.140883928370855e-06,
+ "loss": 0.0,
+ "step": 596
+ },
+ {
+ "epoch": 1.15,
+ "learning_rate": 8.11026975712476e-06,
+ "loss": 0.0,
+ "step": 597
+ },
+ {
+ "epoch": 1.15,
+ "learning_rate": 8.079673941882639e-06,
+ "loss": 0.0,
+ "step": 598
+ },
+ {
+ "epoch": 1.15,
+ "learning_rate": 8.04909677983872e-06,
+ "loss": 0.0,
+ "step": 599
+ },
+ {
+ "epoch": 1.15,
+ "learning_rate": 8.018538568006027e-06,
+ "loss": 0.0,
+ "step": 600
+ }
+ ],
+ "logging_steps": 1.0,
+ "max_steps": 1040,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 2,
+ "save_steps": 100,
+ "total_flos": 2.9934895019065344e+17,
+ "train_batch_size": 16,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/training_args.bin b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..2ca4d892afdd453b26723a9aa94e432cb44cc953
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:63da3a2d0bf1dde543b68e123590fcd7c42f45ec7eb68e86c6eadd439321f902
+size 6264
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/zero_to_fp32.py b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/zero_to_fp32.py
new file mode 100644
index 0000000000000000000000000000000000000000..c98caae31534368be22b67fc4ae906836c992a8d
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-600/zero_to_fp32.py
@@ -0,0 +1,587 @@
+#!/usr/bin/env python
+
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+# This script extracts fp32 consolidated weights from ZeRO 1, 2 and 3 DeepSpeed checkpoints. It gets
+# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+# application.
+#
+# example: python zero_to_fp32.py . pytorch_model.bin
+
+import argparse
+import torch
+import glob
+import math
+import os
+import re
+from collections import OrderedDict
+from dataclasses import dataclass
+
+# While this script doesn't use DeepSpeed to recover data, the checkpoints are pickled with
+# DeepSpeed data structures, so DeepSpeed must be available in the current Python environment.
+from deepspeed.utils import logger
+from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
+@dataclass
+class zero_model_state:
+ buffers: dict()
+ param_shapes: dict()
+ shared_params: list
+ ds_version: int
+ frozen_param_shapes: dict()
+ frozen_param_fragments: dict()
+
+
+debug = 0
+
+# load to cpu
+device = torch.device('cpu')
+
+
+def atoi(text):
+ return int(text) if text.isdigit() else text
+
+
+def natural_keys(text):
+ '''
+ alist.sort(key=natural_keys) sorts in human order
+ http://nedbatchelder.com/blog/200712/human_sorting.html
+ (See Toothy's implementation in the comments)
+ '''
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
+
+
+def get_model_state_file(checkpoint_dir, zero_stage):
+ if not os.path.isdir(checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
+
+ # there should be only one file
+ if zero_stage <= 2:
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
+ elif zero_stage == 3:
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
+
+ if not os.path.exists(file):
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
+
+ return file
+
+
+def get_checkpoint_files(checkpoint_dir, glob_pattern):
+ # XXX: need to test that this simple glob rule works for multi-node setup too
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
+
+ if len(ckpt_files) == 0:
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
+
+ return ckpt_files
+
+
+def get_optim_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
+
+
+def get_model_state_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
+
+
+def parse_model_states(files):
+ zero_model_states = []
+ for file in files:
+ state_dict = torch.load(file, map_location=device)
+
+ if BUFFER_NAMES not in state_dict:
+ raise ValueError(f"{file} is not a model state checkpoint")
+ buffer_names = state_dict[BUFFER_NAMES]
+ if debug:
+ print("Found buffers:", buffer_names)
+
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
+ param_shapes = state_dict[PARAM_SHAPES]
+
+ # collect parameters that are included in param_shapes
+ param_names = []
+ for s in param_shapes:
+ for name in s.keys():
+ param_names.append(name)
+
+ # update with frozen parameters
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
+ if frozen_param_shapes is not None:
+ if debug:
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
+ param_names += list(frozen_param_shapes.keys())
+
+ # handle shared params
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
+
+ ds_version = state_dict.get(DS_VERSION, None)
+
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
+
+ z_model_state = zero_model_state(buffers=buffers,
+ param_shapes=param_shapes,
+ shared_params=shared_params,
+ ds_version=ds_version,
+ frozen_param_shapes=frozen_param_shapes,
+ frozen_param_fragments=frozen_param_fragments)
+ zero_model_states.append(z_model_state)
+
+ return zero_model_states
+
+
+def parse_optim_states(files, ds_checkpoint_dir):
+
+ total_files = len(files)
+ state_dicts = []
+ for f in files:
+ state_dict = torch.load(f, map_location=device)
+        # immediately discard the two potentially huge optimizer states, as we only care about the fp32 master weights
+ # and also handle the case where it was already removed by another helper script
+ state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
+ state_dicts.append(state_dict)
+
+    if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
+
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
+ # use the max of the partition_count to get the dp world_size.
+
+    if isinstance(world_size, list):
+ world_size = max(world_size)
+
+ if world_size != total_files:
+ raise ValueError(
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
+ )
+
+ # the groups are named differently in each stage
+ if zero_stage <= 2:
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
+ elif zero_stage == 3:
+ fp32_groups_key = FP32_FLAT_GROUPS
+ else:
+ raise ValueError(f"unknown zero stage {zero_stage}")
+
+ if zero_stage <= 2:
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
+ elif zero_stage == 3:
+ # if there is more than one param group, there will be multiple flattened tensors - one
+ # flattened tensor per group - for simplicity merge them into a single tensor
+ #
+ # XXX: could make the script more memory efficient for when there are multiple groups - it
+ # will require matching the sub-lists of param_shapes for each param group flattened tensor
+
+ fp32_flat_groups = [
+ torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
+ ]
+
+ return zero_stage, world_size, fp32_flat_groups
+
+
+def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir):
+ """
+ Returns fp32 state_dict reconstructed from ds checkpoint
+
+ Args:
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
+
+ """
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
+
+ optim_files = get_optim_files(ds_checkpoint_dir)
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
+
+ model_files = get_model_state_files(ds_checkpoint_dir)
+
+ zero_model_states = parse_model_states(model_files)
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
+
+ if zero_stage <= 2:
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states)
+ elif zero_stage == 3:
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states)
+
+
+def _zero2_merge_frozen_params(state_dict, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
+
+ if debug:
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ state_dict[name] = frozen_param_fragments[name]
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+
+ # Reconstruction protocol:
+ #
+ # XXX: document this
+
+ if debug:
+ for i in range(world_size):
+ for j in range(len(fp32_flat_groups[0])):
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
+
+ # XXX: memory usage doubles here (zero2)
+ num_param_groups = len(fp32_flat_groups[0])
+ merged_single_partition_of_fp32_groups = []
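+    # concatenate each rank's partition of a param group into one contiguous fp32 vector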
+ for i in range(num_param_groups):
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
+ avail_numel = sum(
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
+
+ if debug:
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
+ # not asserting if there is a mismatch due to possible padding
+ print(f"Have {avail_numel} numels to process.")
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # out-of-core computing solution
+ total_numel = 0
+ total_params = 0
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
+ offset = 0
+ avail_numel = full_single_fp32_vector.numel()
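+        # walk the merged flat vector in order, slicing out each param by its recorded shape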
+ for name, shape in shapes.items():
+
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+ total_params += 1
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
+ offset += unpartitioned_numel
+
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
+ # live optimizer object, so we are checking that the numbers are within the right range
+ align_to = 2 * world_size
+
+ def zero2_align(x):
+ return align_to * math.ceil(x / align_to)
+
+ if debug:
+ print(f"original offset={offset}, avail_numel={avail_numel}")
+
+ offset = zero2_align(offset)
+ avail_numel = zero2_align(avail_numel)
+
+ if debug:
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
+
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def zero3_partitioned_param_info(unpartitioned_numel, world_size):
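+    # ZeRO-3 shards each param evenly across ranks; the final shard is zero-padded so every rank holds the same number of elements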
+ remainder = unpartitioned_numel % world_size
+ padding_numel = (world_size - remainder) if remainder else 0
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
+ return partitioned_numel, padding_numel
+
+
+def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ if debug:
+ for i in range(world_size):
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
+
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+ avail_numel = fp32_flat_groups[0].numel() * world_size
+ # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
+ # param, re-consolidating each param, while dealing with padding if any
+
+ # merge list of dicts, preserving order
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
+
+ if debug:
+ for i in range(world_size):
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
+
+ wanted_params = len(param_shapes)
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
+ # not asserting if there is a mismatch due to possible padding
+ avail_numel = fp32_flat_groups[0].numel() * world_size
+ print(f"Trainable params: Have {avail_numel} numels to process.")
+ print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # out-of-core computing solution
+ offset = 0
+ total_numel = 0
+ total_params = 0
+ for name, shape in param_shapes.items():
+
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+ total_params += 1
+
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ # XXX: memory usage doubles here
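+        # each rank holds a contiguous slice of this param at the current offset; gather all slices, drop the padding, and restore the original shape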
+ state_dict[name] = torch.cat(
+ tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
+ 0).narrow(0, 0, unpartitioned_numel).view(shape)
+ offset += partitioned_numel
+
+ offset *= world_size
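+    # offset counted per-rank partitioned elements, so scale by world_size before comparing with the total flat numel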
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
+
+ _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+ ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+ via a model hub.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder
+        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to load the tag from the file named 'latest' in the checkpoint folder, e.g., ``global_step14``
+
+ Returns:
+ - pytorch ``state_dict``
+
+    Note: this approach may not work if your application doesn't have sufficient free CPU memory; in
+    that case you may need to use the offline approach via the ``zero_to_fp32.py`` script that is
+    saved with the checkpoint.
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+ # do the training and checkpoint saving
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
+ model = model.cpu() # move to cpu
+ model.load_state_dict(state_dict)
+ # submit to model hub or save the model to share with others
+
+    In this example the ``model`` will no longer be usable in the deepspeed context of the same
+    application; i.e., you will need to re-initialize the deepspeed engine, since
+    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+ """
+ if tag is None:
+ latest_path = os.path.join(checkpoint_dir, 'latest')
+ if os.path.isfile(latest_path):
+ with open(latest_path, 'r') as fd:
+ tag = fd.read().strip()
+ else:
+ raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+ ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+ if not os.path.isdir(ds_checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+ return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir)
+
+
+def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+ loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+ """
+
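+    # Example usage (hypothetical paths, mirroring the CLI example at the top of this file):
+    #   convert_zero_checkpoint_to_fp32_state_dict(".", "pytorch_model.bin")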
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+ print(f"Saving fp32 state dict to {output_file}")
+ torch.save(state_dict, output_file)
+
+
+def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+ """
+ 1. Put the provided model to cpu
+ 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+ 3. Load it into the provided model
+
+ Args:
+ - ``model``: the model object to update
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+ Returns:
+ - ``model``: modified model
+
+ Make sure you have plenty of CPU memory available before you call this function. If you don't
+ have enough, use the ``zero_to_fp32.py`` utility to do the conversion instead. You will find it
+ conveniently placed for you in the checkpoint folder.
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+ model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+ # submit to model hub or save the model to share with others
+
+ Note that once this has been run, the ``model`` will no longer be usable in the deepspeed
+ context of the same application, i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ """
+ logger.info("Extracting fp32 weights")
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+ logger.info("Overwriting model with fp32 weights")
+ model = model.cpu()
+ model.load_state_dict(state_dict, strict=False)
+
+ return model
+
+
+if __name__ == "__main__":
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("checkpoint_dir",
+ type=str,
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+ parser.add_argument(
+ "output_file",
+ type=str,
+ help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
+ parser.add_argument("-t",
+ "--tag",
+ type=str,
+ default=None,
+ help="checkpoint tag used as a unique identifier for the checkpoint, e.g., global_step1")
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+ args = parser.parse_args()
+
+ debug = args.debug
+
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file, tag=args.tag)
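+
+ # Example command-line invocation (illustrative; the paths and tag are hypothetical):
+ # python zero_to_fp32.py path/checkpoint-12 path/checkpoint-12/pytorch_model.bin -t global_step1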
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/config.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..44e6d4e17930a42d0aa68dcd3790bd5f32ba4ec4
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/config.json
@@ -0,0 +1,73 @@
+{
+ "_name_or_path": "../pretrained-models/llava-v1.6-mistral-7b",
+ "architectures": [
+ "LlavaLlamaForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "freeze_mm_mlp_adapter": false,
+ "freeze_mm_vision_resampler": false,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "image_aspect_ratio": "pad",
+ "image_crop_resolution": 224,
+ "image_grid_pinpoints": [
+ [
+ 336,
+ 672
+ ],
+ [
+ 672,
+ 336
+ ],
+ [
+ 672,
+ 672
+ ],
+ [
+ 1008,
+ 336
+ ],
+ [
+ 336,
+ 1008
+ ]
+ ],
+ "image_split_resolution": 224,
+ "initializer_range": 0.02,
+ "intermediate_size": 14336,
+ "max_position_embeddings": 32768,
+ "mm_hidden_size": 1024,
+ "mm_patch_merge_type": "flat",
+ "mm_projector_lr": null,
+ "mm_projector_type": "mlp2x_gelu",
+ "mm_resampler_type": null,
+ "mm_use_im_patch_token": false,
+ "mm_use_im_start_end": false,
+ "mm_vision_select_feature": "patch",
+ "mm_vision_select_layer": -2,
+ "mm_vision_tower": "openai/clip-vit-large-patch14-336",
+ "mm_vision_tower_lr": 2e-06,
+ "model_type": "llava_llama",
+ "num_attention_heads": 32,
+ "num_hidden_layers": 32,
+ "num_key_value_heads": 8,
+ "pretraining_tp": 1,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "tokenizer_model_max_length": 2048,
+ "tokenizer_padding_side": "right",
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.37.2",
+ "tune_mm_mlp_adapter": false,
+ "tune_mm_vision_resampler": false,
+ "unfreeze_mm_vision_tower": true,
+ "use_cache": false,
+ "use_mm_proj": true,
+ "vocab_size": 32000
+}
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/generation_config.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..69b7806611a4865cd48c3e991dbd7d8312e0c5d3
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/generation_config.json
@@ -0,0 +1,6 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "transformers_version": "4.37.2"
+}
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/latest b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/latest
new file mode 100644
index 0000000000000000000000000000000000000000..a0f3e526aa9af2ac647b278f006bb9616843c5d6
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/latest
@@ -0,0 +1 @@
+global_step700
\ No newline at end of file
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/model.safetensors.index.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..927da5be7e6e3ec29d3a967a09ba6a421d7a2191
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/model.safetensors.index.json
@@ -0,0 +1,694 @@
+{
+ "metadata": {
+ "total_size": 15132446720
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00004-of-00004.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
+ "model.image_newline": "model-00001-of-00004.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.30.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.mm_projector.0.bias": "model-00003-of-00004.safetensors",
+ "model.mm_projector.0.weight": "model-00003-of-00004.safetensors",
+ "model.mm_projector.2.bias": "model-00003-of-00004.safetensors",
+ "model.mm_projector.2.weight": "model-00003-of-00004.safetensors",
+ "model.norm.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.embeddings.class_embedding": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.embeddings.patch_embedding.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.embeddings.position_embedding.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.post_layernorm.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.post_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.pre_layrnorm.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.pre_layrnorm.weight": "model-00003-of-00004.safetensors"
+ }
+}
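
Note: the `weight_map` closed above maps each tensor name to the shard that stores it; every CLIP vision-tower tensor listed here lands in `model-00003-of-00004.safetensors`. A minimal Python sketch for sanity-checking a sharded checkpoint like this by grouping the index by shard — the local directory name is an assumption, not something fixed by this diff:

```python
import json
from collections import Counter

# Hypothetical local path to one of the checkpoint directories in this diff.
CKPT_DIR = "llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700"

with open(f"{CKPT_DIR}/model.safetensors.index.json") as f:
    index = json.load(f)

weight_map = index["weight_map"]  # tensor name -> shard filename

# How many tensors does each shard hold?
print(Counter(weight_map.values()))

# Which shard stores the CLIP vision tower?
vision = [k for k in weight_map if k.startswith("model.vision_tower.")]
print(len(vision), "vision-tower tensors, first one in", weight_map[vision[0]])
```
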
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/rng_state_0.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..b346349ce12dd5a17d4b91ed2a5722bb52550950
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ad8a35afd8967cbb748405387e44426e43ad127028e826eddc9b67d2ca873c85
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/rng_state_1.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..68f3c6994456cb8d0592a5375d99503c8924b1c4
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f338ce80d7c441076bfc8c53b84067a0181f5a14e80c13d5acb8150b659f4d73
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/rng_state_2.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..be044f6ceeed587d30e80c2f72d5aa19fdc9947b
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c9fbc9fa428939be10b46779f0eb5cd833e0da426b1cbdee77b3a55b6952235b
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/rng_state_3.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..fc825249656a9b858782542bd3f4386250f1dfe0
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac55dba0b79d5fa4699d239da2f966d52040d576d31234ac8d4632e6956481bc
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/rng_state_4.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/rng_state_4.pth
new file mode 100644
index 0000000000000000000000000000000000000000..d30f52a44be563c152ae09db6ae934da6da0d3ed
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/rng_state_4.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af2d0c015100768ffa23faf3b6c2d54ea89eb045603e30e55cd211e06ff34972
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/rng_state_5.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/rng_state_5.pth
new file mode 100644
index 0000000000000000000000000000000000000000..c8715d27ab23ae545d58039cf949cc44ecc1da5e
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/rng_state_5.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c60a1b40608e34bc801c8231f97b81c53b5290dfaed1b9cd0ccbeca29574a991
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/rng_state_6.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/rng_state_6.pth
new file mode 100644
index 0000000000000000000000000000000000000000..1ed791b6ef76eadf0b0c55a5733411771e2ae027
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/rng_state_6.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3ad6a142a403eb9aafc4a3a9a856bca648fe31fd22d796867baca31fb13656aa
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/rng_state_7.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/rng_state_7.pth
new file mode 100644
index 0000000000000000000000000000000000000000..800c3bbbc5edf7db01a8316069d439c5fb8d8c30
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/rng_state_7.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:38bc23a138cc800b22881742c0f3f9a71731a9a7111c6058a0077e6274d21773
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/scheduler.pt b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f4f219b467c7f86dc30578d432dc929a974130dd
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f67660f58d8561b9a3c49bde4665e923c52d9ef702ce56c7a14933d136d90092
+size 1064
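
The `rng_state_*.pth` and `scheduler.pt` entries above are Git LFS pointer files: three text lines (`version`, `oid`, `size`) standing in for the binaries until `git lfs pull` fetches them. A minimal sketch for parsing such a pointer and reading its advertised size; the path is an assumption:

```python
# Hypothetical path to one of the LFS pointer files added in this diff.
POINTER = "llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/scheduler.pt"

def parse_lfs_pointer(path):
    """Read a git-lfs pointer file into a dict: version, oid, size."""
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

ptr = parse_lfs_pointer(POINTER)
print(ptr["oid"], int(ptr["size"]))
# After `git lfs pull`, this path holds the real checkpoint file and its
# on-disk size should equal int(ptr["size"]).
```
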
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/special_tokens_map.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..14761dcf1466dc232bd41de9c21d4c617b15755e
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/tokenizer.model b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..8b443ef19c2a19acc3ac64fb9c3db4a72921dff6
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+size 493443
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/tokenizer_config.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..23dcf70e8cfc9b16310b6ff3dc98fdbc5adc11f8
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/tokenizer_config.json
@@ -0,0 +1,44 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [],
+ "bos_token": "",
+ "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "legacy": true,
+ "model_max_length": 2048,
+ "pad_token": "",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "",
+ "use_default_system_prompt": false
+}
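
This tokenizer_config.json pins a slow LlamaTokenizer with `model_max_length` 2048, right-side padding, and a Mistral-style `[INST] ... [/INST]` chat template. A minimal sketch of loading it with `transformers` and rendering a single user turn; the local checkpoint path is an assumption:

```python
from transformers import AutoTokenizer

# Hypothetical local checkpoint directory from this diff.
CKPT_DIR = "llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700"

tok = AutoTokenizer.from_pretrained(CKPT_DIR, use_fast=False)
print(tok.model_max_length, tok.padding_side)  # 2048 right

messages = [{"role": "user", "content": "Describe the image."}]
prompt = tok.apply_chat_template(messages, tokenize=False)
print(prompt)  # "<s>[INST] Describe the image. [/INST]" given the template above
```
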
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/trainer_state.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..23845b857f7cc28742a1f65ae0af8a21026642fd
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/trainer_state.json
@@ -0,0 +1,4221 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.3461538461538463,
+ "eval_steps": 500,
+ "global_step": 700,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.0,
+ "learning_rate": 6.25e-07,
+ "loss": 3.7473,
+ "step": 1
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 1.25e-06,
+ "loss": 0.0,
+ "step": 2
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 1.8750000000000003e-06,
+ "loss": 0.0,
+ "step": 3
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 2.5e-06,
+ "loss": 0.0,
+ "step": 4
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 3.125e-06,
+ "loss": 0.0,
+ "step": 5
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 3.7500000000000005e-06,
+ "loss": 0.0,
+ "step": 6
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 4.3750000000000005e-06,
+ "loss": 0.0,
+ "step": 7
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 5e-06,
+ "loss": 0.0,
+ "step": 8
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 5.625e-06,
+ "loss": 0.0,
+ "step": 9
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 6.25e-06,
+ "loss": 0.0,
+ "step": 10
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 6.875e-06,
+ "loss": 0.0,
+ "step": 11
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 7.500000000000001e-06,
+ "loss": 0.0,
+ "step": 12
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 8.125000000000001e-06,
+ "loss": 0.0,
+ "step": 13
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 8.750000000000001e-06,
+ "loss": 0.0,
+ "step": 14
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 9.375000000000001e-06,
+ "loss": 0.0,
+ "step": 15
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 1e-05,
+ "loss": 0.0,
+ "step": 16
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 1.0625e-05,
+ "loss": 0.0,
+ "step": 17
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 1.125e-05,
+ "loss": 0.0,
+ "step": 18
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.1875e-05,
+ "loss": 0.0,
+ "step": 19
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.25e-05,
+ "loss": 0.0,
+ "step": 20
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.3125e-05,
+ "loss": 0.0,
+ "step": 21
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.375e-05,
+ "loss": 0.0,
+ "step": 22
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.4375e-05,
+ "loss": 0.0,
+ "step": 23
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.5000000000000002e-05,
+ "loss": 0.0,
+ "step": 24
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.5625e-05,
+ "loss": 0.0,
+ "step": 25
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.6250000000000002e-05,
+ "loss": 0.0,
+ "step": 26
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.6875e-05,
+ "loss": 0.0,
+ "step": 27
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.7500000000000002e-05,
+ "loss": 0.0,
+ "step": 28
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.8125e-05,
+ "loss": 0.0,
+ "step": 29
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.8750000000000002e-05,
+ "loss": 0.0,
+ "step": 30
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.9375e-05,
+ "loss": 0.0,
+ "step": 31
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 2e-05,
+ "loss": 0.0,
+ "step": 32
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.9999951432210905e-05,
+ "loss": 0.0,
+ "step": 33
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.9999805729315383e-05,
+ "loss": 0.0,
+ "step": 34
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.999956289272873e-05,
+ "loss": 0.0,
+ "step": 35
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.999922292480975e-05,
+ "loss": 0.0,
+ "step": 36
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.9998785828860744e-05,
+ "loss": 0.0,
+ "step": 37
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.9998251609127465e-05,
+ "loss": 0.0,
+ "step": 38
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.999762027079909e-05,
+ "loss": 0.0,
+ "step": 39
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9996891820008165e-05,
+ "loss": 0.0,
+ "step": 40
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9996066263830533e-05,
+ "loss": 0.0,
+ "step": 41
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9995143610285275e-05,
+ "loss": 0.0,
+ "step": 42
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9994123868334655e-05,
+ "loss": 0.0,
+ "step": 43
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9993007047883988e-05,
+ "loss": 0.0,
+ "step": 44
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.999179315978157e-05,
+ "loss": 0.0,
+ "step": 45
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.999048221581858e-05,
+ "loss": 0.0,
+ "step": 46
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.9989074228728942e-05,
+ "loss": 0.0,
+ "step": 47
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.9987569212189224e-05,
+ "loss": 0.0,
+ "step": 48
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.9985967180818493e-05,
+ "loss": 0.0,
+ "step": 49
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.998426815017817e-05,
+ "loss": 0.0,
+ "step": 50
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.998247213677188e-05,
+ "loss": 0.0,
+ "step": 51
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.9980579158045322e-05,
+ "loss": 0.0,
+ "step": 52
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.9978589232386036e-05,
+ "loss": 0.0,
+ "step": 53
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.997650237912329e-05,
+ "loss": 0.0,
+ "step": 54
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.997431861852785e-05,
+ "loss": 0.0,
+ "step": 55
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.9972037971811802e-05,
+ "loss": 0.0,
+ "step": 56
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.996966046112834e-05,
+ "loss": 0.0,
+ "step": 57
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.996718610957155e-05,
+ "loss": 0.0,
+ "step": 58
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.9964614941176194e-05,
+ "loss": 0.0,
+ "step": 59
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.9961946980917457e-05,
+ "loss": 0.0,
+ "step": 60
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.995918225471073e-05,
+ "loss": 0.0,
+ "step": 61
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.9956320789411338e-05,
+ "loss": 0.0,
+ "step": 62
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.9953362612814294e-05,
+ "loss": 0.0,
+ "step": 63
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.9950307753654016e-05,
+ "loss": 0.0,
+ "step": 64
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.994715624160405e-05,
+ "loss": 0.0,
+ "step": 65
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.99439081072768e-05,
+ "loss": 0.0,
+ "step": 66
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.9940563382223196e-05,
+ "loss": 0.0,
+ "step": 67
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.9937122098932428e-05,
+ "loss": 0.0,
+ "step": 68
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.9933584290831593e-05,
+ "loss": 0.0,
+ "step": 69
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.9929949992285397e-05,
+ "loss": 0.0,
+ "step": 70
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.992621923859581e-05,
+ "loss": 0.0,
+ "step": 71
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.9922392066001724e-05,
+ "loss": 0.0,
+ "step": 72
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.99184685116786e-05,
+ "loss": 0.0,
+ "step": 73
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.9914448613738107e-05,
+ "loss": 0.0,
+ "step": 74
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.991033241122776e-05,
+ "loss": 0.0,
+ "step": 75
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9906119944130527e-05,
+ "loss": 0.0,
+ "step": 76
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9901811253364458e-05,
+ "loss": 0.0,
+ "step": 77
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9897406380782262e-05,
+ "loss": 0.0,
+ "step": 78
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.989290536917093e-05,
+ "loss": 0.0,
+ "step": 79
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9888308262251286e-05,
+ "loss": 0.0,
+ "step": 80
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.988361510467761e-05,
+ "loss": 0.0,
+ "step": 81
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.9878825942037147e-05,
+ "loss": 0.0,
+ "step": 82
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.9873940820849714e-05,
+ "loss": 0.0,
+ "step": 83
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.9868959788567213e-05,
+ "loss": 0.0,
+ "step": 84
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.9863882893573188e-05,
+ "loss": 0.0,
+ "step": 85
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.985871018518236e-05,
+ "loss": 0.0,
+ "step": 86
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.9853441713640123e-05,
+ "loss": 0.0,
+ "step": 87
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.9848077530122083e-05,
+ "loss": 0.0,
+ "step": 88
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.9842617686733546e-05,
+ "loss": 0.0,
+ "step": 89
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.9837062236509013e-05,
+ "loss": 0.0,
+ "step": 90
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.983141123341168e-05,
+ "loss": 0.0,
+ "step": 91
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.9825664732332886e-05,
+ "loss": 0.0,
+ "step": 92
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.9819822789091597e-05,
+ "loss": 0.0,
+ "step": 93
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.981388546043388e-05,
+ "loss": 0.0,
+ "step": 94
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.9807852804032306e-05,
+ "loss": 0.0,
+ "step": 95
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.9801724878485438e-05,
+ "loss": 0.0,
+ "step": 96
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.979550174331724e-05,
+ "loss": 0.0,
+ "step": 97
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.9789183458976485e-05,
+ "loss": 0.0,
+ "step": 98
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.97827700868362e-05,
+ "loss": 0.0,
+ "step": 99
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.977626168919305e-05,
+ "loss": 0.0,
+ "step": 100
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.9769658329266718e-05,
+ "loss": 0.0,
+ "step": 101
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9762960071199334e-05,
+ "loss": 0.0,
+ "step": 102
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9756166980054812e-05,
+ "loss": 0.0,
+ "step": 103
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9749279121818235e-05,
+ "loss": 0.0,
+ "step": 104
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9742296563395218e-05,
+ "loss": 0.0,
+ "step": 105
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9735219372611232e-05,
+ "loss": 0.0,
+ "step": 106
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.9728047618210995e-05,
+ "loss": 0.0,
+ "step": 107
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.9720781369857747e-05,
+ "loss": 0.0,
+ "step": 108
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.9713420698132614e-05,
+ "loss": 0.0,
+ "step": 109
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.970596567453391e-05,
+ "loss": 0.0,
+ "step": 110
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.9698416371476434e-05,
+ "loss": 0.0,
+ "step": 111
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.969077286229078e-05,
+ "loss": 0.0,
+ "step": 112
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.9683035221222617e-05,
+ "loss": 0.0,
+ "step": 113
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.9675203523431964e-05,
+ "loss": 0.0,
+ "step": 114
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.9667277844992476e-05,
+ "loss": 0.0,
+ "step": 115
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.9659258262890683e-05,
+ "loss": 0.0,
+ "step": 116
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.9651144855025265e-05,
+ "loss": 0.0,
+ "step": 117
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.964293770020628e-05,
+ "loss": 0.0,
+ "step": 118
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.9634636878154393e-05,
+ "loss": 0.0,
+ "step": 119
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.962624246950012e-05,
+ "loss": 0.0,
+ "step": 120
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.9617754555783045e-05,
+ "loss": 0.0,
+ "step": 121
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.9609173219450998e-05,
+ "loss": 0.0,
+ "step": 122
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.960049854385929e-05,
+ "loss": 0.0,
+ "step": 123
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.9591730613269878e-05,
+ "loss": 0.0,
+ "step": 124
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.9582869512850576e-05,
+ "loss": 0.0,
+ "step": 125
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.957391532867418e-05,
+ "loss": 0.0,
+ "step": 126
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.956486814771769e-05,
+ "loss": 0.0,
+ "step": 127
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.955572805786141e-05,
+ "loss": 0.0,
+ "step": 128
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.9546495147888134e-05,
+ "loss": 0.0,
+ "step": 129
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.953716950748227e-05,
+ "loss": 0.0,
+ "step": 130
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.9527751227228964e-05,
+ "loss": 0.0,
+ "step": 131
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.9518240398613226e-05,
+ "loss": 0.0,
+ "step": 132
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.9508637114019037e-05,
+ "loss": 0.0,
+ "step": 133
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.9498941466728462e-05,
+ "loss": 0.0,
+ "step": 134
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.9489153550920726e-05,
+ "loss": 0.0,
+ "step": 135
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.947927346167132e-05,
+ "loss": 0.0,
+ "step": 136
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.946930129495106e-05,
+ "loss": 0.0,
+ "step": 137
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.945923714762516e-05,
+ "loss": 0.0,
+ "step": 138
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.9449081117452304e-05,
+ "loss": 0.0,
+ "step": 139
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.9438833303083677e-05,
+ "loss": 0.0,
+ "step": 140
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.9428493804062013e-05,
+ "loss": 0.0,
+ "step": 141
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.9418062720820636e-05,
+ "loss": 0.0,
+ "step": 142
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9407540154682473e-05,
+ "loss": 0.0,
+ "step": 143
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9396926207859085e-05,
+ "loss": 0.0,
+ "step": 144
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9386220983449652e-05,
+ "loss": 0.0,
+ "step": 145
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9375424585439994e-05,
+ "loss": 0.0,
+ "step": 146
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9364537118701542e-05,
+ "loss": 0.0,
+ "step": 147
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.935355868899034e-05,
+ "loss": 0.0,
+ "step": 148
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.9342489402945997e-05,
+ "loss": 0.0,
+ "step": 149
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.9331329368090664e-05,
+ "loss": 0.0,
+ "step": 150
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.932007869282799e-05,
+ "loss": 0.0,
+ "step": 151
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.9308737486442045e-05,
+ "loss": 0.0,
+ "step": 152
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.9297305859096305e-05,
+ "loss": 0.0,
+ "step": 153
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.9285783921832537e-05,
+ "loss": 0.0,
+ "step": 154
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.927417178656975e-05,
+ "loss": 0.0,
+ "step": 155
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.926246956610309e-05,
+ "loss": 0.0,
+ "step": 156
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.9250677374102752e-05,
+ "loss": 0.0,
+ "step": 157
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.9238795325112867e-05,
+ "loss": 0.0,
+ "step": 158
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.9226823534550418e-05,
+ "loss": 0.0,
+ "step": 159
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.921476211870408e-05,
+ "loss": 0.0,
+ "step": 160
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.9202611194733107e-05,
+ "loss": 0.0,
+ "step": 161
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.9190370880666206e-05,
+ "loss": 0.0,
+ "step": 162
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.9178041295400383e-05,
+ "loss": 0.0,
+ "step": 163
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.9165622558699763e-05,
+ "loss": 0.0,
+ "step": 164
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.9153114791194475e-05,
+ "loss": 0.0,
+ "step": 165
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.9140518114379433e-05,
+ "loss": 0.0,
+ "step": 166
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.912783265061319e-05,
+ "loss": 0.0,
+ "step": 167
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.9115058523116734e-05,
+ "loss": 0.0,
+ "step": 168
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.9102195855972287e-05,
+ "loss": 0.0,
+ "step": 169
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.908924477412211e-05,
+ "loss": 0.0,
+ "step": 170
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.9076205403367287e-05,
+ "loss": 0.0,
+ "step": 171
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.9063077870366504e-05,
+ "loss": 0.0,
+ "step": 172
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.90498623026348e-05,
+ "loss": 0.0,
+ "step": 173
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.903655882854237e-05,
+ "loss": 0.0,
+ "step": 174
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.9023167577313267e-05,
+ "loss": 0.0,
+ "step": 175
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.900968867902419e-05,
+ "loss": 0.0,
+ "step": 176
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.8996122264603202e-05,
+ "loss": 0.0,
+ "step": 177
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.898246846582844e-05,
+ "loss": 0.0,
+ "step": 178
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.8968727415326885e-05,
+ "loss": 0.0,
+ "step": 179
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.895489924657301e-05,
+ "loss": 0.0,
+ "step": 180
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.894098409388754e-05,
+ "loss": 0.0,
+ "step": 181
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.8926982092436117e-05,
+ "loss": 0.0,
+ "step": 182
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.8912893378227984e-05,
+ "loss": 0.0,
+ "step": 183
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.8898718088114688e-05,
+ "loss": 0.0,
+ "step": 184
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.8884456359788725e-05,
+ "loss": 0.0,
+ "step": 185
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.887010833178222e-05,
+ "loss": 0.0,
+ "step": 186
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.8855674143465567e-05,
+ "loss": 0.0,
+ "step": 187
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.8841153935046098e-05,
+ "loss": 0.0,
+ "step": 188
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.8826547847566692e-05,
+ "loss": 0.0,
+ "step": 189
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.8811856022904423e-05,
+ "loss": 0.0,
+ "step": 190
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.8797078603769184e-05,
+ "loss": 0.0,
+ "step": 191
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.8782215733702286e-05,
+ "loss": 0.0,
+ "step": 192
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.876726755707508e-05,
+ "loss": 0.0,
+ "step": 193
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.8752234219087538e-05,
+ "loss": 0.0,
+ "step": 194
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8737115865766865e-05,
+ "loss": 0.0,
+ "step": 195
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8721912643966055e-05,
+ "loss": 0.0,
+ "step": 196
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8706624701362485e-05,
+ "loss": 0.0,
+ "step": 197
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8691252186456465e-05,
+ "loss": 0.0,
+ "step": 198
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8675795248569816e-05,
+ "loss": 0.0,
+ "step": 199
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.866025403784439e-05,
+ "loss": 0.0,
+ "step": 200
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 1.8644628705240636e-05,
+ "loss": 0.0,
+ "step": 201
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 1.862891940253613e-05,
+ "loss": 0.0,
+ "step": 202
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 1.8613126282324092e-05,
+ "loss": 0.0,
+ "step": 203
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 1.8597249498011906e-05,
+ "loss": 0.0,
+ "step": 204
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 1.858128920381963e-05,
+ "loss": 0.0,
+ "step": 205
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 1.8565245554778516e-05,
+ "loss": 0.0,
+ "step": 206
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 1.854911870672947e-05,
+ "loss": 0.0,
+ "step": 207
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 1.8532908816321557e-05,
+ "loss": 0.0,
+ "step": 208
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 1.8516616041010495e-05,
+ "loss": 0.0,
+ "step": 209
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 1.8500240539057093e-05,
+ "loss": 0.0,
+ "step": 210
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 1.848378246952574e-05,
+ "loss": 0.0,
+ "step": 211
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 1.8467241992282842e-05,
+ "loss": 0.0,
+ "step": 212
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 1.8450619267995283e-05,
+ "loss": 0.0,
+ "step": 213
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 1.843391445812886e-05,
+ "loss": 0.0,
+ "step": 214
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 1.84171277249467e-05,
+ "loss": 0.0,
+ "step": 215
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.8400259231507716e-05,
+ "loss": 0.0,
+ "step": 216
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.8383309141664992e-05,
+ "loss": 0.0,
+ "step": 217
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.83662776200642e-05,
+ "loss": 0.0,
+ "step": 218
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.8349164832142015e-05,
+ "loss": 0.0,
+ "step": 219
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.833197094412449e-05,
+ "loss": 0.0,
+ "step": 220
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.8314696123025456e-05,
+ "loss": 0.0,
+ "step": 221
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 1.8297340536644877e-05,
+ "loss": 0.0,
+ "step": 222
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 1.827990435356725e-05,
+ "loss": 0.0,
+ "step": 223
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 1.826238774315995e-05,
+ "loss": 0.0,
+ "step": 224
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 1.8244790875571582e-05,
+ "loss": 0.0,
+ "step": 225
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 1.8227113921730336e-05,
+ "loss": 0.0,
+ "step": 226
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 1.8209357053342325e-05,
+ "loss": 0.0,
+ "step": 227
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 1.819152044288992e-05,
+ "loss": 0.0,
+ "step": 228
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 1.8173604263630066e-05,
+ "loss": 0.0,
+ "step": 229
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 1.8155608689592604e-05,
+ "loss": 0.0,
+ "step": 230
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 1.8137533895578585e-05,
+ "loss": 0.0,
+ "step": 231
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 1.811938005715857e-05,
+ "loss": 0.0,
+ "step": 232
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 1.8101147350670905e-05,
+ "loss": 0.0,
+ "step": 233
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 1.8082835953220055e-05,
+ "loss": 0.0,
+ "step": 234
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 1.806444604267483e-05,
+ "loss": 0.0,
+ "step": 235
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 1.8045977797666685e-05,
+ "loss": 0.0,
+ "step": 236
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 1.8027431397587993e-05,
+ "loss": 0.0,
+ "step": 237
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 1.8008807022590283e-05,
+ "loss": 0.0,
+ "step": 238
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 1.7990104853582494e-05,
+ "loss": 0.0,
+ "step": 239
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 1.7971325072229227e-05,
+ "loss": 0.0,
+ "step": 240
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 1.7952467860948975e-05,
+ "loss": 0.0,
+ "step": 241
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.7933533402912354e-05,
+ "loss": 0.0,
+ "step": 242
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.791452188204031e-05,
+ "loss": 0.0,
+ "step": 243
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.7895433483002356e-05,
+ "loss": 0.0,
+ "step": 244
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.7876268391214756e-05,
+ "loss": 0.0,
+ "step": 245
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.785702679283874e-05,
+ "loss": 0.0,
+ "step": 246
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.7837708874778683e-05,
+ "loss": 0.0,
+ "step": 247
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 1.78183148246803e-05,
+ "loss": 0.0,
+ "step": 248
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 1.7798844830928818e-05,
+ "loss": 0.0,
+ "step": 249
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 1.777929908264715e-05,
+ "loss": 0.0,
+ "step": 250
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 1.775967776969405e-05,
+ "loss": 0.0,
+ "step": 251
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 1.7739981082662275e-05,
+ "loss": 0.0,
+ "step": 252
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 1.772020921287674e-05,
+ "loss": 0.0,
+ "step": 253
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 1.7700362352392632e-05,
+ "loss": 0.0,
+ "step": 254
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 1.7680440693993586e-05,
+ "loss": 0.0,
+ "step": 255
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 1.766044443118978e-05,
+ "loss": 0.0,
+ "step": 256
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 1.7640373758216075e-05,
+ "loss": 0.0,
+ "step": 257
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 1.762022887003011e-05,
+ "loss": 0.0,
+ "step": 258
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 1.7600009962310417e-05,
+ "loss": 0.0,
+ "step": 259
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 1.757971723145453e-05,
+ "loss": 0.0,
+ "step": 260
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 1.7559350874577066e-05,
+ "loss": 0.0,
+ "step": 261
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 1.75389110895078e-05,
+ "loss": 0.0,
+ "step": 262
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 1.7518398074789776e-05,
+ "loss": 0.0,
+ "step": 263
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 1.7497812029677344e-05,
+ "loss": 0.0,
+ "step": 264
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 1.7477153154134244e-05,
+ "loss": 0.0,
+ "step": 265
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 1.7456421648831658e-05,
+ "loss": 0.0,
+ "step": 266
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 1.743561771514626e-05,
+ "loss": 0.0,
+ "step": 267
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 1.741474155515827e-05,
+ "loss": 0.0,
+ "step": 268
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 1.739379337164946e-05,
+ "loss": 0.0,
+ "step": 269
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 1.737277336810124e-05,
+ "loss": 0.0,
+ "step": 270
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 1.7351681748692622e-05,
+ "loss": 0.0,
+ "step": 271
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 1.7330518718298263e-05,
+ "loss": 0.0,
+ "step": 272
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7309284482486494e-05,
+ "loss": 0.0,
+ "step": 273
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7287979247517285e-05,
+ "loss": 0.0,
+ "step": 274
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7266603220340273e-05,
+ "loss": 0.0,
+ "step": 275
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7245156608592727e-05,
+ "loss": 0.0,
+ "step": 276
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7223639620597556e-05,
+ "loss": 0.0,
+ "step": 277
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7202052465361268e-05,
+ "loss": 0.0,
+ "step": 278
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 1.718039535257194e-05,
+ "loss": 0.0,
+ "step": 279
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 1.7158668492597186e-05,
+ "loss": 0.0,
+ "step": 280
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 1.7136872096482123e-05,
+ "loss": 0.0,
+ "step": 281
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 1.7115006375947304e-05,
+ "loss": 0.0,
+ "step": 282
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 1.7093071543386667e-05,
+ "loss": 0.0,
+ "step": 283
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 1.7071067811865477e-05,
+ "loss": 0.0,
+ "step": 284
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 1.7048995395118253e-05,
+ "loss": 0.0,
+ "step": 285
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 1.7026854507546694e-05,
+ "loss": 0.0,
+ "step": 286
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 1.7004645364217584e-05,
+ "loss": 0.0,
+ "step": 287
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 1.698236818086073e-05,
+ "loss": 0.0,
+ "step": 288
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 1.6960023173866834e-05,
+ "loss": 0.0,
+ "step": 289
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 1.693761056028542e-05,
+ "loss": 0.0,
+ "step": 290
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 1.6915130557822698e-05,
+ "loss": 0.0,
+ "step": 291
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 1.689258338483947e-05,
+ "loss": 0.0,
+ "step": 292
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 1.686996926034902e-05,
+ "loss": 0.0,
+ "step": 293
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.6847288404014937e-05,
+ "loss": 0.0,
+ "step": 294
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.682454103614904e-05,
+ "loss": 0.0,
+ "step": 295
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.6801727377709195e-05,
+ "loss": 0.0,
+ "step": 296
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.67788476502972e-05,
+ "loss": 0.0,
+ "step": 297
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.6755902076156606e-05,
+ "loss": 0.0,
+ "step": 298
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.6732890878170573e-05,
+ "loss": 0.0,
+ "step": 299
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 1.67098142798597e-05,
+ "loss": 0.0,
+ "step": 300
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 1.668667250537987e-05,
+ "loss": 0.0,
+ "step": 301
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 1.6663465779520042e-05,
+ "loss": 0.0,
+ "step": 302
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 1.6640194327700087e-05,
+ "loss": 0.0,
+ "step": 303
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 1.6616858375968596e-05,
+ "loss": 0.0,
+ "step": 304
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 1.659345815100069e-05,
+ "loss": 0.0,
+ "step": 305
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 1.6569993880095807e-05,
+ "loss": 0.0,
+ "step": 306
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 1.6546465791175498e-05,
+ "loss": 0.0,
+ "step": 307
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 1.6522874112781213e-05,
+ "loss": 0.0,
+ "step": 308
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 1.6499219074072087e-05,
+ "loss": 0.0,
+ "step": 309
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 1.6475500904822707e-05,
+ "loss": 0.0,
+ "step": 310
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 1.645171983542088e-05,
+ "loss": 0.0,
+ "step": 311
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 1.6427876096865394e-05,
+ "loss": 0.0,
+ "step": 312
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 1.640396992076379e-05,
+ "loss": 0.0,
+ "step": 313
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 1.6380001539330088e-05,
+ "loss": 0.0,
+ "step": 314
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 1.6355971185382547e-05,
+ "loss": 0.0,
+ "step": 315
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 1.6331879092341402e-05,
+ "loss": 0.0,
+ "step": 316
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 1.6307725494226586e-05,
+ "loss": 0.0,
+ "step": 317
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 1.6283510625655474e-05,
+ "loss": 0.0,
+ "step": 318
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 1.6259234721840595e-05,
+ "loss": 0.0,
+ "step": 319
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 1.6234898018587336e-05,
+ "loss": 0.0,
+ "step": 320
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 1.6210500752291682e-05,
+ "loss": 0.0,
+ "step": 321
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 1.6186043159937884e-05,
+ "loss": 0.0,
+ "step": 322
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 1.616152547909618e-05,
+ "loss": 0.0,
+ "step": 323
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 1.6136947947920477e-05,
+ "loss": 0.0,
+ "step": 324
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 1.611231080514605e-05,
+ "loss": 0.0,
+ "step": 325
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 1.608761429008721e-05,
+ "loss": 0.0,
+ "step": 326
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 1.606285864263498e-05,
+ "loss": 0.0,
+ "step": 327
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 1.6038044103254775e-05,
+ "loss": 0.0,
+ "step": 328
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 1.601317091298406e-05,
+ "loss": 0.0,
+ "step": 329
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 1.5988239313430004e-05,
+ "loss": 0.0,
+ "step": 330
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 1.5963249546767144e-05,
+ "loss": 0.0,
+ "step": 331
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 1.5938201855735017e-05,
+ "loss": 0.0,
+ "step": 332
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 1.5913096483635827e-05,
+ "loss": 0.0,
+ "step": 333
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 1.5887933674332048e-05,
+ "loss": 0.0,
+ "step": 334
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 1.5862713672244092e-05,
+ "loss": 0.0,
+ "step": 335
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 1.5837436722347902e-05,
+ "loss": 0.0,
+ "step": 336
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 1.5812103070172592e-05,
+ "loss": 0.0,
+ "step": 337
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 1.578671296179806e-05,
+ "loss": 0.0,
+ "step": 338
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 1.5761266643852587e-05,
+ "loss": 0.0,
+ "step": 339
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 1.573576436351046e-05,
+ "loss": 0.0,
+ "step": 340
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 1.5710206368489555e-05,
+ "loss": 0.0,
+ "step": 341
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 1.5684592907048925e-05,
+ "loss": 0.0,
+ "step": 342
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 1.5658924227986415e-05,
+ "loss": 0.0,
+ "step": 343
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 1.563320058063622e-05,
+ "loss": 0.0,
+ "step": 344
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 1.560742221486648e-05,
+ "loss": 0.0,
+ "step": 345
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 1.5581589381076843e-05,
+ "loss": 0.0,
+ "step": 346
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 1.5555702330196024e-05,
+ "loss": 0.0,
+ "step": 347
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 1.5529761313679396e-05,
+ "loss": 0.0,
+ "step": 348
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 1.5503766583506522e-05,
+ "loss": 0.0,
+ "step": 349
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 1.5477718392178716e-05,
+ "loss": 0.0,
+ "step": 350
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 1.545161699271659e-05,
+ "loss": 0.0,
+ "step": 351
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 1.5425462638657597e-05,
+ "loss": 0.0,
+ "step": 352
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 1.5399255584053568e-05,
+ "loss": 0.0,
+ "step": 353
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 1.5372996083468242e-05,
+ "loss": 0.0,
+ "step": 354
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 1.5346684391974792e-05,
+ "loss": 0.0,
+ "step": 355
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 1.5320320765153367e-05,
+ "loss": 0.0,
+ "step": 356
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 1.529390545908857e-05,
+ "loss": 0.0,
+ "step": 357
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 1.526743873036701e-05,
+ "loss": 0.0,
+ "step": 358
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 1.5240920836074777e-05,
+ "loss": 0.0,
+ "step": 359
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 1.5214352033794981e-05,
+ "loss": 0.0,
+ "step": 360
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 1.5187732581605217e-05,
+ "loss": 0.0,
+ "step": 361
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 1.5161062738075068e-05,
+ "loss": 0.0,
+ "step": 362
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 1.5134342762263606e-05,
+ "loss": 0.0,
+ "step": 363
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 1.5107572913716859e-05,
+ "loss": 0.0,
+ "step": 364
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 1.5080753452465296e-05,
+ "loss": 0.0,
+ "step": 365
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 1.505388463902131e-05,
+ "loss": 0.0,
+ "step": 366
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 1.502696673437667e-05,
+ "loss": 0.0,
+ "step": 367
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 1.5000000000000002e-05,
+ "loss": 0.0,
+ "step": 368
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 1.4972984697834238e-05,
+ "loss": 0.0,
+ "step": 369
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 1.4945921090294076e-05,
+ "loss": 0.0,
+ "step": 370
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 1.4918809440263435e-05,
+ "loss": 0.0,
+ "step": 371
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 1.4891650011092896e-05,
+ "loss": 0.0,
+ "step": 372
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 1.486444306659714e-05,
+ "loss": 0.0,
+ "step": 373
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 1.4837188871052399e-05,
+ "loss": 0.0,
+ "step": 374
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 1.4809887689193878e-05,
+ "loss": 0.0,
+ "step": 375
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 1.4782539786213184e-05,
+ "loss": 0.0,
+ "step": 376
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 1.4755145427755755e-05,
+ "loss": 0.0,
+ "step": 377
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 1.4727704879918272e-05,
+ "loss": 0.0,
+ "step": 378
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 1.4700218409246087e-05,
+ "loss": 0.0,
+ "step": 379
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 1.4672686282730622e-05,
+ "loss": 0.0,
+ "step": 380
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 1.4645108767806778e-05,
+ "loss": 0.0,
+ "step": 381
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 1.4617486132350343e-05,
+ "loss": 0.0,
+ "step": 382
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 1.4589818644675378e-05,
+ "loss": 0.0,
+ "step": 383
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 1.4562106573531632e-05,
+ "loss": 0.0,
+ "step": 384
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 1.4534350188101905e-05,
+ "loss": 0.0,
+ "step": 385
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 1.4506549757999456e-05,
+ "loss": 0.0,
+ "step": 386
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 1.4478705553265363e-05,
+ "loss": 0.0,
+ "step": 387
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 1.4450817844365924e-05,
+ "loss": 0.0,
+ "step": 388
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 1.4422886902190014e-05,
+ "loss": 0.0,
+ "step": 389
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 1.4394912998046451e-05,
+ "loss": 0.0,
+ "step": 390
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 1.436689640366137e-05,
+ "loss": 0.0,
+ "step": 391
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 1.4338837391175582e-05,
+ "loss": 0.0,
+ "step": 392
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 1.4310736233141926e-05,
+ "loss": 0.0,
+ "step": 393
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 1.4282593202522627e-05,
+ "loss": 0.0,
+ "step": 394
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 1.4254408572686642e-05,
+ "loss": 0.0,
+ "step": 395
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 1.4226182617406996e-05,
+ "loss": 0.0,
+ "step": 396
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 1.4197915610858143e-05,
+ "loss": 0.0,
+ "step": 397
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 1.4169607827613284e-05,
+ "loss": 0.0,
+ "step": 398
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 1.4141259542641706e-05,
+ "loss": 0.0,
+ "step": 399
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 1.4112871031306118e-05,
+ "loss": 0.0,
+ "step": 400
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 1.4084442569359964e-05,
+ "loss": 0.0,
+ "step": 401
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 1.4055974432944753e-05,
+ "loss": 0.0,
+ "step": 402
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 1.4027466898587375e-05,
+ "loss": 0.0,
+ "step": 403
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 1.3998920243197408e-05,
+ "loss": 0.0,
+ "step": 404
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 1.3970334744064451e-05,
+ "loss": 0.0,
+ "step": 405
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 1.3941710678855396e-05,
+ "loss": 0.0,
+ "step": 406
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 1.391304832561175e-05,
+ "loss": 0.0,
+ "step": 407
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 1.3884347962746949e-05,
+ "loss": 0.0,
+ "step": 408
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 1.3855609869043618e-05,
+ "loss": 0.0,
+ "step": 409
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 1.3826834323650899e-05,
+ "loss": 0.0,
+ "step": 410
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 1.3798021606081713e-05,
+ "loss": 0.0,
+ "step": 411
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 1.3769171996210053e-05,
+ "loss": 0.0,
+ "step": 412
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 1.3740285774268282e-05,
+ "loss": 0.0,
+ "step": 413
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 1.371136322084438e-05,
+ "loss": 0.0,
+ "step": 414
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 1.3682404616879246e-05,
+ "loss": 0.0,
+ "step": 415
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 1.3653410243663953e-05,
+ "loss": 0.0,
+ "step": 416
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 1.3624380382837017e-05,
+ "loss": 0.0,
+ "step": 417
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 1.3595315316381676e-05,
+ "loss": 0.0,
+ "step": 418
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 1.3566215326623131e-05,
+ "loss": 0.0,
+ "step": 419
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 1.3537080696225815e-05,
+ "loss": 0.0,
+ "step": 420
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 1.3507911708190646e-05,
+ "loss": 0.0,
+ "step": 421
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 1.3478708645852272e-05,
+ "loss": 0.0,
+ "step": 422
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 1.3449471792876333e-05,
+ "loss": 0.0,
+ "step": 423
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 1.342020143325669e-05,
+ "loss": 0.0,
+ "step": 424
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 1.3390897851312667e-05,
+ "loss": 0.0,
+ "step": 425
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 1.336156133168631e-05,
+ "loss": 0.0,
+ "step": 426
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 1.3332192159339595e-05,
+ "loss": 0.0,
+ "step": 427
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 1.3302790619551673e-05,
+ "loss": 0.0,
+ "step": 428
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 1.3273356997916106e-05,
+ "loss": 0.0,
+ "step": 429
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 1.3243891580338074e-05,
+ "loss": 0.0,
+ "step": 430
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 1.3214394653031616e-05,
+ "loss": 0.0,
+ "step": 431
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 1.3184866502516846e-05,
+ "loss": 0.0,
+ "step": 432
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 1.3155307415617156e-05,
+ "loss": 0.0,
+ "step": 433
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 1.3125717679456447e-05,
+ "loss": 0.0,
+ "step": 434
+ },
+ {
+ "epoch": 0.84,
+ "learning_rate": 1.309609758145633e-05,
+ "loss": 0.0,
+ "step": 435
+ },
+ {
+ "epoch": 0.84,
+ "learning_rate": 1.3066447409333345e-05,
+ "loss": 0.0,
+ "step": 436
+ },
+ {
+ "epoch": 0.84,
+ "learning_rate": 1.3036767451096148e-05,
+ "loss": 0.0,
+ "step": 437
+ },
+ {
+ "epoch": 0.84,
+ "learning_rate": 1.300705799504273e-05,
+ "loss": 0.0,
+ "step": 438
+ },
+ {
+ "epoch": 0.84,
+ "learning_rate": 1.2977319329757616e-05,
+ "loss": 0.0,
+ "step": 439
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 1.2947551744109044e-05,
+ "loss": 0.0,
+ "step": 440
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 1.2917755527246179e-05,
+ "loss": 0.0,
+ "step": 441
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 1.28879309685963e-05,
+ "loss": 0.0,
+ "step": 442
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 1.2858078357861979e-05,
+ "loss": 0.0,
+ "step": 443
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 1.2828197985018276e-05,
+ "loss": 0.0,
+ "step": 444
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 1.2798290140309924e-05,
+ "loss": 0.0,
+ "step": 445
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 1.2768355114248493e-05,
+ "loss": 0.0,
+ "step": 446
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 1.2738393197609602e-05,
+ "loss": 0.0,
+ "step": 447
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 1.2708404681430054e-05,
+ "loss": 0.0,
+ "step": 448
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 1.2678389857005033e-05,
+ "loss": 0.0,
+ "step": 449
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 1.2648349015885272e-05,
+ "loss": 0.0,
+ "step": 450
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 1.2618282449874221e-05,
+ "loss": 0.0,
+ "step": 451
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 1.2588190451025209e-05,
+ "loss": 0.0,
+ "step": 452
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 1.2558073311638604e-05,
+ "loss": 0.0,
+ "step": 453
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 1.2527931324258975e-05,
+ "loss": 0.0,
+ "step": 454
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 1.249776478167227e-05,
+ "loss": 0.0,
+ "step": 455
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 1.2467573976902936e-05,
+ "loss": 0.0,
+ "step": 456
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 1.2437359203211109e-05,
+ "loss": 0.0,
+ "step": 457
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 1.2407120754089733e-05,
+ "loss": 0.0,
+ "step": 458
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 1.2376858923261732e-05,
+ "loss": 0.0,
+ "step": 459
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 1.2346574004677154e-05,
+ "loss": 0.0,
+ "step": 460
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 1.2316266292510305e-05,
+ "loss": 0.0,
+ "step": 461
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 1.2285936081156897e-05,
+ "loss": 0.0,
+ "step": 462
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 1.2255583665231196e-05,
+ "loss": 0.0,
+ "step": 463
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 1.2225209339563144e-05,
+ "loss": 0.0,
+ "step": 464
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 1.2194813399195518e-05,
+ "loss": 0.0,
+ "step": 465
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 1.2164396139381029e-05,
+ "loss": 0.0,
+ "step": 466
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 1.2133957855579501e-05,
+ "loss": 0.0,
+ "step": 467
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 1.210349884345496e-05,
+ "loss": 0.0,
+ "step": 468
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 1.2073019398872778e-05,
+ "loss": 0.0,
+ "step": 469
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 1.2042519817896805e-05,
+ "loss": 0.0,
+ "step": 470
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 1.2012000396786485e-05,
+ "loss": 0.0,
+ "step": 471
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 1.1981461431993978e-05,
+ "loss": 0.0,
+ "step": 472
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 1.1950903220161286e-05,
+ "loss": 0.0,
+ "step": 473
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 1.1920326058117364e-05,
+ "loss": 0.0,
+ "step": 474
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 1.1889730242875243e-05,
+ "loss": 0.0,
+ "step": 475
+ },
+ {
+ "epoch": 0.92,
+ "learning_rate": 1.1859116071629148e-05,
+ "loss": 0.0,
+ "step": 476
+ },
+ {
+ "epoch": 0.92,
+ "learning_rate": 1.1828483841751597e-05,
+ "loss": 0.0,
+ "step": 477
+ },
+ {
+ "epoch": 0.92,
+ "learning_rate": 1.1797833850790527e-05,
+ "loss": 0.0,
+ "step": 478
+ },
+ {
+ "epoch": 0.92,
+ "learning_rate": 1.1767166396466404e-05,
+ "loss": 0.0,
+ "step": 479
+ },
+ {
+ "epoch": 0.92,
+ "learning_rate": 1.1736481776669307e-05,
+ "loss": 0.0,
+ "step": 480
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 1.1705780289456069e-05,
+ "loss": 0.0,
+ "step": 481
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 1.1675062233047365e-05,
+ "loss": 0.0,
+ "step": 482
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 1.1644327905824808e-05,
+ "loss": 0.0,
+ "step": 483
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 1.1613577606328068e-05,
+ "loss": 0.0,
+ "step": 484
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 1.1582811633251949e-05,
+ "loss": 0.0,
+ "step": 485
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 1.1552030285443516e-05,
+ "loss": 0.0,
+ "step": 486
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 1.1521233861899168e-05,
+ "loss": 0.0,
+ "step": 487
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 1.1490422661761744e-05,
+ "loss": 0.0,
+ "step": 488
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 1.1459596984317622e-05,
+ "loss": 0.0,
+ "step": 489
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 1.1428757128993801e-05,
+ "loss": 0.0,
+ "step": 490
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 1.1397903395354996e-05,
+ "loss": 0.0,
+ "step": 491
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 1.1367036083100735e-05,
+ "loss": 0.0,
+ "step": 492
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 1.1336155492062439e-05,
+ "loss": 0.0,
+ "step": 493
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 1.130526192220052e-05,
+ "loss": 0.0,
+ "step": 494
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 1.1274355673601446e-05,
+ "loss": 0.0,
+ "step": 495
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 1.1243437046474854e-05,
+ "loss": 0.0,
+ "step": 496
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 1.1212506341150615e-05,
+ "loss": 0.0,
+ "step": 497
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 1.118156385807593e-05,
+ "loss": 0.0,
+ "step": 498
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 1.1150609897812387e-05,
+ "loss": 0.0,
+ "step": 499
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 1.1119644761033079e-05,
+ "loss": 0.0,
+ "step": 500
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 1.1088668748519646e-05,
+ "loss": 0.0,
+ "step": 501
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 1.105768216115938e-05,
+ "loss": 0.0,
+ "step": 502
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 1.1026685299942286e-05,
+ "loss": 0.0,
+ "step": 503
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 1.0995678465958168e-05,
+ "loss": 0.0,
+ "step": 504
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 1.0964661960393703e-05,
+ "loss": 0.0,
+ "step": 505
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 1.0933636084529507e-05,
+ "loss": 0.0,
+ "step": 506
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 1.0902601139737225e-05,
+ "loss": 0.0,
+ "step": 507
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 1.0871557427476585e-05,
+ "loss": 0.0,
+ "step": 508
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 1.0840505249292477e-05,
+ "loss": 0.0,
+ "step": 509
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 1.0809444906812034e-05,
+ "loss": 0.0,
+ "step": 510
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 1.0778376701741688e-05,
+ "loss": 0.0,
+ "step": 511
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 1.0747300935864245e-05,
+ "loss": 0.0,
+ "step": 512
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 1.0716217911035952e-05,
+ "loss": 0.0,
+ "step": 513
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 1.0685127929183567e-05,
+ "loss": 0.0,
+ "step": 514
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 1.0654031292301432e-05,
+ "loss": 0.0,
+ "step": 515
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 1.0622928302448523e-05,
+ "loss": 0.0,
+ "step": 516
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 1.0591819261745528e-05,
+ "loss": 0.0,
+ "step": 517
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 1.0560704472371919e-05,
+ "loss": 0.0,
+ "step": 518
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 1.0529584236562995e-05,
+ "loss": 0.0,
+ "step": 519
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 1.0498458856606972e-05,
+ "loss": 0.0,
+ "step": 520
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 1.0467328634842024e-05,
+ "loss": 0.0,
+ "step": 521
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 1.0436193873653362e-05,
+ "loss": 0.0,
+ "step": 522
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 1.0405054875470287e-05,
+ "loss": 0.0,
+ "step": 523
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 1.037391194276326e-05,
+ "loss": 0.0,
+ "step": 524
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 1.0342765378040953e-05,
+ "loss": 0.0,
+ "step": 525
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 1.0311615483847333e-05,
+ "loss": 0.0,
+ "step": 526
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 1.028046256275869e-05,
+ "loss": 0.0,
+ "step": 527
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 1.0249306917380731e-05,
+ "loss": 0.0,
+ "step": 528
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 1.0218148850345613e-05,
+ "loss": 0.0,
+ "step": 529
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 1.0186988664309023e-05,
+ "loss": 0.0,
+ "step": 530
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 1.0155826661947232e-05,
+ "loss": 0.0,
+ "step": 531
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 1.0124663145954152e-05,
+ "loss": 0.0,
+ "step": 532
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 1.0093498419038394e-05,
+ "loss": 0.0,
+ "step": 533
+ },
+ {
+ "epoch": 1.03,
+ "learning_rate": 1.0062332783920337e-05,
+ "loss": 0.0,
+ "step": 534
+ },
+ {
+ "epoch": 1.03,
+ "learning_rate": 1.0031166543329179e-05,
+ "loss": 0.0,
+ "step": 535
+ },
+ {
+ "epoch": 1.03,
+ "learning_rate": 1e-05,
+ "loss": 0.0,
+ "step": 536
+ },
+ {
+ "epoch": 1.03,
+ "learning_rate": 9.968833456670824e-06,
+ "loss": 0.0,
+ "step": 537
+ },
+ {
+ "epoch": 1.03,
+ "learning_rate": 9.937667216079665e-06,
+ "loss": 0.0,
+ "step": 538
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 9.90650158096161e-06,
+ "loss": 0.0,
+ "step": 539
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 9.87533685404585e-06,
+ "loss": 0.0,
+ "step": 540
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 9.844173338052771e-06,
+ "loss": 0.0,
+ "step": 541
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 9.81301133569098e-06,
+ "loss": 0.0,
+ "step": 542
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 9.78185114965439e-06,
+ "loss": 0.0,
+ "step": 543
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 9.750693082619274e-06,
+ "loss": 0.0,
+ "step": 544
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 9.719537437241311e-06,
+ "loss": 0.0,
+ "step": 545
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 9.68838451615267e-06,
+ "loss": 0.0,
+ "step": 546
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 9.65723462195905e-06,
+ "loss": 0.0,
+ "step": 547
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 9.626088057236745e-06,
+ "loss": 0.0,
+ "step": 548
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 9.594945124529718e-06,
+ "loss": 0.0,
+ "step": 549
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 9.563806126346643e-06,
+ "loss": 0.0,
+ "step": 550
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 9.532671365157979e-06,
+ "loss": 0.0,
+ "step": 551
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 9.501541143393028e-06,
+ "loss": 0.0,
+ "step": 552
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 9.470415763437003e-06,
+ "loss": 0.0,
+ "step": 553
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 9.439295527628083e-06,
+ "loss": 0.0,
+ "step": 554
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 9.408180738254472e-06,
+ "loss": 0.0,
+ "step": 555
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 9.377071697551479e-06,
+ "loss": 0.0,
+ "step": 556
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 9.34596870769857e-06,
+ "loss": 0.0,
+ "step": 557
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 9.314872070816435e-06,
+ "loss": 0.0,
+ "step": 558
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 9.28378208896405e-06,
+ "loss": 0.0,
+ "step": 559
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 9.252699064135759e-06,
+ "loss": 0.0,
+ "step": 560
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 9.221623298258315e-06,
+ "loss": 0.0,
+ "step": 561
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 9.190555093187968e-06,
+ "loss": 0.0,
+ "step": 562
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 9.159494750707527e-06,
+ "loss": 0.0,
+ "step": 563
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 9.128442572523418e-06,
+ "loss": 0.0,
+ "step": 564
+ },
+ {
+ "epoch": 1.09,
+ "learning_rate": 9.097398860262777e-06,
+ "loss": 0.0,
+ "step": 565
+ },
+ {
+ "epoch": 1.09,
+ "learning_rate": 9.066363915470494e-06,
+ "loss": 0.0,
+ "step": 566
+ },
+ {
+ "epoch": 1.09,
+ "learning_rate": 9.0353380396063e-06,
+ "loss": 0.0,
+ "step": 567
+ },
+ {
+ "epoch": 1.09,
+ "learning_rate": 9.004321534041836e-06,
+ "loss": 0.0,
+ "step": 568
+ },
+ {
+ "epoch": 1.09,
+ "learning_rate": 8.973314700057717e-06,
+ "loss": 0.0,
+ "step": 569
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 8.942317838840625e-06,
+ "loss": 0.0,
+ "step": 570
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 8.911331251480357e-06,
+ "loss": 0.0,
+ "step": 571
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 8.880355238966923e-06,
+ "loss": 0.0,
+ "step": 572
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 8.849390102187615e-06,
+ "loss": 0.0,
+ "step": 573
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 8.818436141924072e-06,
+ "loss": 0.0,
+ "step": 574
+ },
+ {
+ "epoch": 1.11,
+ "learning_rate": 8.787493658849387e-06,
+ "loss": 0.0,
+ "step": 575
+ },
+ {
+ "epoch": 1.11,
+ "learning_rate": 8.756562953525151e-06,
+ "loss": 0.0,
+ "step": 576
+ },
+ {
+ "epoch": 1.11,
+ "learning_rate": 8.72564432639856e-06,
+ "loss": 0.0,
+ "step": 577
+ },
+ {
+ "epoch": 1.11,
+ "learning_rate": 8.694738077799487e-06,
+ "loss": 0.0,
+ "step": 578
+ },
+ {
+ "epoch": 1.11,
+ "learning_rate": 8.663844507937563e-06,
+ "loss": 0.0,
+ "step": 579
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 8.632963916899268e-06,
+ "loss": 0.0,
+ "step": 580
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 8.602096604645009e-06,
+ "loss": 0.0,
+ "step": 581
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 8.571242871006202e-06,
+ "loss": 0.0,
+ "step": 582
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 8.540403015682382e-06,
+ "loss": 0.0,
+ "step": 583
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 8.509577338238255e-06,
+ "loss": 0.0,
+ "step": 584
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 8.478766138100834e-06,
+ "loss": 0.0,
+ "step": 585
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 8.447969714556484e-06,
+ "loss": 0.0,
+ "step": 586
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 8.417188366748051e-06,
+ "loss": 0.0,
+ "step": 587
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 8.386422393671934e-06,
+ "loss": 0.0,
+ "step": 588
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 8.355672094175192e-06,
+ "loss": 0.0,
+ "step": 589
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 8.324937766952638e-06,
+ "loss": 0.0,
+ "step": 590
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 8.294219710543931e-06,
+ "loss": 0.0,
+ "step": 591
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 8.263518223330698e-06,
+ "loss": 0.0,
+ "step": 592
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 8.232833603533601e-06,
+ "loss": 0.0,
+ "step": 593
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 8.202166149209475e-06,
+ "loss": 0.0,
+ "step": 594
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 8.171516158248406e-06,
+ "loss": 0.0,
+ "step": 595
+ },
+ {
+ "epoch": 1.15,
+ "learning_rate": 8.140883928370855e-06,
+ "loss": 0.0,
+ "step": 596
+ },
+ {
+ "epoch": 1.15,
+ "learning_rate": 8.11026975712476e-06,
+ "loss": 0.0,
+ "step": 597
+ },
+ {
+ "epoch": 1.15,
+ "learning_rate": 8.079673941882639e-06,
+ "loss": 0.0,
+ "step": 598
+ },
+ {
+ "epoch": 1.15,
+ "learning_rate": 8.04909677983872e-06,
+ "loss": 0.0,
+ "step": 599
+ },
+ {
+ "epoch": 1.15,
+ "learning_rate": 8.018538568006027e-06,
+ "loss": 0.0,
+ "step": 600
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 7.987999603213518e-06,
+ "loss": 0.0,
+ "step": 601
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 7.957480182103198e-06,
+ "loss": 0.0,
+ "step": 602
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 7.926980601127225e-06,
+ "loss": 0.0,
+ "step": 603
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 7.896501156545044e-06,
+ "loss": 0.0,
+ "step": 604
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 7.866042144420502e-06,
+ "loss": 0.0,
+ "step": 605
+ },
+ {
+ "epoch": 1.17,
+ "learning_rate": 7.835603860618973e-06,
+ "loss": 0.0,
+ "step": 606
+ },
+ {
+ "epoch": 1.17,
+ "learning_rate": 7.805186600804489e-06,
+ "loss": 0.0,
+ "step": 607
+ },
+ {
+ "epoch": 1.17,
+ "learning_rate": 7.774790660436857e-06,
+ "loss": 0.0,
+ "step": 608
+ },
+ {
+ "epoch": 1.17,
+ "learning_rate": 7.744416334768809e-06,
+ "loss": 0.0,
+ "step": 609
+ },
+ {
+ "epoch": 1.17,
+ "learning_rate": 7.714063918843106e-06,
+ "loss": 0.0,
+ "step": 610
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 7.6837337074897e-06,
+ "loss": 0.0,
+ "step": 611
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 7.653425995322852e-06,
+ "loss": 0.0,
+ "step": 612
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 7.623141076738271e-06,
+ "loss": 0.0,
+ "step": 613
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 7.592879245910273e-06,
+ "loss": 0.0,
+ "step": 614
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 7.562640796788893e-06,
+ "loss": 0.0,
+ "step": 615
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 7.532426023097063e-06,
+ "loss": 0.0,
+ "step": 616
+ },
+ {
+ "epoch": 1.19,
+ "learning_rate": 7.50223521832773e-06,
+ "loss": 0.0,
+ "step": 617
+ },
+ {
+ "epoch": 1.19,
+ "learning_rate": 7.472068675741024e-06,
+ "loss": 0.0,
+ "step": 618
+ },
+ {
+ "epoch": 1.19,
+ "learning_rate": 7.4419266883614e-06,
+ "loss": 0.0,
+ "step": 619
+ },
+ {
+ "epoch": 1.19,
+ "learning_rate": 7.411809548974792e-06,
+ "loss": 0.0,
+ "step": 620
+ },
+ {
+ "epoch": 1.19,
+ "learning_rate": 7.38171755012578e-06,
+ "loss": 0.0,
+ "step": 621
+ },
+ {
+ "epoch": 1.2,
+ "learning_rate": 7.3516509841147276e-06,
+ "loss": 0.0,
+ "step": 622
+ },
+ {
+ "epoch": 1.2,
+ "learning_rate": 7.321610142994971e-06,
+ "loss": 0.0,
+ "step": 623
+ },
+ {
+ "epoch": 1.2,
+ "learning_rate": 7.291595318569951e-06,
+ "loss": 0.0,
+ "step": 624
+ },
+ {
+ "epoch": 1.2,
+ "learning_rate": 7.2616068023904e-06,
+ "loss": 0.0,
+ "step": 625
+ },
+ {
+ "epoch": 1.2,
+ "learning_rate": 7.2316448857515076e-06,
+ "loss": 0.0,
+ "step": 626
+ },
+ {
+ "epoch": 1.21,
+ "learning_rate": 7.201709859690081e-06,
+ "loss": 0.0,
+ "step": 627
+ },
+ {
+ "epoch": 1.21,
+ "learning_rate": 7.171802014981726e-06,
+ "loss": 0.0,
+ "step": 628
+ },
+ {
+ "epoch": 1.21,
+ "learning_rate": 7.141921642138025e-06,
+ "loss": 0.0,
+ "step": 629
+ },
+ {
+ "epoch": 1.21,
+ "learning_rate": 7.112069031403704e-06,
+ "loss": 0.0,
+ "step": 630
+ },
+ {
+ "epoch": 1.21,
+ "learning_rate": 7.082244472753823e-06,
+ "loss": 0.0,
+ "step": 631
+ },
+ {
+ "epoch": 1.22,
+ "learning_rate": 7.052448255890958e-06,
+ "loss": 0.0,
+ "step": 632
+ },
+ {
+ "epoch": 1.22,
+ "learning_rate": 7.022680670242387e-06,
+ "loss": 0.0,
+ "step": 633
+ },
+ {
+ "epoch": 1.22,
+ "learning_rate": 6.992942004957271e-06,
+ "loss": 0.0,
+ "step": 634
+ },
+ {
+ "epoch": 1.22,
+ "learning_rate": 6.963232548903853e-06,
+ "loss": 0.0,
+ "step": 635
+ },
+ {
+ "epoch": 1.22,
+ "learning_rate": 6.933552590666659e-06,
+ "loss": 0.0,
+ "step": 636
+ },
+ {
+ "epoch": 1.23,
+ "learning_rate": 6.903902418543671e-06,
+ "loss": 0.0,
+ "step": 637
+ },
+ {
+ "epoch": 1.23,
+ "learning_rate": 6.874282320543557e-06,
+ "loss": 0.0,
+ "step": 638
+ },
+ {
+ "epoch": 1.23,
+ "learning_rate": 6.844692584382848e-06,
+ "loss": 0.0,
+ "step": 639
+ },
+ {
+ "epoch": 1.23,
+ "learning_rate": 6.815133497483157e-06,
+ "loss": 0.0,
+ "step": 640
+ },
+ {
+ "epoch": 1.23,
+ "learning_rate": 6.785605346968387e-06,
+ "loss": 0.0,
+ "step": 641
+ },
+ {
+ "epoch": 1.23,
+ "learning_rate": 6.7561084196619306e-06,
+ "loss": 0.0,
+ "step": 642
+ },
+ {
+ "epoch": 1.24,
+ "learning_rate": 6.7266430020839e-06,
+ "loss": 0.0,
+ "step": 643
+ },
+ {
+ "epoch": 1.24,
+ "learning_rate": 6.697209380448333e-06,
+ "loss": 0.0,
+ "step": 644
+ },
+ {
+ "epoch": 1.24,
+ "learning_rate": 6.66780784066041e-06,
+ "loss": 0.0,
+ "step": 645
+ },
+ {
+ "epoch": 1.24,
+ "learning_rate": 6.638438668313695e-06,
+ "loss": 0.0,
+ "step": 646
+ },
+ {
+ "epoch": 1.24,
+ "learning_rate": 6.609102148687333e-06,
+ "loss": 0.0,
+ "step": 647
+ },
+ {
+ "epoch": 1.25,
+ "learning_rate": 6.579798566743314e-06,
+ "loss": 0.0,
+ "step": 648
+ },
+ {
+ "epoch": 1.25,
+ "learning_rate": 6.550528207123667e-06,
+ "loss": 0.0,
+ "step": 649
+ },
+ {
+ "epoch": 1.25,
+ "learning_rate": 6.521291354147727e-06,
+ "loss": 0.0,
+ "step": 650
+ },
+ {
+ "epoch": 1.25,
+ "learning_rate": 6.492088291809355e-06,
+ "loss": 0.0,
+ "step": 651
+ },
+ {
+ "epoch": 1.25,
+ "learning_rate": 6.462919303774186e-06,
+ "loss": 0.0,
+ "step": 652
+ },
+ {
+ "epoch": 1.26,
+ "learning_rate": 6.43378467337687e-06,
+ "loss": 0.0,
+ "step": 653
+ },
+ {
+ "epoch": 1.26,
+ "learning_rate": 6.404684683618325e-06,
+ "loss": 0.0,
+ "step": 654
+ },
+ {
+ "epoch": 1.26,
+ "learning_rate": 6.375619617162985e-06,
+ "loss": 0.0,
+ "step": 655
+ },
+ {
+ "epoch": 1.26,
+ "learning_rate": 6.34658975633605e-06,
+ "loss": 0.0,
+ "step": 656
+ },
+ {
+ "epoch": 1.26,
+ "learning_rate": 6.317595383120756e-06,
+ "loss": 0.0,
+ "step": 657
+ },
+ {
+ "epoch": 1.27,
+ "learning_rate": 6.288636779155621e-06,
+ "loss": 0.0,
+ "step": 658
+ },
+ {
+ "epoch": 1.27,
+ "learning_rate": 6.2597142257317185e-06,
+ "loss": 0.0,
+ "step": 659
+ },
+ {
+ "epoch": 1.27,
+ "learning_rate": 6.230828003789949e-06,
+ "loss": 0.0,
+ "step": 660
+ },
+ {
+ "epoch": 1.27,
+ "learning_rate": 6.201978393918291e-06,
+ "loss": 0.0,
+ "step": 661
+ },
+ {
+ "epoch": 1.27,
+ "learning_rate": 6.173165676349103e-06,
+ "loss": 0.0,
+ "step": 662
+ },
+ {
+ "epoch": 1.27,
+ "learning_rate": 6.144390130956384e-06,
+ "loss": 0.0,
+ "step": 663
+ },
+ {
+ "epoch": 1.28,
+ "learning_rate": 6.115652037253054e-06,
+ "loss": 0.0,
+ "step": 664
+ },
+ {
+ "epoch": 1.28,
+ "learning_rate": 6.086951674388252e-06,
+ "loss": 0.0,
+ "step": 665
+ },
+ {
+ "epoch": 1.28,
+ "learning_rate": 6.058289321144608e-06,
+ "loss": 0.0,
+ "step": 666
+ },
+ {
+ "epoch": 1.28,
+ "learning_rate": 6.02966525593555e-06,
+ "loss": 0.0,
+ "step": 667
+ },
+ {
+ "epoch": 1.28,
+ "learning_rate": 6.001079756802592e-06,
+ "loss": 0.0,
+ "step": 668
+ },
+ {
+ "epoch": 1.29,
+ "learning_rate": 5.97253310141263e-06,
+ "loss": 0.0,
+ "step": 669
+ },
+ {
+ "epoch": 1.29,
+ "learning_rate": 5.944025567055251e-06,
+ "loss": 0.0,
+ "step": 670
+ },
+ {
+ "epoch": 1.29,
+ "learning_rate": 5.91555743064004e-06,
+ "loss": 0.0,
+ "step": 671
+ },
+ {
+ "epoch": 1.29,
+ "learning_rate": 5.887128968693887e-06,
+ "loss": 0.0,
+ "step": 672
+ },
+ {
+ "epoch": 1.29,
+ "learning_rate": 5.858740457358298e-06,
+ "loss": 0.0,
+ "step": 673
+ },
+ {
+ "epoch": 1.3,
+ "learning_rate": 5.830392172386723e-06,
+ "loss": 0.0,
+ "step": 674
+ },
+ {
+ "epoch": 1.3,
+ "learning_rate": 5.802084389141862e-06,
+ "loss": 0.0,
+ "step": 675
+ },
+ {
+ "epoch": 1.3,
+ "learning_rate": 5.773817382593008e-06,
+ "loss": 0.0,
+ "step": 676
+ },
+ {
+ "epoch": 1.3,
+ "learning_rate": 5.745591427313365e-06,
+ "loss": 0.0,
+ "step": 677
+ },
+ {
+ "epoch": 1.3,
+ "learning_rate": 5.717406797477371e-06,
+ "loss": 0.0,
+ "step": 678
+ },
+ {
+ "epoch": 1.31,
+ "learning_rate": 5.689263766858072e-06,
+ "loss": 0.0,
+ "step": 679
+ },
+ {
+ "epoch": 1.31,
+ "learning_rate": 5.66116260882442e-06,
+ "loss": 0.0,
+ "step": 680
+ },
+ {
+ "epoch": 1.31,
+ "learning_rate": 5.633103596338631e-06,
+ "loss": 0.0,
+ "step": 681
+ },
+ {
+ "epoch": 1.31,
+ "learning_rate": 5.6050870019535496e-06,
+ "loss": 0.0,
+ "step": 682
+ },
+ {
+ "epoch": 1.31,
+ "learning_rate": 5.5771130978099896e-06,
+ "loss": 0.0,
+ "step": 683
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 5.549182155634076e-06,
+ "loss": 0.0,
+ "step": 684
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 5.521294446734637e-06,
+ "loss": 0.0,
+ "step": 685
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 5.493450242000546e-06,
+ "loss": 0.0,
+ "step": 686
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 5.465649811898098e-06,
+ "loss": 0.0,
+ "step": 687
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 5.43789342646837e-06,
+ "loss": 0.0,
+ "step": 688
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 5.410181355324622e-06,
+ "loss": 0.0,
+ "step": 689
+ },
+ {
+ "epoch": 1.33,
+ "learning_rate": 5.382513867649663e-06,
+ "loss": 0.0,
+ "step": 690
+ },
+ {
+ "epoch": 1.33,
+ "learning_rate": 5.354891232193225e-06,
+ "loss": 0.0,
+ "step": 691
+ },
+ {
+ "epoch": 1.33,
+ "learning_rate": 5.32731371726938e-06,
+ "loss": 0.0,
+ "step": 692
+ },
+ {
+ "epoch": 1.33,
+ "learning_rate": 5.299781590753916e-06,
+ "loss": 0.0,
+ "step": 693
+ },
+ {
+ "epoch": 1.33,
+ "learning_rate": 5.2722951200817315e-06,
+ "loss": 0.0,
+ "step": 694
+ },
+ {
+ "epoch": 1.34,
+ "learning_rate": 5.244854572244249e-06,
+ "loss": 0.0,
+ "step": 695
+ },
+ {
+ "epoch": 1.34,
+ "learning_rate": 5.217460213786822e-06,
+ "loss": 0.0,
+ "step": 696
+ },
+ {
+ "epoch": 1.34,
+ "learning_rate": 5.190112310806126e-06,
+ "loss": 0.0,
+ "step": 697
+ },
+ {
+ "epoch": 1.34,
+ "learning_rate": 5.1628111289476025e-06,
+ "loss": 0.0,
+ "step": 698
+ },
+ {
+ "epoch": 1.34,
+ "learning_rate": 5.135556933402862e-06,
+ "loss": 0.0,
+ "step": 699
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 5.108349988907111e-06,
+ "loss": 0.0,
+ "step": 700
+ }
+ ],
+ "logging_steps": 1.0,
+ "max_steps": 1040,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 2,
+ "save_steps": 100,
+ "total_flos": 3.4919286073144115e+17,
+ "train_batch_size": 16,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/training_args.bin b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..2ca4d892afdd453b26723a9aa94e432cb44cc953
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:63da3a2d0bf1dde543b68e123590fcd7c42f45ec7eb68e86c6eadd439321f902
+size 6264
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/zero_to_fp32.py b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/zero_to_fp32.py
new file mode 100644
index 0000000000000000000000000000000000000000..c98caae31534368be22b67fc4ae906836c992a8d
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-700/zero_to_fp32.py
@@ -0,0 +1,587 @@
+#!/usr/bin/env python
+
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+# This script extracts fp32 consolidated weights from ZeRO stage 1, 2 and 3 DeepSpeed checkpoints. It gets
+# copied into the top-level checkpoint dir, so the user can easily do the conversion at any point in
+# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+# application.
+#
+# example: python zero_to_fp32.py . pytorch_model.bin
+
+import argparse
+import torch
+import glob
+import math
+import os
+import re
+from collections import OrderedDict
+from dataclasses import dataclass
+
+# While this script doesn't use DeepSpeed to recover data, the checkpoints are pickled with
+# DeepSpeed data structures, so the deepspeed package has to be importable in the current Python environment.
+from deepspeed.utils import logger
+from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
+@dataclass
+class zero_model_state:
+ buffers: dict
+ param_shapes: dict
+ shared_params: list
+ ds_version: int
+ frozen_param_shapes: dict
+ frozen_param_fragments: dict
+
+
+debug = 0
+
+# load to cpu
+device = torch.device('cpu')
+
+
+def atoi(text):
+ return int(text) if text.isdigit() else text
+
+
+def natural_keys(text):
+ '''
+ alist.sort(key=natural_keys) sorts in human order
+ http://nedbatchelder.com/blog/200712/human_sorting.html
+ (See Toothy's implementation in the comments)
+ '''
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
+
+
+def get_model_state_file(checkpoint_dir, zero_stage):
+ if not os.path.isdir(checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
+
+ # there should be only one file
+ if zero_stage <= 2:
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
+ elif zero_stage == 3:
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
+
+ if not os.path.exists(file):
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
+
+ return file
+
+
+def get_checkpoint_files(checkpoint_dir, glob_pattern):
+ # XXX: need to test that this simple glob rule works for multi-node setup too
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
+
+ if len(ckpt_files) == 0:
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
+
+ return ckpt_files
+
+
+def get_optim_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
+
+
+def get_model_state_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
+
+
+def parse_model_states(files):
+ zero_model_states = []
+ for file in files:
+ state_dict = torch.load(file, map_location=device)
+
+ if BUFFER_NAMES not in state_dict:
+ raise ValueError(f"{file} is not a model state checkpoint")
+ buffer_names = state_dict[BUFFER_NAMES]
+ if debug:
+ print("Found buffers:", buffer_names)
+
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
+ param_shapes = state_dict[PARAM_SHAPES]
+
+ # collect parameters that are included in param_shapes
+ param_names = []
+ for s in param_shapes:
+ for name in s.keys():
+ param_names.append(name)
+
+ # update with frozen parameters
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
+ if frozen_param_shapes is not None:
+ if debug:
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
+ param_names += list(frozen_param_shapes.keys())
+
+ # handle shared params
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
+
+ ds_version = state_dict.get(DS_VERSION, None)
+
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
+
+ z_model_state = zero_model_state(buffers=buffers,
+ param_shapes=param_shapes,
+ shared_params=shared_params,
+ ds_version=ds_version,
+ frozen_param_shapes=frozen_param_shapes,
+ frozen_param_fragments=frozen_param_fragments)
+ zero_model_states.append(z_model_state)
+
+ return zero_model_states
+
+
+def parse_optim_states(files, ds_checkpoint_dir):
+
+ total_files = len(files)
+ state_dicts = []
+ for f in files:
+ state_dict = torch.load(f, map_location=device)
+ # immediately discard the two potentially huge optimizer states, since we only care about the fp32 master
+ # weights, and also handle the case where they were already removed by another helper script
+ state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
+ state_dicts.append(state_dict)
+
+ if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
+
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
+ # use the max of the partition_count to get the dp world_size.
+
+ if type(world_size) is list:
+ world_size = max(world_size)
+
+ if world_size != total_files:
+ raise ValueError(
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
+ )
+
+ # the groups are named differently in each stage
+ if zero_stage <= 2:
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
+ elif zero_stage == 3:
+ fp32_groups_key = FP32_FLAT_GROUPS
+ else:
+ raise ValueError(f"unknown zero stage {zero_stage}")
+
+ if zero_stage <= 2:
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
+ elif zero_stage == 3:
+ # if there is more than one param group, there will be multiple flattened tensors - one
+ # flattened tensor per group - for simplicity merge them into a single tensor
+ #
+ # XXX: could make the script more memory efficient for when there are multiple groups - it
+ # will require matching the sub-lists of param_shapes for each param group flattened tensor
+
+ fp32_flat_groups = [
+ torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
+ ]
+
+ return zero_stage, world_size, fp32_flat_groups
+
+
+def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir):
+ """
+ Returns fp32 state_dict reconstructed from ds checkpoint
+
+ Args:
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
+
+ """
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
+
+ optim_files = get_optim_files(ds_checkpoint_dir)
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
+
+ model_files = get_model_state_files(ds_checkpoint_dir)
+
+ zero_model_states = parse_model_states(model_files)
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
+
+ if zero_stage <= 2:
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states)
+ elif zero_stage == 3:
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states)
+
+
+def _zero2_merge_frozen_params(state_dict, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
+
+ if debug:
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ state_dict[name] = frozen_param_fragments[name]
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+
+ # Reconstruction protocol:
+ #
+ # XXX: document this
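+ # (in short, as implemented below: each rank saved one flat fp32 partition per param group;
+ # the partitions of every group are concatenated across ranks into a single flat tensor,
+ # which is then sliced param-by-param according to param_shapes, with zero2 alignment
+ # padding accounted for at the end of each group)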
+
+ if debug:
+ for i in range(world_size):
+ for j in range(len(fp32_flat_groups[0])):
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
+
+ # XXX: memory usage doubles here (zero2)
+ num_param_groups = len(fp32_flat_groups[0])
+ merged_single_partition_of_fp32_groups = []
+ for i in range(num_param_groups):
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
+ avail_numel = sum(
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
+
+ if debug:
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
+ # not asserting if there is a mismatch due to possible padding
+ print(f"Have {avail_numel} numels to process.")
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # out-of-core computing solution
+ total_numel = 0
+ total_params = 0
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
+ offset = 0
+ avail_numel = full_single_fp32_vector.numel()
+ for name, shape in shapes.items():
+
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+ total_params += 1
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
+ offset += unpartitioned_numel
+
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
+ # live optimizer object, so we are checking that the numbers are within the right range
+ align_to = 2 * world_size
+
+ def zero2_align(x):
+ return align_to * math.ceil(x / align_to)
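+ # e.g. with world_size=4, align_to == 8 and zero2_align(10) == 16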
+
+ if debug:
+ print(f"original offset={offset}, avail_numel={avail_numel}")
+
+ offset = zero2_align(offset)
+ avail_numel = zero2_align(avail_numel)
+
+ if debug:
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
+
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def zero3_partitioned_param_info(unpartitioned_numel, world_size):
+ remainder = unpartitioned_numel % world_size
+ padding_numel = (world_size - remainder) if remainder else 0
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
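+ # e.g. unpartitioned_numel=10, world_size=4 -> remainder=2, padding_numel=2, partitioned_numel=3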
+ return partitioned_numel, padding_numel
+
+
+def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ if debug:
+ for i in range(world_size):
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
+
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+ avail_numel = fp32_flat_groups[0].numel() * world_size
+ # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
+ # param, re-consolidating each param, while dealing with padding if any
+
+ # merge list of dicts, preserving order
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
+
+ if debug:
+ for i in range(world_size):
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
+
+ wanted_params = len(param_shapes)
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
+ # not asserting if there is a mismatch due to possible padding
+ avail_numel = fp32_flat_groups[0].numel() * world_size
+ print(f"Trainable params: Have {avail_numel} numels to process.")
+ print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # out-of-core computing solution
+ offset = 0
+ total_numel = 0
+ total_params = 0
+ for name, shape in param_shapes.items():
+
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+ total_params += 1
+
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ # XXX: memory usage doubles here
+ state_dict[name] = torch.cat(
+ tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
+ 0).narrow(0, 0, unpartitioned_numel).view(shape)
+ offset += partitioned_numel
+
+ offset *= world_size
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
+
+ _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None):
+ """
+ Convert a ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+ ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+ via a model hub.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder
+ - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will attempt to read the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+ Returns:
+ - pytorch ``state_dict``
+
+ Note: this approach may not work if your application doesn't have sufficient free CPU memory;
+ in that case you may need to use the offline approach via the ``zero_to_fp32.py`` script that is
+ saved with the checkpoint.
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+ # do the training and checkpoint saving
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
+ model = model.cpu() # move to cpu
+ model.load_state_dict(state_dict)
+ # submit to model hub or save the model to share with others
+
+ In this example the ``model`` will no longer be usable in the DeepSpeed context of the same
+ application, i.e. you will need to re-initialize the DeepSpeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the DeepSpeed magic from it.
+
+ If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+ """
+ if tag is None:
+ latest_path = os.path.join(checkpoint_dir, 'latest')
+ if os.path.isfile(latest_path):
+ with open(latest_path, 'r') as fd:
+ tag = fd.read().strip()
+ else:
+ raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+ ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+ if not os.path.isdir(ds_checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+ return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir)
+
+
+def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None):
+ """
+ Convert a ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+ loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder (one that contains the tag-folder, like ``global_step14``)
+ - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
+ - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will attempt to read the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+ """
+
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+ print(f"Saving fp32 state dict to {output_file}")
+ torch.save(state_dict, output_file)
+
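+ # Example (illustrative sketch, not part of the upstream script; the paths and tag are
+ # assumptions based on the checkpoint-800 folder added in this diff):
+ #
+ #   convert_zero_checkpoint_to_fp32_state_dict(
+ #       "llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800",
+ #       "llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/pytorch_model.bin",
+ #       tag="global_step800")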
+
+def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+ """
+ 1. Put the provided model on the CPU
+ 2. Convert a ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+ 3. Load it into the provided model
+
+ Args:
+ - ``model``: the model object to update
+ - ``checkpoint_dir``: path to the desired checkpoint folder (one that contains the tag-folder, like ``global_step14``)
+ - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will attempt to read the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+ Returns:
+ - ``model``: the modified model
+
+ Make sure you have plenty of CPU memory available before you call this function. If you don't
+ have enough, use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
+ conveniently placed for you in the checkpoint folder.
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+ model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+ # submit to model hub or save the model to share with others
+
+ Note that once this has been run, the ``model`` will no longer be usable in the DeepSpeed context
+ of the same application, i.e. you will need to re-initialize the DeepSpeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the DeepSpeed magic from it.
+
+ """
+ logger.info("Extracting fp32 weights")
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+ logger.info("Overwriting model with fp32 weights")
+ model = model.cpu()
+ model.load_state_dict(state_dict, strict=False)
+
+ return model
+
+
+if __name__ == "__main__":
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("checkpoint_dir",
+ type=str,
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+ parser.add_argument(
+ "output_file",
+ type=str,
+ help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
+ parser.add_argument("-t",
+ "--tag",
+ type=str,
+ default=None,
+ help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+ args = parser.parse_args()
+
+ debug = args.debug
+
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file, tag=args.tag)
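+ # Typical command-line invocation (illustrative; the checkpoint path and tag are assumptions
+ # based on the checkpoint-800 folder added in this diff):
+ #   python zero_to_fp32.py llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800 \
+ #       llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/pytorch_model.bin -t global_step800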
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/config.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..44e6d4e17930a42d0aa68dcd3790bd5f32ba4ec4
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/config.json
@@ -0,0 +1,73 @@
+{
+ "_name_or_path": "../pretrained-models/llava-v1.6-mistral-7b",
+ "architectures": [
+ "LlavaLlamaForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "freeze_mm_mlp_adapter": false,
+ "freeze_mm_vision_resampler": false,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "image_aspect_ratio": "pad",
+ "image_crop_resolution": 224,
+ "image_grid_pinpoints": [
+ [
+ 336,
+ 672
+ ],
+ [
+ 672,
+ 336
+ ],
+ [
+ 672,
+ 672
+ ],
+ [
+ 1008,
+ 336
+ ],
+ [
+ 336,
+ 1008
+ ]
+ ],
+ "image_split_resolution": 224,
+ "initializer_range": 0.02,
+ "intermediate_size": 14336,
+ "max_position_embeddings": 32768,
+ "mm_hidden_size": 1024,
+ "mm_patch_merge_type": "flat",
+ "mm_projector_lr": null,
+ "mm_projector_type": "mlp2x_gelu",
+ "mm_resampler_type": null,
+ "mm_use_im_patch_token": false,
+ "mm_use_im_start_end": false,
+ "mm_vision_select_feature": "patch",
+ "mm_vision_select_layer": -2,
+ "mm_vision_tower": "openai/clip-vit-large-patch14-336",
+ "mm_vision_tower_lr": 2e-06,
+ "model_type": "llava_llama",
+ "num_attention_heads": 32,
+ "num_hidden_layers": 32,
+ "num_key_value_heads": 8,
+ "pretraining_tp": 1,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "tokenizer_model_max_length": 2048,
+ "tokenizer_padding_side": "right",
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.37.2",
+ "tune_mm_mlp_adapter": false,
+ "tune_mm_vision_resampler": false,
+ "unfreeze_mm_vision_tower": true,
+ "use_cache": false,
+ "use_mm_proj": true,
+ "vocab_size": 32000
+}
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/generation_config.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..69b7806611a4865cd48c3e991dbd7d8312e0c5d3
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/generation_config.json
@@ -0,0 +1,6 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "transformers_version": "4.37.2"
+}
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/latest b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/latest
new file mode 100644
index 0000000000000000000000000000000000000000..57729c0be88118cbd582c8c68b4149cee821f0b4
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/latest
@@ -0,0 +1 @@
+global_step800
\ No newline at end of file
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/model.safetensors.index.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..927da5be7e6e3ec29d3a967a09ba6a421d7a2191
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/model.safetensors.index.json
@@ -0,0 +1,694 @@
+{
+ "metadata": {
+ "total_size": 15132446720
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00004-of-00004.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
+ "model.image_newline": "model-00001-of-00004.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.30.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.mm_projector.0.bias": "model-00003-of-00004.safetensors",
+ "model.mm_projector.0.weight": "model-00003-of-00004.safetensors",
+ "model.mm_projector.2.bias": "model-00003-of-00004.safetensors",
+ "model.mm_projector.2.weight": "model-00003-of-00004.safetensors",
+ "model.norm.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.embeddings.class_embedding": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.embeddings.patch_embedding.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.embeddings.position_embedding.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.post_layernorm.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.post_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.pre_layrnorm.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.pre_layrnorm.weight": "model-00003-of-00004.safetensors"
+ }
+}
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/rng_state_0.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..b346349ce12dd5a17d4b91ed2a5722bb52550950
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ad8a35afd8967cbb748405387e44426e43ad127028e826eddc9b67d2ca873c85
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/rng_state_1.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..68f3c6994456cb8d0592a5375d99503c8924b1c4
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f338ce80d7c441076bfc8c53b84067a0181f5a14e80c13d5acb8150b659f4d73
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/rng_state_2.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..be044f6ceeed587d30e80c2f72d5aa19fdc9947b
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c9fbc9fa428939be10b46779f0eb5cd833e0da426b1cbdee77b3a55b6952235b
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/rng_state_3.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..fc825249656a9b858782542bd3f4386250f1dfe0
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac55dba0b79d5fa4699d239da2f966d52040d576d31234ac8d4632e6956481bc
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/rng_state_4.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/rng_state_4.pth
new file mode 100644
index 0000000000000000000000000000000000000000..d30f52a44be563c152ae09db6ae934da6da0d3ed
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/rng_state_4.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af2d0c015100768ffa23faf3b6c2d54ea89eb045603e30e55cd211e06ff34972
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/rng_state_5.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/rng_state_5.pth
new file mode 100644
index 0000000000000000000000000000000000000000..c8715d27ab23ae545d58039cf949cc44ecc1da5e
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/rng_state_5.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c60a1b40608e34bc801c8231f97b81c53b5290dfaed1b9cd0ccbeca29574a991
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/rng_state_6.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/rng_state_6.pth
new file mode 100644
index 0000000000000000000000000000000000000000..1ed791b6ef76eadf0b0c55a5733411771e2ae027
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/rng_state_6.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3ad6a142a403eb9aafc4a3a9a856bca648fe31fd22d796867baca31fb13656aa
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/rng_state_7.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/rng_state_7.pth
new file mode 100644
index 0000000000000000000000000000000000000000..800c3bbbc5edf7db01a8316069d439c5fb8d8c30
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/rng_state_7.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:38bc23a138cc800b22881742c0f3f9a71731a9a7111c6058a0077e6274d21773
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/scheduler.pt b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..522b5e783e41d1fdf39b58aa3a02b807ebc04907
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6f69074328a85426f71aa98590d9b0521e0c534d0df8d3a277a9e97971cc12fd
+size 1064
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/special_tokens_map.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..14761dcf1466dc232bd41de9c21d4c617b15755e
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/tokenizer.model b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..8b443ef19c2a19acc3ac64fb9c3db4a72921dff6
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+size 493443
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/tokenizer_config.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..23dcf70e8cfc9b16310b6ff3dc98fdbc5adc11f8
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/tokenizer_config.json
@@ -0,0 +1,44 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [],
+ "bos_token": "",
+ "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "legacy": true,
+ "model_max_length": 2048,
+ "pad_token": "",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "",
+ "use_default_system_prompt": false
+}
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/trainer_state.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..b519216429a5688abf3b0d5b01ea48ed0577e73d
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/trainer_state.json
@@ -0,0 +1,4821 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.5384615384615383,
+ "eval_steps": 500,
+ "global_step": 800,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.0,
+ "learning_rate": 6.25e-07,
+ "loss": 3.7473,
+ "step": 1
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 1.25e-06,
+ "loss": 0.0,
+ "step": 2
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 1.8750000000000003e-06,
+ "loss": 0.0,
+ "step": 3
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 2.5e-06,
+ "loss": 0.0,
+ "step": 4
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 3.125e-06,
+ "loss": 0.0,
+ "step": 5
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 3.7500000000000005e-06,
+ "loss": 0.0,
+ "step": 6
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 4.3750000000000005e-06,
+ "loss": 0.0,
+ "step": 7
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 5e-06,
+ "loss": 0.0,
+ "step": 8
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 5.625e-06,
+ "loss": 0.0,
+ "step": 9
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 6.25e-06,
+ "loss": 0.0,
+ "step": 10
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 6.875e-06,
+ "loss": 0.0,
+ "step": 11
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 7.500000000000001e-06,
+ "loss": 0.0,
+ "step": 12
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 8.125000000000001e-06,
+ "loss": 0.0,
+ "step": 13
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 8.750000000000001e-06,
+ "loss": 0.0,
+ "step": 14
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 9.375000000000001e-06,
+ "loss": 0.0,
+ "step": 15
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 1e-05,
+ "loss": 0.0,
+ "step": 16
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 1.0625e-05,
+ "loss": 0.0,
+ "step": 17
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 1.125e-05,
+ "loss": 0.0,
+ "step": 18
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.1875e-05,
+ "loss": 0.0,
+ "step": 19
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.25e-05,
+ "loss": 0.0,
+ "step": 20
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.3125e-05,
+ "loss": 0.0,
+ "step": 21
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.375e-05,
+ "loss": 0.0,
+ "step": 22
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.4375e-05,
+ "loss": 0.0,
+ "step": 23
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.5000000000000002e-05,
+ "loss": 0.0,
+ "step": 24
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.5625e-05,
+ "loss": 0.0,
+ "step": 25
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.6250000000000002e-05,
+ "loss": 0.0,
+ "step": 26
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.6875e-05,
+ "loss": 0.0,
+ "step": 27
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.7500000000000002e-05,
+ "loss": 0.0,
+ "step": 28
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.8125e-05,
+ "loss": 0.0,
+ "step": 29
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.8750000000000002e-05,
+ "loss": 0.0,
+ "step": 30
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.9375e-05,
+ "loss": 0.0,
+ "step": 31
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 2e-05,
+ "loss": 0.0,
+ "step": 32
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.9999951432210905e-05,
+ "loss": 0.0,
+ "step": 33
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.9999805729315383e-05,
+ "loss": 0.0,
+ "step": 34
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.999956289272873e-05,
+ "loss": 0.0,
+ "step": 35
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.999922292480975e-05,
+ "loss": 0.0,
+ "step": 36
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.9998785828860744e-05,
+ "loss": 0.0,
+ "step": 37
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.9998251609127465e-05,
+ "loss": 0.0,
+ "step": 38
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.999762027079909e-05,
+ "loss": 0.0,
+ "step": 39
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9996891820008165e-05,
+ "loss": 0.0,
+ "step": 40
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9996066263830533e-05,
+ "loss": 0.0,
+ "step": 41
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9995143610285275e-05,
+ "loss": 0.0,
+ "step": 42
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9994123868334655e-05,
+ "loss": 0.0,
+ "step": 43
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9993007047883988e-05,
+ "loss": 0.0,
+ "step": 44
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.999179315978157e-05,
+ "loss": 0.0,
+ "step": 45
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.999048221581858e-05,
+ "loss": 0.0,
+ "step": 46
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.9989074228728942e-05,
+ "loss": 0.0,
+ "step": 47
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.9987569212189224e-05,
+ "loss": 0.0,
+ "step": 48
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.9985967180818493e-05,
+ "loss": 0.0,
+ "step": 49
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.998426815017817e-05,
+ "loss": 0.0,
+ "step": 50
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.998247213677188e-05,
+ "loss": 0.0,
+ "step": 51
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.9980579158045322e-05,
+ "loss": 0.0,
+ "step": 52
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.9978589232386036e-05,
+ "loss": 0.0,
+ "step": 53
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.997650237912329e-05,
+ "loss": 0.0,
+ "step": 54
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.997431861852785e-05,
+ "loss": 0.0,
+ "step": 55
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.9972037971811802e-05,
+ "loss": 0.0,
+ "step": 56
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.996966046112834e-05,
+ "loss": 0.0,
+ "step": 57
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.996718610957155e-05,
+ "loss": 0.0,
+ "step": 58
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.9964614941176194e-05,
+ "loss": 0.0,
+ "step": 59
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.9961946980917457e-05,
+ "loss": 0.0,
+ "step": 60
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.995918225471073e-05,
+ "loss": 0.0,
+ "step": 61
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.9956320789411338e-05,
+ "loss": 0.0,
+ "step": 62
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.9953362612814294e-05,
+ "loss": 0.0,
+ "step": 63
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.9950307753654016e-05,
+ "loss": 0.0,
+ "step": 64
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.994715624160405e-05,
+ "loss": 0.0,
+ "step": 65
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.99439081072768e-05,
+ "loss": 0.0,
+ "step": 66
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.9940563382223196e-05,
+ "loss": 0.0,
+ "step": 67
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.9937122098932428e-05,
+ "loss": 0.0,
+ "step": 68
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.9933584290831593e-05,
+ "loss": 0.0,
+ "step": 69
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.9929949992285397e-05,
+ "loss": 0.0,
+ "step": 70
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.992621923859581e-05,
+ "loss": 0.0,
+ "step": 71
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.9922392066001724e-05,
+ "loss": 0.0,
+ "step": 72
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.99184685116786e-05,
+ "loss": 0.0,
+ "step": 73
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.9914448613738107e-05,
+ "loss": 0.0,
+ "step": 74
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.991033241122776e-05,
+ "loss": 0.0,
+ "step": 75
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9906119944130527e-05,
+ "loss": 0.0,
+ "step": 76
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9901811253364458e-05,
+ "loss": 0.0,
+ "step": 77
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9897406380782262e-05,
+ "loss": 0.0,
+ "step": 78
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.989290536917093e-05,
+ "loss": 0.0,
+ "step": 79
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9888308262251286e-05,
+ "loss": 0.0,
+ "step": 80
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.988361510467761e-05,
+ "loss": 0.0,
+ "step": 81
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.9878825942037147e-05,
+ "loss": 0.0,
+ "step": 82
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.9873940820849714e-05,
+ "loss": 0.0,
+ "step": 83
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.9868959788567213e-05,
+ "loss": 0.0,
+ "step": 84
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.9863882893573188e-05,
+ "loss": 0.0,
+ "step": 85
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.985871018518236e-05,
+ "loss": 0.0,
+ "step": 86
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.9853441713640123e-05,
+ "loss": 0.0,
+ "step": 87
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.9848077530122083e-05,
+ "loss": 0.0,
+ "step": 88
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.9842617686733546e-05,
+ "loss": 0.0,
+ "step": 89
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.9837062236509013e-05,
+ "loss": 0.0,
+ "step": 90
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.983141123341168e-05,
+ "loss": 0.0,
+ "step": 91
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.9825664732332886e-05,
+ "loss": 0.0,
+ "step": 92
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.9819822789091597e-05,
+ "loss": 0.0,
+ "step": 93
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.981388546043388e-05,
+ "loss": 0.0,
+ "step": 94
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.9807852804032306e-05,
+ "loss": 0.0,
+ "step": 95
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.9801724878485438e-05,
+ "loss": 0.0,
+ "step": 96
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.979550174331724e-05,
+ "loss": 0.0,
+ "step": 97
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.9789183458976485e-05,
+ "loss": 0.0,
+ "step": 98
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.97827700868362e-05,
+ "loss": 0.0,
+ "step": 99
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.977626168919305e-05,
+ "loss": 0.0,
+ "step": 100
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.9769658329266718e-05,
+ "loss": 0.0,
+ "step": 101
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9762960071199334e-05,
+ "loss": 0.0,
+ "step": 102
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9756166980054812e-05,
+ "loss": 0.0,
+ "step": 103
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9749279121818235e-05,
+ "loss": 0.0,
+ "step": 104
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9742296563395218e-05,
+ "loss": 0.0,
+ "step": 105
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9735219372611232e-05,
+ "loss": 0.0,
+ "step": 106
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.9728047618210995e-05,
+ "loss": 0.0,
+ "step": 107
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.9720781369857747e-05,
+ "loss": 0.0,
+ "step": 108
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.9713420698132614e-05,
+ "loss": 0.0,
+ "step": 109
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.970596567453391e-05,
+ "loss": 0.0,
+ "step": 110
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.9698416371476434e-05,
+ "loss": 0.0,
+ "step": 111
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.969077286229078e-05,
+ "loss": 0.0,
+ "step": 112
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.9683035221222617e-05,
+ "loss": 0.0,
+ "step": 113
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.9675203523431964e-05,
+ "loss": 0.0,
+ "step": 114
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.9667277844992476e-05,
+ "loss": 0.0,
+ "step": 115
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.9659258262890683e-05,
+ "loss": 0.0,
+ "step": 116
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.9651144855025265e-05,
+ "loss": 0.0,
+ "step": 117
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.964293770020628e-05,
+ "loss": 0.0,
+ "step": 118
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.9634636878154393e-05,
+ "loss": 0.0,
+ "step": 119
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.962624246950012e-05,
+ "loss": 0.0,
+ "step": 120
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.9617754555783045e-05,
+ "loss": 0.0,
+ "step": 121
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.9609173219450998e-05,
+ "loss": 0.0,
+ "step": 122
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.960049854385929e-05,
+ "loss": 0.0,
+ "step": 123
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.9591730613269878e-05,
+ "loss": 0.0,
+ "step": 124
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.9582869512850576e-05,
+ "loss": 0.0,
+ "step": 125
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.957391532867418e-05,
+ "loss": 0.0,
+ "step": 126
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.956486814771769e-05,
+ "loss": 0.0,
+ "step": 127
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.955572805786141e-05,
+ "loss": 0.0,
+ "step": 128
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.9546495147888134e-05,
+ "loss": 0.0,
+ "step": 129
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.953716950748227e-05,
+ "loss": 0.0,
+ "step": 130
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.9527751227228964e-05,
+ "loss": 0.0,
+ "step": 131
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.9518240398613226e-05,
+ "loss": 0.0,
+ "step": 132
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.9508637114019037e-05,
+ "loss": 0.0,
+ "step": 133
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.9498941466728462e-05,
+ "loss": 0.0,
+ "step": 134
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.9489153550920726e-05,
+ "loss": 0.0,
+ "step": 135
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.947927346167132e-05,
+ "loss": 0.0,
+ "step": 136
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.946930129495106e-05,
+ "loss": 0.0,
+ "step": 137
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.945923714762516e-05,
+ "loss": 0.0,
+ "step": 138
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.9449081117452304e-05,
+ "loss": 0.0,
+ "step": 139
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.9438833303083677e-05,
+ "loss": 0.0,
+ "step": 140
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.9428493804062013e-05,
+ "loss": 0.0,
+ "step": 141
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.9418062720820636e-05,
+ "loss": 0.0,
+ "step": 142
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9407540154682473e-05,
+ "loss": 0.0,
+ "step": 143
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9396926207859085e-05,
+ "loss": 0.0,
+ "step": 144
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9386220983449652e-05,
+ "loss": 0.0,
+ "step": 145
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9375424585439994e-05,
+ "loss": 0.0,
+ "step": 146
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9364537118701542e-05,
+ "loss": 0.0,
+ "step": 147
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.935355868899034e-05,
+ "loss": 0.0,
+ "step": 148
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.9342489402945997e-05,
+ "loss": 0.0,
+ "step": 149
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.9331329368090664e-05,
+ "loss": 0.0,
+ "step": 150
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.932007869282799e-05,
+ "loss": 0.0,
+ "step": 151
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.9308737486442045e-05,
+ "loss": 0.0,
+ "step": 152
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.9297305859096305e-05,
+ "loss": 0.0,
+ "step": 153
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.9285783921832537e-05,
+ "loss": 0.0,
+ "step": 154
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.927417178656975e-05,
+ "loss": 0.0,
+ "step": 155
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.926246956610309e-05,
+ "loss": 0.0,
+ "step": 156
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.9250677374102752e-05,
+ "loss": 0.0,
+ "step": 157
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.9238795325112867e-05,
+ "loss": 0.0,
+ "step": 158
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.9226823534550418e-05,
+ "loss": 0.0,
+ "step": 159
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.921476211870408e-05,
+ "loss": 0.0,
+ "step": 160
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.9202611194733107e-05,
+ "loss": 0.0,
+ "step": 161
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.9190370880666206e-05,
+ "loss": 0.0,
+ "step": 162
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.9178041295400383e-05,
+ "loss": 0.0,
+ "step": 163
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.9165622558699763e-05,
+ "loss": 0.0,
+ "step": 164
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.9153114791194475e-05,
+ "loss": 0.0,
+ "step": 165
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.9140518114379433e-05,
+ "loss": 0.0,
+ "step": 166
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.912783265061319e-05,
+ "loss": 0.0,
+ "step": 167
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.9115058523116734e-05,
+ "loss": 0.0,
+ "step": 168
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.9102195855972287e-05,
+ "loss": 0.0,
+ "step": 169
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.908924477412211e-05,
+ "loss": 0.0,
+ "step": 170
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.9076205403367287e-05,
+ "loss": 0.0,
+ "step": 171
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.9063077870366504e-05,
+ "loss": 0.0,
+ "step": 172
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.90498623026348e-05,
+ "loss": 0.0,
+ "step": 173
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.903655882854237e-05,
+ "loss": 0.0,
+ "step": 174
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.9023167577313267e-05,
+ "loss": 0.0,
+ "step": 175
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.900968867902419e-05,
+ "loss": 0.0,
+ "step": 176
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.8996122264603202e-05,
+ "loss": 0.0,
+ "step": 177
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.898246846582844e-05,
+ "loss": 0.0,
+ "step": 178
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.8968727415326885e-05,
+ "loss": 0.0,
+ "step": 179
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.895489924657301e-05,
+ "loss": 0.0,
+ "step": 180
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.894098409388754e-05,
+ "loss": 0.0,
+ "step": 181
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.8926982092436117e-05,
+ "loss": 0.0,
+ "step": 182
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.8912893378227984e-05,
+ "loss": 0.0,
+ "step": 183
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.8898718088114688e-05,
+ "loss": 0.0,
+ "step": 184
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.8884456359788725e-05,
+ "loss": 0.0,
+ "step": 185
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.887010833178222e-05,
+ "loss": 0.0,
+ "step": 186
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.8855674143465567e-05,
+ "loss": 0.0,
+ "step": 187
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.8841153935046098e-05,
+ "loss": 0.0,
+ "step": 188
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.8826547847566692e-05,
+ "loss": 0.0,
+ "step": 189
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.8811856022904423e-05,
+ "loss": 0.0,
+ "step": 190
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.8797078603769184e-05,
+ "loss": 0.0,
+ "step": 191
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.8782215733702286e-05,
+ "loss": 0.0,
+ "step": 192
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.876726755707508e-05,
+ "loss": 0.0,
+ "step": 193
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.8752234219087538e-05,
+ "loss": 0.0,
+ "step": 194
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8737115865766865e-05,
+ "loss": 0.0,
+ "step": 195
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8721912643966055e-05,
+ "loss": 0.0,
+ "step": 196
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8706624701362485e-05,
+ "loss": 0.0,
+ "step": 197
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8691252186456465e-05,
+ "loss": 0.0,
+ "step": 198
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8675795248569816e-05,
+ "loss": 0.0,
+ "step": 199
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.866025403784439e-05,
+ "loss": 0.0,
+ "step": 200
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 1.8644628705240636e-05,
+ "loss": 0.0,
+ "step": 201
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 1.862891940253613e-05,
+ "loss": 0.0,
+ "step": 202
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 1.8613126282324092e-05,
+ "loss": 0.0,
+ "step": 203
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 1.8597249498011906e-05,
+ "loss": 0.0,
+ "step": 204
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 1.858128920381963e-05,
+ "loss": 0.0,
+ "step": 205
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 1.8565245554778516e-05,
+ "loss": 0.0,
+ "step": 206
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 1.854911870672947e-05,
+ "loss": 0.0,
+ "step": 207
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 1.8532908816321557e-05,
+ "loss": 0.0,
+ "step": 208
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 1.8516616041010495e-05,
+ "loss": 0.0,
+ "step": 209
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 1.8500240539057093e-05,
+ "loss": 0.0,
+ "step": 210
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 1.848378246952574e-05,
+ "loss": 0.0,
+ "step": 211
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 1.8467241992282842e-05,
+ "loss": 0.0,
+ "step": 212
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 1.8450619267995283e-05,
+ "loss": 0.0,
+ "step": 213
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 1.843391445812886e-05,
+ "loss": 0.0,
+ "step": 214
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 1.84171277249467e-05,
+ "loss": 0.0,
+ "step": 215
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.8400259231507716e-05,
+ "loss": 0.0,
+ "step": 216
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.8383309141664992e-05,
+ "loss": 0.0,
+ "step": 217
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.83662776200642e-05,
+ "loss": 0.0,
+ "step": 218
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.8349164832142015e-05,
+ "loss": 0.0,
+ "step": 219
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.833197094412449e-05,
+ "loss": 0.0,
+ "step": 220
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.8314696123025456e-05,
+ "loss": 0.0,
+ "step": 221
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 1.8297340536644877e-05,
+ "loss": 0.0,
+ "step": 222
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 1.827990435356725e-05,
+ "loss": 0.0,
+ "step": 223
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 1.826238774315995e-05,
+ "loss": 0.0,
+ "step": 224
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 1.8244790875571582e-05,
+ "loss": 0.0,
+ "step": 225
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 1.8227113921730336e-05,
+ "loss": 0.0,
+ "step": 226
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 1.8209357053342325e-05,
+ "loss": 0.0,
+ "step": 227
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 1.819152044288992e-05,
+ "loss": 0.0,
+ "step": 228
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 1.8173604263630066e-05,
+ "loss": 0.0,
+ "step": 229
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 1.8155608689592604e-05,
+ "loss": 0.0,
+ "step": 230
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 1.8137533895578585e-05,
+ "loss": 0.0,
+ "step": 231
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 1.811938005715857e-05,
+ "loss": 0.0,
+ "step": 232
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 1.8101147350670905e-05,
+ "loss": 0.0,
+ "step": 233
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 1.8082835953220055e-05,
+ "loss": 0.0,
+ "step": 234
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 1.806444604267483e-05,
+ "loss": 0.0,
+ "step": 235
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 1.8045977797666685e-05,
+ "loss": 0.0,
+ "step": 236
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 1.8027431397587993e-05,
+ "loss": 0.0,
+ "step": 237
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 1.8008807022590283e-05,
+ "loss": 0.0,
+ "step": 238
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 1.7990104853582494e-05,
+ "loss": 0.0,
+ "step": 239
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 1.7971325072229227e-05,
+ "loss": 0.0,
+ "step": 240
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 1.7952467860948975e-05,
+ "loss": 0.0,
+ "step": 241
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.7933533402912354e-05,
+ "loss": 0.0,
+ "step": 242
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.791452188204031e-05,
+ "loss": 0.0,
+ "step": 243
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.7895433483002356e-05,
+ "loss": 0.0,
+ "step": 244
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.7876268391214756e-05,
+ "loss": 0.0,
+ "step": 245
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.785702679283874e-05,
+ "loss": 0.0,
+ "step": 246
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.7837708874778683e-05,
+ "loss": 0.0,
+ "step": 247
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 1.78183148246803e-05,
+ "loss": 0.0,
+ "step": 248
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 1.7798844830928818e-05,
+ "loss": 0.0,
+ "step": 249
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 1.777929908264715e-05,
+ "loss": 0.0,
+ "step": 250
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 1.775967776969405e-05,
+ "loss": 0.0,
+ "step": 251
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 1.7739981082662275e-05,
+ "loss": 0.0,
+ "step": 252
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 1.772020921287674e-05,
+ "loss": 0.0,
+ "step": 253
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 1.7700362352392632e-05,
+ "loss": 0.0,
+ "step": 254
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 1.7680440693993586e-05,
+ "loss": 0.0,
+ "step": 255
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 1.766044443118978e-05,
+ "loss": 0.0,
+ "step": 256
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 1.7640373758216075e-05,
+ "loss": 0.0,
+ "step": 257
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 1.762022887003011e-05,
+ "loss": 0.0,
+ "step": 258
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 1.7600009962310417e-05,
+ "loss": 0.0,
+ "step": 259
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 1.757971723145453e-05,
+ "loss": 0.0,
+ "step": 260
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 1.7559350874577066e-05,
+ "loss": 0.0,
+ "step": 261
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 1.75389110895078e-05,
+ "loss": 0.0,
+ "step": 262
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 1.7518398074789776e-05,
+ "loss": 0.0,
+ "step": 263
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 1.7497812029677344e-05,
+ "loss": 0.0,
+ "step": 264
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 1.7477153154134244e-05,
+ "loss": 0.0,
+ "step": 265
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 1.7456421648831658e-05,
+ "loss": 0.0,
+ "step": 266
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 1.743561771514626e-05,
+ "loss": 0.0,
+ "step": 267
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 1.741474155515827e-05,
+ "loss": 0.0,
+ "step": 268
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 1.739379337164946e-05,
+ "loss": 0.0,
+ "step": 269
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 1.737277336810124e-05,
+ "loss": 0.0,
+ "step": 270
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 1.7351681748692622e-05,
+ "loss": 0.0,
+ "step": 271
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 1.7330518718298263e-05,
+ "loss": 0.0,
+ "step": 272
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7309284482486494e-05,
+ "loss": 0.0,
+ "step": 273
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7287979247517285e-05,
+ "loss": 0.0,
+ "step": 274
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7266603220340273e-05,
+ "loss": 0.0,
+ "step": 275
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7245156608592727e-05,
+ "loss": 0.0,
+ "step": 276
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7223639620597556e-05,
+ "loss": 0.0,
+ "step": 277
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7202052465361268e-05,
+ "loss": 0.0,
+ "step": 278
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 1.718039535257194e-05,
+ "loss": 0.0,
+ "step": 279
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 1.7158668492597186e-05,
+ "loss": 0.0,
+ "step": 280
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 1.7136872096482123e-05,
+ "loss": 0.0,
+ "step": 281
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 1.7115006375947304e-05,
+ "loss": 0.0,
+ "step": 282
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 1.7093071543386667e-05,
+ "loss": 0.0,
+ "step": 283
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 1.7071067811865477e-05,
+ "loss": 0.0,
+ "step": 284
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 1.7048995395118253e-05,
+ "loss": 0.0,
+ "step": 285
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 1.7026854507546694e-05,
+ "loss": 0.0,
+ "step": 286
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 1.7004645364217584e-05,
+ "loss": 0.0,
+ "step": 287
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 1.698236818086073e-05,
+ "loss": 0.0,
+ "step": 288
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 1.6960023173866834e-05,
+ "loss": 0.0,
+ "step": 289
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 1.693761056028542e-05,
+ "loss": 0.0,
+ "step": 290
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 1.6915130557822698e-05,
+ "loss": 0.0,
+ "step": 291
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 1.689258338483947e-05,
+ "loss": 0.0,
+ "step": 292
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 1.686996926034902e-05,
+ "loss": 0.0,
+ "step": 293
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.6847288404014937e-05,
+ "loss": 0.0,
+ "step": 294
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.682454103614904e-05,
+ "loss": 0.0,
+ "step": 295
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.6801727377709195e-05,
+ "loss": 0.0,
+ "step": 296
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.67788476502972e-05,
+ "loss": 0.0,
+ "step": 297
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.6755902076156606e-05,
+ "loss": 0.0,
+ "step": 298
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.6732890878170573e-05,
+ "loss": 0.0,
+ "step": 299
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 1.67098142798597e-05,
+ "loss": 0.0,
+ "step": 300
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 1.668667250537987e-05,
+ "loss": 0.0,
+ "step": 301
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 1.6663465779520042e-05,
+ "loss": 0.0,
+ "step": 302
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 1.6640194327700087e-05,
+ "loss": 0.0,
+ "step": 303
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 1.6616858375968596e-05,
+ "loss": 0.0,
+ "step": 304
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 1.659345815100069e-05,
+ "loss": 0.0,
+ "step": 305
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 1.6569993880095807e-05,
+ "loss": 0.0,
+ "step": 306
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 1.6546465791175498e-05,
+ "loss": 0.0,
+ "step": 307
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 1.6522874112781213e-05,
+ "loss": 0.0,
+ "step": 308
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 1.6499219074072087e-05,
+ "loss": 0.0,
+ "step": 309
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 1.6475500904822707e-05,
+ "loss": 0.0,
+ "step": 310
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 1.645171983542088e-05,
+ "loss": 0.0,
+ "step": 311
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 1.6427876096865394e-05,
+ "loss": 0.0,
+ "step": 312
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 1.640396992076379e-05,
+ "loss": 0.0,
+ "step": 313
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 1.6380001539330088e-05,
+ "loss": 0.0,
+ "step": 314
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 1.6355971185382547e-05,
+ "loss": 0.0,
+ "step": 315
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 1.6331879092341402e-05,
+ "loss": 0.0,
+ "step": 316
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 1.6307725494226586e-05,
+ "loss": 0.0,
+ "step": 317
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 1.6283510625655474e-05,
+ "loss": 0.0,
+ "step": 318
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 1.6259234721840595e-05,
+ "loss": 0.0,
+ "step": 319
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 1.6234898018587336e-05,
+ "loss": 0.0,
+ "step": 320
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 1.6210500752291682e-05,
+ "loss": 0.0,
+ "step": 321
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 1.6186043159937884e-05,
+ "loss": 0.0,
+ "step": 322
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 1.616152547909618e-05,
+ "loss": 0.0,
+ "step": 323
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 1.6136947947920477e-05,
+ "loss": 0.0,
+ "step": 324
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 1.611231080514605e-05,
+ "loss": 0.0,
+ "step": 325
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 1.608761429008721e-05,
+ "loss": 0.0,
+ "step": 326
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 1.606285864263498e-05,
+ "loss": 0.0,
+ "step": 327
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 1.6038044103254775e-05,
+ "loss": 0.0,
+ "step": 328
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 1.601317091298406e-05,
+ "loss": 0.0,
+ "step": 329
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 1.5988239313430004e-05,
+ "loss": 0.0,
+ "step": 330
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 1.5963249546767144e-05,
+ "loss": 0.0,
+ "step": 331
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 1.5938201855735017e-05,
+ "loss": 0.0,
+ "step": 332
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 1.5913096483635827e-05,
+ "loss": 0.0,
+ "step": 333
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 1.5887933674332048e-05,
+ "loss": 0.0,
+ "step": 334
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 1.5862713672244092e-05,
+ "loss": 0.0,
+ "step": 335
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 1.5837436722347902e-05,
+ "loss": 0.0,
+ "step": 336
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 1.5812103070172592e-05,
+ "loss": 0.0,
+ "step": 337
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 1.578671296179806e-05,
+ "loss": 0.0,
+ "step": 338
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 1.5761266643852587e-05,
+ "loss": 0.0,
+ "step": 339
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 1.573576436351046e-05,
+ "loss": 0.0,
+ "step": 340
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 1.5710206368489555e-05,
+ "loss": 0.0,
+ "step": 341
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 1.5684592907048925e-05,
+ "loss": 0.0,
+ "step": 342
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 1.5658924227986415e-05,
+ "loss": 0.0,
+ "step": 343
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 1.563320058063622e-05,
+ "loss": 0.0,
+ "step": 344
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 1.560742221486648e-05,
+ "loss": 0.0,
+ "step": 345
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 1.5581589381076843e-05,
+ "loss": 0.0,
+ "step": 346
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 1.5555702330196024e-05,
+ "loss": 0.0,
+ "step": 347
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 1.5529761313679396e-05,
+ "loss": 0.0,
+ "step": 348
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 1.5503766583506522e-05,
+ "loss": 0.0,
+ "step": 349
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 1.5477718392178716e-05,
+ "loss": 0.0,
+ "step": 350
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 1.545161699271659e-05,
+ "loss": 0.0,
+ "step": 351
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 1.5425462638657597e-05,
+ "loss": 0.0,
+ "step": 352
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 1.5399255584053568e-05,
+ "loss": 0.0,
+ "step": 353
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 1.5372996083468242e-05,
+ "loss": 0.0,
+ "step": 354
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 1.5346684391974792e-05,
+ "loss": 0.0,
+ "step": 355
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 1.5320320765153367e-05,
+ "loss": 0.0,
+ "step": 356
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 1.529390545908857e-05,
+ "loss": 0.0,
+ "step": 357
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 1.526743873036701e-05,
+ "loss": 0.0,
+ "step": 358
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 1.5240920836074777e-05,
+ "loss": 0.0,
+ "step": 359
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 1.5214352033794981e-05,
+ "loss": 0.0,
+ "step": 360
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 1.5187732581605217e-05,
+ "loss": 0.0,
+ "step": 361
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 1.5161062738075068e-05,
+ "loss": 0.0,
+ "step": 362
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 1.5134342762263606e-05,
+ "loss": 0.0,
+ "step": 363
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 1.5107572913716859e-05,
+ "loss": 0.0,
+ "step": 364
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 1.5080753452465296e-05,
+ "loss": 0.0,
+ "step": 365
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 1.505388463902131e-05,
+ "loss": 0.0,
+ "step": 366
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 1.502696673437667e-05,
+ "loss": 0.0,
+ "step": 367
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 1.5000000000000002e-05,
+ "loss": 0.0,
+ "step": 368
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 1.4972984697834238e-05,
+ "loss": 0.0,
+ "step": 369
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 1.4945921090294076e-05,
+ "loss": 0.0,
+ "step": 370
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 1.4918809440263435e-05,
+ "loss": 0.0,
+ "step": 371
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 1.4891650011092896e-05,
+ "loss": 0.0,
+ "step": 372
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 1.486444306659714e-05,
+ "loss": 0.0,
+ "step": 373
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 1.4837188871052399e-05,
+ "loss": 0.0,
+ "step": 374
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 1.4809887689193878e-05,
+ "loss": 0.0,
+ "step": 375
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 1.4782539786213184e-05,
+ "loss": 0.0,
+ "step": 376
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 1.4755145427755755e-05,
+ "loss": 0.0,
+ "step": 377
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 1.4727704879918272e-05,
+ "loss": 0.0,
+ "step": 378
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 1.4700218409246087e-05,
+ "loss": 0.0,
+ "step": 379
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 1.4672686282730622e-05,
+ "loss": 0.0,
+ "step": 380
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 1.4645108767806778e-05,
+ "loss": 0.0,
+ "step": 381
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 1.4617486132350343e-05,
+ "loss": 0.0,
+ "step": 382
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 1.4589818644675378e-05,
+ "loss": 0.0,
+ "step": 383
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 1.4562106573531632e-05,
+ "loss": 0.0,
+ "step": 384
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 1.4534350188101905e-05,
+ "loss": 0.0,
+ "step": 385
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 1.4506549757999456e-05,
+ "loss": 0.0,
+ "step": 386
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 1.4478705553265363e-05,
+ "loss": 0.0,
+ "step": 387
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 1.4450817844365924e-05,
+ "loss": 0.0,
+ "step": 388
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 1.4422886902190014e-05,
+ "loss": 0.0,
+ "step": 389
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 1.4394912998046451e-05,
+ "loss": 0.0,
+ "step": 390
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 1.436689640366137e-05,
+ "loss": 0.0,
+ "step": 391
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 1.4338837391175582e-05,
+ "loss": 0.0,
+ "step": 392
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 1.4310736233141926e-05,
+ "loss": 0.0,
+ "step": 393
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 1.4282593202522627e-05,
+ "loss": 0.0,
+ "step": 394
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 1.4254408572686642e-05,
+ "loss": 0.0,
+ "step": 395
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 1.4226182617406996e-05,
+ "loss": 0.0,
+ "step": 396
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 1.4197915610858143e-05,
+ "loss": 0.0,
+ "step": 397
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 1.4169607827613284e-05,
+ "loss": 0.0,
+ "step": 398
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 1.4141259542641706e-05,
+ "loss": 0.0,
+ "step": 399
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 1.4112871031306118e-05,
+ "loss": 0.0,
+ "step": 400
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 1.4084442569359964e-05,
+ "loss": 0.0,
+ "step": 401
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 1.4055974432944753e-05,
+ "loss": 0.0,
+ "step": 402
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 1.4027466898587375e-05,
+ "loss": 0.0,
+ "step": 403
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 1.3998920243197408e-05,
+ "loss": 0.0,
+ "step": 404
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 1.3970334744064451e-05,
+ "loss": 0.0,
+ "step": 405
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 1.3941710678855396e-05,
+ "loss": 0.0,
+ "step": 406
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 1.391304832561175e-05,
+ "loss": 0.0,
+ "step": 407
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 1.3884347962746949e-05,
+ "loss": 0.0,
+ "step": 408
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 1.3855609869043618e-05,
+ "loss": 0.0,
+ "step": 409
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 1.3826834323650899e-05,
+ "loss": 0.0,
+ "step": 410
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 1.3798021606081713e-05,
+ "loss": 0.0,
+ "step": 411
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 1.3769171996210053e-05,
+ "loss": 0.0,
+ "step": 412
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 1.3740285774268282e-05,
+ "loss": 0.0,
+ "step": 413
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 1.371136322084438e-05,
+ "loss": 0.0,
+ "step": 414
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 1.3682404616879246e-05,
+ "loss": 0.0,
+ "step": 415
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 1.3653410243663953e-05,
+ "loss": 0.0,
+ "step": 416
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 1.3624380382837017e-05,
+ "loss": 0.0,
+ "step": 417
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 1.3595315316381676e-05,
+ "loss": 0.0,
+ "step": 418
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 1.3566215326623131e-05,
+ "loss": 0.0,
+ "step": 419
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 1.3537080696225815e-05,
+ "loss": 0.0,
+ "step": 420
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 1.3507911708190646e-05,
+ "loss": 0.0,
+ "step": 421
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 1.3478708645852272e-05,
+ "loss": 0.0,
+ "step": 422
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 1.3449471792876333e-05,
+ "loss": 0.0,
+ "step": 423
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 1.342020143325669e-05,
+ "loss": 0.0,
+ "step": 424
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 1.3390897851312667e-05,
+ "loss": 0.0,
+ "step": 425
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 1.336156133168631e-05,
+ "loss": 0.0,
+ "step": 426
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 1.3332192159339595e-05,
+ "loss": 0.0,
+ "step": 427
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 1.3302790619551673e-05,
+ "loss": 0.0,
+ "step": 428
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 1.3273356997916106e-05,
+ "loss": 0.0,
+ "step": 429
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 1.3243891580338074e-05,
+ "loss": 0.0,
+ "step": 430
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 1.3214394653031616e-05,
+ "loss": 0.0,
+ "step": 431
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 1.3184866502516846e-05,
+ "loss": 0.0,
+ "step": 432
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 1.3155307415617156e-05,
+ "loss": 0.0,
+ "step": 433
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 1.3125717679456447e-05,
+ "loss": 0.0,
+ "step": 434
+ },
+ {
+ "epoch": 0.84,
+ "learning_rate": 1.309609758145633e-05,
+ "loss": 0.0,
+ "step": 435
+ },
+ {
+ "epoch": 0.84,
+ "learning_rate": 1.3066447409333345e-05,
+ "loss": 0.0,
+ "step": 436
+ },
+ {
+ "epoch": 0.84,
+ "learning_rate": 1.3036767451096148e-05,
+ "loss": 0.0,
+ "step": 437
+ },
+ {
+ "epoch": 0.84,
+ "learning_rate": 1.300705799504273e-05,
+ "loss": 0.0,
+ "step": 438
+ },
+ {
+ "epoch": 0.84,
+ "learning_rate": 1.2977319329757616e-05,
+ "loss": 0.0,
+ "step": 439
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 1.2947551744109044e-05,
+ "loss": 0.0,
+ "step": 440
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 1.2917755527246179e-05,
+ "loss": 0.0,
+ "step": 441
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 1.28879309685963e-05,
+ "loss": 0.0,
+ "step": 442
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 1.2858078357861979e-05,
+ "loss": 0.0,
+ "step": 443
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 1.2828197985018276e-05,
+ "loss": 0.0,
+ "step": 444
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 1.2798290140309924e-05,
+ "loss": 0.0,
+ "step": 445
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 1.2768355114248493e-05,
+ "loss": 0.0,
+ "step": 446
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 1.2738393197609602e-05,
+ "loss": 0.0,
+ "step": 447
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 1.2708404681430054e-05,
+ "loss": 0.0,
+ "step": 448
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 1.2678389857005033e-05,
+ "loss": 0.0,
+ "step": 449
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 1.2648349015885272e-05,
+ "loss": 0.0,
+ "step": 450
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 1.2618282449874221e-05,
+ "loss": 0.0,
+ "step": 451
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 1.2588190451025209e-05,
+ "loss": 0.0,
+ "step": 452
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 1.2558073311638604e-05,
+ "loss": 0.0,
+ "step": 453
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 1.2527931324258975e-05,
+ "loss": 0.0,
+ "step": 454
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 1.249776478167227e-05,
+ "loss": 0.0,
+ "step": 455
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 1.2467573976902936e-05,
+ "loss": 0.0,
+ "step": 456
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 1.2437359203211109e-05,
+ "loss": 0.0,
+ "step": 457
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 1.2407120754089733e-05,
+ "loss": 0.0,
+ "step": 458
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 1.2376858923261732e-05,
+ "loss": 0.0,
+ "step": 459
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 1.2346574004677154e-05,
+ "loss": 0.0,
+ "step": 460
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 1.2316266292510305e-05,
+ "loss": 0.0,
+ "step": 461
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 1.2285936081156897e-05,
+ "loss": 0.0,
+ "step": 462
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 1.2255583665231196e-05,
+ "loss": 0.0,
+ "step": 463
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 1.2225209339563144e-05,
+ "loss": 0.0,
+ "step": 464
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 1.2194813399195518e-05,
+ "loss": 0.0,
+ "step": 465
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 1.2164396139381029e-05,
+ "loss": 0.0,
+ "step": 466
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 1.2133957855579501e-05,
+ "loss": 0.0,
+ "step": 467
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 1.210349884345496e-05,
+ "loss": 0.0,
+ "step": 468
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 1.2073019398872778e-05,
+ "loss": 0.0,
+ "step": 469
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 1.2042519817896805e-05,
+ "loss": 0.0,
+ "step": 470
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 1.2012000396786485e-05,
+ "loss": 0.0,
+ "step": 471
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 1.1981461431993978e-05,
+ "loss": 0.0,
+ "step": 472
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 1.1950903220161286e-05,
+ "loss": 0.0,
+ "step": 473
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 1.1920326058117364e-05,
+ "loss": 0.0,
+ "step": 474
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 1.1889730242875243e-05,
+ "loss": 0.0,
+ "step": 475
+ },
+ {
+ "epoch": 0.92,
+ "learning_rate": 1.1859116071629148e-05,
+ "loss": 0.0,
+ "step": 476
+ },
+ {
+ "epoch": 0.92,
+ "learning_rate": 1.1828483841751597e-05,
+ "loss": 0.0,
+ "step": 477
+ },
+ {
+ "epoch": 0.92,
+ "learning_rate": 1.1797833850790527e-05,
+ "loss": 0.0,
+ "step": 478
+ },
+ {
+ "epoch": 0.92,
+ "learning_rate": 1.1767166396466404e-05,
+ "loss": 0.0,
+ "step": 479
+ },
+ {
+ "epoch": 0.92,
+ "learning_rate": 1.1736481776669307e-05,
+ "loss": 0.0,
+ "step": 480
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 1.1705780289456069e-05,
+ "loss": 0.0,
+ "step": 481
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 1.1675062233047365e-05,
+ "loss": 0.0,
+ "step": 482
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 1.1644327905824808e-05,
+ "loss": 0.0,
+ "step": 483
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 1.1613577606328068e-05,
+ "loss": 0.0,
+ "step": 484
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 1.1582811633251949e-05,
+ "loss": 0.0,
+ "step": 485
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 1.1552030285443516e-05,
+ "loss": 0.0,
+ "step": 486
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 1.1521233861899168e-05,
+ "loss": 0.0,
+ "step": 487
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 1.1490422661761744e-05,
+ "loss": 0.0,
+ "step": 488
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 1.1459596984317622e-05,
+ "loss": 0.0,
+ "step": 489
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 1.1428757128993801e-05,
+ "loss": 0.0,
+ "step": 490
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 1.1397903395354996e-05,
+ "loss": 0.0,
+ "step": 491
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 1.1367036083100735e-05,
+ "loss": 0.0,
+ "step": 492
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 1.1336155492062439e-05,
+ "loss": 0.0,
+ "step": 493
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 1.130526192220052e-05,
+ "loss": 0.0,
+ "step": 494
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 1.1274355673601446e-05,
+ "loss": 0.0,
+ "step": 495
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 1.1243437046474854e-05,
+ "loss": 0.0,
+ "step": 496
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 1.1212506341150615e-05,
+ "loss": 0.0,
+ "step": 497
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 1.118156385807593e-05,
+ "loss": 0.0,
+ "step": 498
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 1.1150609897812387e-05,
+ "loss": 0.0,
+ "step": 499
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 1.1119644761033079e-05,
+ "loss": 0.0,
+ "step": 500
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 1.1088668748519646e-05,
+ "loss": 0.0,
+ "step": 501
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 1.105768216115938e-05,
+ "loss": 0.0,
+ "step": 502
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 1.1026685299942286e-05,
+ "loss": 0.0,
+ "step": 503
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 1.0995678465958168e-05,
+ "loss": 0.0,
+ "step": 504
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 1.0964661960393703e-05,
+ "loss": 0.0,
+ "step": 505
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 1.0933636084529507e-05,
+ "loss": 0.0,
+ "step": 506
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 1.0902601139737225e-05,
+ "loss": 0.0,
+ "step": 507
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 1.0871557427476585e-05,
+ "loss": 0.0,
+ "step": 508
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 1.0840505249292477e-05,
+ "loss": 0.0,
+ "step": 509
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 1.0809444906812034e-05,
+ "loss": 0.0,
+ "step": 510
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 1.0778376701741688e-05,
+ "loss": 0.0,
+ "step": 511
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 1.0747300935864245e-05,
+ "loss": 0.0,
+ "step": 512
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 1.0716217911035952e-05,
+ "loss": 0.0,
+ "step": 513
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 1.0685127929183567e-05,
+ "loss": 0.0,
+ "step": 514
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 1.0654031292301432e-05,
+ "loss": 0.0,
+ "step": 515
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 1.0622928302448523e-05,
+ "loss": 0.0,
+ "step": 516
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 1.0591819261745528e-05,
+ "loss": 0.0,
+ "step": 517
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 1.0560704472371919e-05,
+ "loss": 0.0,
+ "step": 518
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 1.0529584236562995e-05,
+ "loss": 0.0,
+ "step": 519
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 1.0498458856606972e-05,
+ "loss": 0.0,
+ "step": 520
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 1.0467328634842024e-05,
+ "loss": 0.0,
+ "step": 521
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 1.0436193873653362e-05,
+ "loss": 0.0,
+ "step": 522
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 1.0405054875470287e-05,
+ "loss": 0.0,
+ "step": 523
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 1.037391194276326e-05,
+ "loss": 0.0,
+ "step": 524
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 1.0342765378040953e-05,
+ "loss": 0.0,
+ "step": 525
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 1.0311615483847333e-05,
+ "loss": 0.0,
+ "step": 526
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 1.028046256275869e-05,
+ "loss": 0.0,
+ "step": 527
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 1.0249306917380731e-05,
+ "loss": 0.0,
+ "step": 528
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 1.0218148850345613e-05,
+ "loss": 0.0,
+ "step": 529
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 1.0186988664309023e-05,
+ "loss": 0.0,
+ "step": 530
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 1.0155826661947232e-05,
+ "loss": 0.0,
+ "step": 531
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 1.0124663145954152e-05,
+ "loss": 0.0,
+ "step": 532
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 1.0093498419038394e-05,
+ "loss": 0.0,
+ "step": 533
+ },
+ {
+ "epoch": 1.03,
+ "learning_rate": 1.0062332783920337e-05,
+ "loss": 0.0,
+ "step": 534
+ },
+ {
+ "epoch": 1.03,
+ "learning_rate": 1.0031166543329179e-05,
+ "loss": 0.0,
+ "step": 535
+ },
+ {
+ "epoch": 1.03,
+ "learning_rate": 1e-05,
+ "loss": 0.0,
+ "step": 536
+ },
+ {
+ "epoch": 1.03,
+ "learning_rate": 9.968833456670824e-06,
+ "loss": 0.0,
+ "step": 537
+ },
+ {
+ "epoch": 1.03,
+ "learning_rate": 9.937667216079665e-06,
+ "loss": 0.0,
+ "step": 538
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 9.90650158096161e-06,
+ "loss": 0.0,
+ "step": 539
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 9.87533685404585e-06,
+ "loss": 0.0,
+ "step": 540
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 9.844173338052771e-06,
+ "loss": 0.0,
+ "step": 541
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 9.81301133569098e-06,
+ "loss": 0.0,
+ "step": 542
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 9.78185114965439e-06,
+ "loss": 0.0,
+ "step": 543
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 9.750693082619274e-06,
+ "loss": 0.0,
+ "step": 544
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 9.719537437241311e-06,
+ "loss": 0.0,
+ "step": 545
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 9.68838451615267e-06,
+ "loss": 0.0,
+ "step": 546
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 9.65723462195905e-06,
+ "loss": 0.0,
+ "step": 547
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 9.626088057236745e-06,
+ "loss": 0.0,
+ "step": 548
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 9.594945124529718e-06,
+ "loss": 0.0,
+ "step": 549
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 9.563806126346643e-06,
+ "loss": 0.0,
+ "step": 550
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 9.532671365157979e-06,
+ "loss": 0.0,
+ "step": 551
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 9.501541143393028e-06,
+ "loss": 0.0,
+ "step": 552
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 9.470415763437003e-06,
+ "loss": 0.0,
+ "step": 553
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 9.439295527628083e-06,
+ "loss": 0.0,
+ "step": 554
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 9.408180738254472e-06,
+ "loss": 0.0,
+ "step": 555
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 9.377071697551479e-06,
+ "loss": 0.0,
+ "step": 556
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 9.34596870769857e-06,
+ "loss": 0.0,
+ "step": 557
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 9.314872070816435e-06,
+ "loss": 0.0,
+ "step": 558
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 9.28378208896405e-06,
+ "loss": 0.0,
+ "step": 559
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 9.252699064135759e-06,
+ "loss": 0.0,
+ "step": 560
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 9.221623298258315e-06,
+ "loss": 0.0,
+ "step": 561
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 9.190555093187968e-06,
+ "loss": 0.0,
+ "step": 562
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 9.159494750707527e-06,
+ "loss": 0.0,
+ "step": 563
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 9.128442572523418e-06,
+ "loss": 0.0,
+ "step": 564
+ },
+ {
+ "epoch": 1.09,
+ "learning_rate": 9.097398860262777e-06,
+ "loss": 0.0,
+ "step": 565
+ },
+ {
+ "epoch": 1.09,
+ "learning_rate": 9.066363915470494e-06,
+ "loss": 0.0,
+ "step": 566
+ },
+ {
+ "epoch": 1.09,
+ "learning_rate": 9.0353380396063e-06,
+ "loss": 0.0,
+ "step": 567
+ },
+ {
+ "epoch": 1.09,
+ "learning_rate": 9.004321534041836e-06,
+ "loss": 0.0,
+ "step": 568
+ },
+ {
+ "epoch": 1.09,
+ "learning_rate": 8.973314700057717e-06,
+ "loss": 0.0,
+ "step": 569
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 8.942317838840625e-06,
+ "loss": 0.0,
+ "step": 570
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 8.911331251480357e-06,
+ "loss": 0.0,
+ "step": 571
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 8.880355238966923e-06,
+ "loss": 0.0,
+ "step": 572
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 8.849390102187615e-06,
+ "loss": 0.0,
+ "step": 573
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 8.818436141924072e-06,
+ "loss": 0.0,
+ "step": 574
+ },
+ {
+ "epoch": 1.11,
+ "learning_rate": 8.787493658849387e-06,
+ "loss": 0.0,
+ "step": 575
+ },
+ {
+ "epoch": 1.11,
+ "learning_rate": 8.756562953525151e-06,
+ "loss": 0.0,
+ "step": 576
+ },
+ {
+ "epoch": 1.11,
+ "learning_rate": 8.72564432639856e-06,
+ "loss": 0.0,
+ "step": 577
+ },
+ {
+ "epoch": 1.11,
+ "learning_rate": 8.694738077799487e-06,
+ "loss": 0.0,
+ "step": 578
+ },
+ {
+ "epoch": 1.11,
+ "learning_rate": 8.663844507937563e-06,
+ "loss": 0.0,
+ "step": 579
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 8.632963916899268e-06,
+ "loss": 0.0,
+ "step": 580
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 8.602096604645009e-06,
+ "loss": 0.0,
+ "step": 581
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 8.571242871006202e-06,
+ "loss": 0.0,
+ "step": 582
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 8.540403015682382e-06,
+ "loss": 0.0,
+ "step": 583
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 8.509577338238255e-06,
+ "loss": 0.0,
+ "step": 584
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 8.478766138100834e-06,
+ "loss": 0.0,
+ "step": 585
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 8.447969714556484e-06,
+ "loss": 0.0,
+ "step": 586
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 8.417188366748051e-06,
+ "loss": 0.0,
+ "step": 587
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 8.386422393671934e-06,
+ "loss": 0.0,
+ "step": 588
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 8.355672094175192e-06,
+ "loss": 0.0,
+ "step": 589
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 8.324937766952638e-06,
+ "loss": 0.0,
+ "step": 590
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 8.294219710543931e-06,
+ "loss": 0.0,
+ "step": 591
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 8.263518223330698e-06,
+ "loss": 0.0,
+ "step": 592
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 8.232833603533601e-06,
+ "loss": 0.0,
+ "step": 593
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 8.202166149209475e-06,
+ "loss": 0.0,
+ "step": 594
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 8.171516158248406e-06,
+ "loss": 0.0,
+ "step": 595
+ },
+ {
+ "epoch": 1.15,
+ "learning_rate": 8.140883928370855e-06,
+ "loss": 0.0,
+ "step": 596
+ },
+ {
+ "epoch": 1.15,
+ "learning_rate": 8.11026975712476e-06,
+ "loss": 0.0,
+ "step": 597
+ },
+ {
+ "epoch": 1.15,
+ "learning_rate": 8.079673941882639e-06,
+ "loss": 0.0,
+ "step": 598
+ },
+ {
+ "epoch": 1.15,
+ "learning_rate": 8.04909677983872e-06,
+ "loss": 0.0,
+ "step": 599
+ },
+ {
+ "epoch": 1.15,
+ "learning_rate": 8.018538568006027e-06,
+ "loss": 0.0,
+ "step": 600
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 7.987999603213518e-06,
+ "loss": 0.0,
+ "step": 601
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 7.957480182103198e-06,
+ "loss": 0.0,
+ "step": 602
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 7.926980601127225e-06,
+ "loss": 0.0,
+ "step": 603
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 7.896501156545044e-06,
+ "loss": 0.0,
+ "step": 604
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 7.866042144420502e-06,
+ "loss": 0.0,
+ "step": 605
+ },
+ {
+ "epoch": 1.17,
+ "learning_rate": 7.835603860618973e-06,
+ "loss": 0.0,
+ "step": 606
+ },
+ {
+ "epoch": 1.17,
+ "learning_rate": 7.805186600804489e-06,
+ "loss": 0.0,
+ "step": 607
+ },
+ {
+ "epoch": 1.17,
+ "learning_rate": 7.774790660436857e-06,
+ "loss": 0.0,
+ "step": 608
+ },
+ {
+ "epoch": 1.17,
+ "learning_rate": 7.744416334768809e-06,
+ "loss": 0.0,
+ "step": 609
+ },
+ {
+ "epoch": 1.17,
+ "learning_rate": 7.714063918843106e-06,
+ "loss": 0.0,
+ "step": 610
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 7.6837337074897e-06,
+ "loss": 0.0,
+ "step": 611
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 7.653425995322852e-06,
+ "loss": 0.0,
+ "step": 612
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 7.623141076738271e-06,
+ "loss": 0.0,
+ "step": 613
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 7.592879245910273e-06,
+ "loss": 0.0,
+ "step": 614
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 7.562640796788893e-06,
+ "loss": 0.0,
+ "step": 615
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 7.532426023097063e-06,
+ "loss": 0.0,
+ "step": 616
+ },
+ {
+ "epoch": 1.19,
+ "learning_rate": 7.50223521832773e-06,
+ "loss": 0.0,
+ "step": 617
+ },
+ {
+ "epoch": 1.19,
+ "learning_rate": 7.472068675741024e-06,
+ "loss": 0.0,
+ "step": 618
+ },
+ {
+ "epoch": 1.19,
+ "learning_rate": 7.4419266883614e-06,
+ "loss": 0.0,
+ "step": 619
+ },
+ {
+ "epoch": 1.19,
+ "learning_rate": 7.411809548974792e-06,
+ "loss": 0.0,
+ "step": 620
+ },
+ {
+ "epoch": 1.19,
+ "learning_rate": 7.38171755012578e-06,
+ "loss": 0.0,
+ "step": 621
+ },
+ {
+ "epoch": 1.2,
+ "learning_rate": 7.3516509841147276e-06,
+ "loss": 0.0,
+ "step": 622
+ },
+ {
+ "epoch": 1.2,
+ "learning_rate": 7.321610142994971e-06,
+ "loss": 0.0,
+ "step": 623
+ },
+ {
+ "epoch": 1.2,
+ "learning_rate": 7.291595318569951e-06,
+ "loss": 0.0,
+ "step": 624
+ },
+ {
+ "epoch": 1.2,
+ "learning_rate": 7.2616068023904e-06,
+ "loss": 0.0,
+ "step": 625
+ },
+ {
+ "epoch": 1.2,
+ "learning_rate": 7.2316448857515076e-06,
+ "loss": 0.0,
+ "step": 626
+ },
+ {
+ "epoch": 1.21,
+ "learning_rate": 7.201709859690081e-06,
+ "loss": 0.0,
+ "step": 627
+ },
+ {
+ "epoch": 1.21,
+ "learning_rate": 7.171802014981726e-06,
+ "loss": 0.0,
+ "step": 628
+ },
+ {
+ "epoch": 1.21,
+ "learning_rate": 7.141921642138025e-06,
+ "loss": 0.0,
+ "step": 629
+ },
+ {
+ "epoch": 1.21,
+ "learning_rate": 7.112069031403704e-06,
+ "loss": 0.0,
+ "step": 630
+ },
+ {
+ "epoch": 1.21,
+ "learning_rate": 7.082244472753823e-06,
+ "loss": 0.0,
+ "step": 631
+ },
+ {
+ "epoch": 1.22,
+ "learning_rate": 7.052448255890958e-06,
+ "loss": 0.0,
+ "step": 632
+ },
+ {
+ "epoch": 1.22,
+ "learning_rate": 7.022680670242387e-06,
+ "loss": 0.0,
+ "step": 633
+ },
+ {
+ "epoch": 1.22,
+ "learning_rate": 6.992942004957271e-06,
+ "loss": 0.0,
+ "step": 634
+ },
+ {
+ "epoch": 1.22,
+ "learning_rate": 6.963232548903853e-06,
+ "loss": 0.0,
+ "step": 635
+ },
+ {
+ "epoch": 1.22,
+ "learning_rate": 6.933552590666659e-06,
+ "loss": 0.0,
+ "step": 636
+ },
+ {
+ "epoch": 1.23,
+ "learning_rate": 6.903902418543671e-06,
+ "loss": 0.0,
+ "step": 637
+ },
+ {
+ "epoch": 1.23,
+ "learning_rate": 6.874282320543557e-06,
+ "loss": 0.0,
+ "step": 638
+ },
+ {
+ "epoch": 1.23,
+ "learning_rate": 6.844692584382848e-06,
+ "loss": 0.0,
+ "step": 639
+ },
+ {
+ "epoch": 1.23,
+ "learning_rate": 6.815133497483157e-06,
+ "loss": 0.0,
+ "step": 640
+ },
+ {
+ "epoch": 1.23,
+ "learning_rate": 6.785605346968387e-06,
+ "loss": 0.0,
+ "step": 641
+ },
+ {
+ "epoch": 1.23,
+ "learning_rate": 6.7561084196619306e-06,
+ "loss": 0.0,
+ "step": 642
+ },
+ {
+ "epoch": 1.24,
+ "learning_rate": 6.7266430020839e-06,
+ "loss": 0.0,
+ "step": 643
+ },
+ {
+ "epoch": 1.24,
+ "learning_rate": 6.697209380448333e-06,
+ "loss": 0.0,
+ "step": 644
+ },
+ {
+ "epoch": 1.24,
+ "learning_rate": 6.66780784066041e-06,
+ "loss": 0.0,
+ "step": 645
+ },
+ {
+ "epoch": 1.24,
+ "learning_rate": 6.638438668313695e-06,
+ "loss": 0.0,
+ "step": 646
+ },
+ {
+ "epoch": 1.24,
+ "learning_rate": 6.609102148687333e-06,
+ "loss": 0.0,
+ "step": 647
+ },
+ {
+ "epoch": 1.25,
+ "learning_rate": 6.579798566743314e-06,
+ "loss": 0.0,
+ "step": 648
+ },
+ {
+ "epoch": 1.25,
+ "learning_rate": 6.550528207123667e-06,
+ "loss": 0.0,
+ "step": 649
+ },
+ {
+ "epoch": 1.25,
+ "learning_rate": 6.521291354147727e-06,
+ "loss": 0.0,
+ "step": 650
+ },
+ {
+ "epoch": 1.25,
+ "learning_rate": 6.492088291809355e-06,
+ "loss": 0.0,
+ "step": 651
+ },
+ {
+ "epoch": 1.25,
+ "learning_rate": 6.462919303774186e-06,
+ "loss": 0.0,
+ "step": 652
+ },
+ {
+ "epoch": 1.26,
+ "learning_rate": 6.43378467337687e-06,
+ "loss": 0.0,
+ "step": 653
+ },
+ {
+ "epoch": 1.26,
+ "learning_rate": 6.404684683618325e-06,
+ "loss": 0.0,
+ "step": 654
+ },
+ {
+ "epoch": 1.26,
+ "learning_rate": 6.375619617162985e-06,
+ "loss": 0.0,
+ "step": 655
+ },
+ {
+ "epoch": 1.26,
+ "learning_rate": 6.34658975633605e-06,
+ "loss": 0.0,
+ "step": 656
+ },
+ {
+ "epoch": 1.26,
+ "learning_rate": 6.317595383120756e-06,
+ "loss": 0.0,
+ "step": 657
+ },
+ {
+ "epoch": 1.27,
+ "learning_rate": 6.288636779155621e-06,
+ "loss": 0.0,
+ "step": 658
+ },
+ {
+ "epoch": 1.27,
+ "learning_rate": 6.2597142257317185e-06,
+ "loss": 0.0,
+ "step": 659
+ },
+ {
+ "epoch": 1.27,
+ "learning_rate": 6.230828003789949e-06,
+ "loss": 0.0,
+ "step": 660
+ },
+ {
+ "epoch": 1.27,
+ "learning_rate": 6.201978393918291e-06,
+ "loss": 0.0,
+ "step": 661
+ },
+ {
+ "epoch": 1.27,
+ "learning_rate": 6.173165676349103e-06,
+ "loss": 0.0,
+ "step": 662
+ },
+ {
+ "epoch": 1.27,
+ "learning_rate": 6.144390130956384e-06,
+ "loss": 0.0,
+ "step": 663
+ },
+ {
+ "epoch": 1.28,
+ "learning_rate": 6.115652037253054e-06,
+ "loss": 0.0,
+ "step": 664
+ },
+ {
+ "epoch": 1.28,
+ "learning_rate": 6.086951674388252e-06,
+ "loss": 0.0,
+ "step": 665
+ },
+ {
+ "epoch": 1.28,
+ "learning_rate": 6.058289321144608e-06,
+ "loss": 0.0,
+ "step": 666
+ },
+ {
+ "epoch": 1.28,
+ "learning_rate": 6.02966525593555e-06,
+ "loss": 0.0,
+ "step": 667
+ },
+ {
+ "epoch": 1.28,
+ "learning_rate": 6.001079756802592e-06,
+ "loss": 0.0,
+ "step": 668
+ },
+ {
+ "epoch": 1.29,
+ "learning_rate": 5.97253310141263e-06,
+ "loss": 0.0,
+ "step": 669
+ },
+ {
+ "epoch": 1.29,
+ "learning_rate": 5.944025567055251e-06,
+ "loss": 0.0,
+ "step": 670
+ },
+ {
+ "epoch": 1.29,
+ "learning_rate": 5.91555743064004e-06,
+ "loss": 0.0,
+ "step": 671
+ },
+ {
+ "epoch": 1.29,
+ "learning_rate": 5.887128968693887e-06,
+ "loss": 0.0,
+ "step": 672
+ },
+ {
+ "epoch": 1.29,
+ "learning_rate": 5.858740457358298e-06,
+ "loss": 0.0,
+ "step": 673
+ },
+ {
+ "epoch": 1.3,
+ "learning_rate": 5.830392172386723e-06,
+ "loss": 0.0,
+ "step": 674
+ },
+ {
+ "epoch": 1.3,
+ "learning_rate": 5.802084389141862e-06,
+ "loss": 0.0,
+ "step": 675
+ },
+ {
+ "epoch": 1.3,
+ "learning_rate": 5.773817382593008e-06,
+ "loss": 0.0,
+ "step": 676
+ },
+ {
+ "epoch": 1.3,
+ "learning_rate": 5.745591427313365e-06,
+ "loss": 0.0,
+ "step": 677
+ },
+ {
+ "epoch": 1.3,
+ "learning_rate": 5.717406797477371e-06,
+ "loss": 0.0,
+ "step": 678
+ },
+ {
+ "epoch": 1.31,
+ "learning_rate": 5.689263766858072e-06,
+ "loss": 0.0,
+ "step": 679
+ },
+ {
+ "epoch": 1.31,
+ "learning_rate": 5.66116260882442e-06,
+ "loss": 0.0,
+ "step": 680
+ },
+ {
+ "epoch": 1.31,
+ "learning_rate": 5.633103596338631e-06,
+ "loss": 0.0,
+ "step": 681
+ },
+ {
+ "epoch": 1.31,
+ "learning_rate": 5.6050870019535496e-06,
+ "loss": 0.0,
+ "step": 682
+ },
+ {
+ "epoch": 1.31,
+ "learning_rate": 5.5771130978099896e-06,
+ "loss": 0.0,
+ "step": 683
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 5.549182155634076e-06,
+ "loss": 0.0,
+ "step": 684
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 5.521294446734637e-06,
+ "loss": 0.0,
+ "step": 685
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 5.493450242000546e-06,
+ "loss": 0.0,
+ "step": 686
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 5.465649811898098e-06,
+ "loss": 0.0,
+ "step": 687
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 5.43789342646837e-06,
+ "loss": 0.0,
+ "step": 688
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 5.410181355324622e-06,
+ "loss": 0.0,
+ "step": 689
+ },
+ {
+ "epoch": 1.33,
+ "learning_rate": 5.382513867649663e-06,
+ "loss": 0.0,
+ "step": 690
+ },
+ {
+ "epoch": 1.33,
+ "learning_rate": 5.354891232193225e-06,
+ "loss": 0.0,
+ "step": 691
+ },
+ {
+ "epoch": 1.33,
+ "learning_rate": 5.32731371726938e-06,
+ "loss": 0.0,
+ "step": 692
+ },
+ {
+ "epoch": 1.33,
+ "learning_rate": 5.299781590753916e-06,
+ "loss": 0.0,
+ "step": 693
+ },
+ {
+ "epoch": 1.33,
+ "learning_rate": 5.2722951200817315e-06,
+ "loss": 0.0,
+ "step": 694
+ },
+ {
+ "epoch": 1.34,
+ "learning_rate": 5.244854572244249e-06,
+ "loss": 0.0,
+ "step": 695
+ },
+ {
+ "epoch": 1.34,
+ "learning_rate": 5.217460213786822e-06,
+ "loss": 0.0,
+ "step": 696
+ },
+ {
+ "epoch": 1.34,
+ "learning_rate": 5.190112310806126e-06,
+ "loss": 0.0,
+ "step": 697
+ },
+ {
+ "epoch": 1.34,
+ "learning_rate": 5.1628111289476025e-06,
+ "loss": 0.0,
+ "step": 698
+ },
+ {
+ "epoch": 1.34,
+ "learning_rate": 5.135556933402862e-06,
+ "loss": 0.0,
+ "step": 699
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 5.108349988907111e-06,
+ "loss": 0.0,
+ "step": 700
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 5.081190559736569e-06,
+ "loss": 0.0,
+ "step": 701
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 5.054078909705926e-06,
+ "loss": 0.0,
+ "step": 702
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 5.027015302165768e-06,
+ "loss": 0.0,
+ "step": 703
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 5.000000000000003e-06,
+ "loss": 0.0,
+ "step": 704
+ },
+ {
+ "epoch": 1.36,
+ "learning_rate": 4.973033265623333e-06,
+ "loss": 0.0,
+ "step": 705
+ },
+ {
+ "epoch": 1.36,
+ "learning_rate": 4.946115360978696e-06,
+ "loss": 0.0,
+ "step": 706
+ },
+ {
+ "epoch": 1.36,
+ "learning_rate": 4.919246547534709e-06,
+ "loss": 0.0,
+ "step": 707
+ },
+ {
+ "epoch": 1.36,
+ "learning_rate": 4.892427086283147e-06,
+ "loss": 0.0,
+ "step": 708
+ },
+ {
+ "epoch": 1.36,
+ "learning_rate": 4.865657237736397e-06,
+ "loss": 0.0,
+ "step": 709
+ },
+ {
+ "epoch": 1.37,
+ "learning_rate": 4.838937261924933e-06,
+ "loss": 0.0,
+ "step": 710
+ },
+ {
+ "epoch": 1.37,
+ "learning_rate": 4.812267418394784e-06,
+ "loss": 0.0,
+ "step": 711
+ },
+ {
+ "epoch": 1.37,
+ "learning_rate": 4.78564796620502e-06,
+ "loss": 0.0,
+ "step": 712
+ },
+ {
+ "epoch": 1.37,
+ "learning_rate": 4.759079163925223e-06,
+ "loss": 0.0,
+ "step": 713
+ },
+ {
+ "epoch": 1.37,
+ "learning_rate": 4.732561269632992e-06,
+ "loss": 0.0,
+ "step": 714
+ },
+ {
+ "epoch": 1.38,
+ "learning_rate": 4.706094540911429e-06,
+ "loss": 0.0,
+ "step": 715
+ },
+ {
+ "epoch": 1.38,
+ "learning_rate": 4.679679234846636e-06,
+ "loss": 0.0,
+ "step": 716
+ },
+ {
+ "epoch": 1.38,
+ "learning_rate": 4.6533156080252076e-06,
+ "loss": 0.0,
+ "step": 717
+ },
+ {
+ "epoch": 1.38,
+ "learning_rate": 4.627003916531761e-06,
+ "loss": 0.0,
+ "step": 718
+ },
+ {
+ "epoch": 1.38,
+ "learning_rate": 4.600744415946438e-06,
+ "loss": 0.0,
+ "step": 719
+ },
+ {
+ "epoch": 1.38,
+ "learning_rate": 4.5745373613424075e-06,
+ "loss": 0.0,
+ "step": 720
+ },
+ {
+ "epoch": 1.39,
+ "learning_rate": 4.548383007283412e-06,
+ "loss": 0.0,
+ "step": 721
+ },
+ {
+ "epoch": 1.39,
+ "learning_rate": 4.522281607821288e-06,
+ "loss": 0.0,
+ "step": 722
+ },
+ {
+ "epoch": 1.39,
+ "learning_rate": 4.496233416493481e-06,
+ "loss": 0.0,
+ "step": 723
+ },
+ {
+ "epoch": 1.39,
+ "learning_rate": 4.470238686320606e-06,
+ "loss": 0.0,
+ "step": 724
+ },
+ {
+ "epoch": 1.39,
+ "learning_rate": 4.444297669803981e-06,
+ "loss": 0.0,
+ "step": 725
+ },
+ {
+ "epoch": 1.4,
+ "learning_rate": 4.418410618923163e-06,
+ "loss": 0.0,
+ "step": 726
+ },
+ {
+ "epoch": 1.4,
+ "learning_rate": 4.392577785133521e-06,
+ "loss": 0.0,
+ "step": 727
+ },
+ {
+ "epoch": 1.4,
+ "learning_rate": 4.3667994193637794e-06,
+ "loss": 0.0,
+ "step": 728
+ },
+ {
+ "epoch": 1.4,
+ "learning_rate": 4.3410757720135886e-06,
+ "loss": 0.0,
+ "step": 729
+ },
+ {
+ "epoch": 1.4,
+ "learning_rate": 4.315407092951078e-06,
+ "loss": 0.0,
+ "step": 730
+ },
+ {
+ "epoch": 1.41,
+ "learning_rate": 4.289793631510449e-06,
+ "loss": 0.0,
+ "step": 731
+ },
+ {
+ "epoch": 1.41,
+ "learning_rate": 4.264235636489542e-06,
+ "loss": 0.0,
+ "step": 732
+ },
+ {
+ "epoch": 1.41,
+ "learning_rate": 4.238733356147414e-06,
+ "loss": 0.0,
+ "step": 733
+ },
+ {
+ "epoch": 1.41,
+ "learning_rate": 4.213287038201943e-06,
+ "loss": 0.0,
+ "step": 734
+ },
+ {
+ "epoch": 1.41,
+ "learning_rate": 4.187896929827414e-06,
+ "loss": 0.0,
+ "step": 735
+ },
+ {
+ "epoch": 1.42,
+ "learning_rate": 4.162563277652104e-06,
+ "loss": 0.0,
+ "step": 736
+ },
+ {
+ "epoch": 1.42,
+ "learning_rate": 4.137286327755913e-06,
+ "loss": 0.0,
+ "step": 737
+ },
+ {
+ "epoch": 1.42,
+ "learning_rate": 4.112066325667954e-06,
+ "loss": 0.0,
+ "step": 738
+ },
+ {
+ "epoch": 1.42,
+ "learning_rate": 4.086903516364179e-06,
+ "loss": 0.0,
+ "step": 739
+ },
+ {
+ "epoch": 1.42,
+ "learning_rate": 4.061798144264986e-06,
+ "loss": 0.0,
+ "step": 740
+ },
+ {
+ "epoch": 1.43,
+ "learning_rate": 4.03675045323286e-06,
+ "loss": 0.0,
+ "step": 741
+ },
+ {
+ "epoch": 1.43,
+ "learning_rate": 4.0117606865699975e-06,
+ "loss": 0.0,
+ "step": 742
+ },
+ {
+ "epoch": 1.43,
+ "learning_rate": 3.986829087015941e-06,
+ "loss": 0.0,
+ "step": 743
+ },
+ {
+ "epoch": 1.43,
+ "learning_rate": 3.961955896745224e-06,
+ "loss": 0.0,
+ "step": 744
+ },
+ {
+ "epoch": 1.43,
+ "learning_rate": 3.937141357365023e-06,
+ "loss": 0.0,
+ "step": 745
+ },
+ {
+ "epoch": 1.43,
+ "learning_rate": 3.912385709912794e-06,
+ "loss": 0.0,
+ "step": 746
+ },
+ {
+ "epoch": 1.44,
+ "learning_rate": 3.887689194853951e-06,
+ "loss": 0.0,
+ "step": 747
+ },
+ {
+ "epoch": 1.44,
+ "learning_rate": 3.8630520520795275e-06,
+ "loss": 0.0,
+ "step": 748
+ },
+ {
+ "epoch": 1.44,
+ "learning_rate": 3.838474520903825e-06,
+ "loss": 0.0,
+ "step": 749
+ },
+ {
+ "epoch": 1.44,
+ "learning_rate": 3.8139568400621184e-06,
+ "loss": 0.0,
+ "step": 750
+ },
+ {
+ "epoch": 1.44,
+ "learning_rate": 3.7894992477083226e-06,
+ "loss": 0.0,
+ "step": 751
+ },
+ {
+ "epoch": 1.45,
+ "learning_rate": 3.7651019814126656e-06,
+ "loss": 0.0,
+ "step": 752
+ },
+ {
+ "epoch": 1.45,
+ "learning_rate": 3.7407652781594094e-06,
+ "loss": 0.0,
+ "step": 753
+ },
+ {
+ "epoch": 1.45,
+ "learning_rate": 3.7164893743445274e-06,
+ "loss": 0.0,
+ "step": 754
+ },
+ {
+ "epoch": 1.45,
+ "learning_rate": 3.692274505773419e-06,
+ "loss": 0.0,
+ "step": 755
+ },
+ {
+ "epoch": 1.45,
+ "learning_rate": 3.6681209076586035e-06,
+ "loss": 0.0,
+ "step": 756
+ },
+ {
+ "epoch": 1.46,
+ "learning_rate": 3.644028814617454e-06,
+ "loss": 0.0,
+ "step": 757
+ },
+ {
+ "epoch": 1.46,
+ "learning_rate": 3.619998460669916e-06,
+ "loss": 0.0,
+ "step": 758
+ },
+ {
+ "epoch": 1.46,
+ "learning_rate": 3.5960300792362124e-06,
+ "loss": 0.0,
+ "step": 759
+ },
+ {
+ "epoch": 1.46,
+ "learning_rate": 3.5721239031346067e-06,
+ "loss": 0.0,
+ "step": 760
+ },
+ {
+ "epoch": 1.46,
+ "learning_rate": 3.5482801645791266e-06,
+ "loss": 0.0,
+ "step": 761
+ },
+ {
+ "epoch": 1.47,
+ "learning_rate": 3.5244990951772972e-06,
+ "loss": 0.0,
+ "step": 762
+ },
+ {
+ "epoch": 1.47,
+ "learning_rate": 3.5007809259279146e-06,
+ "loss": 0.0,
+ "step": 763
+ },
+ {
+ "epoch": 1.47,
+ "learning_rate": 3.4771258872187917e-06,
+ "loss": 0.0,
+ "step": 764
+ },
+ {
+ "epoch": 1.47,
+ "learning_rate": 3.453534208824507e-06,
+ "loss": 0.0,
+ "step": 765
+ },
+ {
+ "epoch": 1.47,
+ "learning_rate": 3.4300061199041967e-06,
+ "loss": 0.0,
+ "step": 766
+ },
+ {
+ "epoch": 1.48,
+ "learning_rate": 3.4065418489993118e-06,
+ "loss": 0.0,
+ "step": 767
+ },
+ {
+ "epoch": 1.48,
+ "learning_rate": 3.3831416240314085e-06,
+ "loss": 0.0,
+ "step": 768
+ },
+ {
+ "epoch": 1.48,
+ "learning_rate": 3.3598056722999185e-06,
+ "loss": 0.0,
+ "step": 769
+ },
+ {
+ "epoch": 1.48,
+ "learning_rate": 3.3365342204799613e-06,
+ "loss": 0.0,
+ "step": 770
+ },
+ {
+ "epoch": 1.48,
+ "learning_rate": 3.3133274946201333e-06,
+ "loss": 0.0,
+ "step": 771
+ },
+ {
+ "epoch": 1.48,
+ "learning_rate": 3.290185720140301e-06,
+ "loss": 0.0,
+ "step": 772
+ },
+ {
+ "epoch": 1.49,
+ "learning_rate": 3.267109121829428e-06,
+ "loss": 0.0,
+ "step": 773
+ },
+ {
+ "epoch": 1.49,
+ "learning_rate": 3.2440979238433977e-06,
+ "loss": 0.0,
+ "step": 774
+ },
+ {
+ "epoch": 1.49,
+ "learning_rate": 3.221152349702802e-06,
+ "loss": 0.0,
+ "step": 775
+ },
+ {
+ "epoch": 1.49,
+ "learning_rate": 3.1982726222908046e-06,
+ "loss": 0.0,
+ "step": 776
+ },
+ {
+ "epoch": 1.49,
+ "learning_rate": 3.1754589638509647e-06,
+ "loss": 0.0,
+ "step": 777
+ },
+ {
+ "epoch": 1.5,
+ "learning_rate": 3.152711595985065e-06,
+ "loss": 0.0,
+ "step": 778
+ },
+ {
+ "epoch": 1.5,
+ "learning_rate": 3.1300307396509833e-06,
+ "loss": 0.0,
+ "step": 779
+ },
+ {
+ "epoch": 1.5,
+ "learning_rate": 3.10741661516053e-06,
+ "loss": 0.0,
+ "step": 780
+ },
+ {
+ "epoch": 1.5,
+ "learning_rate": 3.0848694421773075e-06,
+ "loss": 0.0,
+ "step": 781
+ },
+ {
+ "epoch": 1.5,
+ "learning_rate": 3.0623894397145837e-06,
+ "loss": 0.0,
+ "step": 782
+ },
+ {
+ "epoch": 1.51,
+ "learning_rate": 3.0399768261331664e-06,
+ "loss": 0.0,
+ "step": 783
+ },
+ {
+ "epoch": 1.51,
+ "learning_rate": 3.017631819139273e-06,
+ "loss": 0.0,
+ "step": 784
+ },
+ {
+ "epoch": 1.51,
+ "learning_rate": 2.995354635782417e-06,
+ "loss": 0.0,
+ "step": 785
+ },
+ {
+ "epoch": 1.51,
+ "learning_rate": 2.9731454924533086e-06,
+ "loss": 0.0,
+ "step": 786
+ },
+ {
+ "epoch": 1.51,
+ "learning_rate": 2.95100460488175e-06,
+ "loss": 0.0,
+ "step": 787
+ },
+ {
+ "epoch": 1.52,
+ "learning_rate": 2.9289321881345257e-06,
+ "loss": 0.0,
+ "step": 788
+ },
+ {
+ "epoch": 1.52,
+ "learning_rate": 2.906928456613336e-06,
+ "loss": 0.0,
+ "step": 789
+ },
+ {
+ "epoch": 1.52,
+ "learning_rate": 2.884993624052701e-06,
+ "loss": 0.0,
+ "step": 790
+ },
+ {
+ "epoch": 1.52,
+ "learning_rate": 2.8631279035178796e-06,
+ "loss": 0.0,
+ "step": 791
+ },
+ {
+ "epoch": 1.52,
+ "learning_rate": 2.8413315074028157e-06,
+ "loss": 0.0,
+ "step": 792
+ },
+ {
+ "epoch": 1.52,
+ "learning_rate": 2.819604647428067e-06,
+ "loss": 0.0,
+ "step": 793
+ },
+ {
+ "epoch": 1.53,
+ "learning_rate": 2.7979475346387363e-06,
+ "loss": 0.0,
+ "step": 794
+ },
+ {
+ "epoch": 1.53,
+ "learning_rate": 2.776360379402445e-06,
+ "loss": 0.0,
+ "step": 795
+ },
+ {
+ "epoch": 1.53,
+ "learning_rate": 2.7548433914072736e-06,
+ "loss": 0.0,
+ "step": 796
+ },
+ {
+ "epoch": 1.53,
+ "learning_rate": 2.7333967796597317e-06,
+ "loss": 0.0,
+ "step": 797
+ },
+ {
+ "epoch": 1.53,
+ "learning_rate": 2.712020752482717e-06,
+ "loss": 0.0,
+ "step": 798
+ },
+ {
+ "epoch": 1.54,
+ "learning_rate": 2.690715517513508e-06,
+ "loss": 0.0,
+ "step": 799
+ },
+ {
+ "epoch": 1.54,
+ "learning_rate": 2.669481281701739e-06,
+ "loss": 0.0,
+ "step": 800
+ }
+ ],
+ "logging_steps": 1.0,
+ "max_steps": 1040,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 2,
+ "save_steps": 100,
+ "total_flos": 3.991516798003446e+17,
+ "train_batch_size": 16,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/training_args.bin b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..2ca4d892afdd453b26723a9aa94e432cb44cc953
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:63da3a2d0bf1dde543b68e123590fcd7c42f45ec7eb68e86c6eadd439321f902
+size 6264
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/zero_to_fp32.py b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/zero_to_fp32.py
new file mode 100644
index 0000000000000000000000000000000000000000..c98caae31534368be22b67fc4ae906836c992a8d
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-800/zero_to_fp32.py
@@ -0,0 +1,587 @@
+#!/usr/bin/env python
+
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+# This script extracts fp32 consolidated weights from ZeRO stage 1, 2 and 3 DeepSpeed checkpoints. It gets
+# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+# application.
+#
+# example: python zero_to_fp32.py . pytorch_model.bin
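+#
+# The conversion can also be done programmatically. A minimal sketch, assuming the public
+# helpers defined further down in this file match the upstream DeepSpeed version of the script:
+#
+#   from zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+#   # run from inside this checkpoint directory; returns the fp32 weights as a CPU state_dict
+#   state_dict = get_fp32_state_dict_from_zero_checkpoint(".")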
+
+import argparse
+import torch
+import glob
+import math
+import os
+import re
+from collections import OrderedDict
+from dataclasses import dataclass
+
+# While this script doesn't use DeepSpeed itself to recover the data, the checkpoints are pickled
+# with DeepSpeed data structures, so DeepSpeed must be available in the current Python environment.
+from deepspeed.utils import logger
+from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
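+# Per-rank model-state information recovered from a *_model_states.pt file.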
+@dataclass
+class zero_model_state:
+ buffers: dict
+ param_shapes: dict
+ shared_params: list
+ ds_version: int
+ frozen_param_shapes: dict
+ frozen_param_fragments: dict
+
+
+debug = 0
+
+# load to cpu
+device = torch.device('cpu')
+
+
+def atoi(text):
+ return int(text) if text.isdigit() else text
+
+
+def natural_keys(text):
+ '''
+ alist.sort(key=natural_keys) sorts in human order
+ http://nedbatchelder.com/blog/200712/human_sorting.html
+ (See Toothy's implementation in the comments)
+ '''
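+ # e.g. natural_keys("rank_2") -> ["rank_", 2, ""] and natural_keys("rank_10") -> ["rank_", 10, ""],
+ # so "rank_2" sorts before "rank_10" (a plain string sort would put "rank_10" first)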
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
+
+
+def get_model_state_file(checkpoint_dir, zero_stage):
+ if not os.path.isdir(checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
+
+ # there should be only one file
+ if zero_stage <= 2:
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
+ elif zero_stage == 3:
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
+
+ if not os.path.exists(file):
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
+
+ return file
+
+
+def get_checkpoint_files(checkpoint_dir, glob_pattern):
+ # XXX: need to test that this simple glob rule works for multi-node setup too
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
+
+ if len(ckpt_files) == 0:
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
+
+ return ckpt_files
+
+
+def get_optim_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
+
+
+def get_model_state_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
+
+
+def parse_model_states(files):
+ zero_model_states = []
+ for file in files:
+ state_dict = torch.load(file, map_location=device)
+
+ if BUFFER_NAMES not in state_dict:
+ raise ValueError(f"{file} is not a model state checkpoint")
+ buffer_names = state_dict[BUFFER_NAMES]
+ if debug:
+ print("Found buffers:", buffer_names)
+
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
+ param_shapes = state_dict[PARAM_SHAPES]
+
+ # collect parameters that are included in param_shapes
+ param_names = []
+ for s in param_shapes:
+ for name in s.keys():
+ param_names.append(name)
+
+ # update with frozen parameters
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
+ if frozen_param_shapes is not None:
+ if debug:
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
+ param_names += list(frozen_param_shapes.keys())
+
+ # handle shared params
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
+
+ ds_version = state_dict.get(DS_VERSION, None)
+
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
+
+ z_model_state = zero_model_state(buffers=buffers,
+ param_shapes=param_shapes,
+ shared_params=shared_params,
+ ds_version=ds_version,
+ frozen_param_shapes=frozen_param_shapes,
+ frozen_param_fragments=frozen_param_fragments)
+ zero_model_states.append(z_model_state)
+
+ return zero_model_states
+
+
+def parse_optim_states(files, ds_checkpoint_dir):
+
+ total_files = len(files)
+ state_dicts = []
+ for f in files:
+ state_dict = torch.load(f, map_location=device)
+ # immediately discard the two potentially huge optimizer states since we only care about the fp32 master
+ # weights, and also handle the case where they were already removed by another helper script
+ state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
+ state_dicts.append(state_dict)
+
+ if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
+
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
+ # use the max of the partition_count to get the dp world_size.
+
+ if type(world_size) is list:
+ world_size = max(world_size)
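+ # e.g. (illustrative): a per-group partition_count such as [2, 8] yields a dp world_size of 8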
+
+ if world_size != total_files:
+ raise ValueError(
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
+ )
+
+ # the groups are named differently in each stage
+ if zero_stage <= 2:
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
+ elif zero_stage == 3:
+ fp32_groups_key = FP32_FLAT_GROUPS
+ else:
+ raise ValueError(f"unknown zero stage {zero_stage}")
+
+ if zero_stage <= 2:
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
+ elif zero_stage == 3:
+ # if there is more than one param group, there will be multiple flattened tensors - one
+ # flattened tensor per group - for simplicity merge them into a single tensor
+ #
+ # XXX: could make the script more memory efficient for when there are multiple groups - it
+ # will require matching the sub-lists of param_shapes for each param group flattened tensor
+
+ fp32_flat_groups = [
+ torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
+ ]
+
+ return zero_stage, world_size, fp32_flat_groups
+
+
+def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir):
+ """
+ Returns fp32 state_dict reconstructed from ds checkpoint
+
+ Args:
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
+
+ """
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
+
+ optim_files = get_optim_files(ds_checkpoint_dir)
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
+
+ model_files = get_model_state_files(ds_checkpoint_dir)
+
+ zero_model_states = parse_model_states(model_files)
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
+
+ if zero_stage <= 2:
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states)
+ elif zero_stage == 3:
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states)
+
+
+def _zero2_merge_frozen_params(state_dict, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
+
+ if debug:
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ state_dict[name] = frozen_param_fragments[name]
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+
+ # Reconstruction protocol:
+ #
+ # each rank holds one contiguous partition of the flattened fp32 weights per param group; the per-rank
+ # partitions of each group are concatenated back into a single flat vector, and each param is then sliced
+ # out of it by its recorded shape, accounting for the 2*world_size alignment padding at the end of the group
+
+ if debug:
+ for i in range(world_size):
+ for j in range(len(fp32_flat_groups[0])):
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
+
+ # XXX: memory usage doubles here (zero2)
+ num_param_groups = len(fp32_flat_groups[0])
+ merged_single_partition_of_fp32_groups = []
+ for i in range(num_param_groups):
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
+ avail_numel = sum(
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
+
+ if debug:
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
+ # not asserting if there is a mismatch due to possible padding
+ print(f"Have {avail_numel} numels to process.")
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support an
+ # out-of-core computing solution
+ total_numel = 0
+ total_params = 0
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
+ offset = 0
+ avail_numel = full_single_fp32_vector.numel()
+ for name, shape in shapes.items():
+
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+ total_params += 1
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
+ offset += unpartitioned_numel
+
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
+ # live optimizer object, so we are checking that the numbers are within the right range
+ align_to = 2 * world_size
+
+ def zero2_align(x):
+ return align_to * math.ceil(x / align_to)
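+ # e.g. (illustrative): with world_size=4, align_to is 8, so zero2_align(13) == 8 * ceil(13/8) == 16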
+
+ if debug:
+ print(f"original offset={offset}, avail_numel={avail_numel}")
+
+ offset = zero2_align(offset)
+ avail_numel = zero2_align(avail_numel)
+
+ if debug:
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
+
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def zero3_partitioned_param_info(unpartitioned_numel, world_size):
+ remainder = unpartitioned_numel % world_size
+ padding_numel = (world_size - remainder) if remainder else 0
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
+ return partitioned_numel, padding_numel
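+# illustrative example: zero3_partitioned_param_info(10, 4) == (3, 2) -- each of the 4 ranks stores
+# ceil(10/4) == 3 elements and the last 4 - 10 % 4 == 2 of the 4 * 3 == 12 flattened slots are padding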
+
+
+def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ if debug:
+ for i in range(world_size):
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
+
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+ avail_numel = fp32_flat_groups[0].numel() * world_size
+ # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
+ # param, re-consolidating each param, while dealing with padding if any
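+ #
+ # e.g. (illustrative): with world_size=2 and a single group holding params A (numel 3) and B (numel 5),
+ # each rank's flat tensor is laid out as [A_partition(2), B_partition(3)]; A is rebuilt by concatenating
+ # the first 2 elements from every rank and truncating to 3, after which the offset advances by 2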
+
+ # merge list of dicts, preserving order
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
+
+ if debug:
+ for i in range(world_size):
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
+
+ wanted_params = len(param_shapes)
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
+ # not asserting if there is a mismatch due to possible padding
+ avail_numel = fp32_flat_groups[0].numel() * world_size
+ print(f"Trainable params: Have {avail_numel} numels to process.")
+ print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support an
+ # out-of-core computing solution
+ offset = 0
+ total_numel = 0
+ total_params = 0
+ for name, shape in param_shapes.items():
+
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+ total_params += 1
+
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ # XXX: memory usage doubles here
+ state_dict[name] = torch.cat(
+ tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
+ 0).narrow(0, 0, unpartitioned_numel).view(shape)
+ offset += partitioned_numel
+
+ offset *= world_size
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
+
+ _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+ ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+ via a model hub.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
+
+ Returns:
+ - pytorch ``state_dict``
+
+ Note: this approach may not work if your application doesn't have sufficient free CPU memory; in that
+ case you may need to use the offline approach via the ``zero_to_fp32.py`` script that is saved with
+ the checkpoint.
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+ # do the training and checkpoint saving
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
+ model = model.cpu() # move to cpu
+ model.load_state_dict(state_dict)
+ # submit to model hub or save the model to share with others
+
+ In this example the ``model`` will no longer be usable in the deepspeed context of the same
+ application, i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+ """
+ if tag is None:
+ latest_path = os.path.join(checkpoint_dir, 'latest')
+ if os.path.isfile(latest_path):
+ with open(latest_path, 'r') as fd:
+ tag = fd.read().strip()
+ else:
+ raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
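+ # e.g. a 'latest' file containing "global_step900" resolves ds_checkpoint_dir to <checkpoint_dir>/global_step900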
+ ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+ if not os.path.isdir(ds_checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+ return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir)
+
+
+def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+ loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+ """
+
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+ print(f"Saving fp32 state dict to {output_file}")
+ torch.save(state_dict, output_file)
+
+
+def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+ """
+ 1. Put the provided model on the CPU
+ 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+ 3. Load it into the provided model
+
+ Args:
+ - ``model``: the model object to update
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+ Returns:
+ - ``model``: modified model
+
+ Make sure you have plenty of CPU memory available before you call this function. If you don't
+ have enough, use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
+ conveniently placed for you in the checkpoint folder.
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+ model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+ # submit to model hub or save the model to share with others
+
+ Note that once this has been run, the ``model`` will no longer be usable in the deepspeed context
+ of the same application, i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ """
+ logger.info(f"Extracting fp32 weights")
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+ logger.info(f"Overwriting model with fp32 weights")
+ model = model.cpu()
+ model.load_state_dict(state_dict, strict=False)
+
+ return model
+
+
+if __name__ == "__main__":
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("checkpoint_dir",
+ type=str,
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+ parser.add_argument(
+ "output_file",
+ type=str,
+ help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
+ parser.add_argument("-t",
+ "--tag",
+ type=str,
+ default=None,
+ help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+ args = parser.parse_args()
+
+ debug = args.debug
+
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file, tag=args.tag)
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/config.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..44e6d4e17930a42d0aa68dcd3790bd5f32ba4ec4
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/config.json
@@ -0,0 +1,73 @@
+{
+ "_name_or_path": "../pretrained-models/llava-v1.6-mistral-7b",
+ "architectures": [
+ "LlavaLlamaForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "freeze_mm_mlp_adapter": false,
+ "freeze_mm_vision_resampler": false,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "image_aspect_ratio": "pad",
+ "image_crop_resolution": 224,
+ "image_grid_pinpoints": [
+ [
+ 336,
+ 672
+ ],
+ [
+ 672,
+ 336
+ ],
+ [
+ 672,
+ 672
+ ],
+ [
+ 1008,
+ 336
+ ],
+ [
+ 336,
+ 1008
+ ]
+ ],
+ "image_split_resolution": 224,
+ "initializer_range": 0.02,
+ "intermediate_size": 14336,
+ "max_position_embeddings": 32768,
+ "mm_hidden_size": 1024,
+ "mm_patch_merge_type": "flat",
+ "mm_projector_lr": null,
+ "mm_projector_type": "mlp2x_gelu",
+ "mm_resampler_type": null,
+ "mm_use_im_patch_token": false,
+ "mm_use_im_start_end": false,
+ "mm_vision_select_feature": "patch",
+ "mm_vision_select_layer": -2,
+ "mm_vision_tower": "openai/clip-vit-large-patch14-336",
+ "mm_vision_tower_lr": 2e-06,
+ "model_type": "llava_llama",
+ "num_attention_heads": 32,
+ "num_hidden_layers": 32,
+ "num_key_value_heads": 8,
+ "pretraining_tp": 1,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "tokenizer_model_max_length": 2048,
+ "tokenizer_padding_side": "right",
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.37.2",
+ "tune_mm_mlp_adapter": false,
+ "tune_mm_vision_resampler": false,
+ "unfreeze_mm_vision_tower": true,
+ "use_cache": false,
+ "use_mm_proj": true,
+ "vocab_size": 32000
+}
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/generation_config.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..69b7806611a4865cd48c3e991dbd7d8312e0c5d3
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/generation_config.json
@@ -0,0 +1,6 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "transformers_version": "4.37.2"
+}
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/latest b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/latest
new file mode 100644
index 0000000000000000000000000000000000000000..4b10acccf3e8395339ff8799cea202bbc54d7f7d
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/latest
@@ -0,0 +1 @@
+global_step900
\ No newline at end of file
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/model.safetensors.index.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..927da5be7e6e3ec29d3a967a09ba6a421d7a2191
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/model.safetensors.index.json
@@ -0,0 +1,694 @@
+{
+ "metadata": {
+ "total_size": 15132446720
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00004-of-00004.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
+ "model.image_newline": "model-00001-of-00004.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.30.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.31.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.mm_projector.0.bias": "model-00003-of-00004.safetensors",
+ "model.mm_projector.0.weight": "model-00003-of-00004.safetensors",
+ "model.mm_projector.2.bias": "model-00003-of-00004.safetensors",
+ "model.mm_projector.2.weight": "model-00003-of-00004.safetensors",
+ "model.norm.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.embeddings.class_embedding": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.embeddings.patch_embedding.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.embeddings.position_embedding.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc1.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc1.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc2.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc2.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.post_layernorm.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.post_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.pre_layrnorm.bias": "model-00003-of-00004.safetensors",
+ "model.vision_tower.vision_tower.vision_model.pre_layrnorm.weight": "model-00003-of-00004.safetensors"
+ }
+}
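
The file closed above is the standard Hugging Face sharded-checkpoint index: `weight_map` maps every parameter name (here the CLIP vision-tower weights) to the `model-0000X-of-00004.safetensors` shard that stores it. Below is a minimal sketch of resolving a single tensor through that index; the `safetensors` package is required and the local checkpoint directory path is an assumption, not something recorded in the diff. `from_pretrained` performs this shard resolution automatically, so the manual lookup is mainly useful for inspecting individual tensors.

```python
# Minimal sketch (assumed local paths): find which shard holds a tensor listed
# in model.safetensors.index.json and load just that tensor.
import json
from safetensors import safe_open  # pip install safetensors

ckpt_dir = "llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-100"  # illustrative path
with open(f"{ckpt_dir}/model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.vision_tower.vision_tower.vision_model.post_layernorm.weight"
shard = index["weight_map"][name]  # e.g. "model-00003-of-00004.safetensors"
with safe_open(f"{ckpt_dir}/{shard}", framework="pt", device="cpu") as shard_file:
    tensor = shard_file.get_tensor(name)
print(name, tuple(tensor.shape))  # expected (1024,) for this CLIP-L layernorm weight
```
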
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/rng_state_0.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..b346349ce12dd5a17d4b91ed2a5722bb52550950
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ad8a35afd8967cbb748405387e44426e43ad127028e826eddc9b67d2ca873c85
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/rng_state_1.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..68f3c6994456cb8d0592a5375d99503c8924b1c4
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f338ce80d7c441076bfc8c53b84067a0181f5a14e80c13d5acb8150b659f4d73
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/rng_state_2.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..be044f6ceeed587d30e80c2f72d5aa19fdc9947b
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c9fbc9fa428939be10b46779f0eb5cd833e0da426b1cbdee77b3a55b6952235b
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/rng_state_3.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..fc825249656a9b858782542bd3f4386250f1dfe0
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac55dba0b79d5fa4699d239da2f966d52040d576d31234ac8d4632e6956481bc
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/rng_state_4.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/rng_state_4.pth
new file mode 100644
index 0000000000000000000000000000000000000000..d30f52a44be563c152ae09db6ae934da6da0d3ed
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/rng_state_4.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af2d0c015100768ffa23faf3b6c2d54ea89eb045603e30e55cd211e06ff34972
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/rng_state_5.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/rng_state_5.pth
new file mode 100644
index 0000000000000000000000000000000000000000..c8715d27ab23ae545d58039cf949cc44ecc1da5e
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/rng_state_5.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c60a1b40608e34bc801c8231f97b81c53b5290dfaed1b9cd0ccbeca29574a991
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/rng_state_6.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/rng_state_6.pth
new file mode 100644
index 0000000000000000000000000000000000000000..1ed791b6ef76eadf0b0c55a5733411771e2ae027
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/rng_state_6.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3ad6a142a403eb9aafc4a3a9a856bca648fe31fd22d796867baca31fb13656aa
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/rng_state_7.pth b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/rng_state_7.pth
new file mode 100644
index 0000000000000000000000000000000000000000..800c3bbbc5edf7db01a8316069d439c5fb8d8c30
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/rng_state_7.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:38bc23a138cc800b22881742c0f3f9a71731a9a7111c6058a0077e6274d21773
+size 15984
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/scheduler.pt b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ce15623afb638ee28c575115527a0e4f70c730c4
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:19d5eacdb1832860d506b1ab60e4ef78418bb865b82ed94d3b0294052717c4f2
+size 1064
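
Each of the three-line files above (`rng_state_*.pth`, `scheduler.pt`) is a Git LFS pointer, not the binary itself: it records only the `sha256` OID and byte size of the object, and `git lfs pull` swaps in the real payload. The eight `rng_state_*.pth` files are the per-rank RNG snapshots the HF Trainer writes so a resumed run reproduces the same data order and dropout masks. A small sketch of checking a downloaded payload against its pointer, assuming you keep a copy of the pointer text next to the payload (both paths below are illustrative):

```python
# Minimal sketch (illustrative paths): verify a downloaded payload against the
# oid/size recorded in a Git LFS pointer file like the ones above.
import hashlib
import os

def read_pointer(path: str) -> tuple[str, int]:
    # Pointer lines look like: "version <url>", "oid sha256:<hex>", "size <bytes>"
    fields = dict(line.split(" ", 1) for line in open(path).read().splitlines())
    return fields["oid"].removeprefix("sha256:"), int(fields["size"])

def sha256_of(path: str, chunk: int = 1 << 20) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk):
            digest.update(block)
    return digest.hexdigest()

oid, size = read_pointer("scheduler.pt.pointer")   # assumed saved pointer copy
payload = "checkpoint-900/scheduler.pt"            # assumed downloaded payload
ok = sha256_of(payload) == oid and os.path.getsize(payload) == size
print("match" if ok else "mismatch")
```
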
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/special_tokens_map.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..14761dcf1466dc232bd41de9c21d4c617b15755e
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/tokenizer.model b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..8b443ef19c2a19acc3ac64fb9c3db4a72921dff6
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+size 493443
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/tokenizer_config.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..23dcf70e8cfc9b16310b6ff3dc98fdbc5adc11f8
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/tokenizer_config.json
@@ -0,0 +1,44 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [],
+ "bos_token": "",
+ "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "legacy": true,
+ "model_max_length": 2048,
+ "pad_token": "",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "",
+ "use_default_system_prompt": false
+}
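
The `tokenizer_config.json` just added pins the Mistral-style `[INST] … [/INST]` chat template, a 2048-token `model_max_length`, and right-side padding. A minimal sketch of rendering a conversation through that template with `transformers` follows; the checkpoint directory path is assumed, and loading with `use_fast=False` needs `sentencepiece` plus the `tokenizer.model` LFS object above. Note this exercises only the text template; LLaVA's image-placeholder handling lives in the model's own preprocessing code.

```python
# Minimal sketch (assumed local path): render a conversation through the
# chat_template stored in tokenizer_config.json.
from transformers import AutoTokenizer

ckpt_dir = "llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900"  # illustrative path
tok = AutoTokenizer.from_pretrained(ckpt_dir, use_fast=False)

messages = [
    {"role": "user", "content": "Is there a dog in the picture?"},
    {"role": "assistant", "content": "I cannot tell from this image."},
]
prompt = tok.apply_chat_template(messages, tokenize=False)
print(prompt)
# Roughly: "<s>[INST] Is there a dog in the picture? [/INST]I cannot tell from this image.</s>"
```
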
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/trainer_state.json b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..b52114ef0d3a1d5541d30d5d7e4dc5177ee7cc37
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/trainer_state.json
@@ -0,0 +1,5421 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.7307692307692308,
+ "eval_steps": 500,
+ "global_step": 900,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.0,
+ "learning_rate": 6.25e-07,
+ "loss": 3.7473,
+ "step": 1
+ },
+ {
+ "epoch": 0.0,
+ "learning_rate": 1.25e-06,
+ "loss": 0.0,
+ "step": 2
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 1.8750000000000003e-06,
+ "loss": 0.0,
+ "step": 3
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 2.5e-06,
+ "loss": 0.0,
+ "step": 4
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 3.125e-06,
+ "loss": 0.0,
+ "step": 5
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 3.7500000000000005e-06,
+ "loss": 0.0,
+ "step": 6
+ },
+ {
+ "epoch": 0.01,
+ "learning_rate": 4.3750000000000005e-06,
+ "loss": 0.0,
+ "step": 7
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 5e-06,
+ "loss": 0.0,
+ "step": 8
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 5.625e-06,
+ "loss": 0.0,
+ "step": 9
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 6.25e-06,
+ "loss": 0.0,
+ "step": 10
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 6.875e-06,
+ "loss": 0.0,
+ "step": 11
+ },
+ {
+ "epoch": 0.02,
+ "learning_rate": 7.500000000000001e-06,
+ "loss": 0.0,
+ "step": 12
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 8.125000000000001e-06,
+ "loss": 0.0,
+ "step": 13
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 8.750000000000001e-06,
+ "loss": 0.0,
+ "step": 14
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 9.375000000000001e-06,
+ "loss": 0.0,
+ "step": 15
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 1e-05,
+ "loss": 0.0,
+ "step": 16
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 1.0625e-05,
+ "loss": 0.0,
+ "step": 17
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 1.125e-05,
+ "loss": 0.0,
+ "step": 18
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.1875e-05,
+ "loss": 0.0,
+ "step": 19
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.25e-05,
+ "loss": 0.0,
+ "step": 20
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.3125e-05,
+ "loss": 0.0,
+ "step": 21
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.375e-05,
+ "loss": 0.0,
+ "step": 22
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 1.4375e-05,
+ "loss": 0.0,
+ "step": 23
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.5000000000000002e-05,
+ "loss": 0.0,
+ "step": 24
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.5625e-05,
+ "loss": 0.0,
+ "step": 25
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.6250000000000002e-05,
+ "loss": 0.0,
+ "step": 26
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.6875e-05,
+ "loss": 0.0,
+ "step": 27
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 1.7500000000000002e-05,
+ "loss": 0.0,
+ "step": 28
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.8125e-05,
+ "loss": 0.0,
+ "step": 29
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.8750000000000002e-05,
+ "loss": 0.0,
+ "step": 30
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.9375e-05,
+ "loss": 0.0,
+ "step": 31
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 2e-05,
+ "loss": 0.0,
+ "step": 32
+ },
+ {
+ "epoch": 0.06,
+ "learning_rate": 1.9999951432210905e-05,
+ "loss": 0.0,
+ "step": 33
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.9999805729315383e-05,
+ "loss": 0.0,
+ "step": 34
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.999956289272873e-05,
+ "loss": 0.0,
+ "step": 35
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.999922292480975e-05,
+ "loss": 0.0,
+ "step": 36
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.9998785828860744e-05,
+ "loss": 0.0,
+ "step": 37
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.9998251609127465e-05,
+ "loss": 0.0,
+ "step": 38
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 1.999762027079909e-05,
+ "loss": 0.0,
+ "step": 39
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9996891820008165e-05,
+ "loss": 0.0,
+ "step": 40
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9996066263830533e-05,
+ "loss": 0.0,
+ "step": 41
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9995143610285275e-05,
+ "loss": 0.0,
+ "step": 42
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9994123868334655e-05,
+ "loss": 0.0,
+ "step": 43
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 1.9993007047883988e-05,
+ "loss": 0.0,
+ "step": 44
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.999179315978157e-05,
+ "loss": 0.0,
+ "step": 45
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.999048221581858e-05,
+ "loss": 0.0,
+ "step": 46
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.9989074228728942e-05,
+ "loss": 0.0,
+ "step": 47
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.9987569212189224e-05,
+ "loss": 0.0,
+ "step": 48
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 1.9985967180818493e-05,
+ "loss": 0.0,
+ "step": 49
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.998426815017817e-05,
+ "loss": 0.0,
+ "step": 50
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.998247213677188e-05,
+ "loss": 0.0,
+ "step": 51
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.9980579158045322e-05,
+ "loss": 0.0,
+ "step": 52
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.9978589232386036e-05,
+ "loss": 0.0,
+ "step": 53
+ },
+ {
+ "epoch": 0.1,
+ "learning_rate": 1.997650237912329e-05,
+ "loss": 0.0,
+ "step": 54
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.997431861852785e-05,
+ "loss": 0.0,
+ "step": 55
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.9972037971811802e-05,
+ "loss": 0.0,
+ "step": 56
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.996966046112834e-05,
+ "loss": 0.0,
+ "step": 57
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.996718610957155e-05,
+ "loss": 0.0,
+ "step": 58
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 1.9964614941176194e-05,
+ "loss": 0.0,
+ "step": 59
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.9961946980917457e-05,
+ "loss": 0.0,
+ "step": 60
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.995918225471073e-05,
+ "loss": 0.0,
+ "step": 61
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.9956320789411338e-05,
+ "loss": 0.0,
+ "step": 62
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.9953362612814294e-05,
+ "loss": 0.0,
+ "step": 63
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.9950307753654016e-05,
+ "loss": 0.0,
+ "step": 64
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 1.994715624160405e-05,
+ "loss": 0.0,
+ "step": 65
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.99439081072768e-05,
+ "loss": 0.0,
+ "step": 66
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.9940563382223196e-05,
+ "loss": 0.0,
+ "step": 67
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.9937122098932428e-05,
+ "loss": 0.0,
+ "step": 68
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.9933584290831593e-05,
+ "loss": 0.0,
+ "step": 69
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 1.9929949992285397e-05,
+ "loss": 0.0,
+ "step": 70
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.992621923859581e-05,
+ "loss": 0.0,
+ "step": 71
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.9922392066001724e-05,
+ "loss": 0.0,
+ "step": 72
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.99184685116786e-05,
+ "loss": 0.0,
+ "step": 73
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.9914448613738107e-05,
+ "loss": 0.0,
+ "step": 74
+ },
+ {
+ "epoch": 0.14,
+ "learning_rate": 1.991033241122776e-05,
+ "loss": 0.0,
+ "step": 75
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9906119944130527e-05,
+ "loss": 0.0,
+ "step": 76
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9901811253364458e-05,
+ "loss": 0.0,
+ "step": 77
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9897406380782262e-05,
+ "loss": 0.0,
+ "step": 78
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.989290536917093e-05,
+ "loss": 0.0,
+ "step": 79
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 1.9888308262251286e-05,
+ "loss": 0.0,
+ "step": 80
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.988361510467761e-05,
+ "loss": 0.0,
+ "step": 81
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.9878825942037147e-05,
+ "loss": 0.0,
+ "step": 82
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.9873940820849714e-05,
+ "loss": 0.0,
+ "step": 83
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.9868959788567213e-05,
+ "loss": 0.0,
+ "step": 84
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 1.9863882893573188e-05,
+ "loss": 0.0,
+ "step": 85
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.985871018518236e-05,
+ "loss": 0.0,
+ "step": 86
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.9853441713640123e-05,
+ "loss": 0.0,
+ "step": 87
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.9848077530122083e-05,
+ "loss": 0.0,
+ "step": 88
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.9842617686733546e-05,
+ "loss": 0.0,
+ "step": 89
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.9837062236509013e-05,
+ "loss": 0.0,
+ "step": 90
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 1.983141123341168e-05,
+ "loss": 0.0,
+ "step": 91
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.9825664732332886e-05,
+ "loss": 0.0,
+ "step": 92
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.9819822789091597e-05,
+ "loss": 0.0,
+ "step": 93
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.981388546043388e-05,
+ "loss": 0.0,
+ "step": 94
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.9807852804032306e-05,
+ "loss": 0.0,
+ "step": 95
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 1.9801724878485438e-05,
+ "loss": 0.0,
+ "step": 96
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.979550174331724e-05,
+ "loss": 0.0,
+ "step": 97
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.9789183458976485e-05,
+ "loss": 0.0,
+ "step": 98
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.97827700868362e-05,
+ "loss": 0.0,
+ "step": 99
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.977626168919305e-05,
+ "loss": 0.0,
+ "step": 100
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 1.9769658329266718e-05,
+ "loss": 0.0,
+ "step": 101
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9762960071199334e-05,
+ "loss": 0.0,
+ "step": 102
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9756166980054812e-05,
+ "loss": 0.0,
+ "step": 103
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9749279121818235e-05,
+ "loss": 0.0,
+ "step": 104
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9742296563395218e-05,
+ "loss": 0.0,
+ "step": 105
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 1.9735219372611232e-05,
+ "loss": 0.0,
+ "step": 106
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.9728047618210995e-05,
+ "loss": 0.0,
+ "step": 107
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.9720781369857747e-05,
+ "loss": 0.0,
+ "step": 108
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.9713420698132614e-05,
+ "loss": 0.0,
+ "step": 109
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.970596567453391e-05,
+ "loss": 0.0,
+ "step": 110
+ },
+ {
+ "epoch": 0.21,
+ "learning_rate": 1.9698416371476434e-05,
+ "loss": 0.0,
+ "step": 111
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.969077286229078e-05,
+ "loss": 0.0,
+ "step": 112
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.9683035221222617e-05,
+ "loss": 0.0,
+ "step": 113
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.9675203523431964e-05,
+ "loss": 0.0,
+ "step": 114
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.9667277844992476e-05,
+ "loss": 0.0,
+ "step": 115
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 1.9659258262890683e-05,
+ "loss": 0.0,
+ "step": 116
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.9651144855025265e-05,
+ "loss": 0.0,
+ "step": 117
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.964293770020628e-05,
+ "loss": 0.0,
+ "step": 118
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.9634636878154393e-05,
+ "loss": 0.0,
+ "step": 119
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.962624246950012e-05,
+ "loss": 0.0,
+ "step": 120
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.9617754555783045e-05,
+ "loss": 0.0,
+ "step": 121
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 1.9609173219450998e-05,
+ "loss": 0.0,
+ "step": 122
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.960049854385929e-05,
+ "loss": 0.0,
+ "step": 123
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.9591730613269878e-05,
+ "loss": 0.0,
+ "step": 124
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.9582869512850576e-05,
+ "loss": 0.0,
+ "step": 125
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.957391532867418e-05,
+ "loss": 0.0,
+ "step": 126
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 1.956486814771769e-05,
+ "loss": 0.0,
+ "step": 127
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.955572805786141e-05,
+ "loss": 0.0,
+ "step": 128
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.9546495147888134e-05,
+ "loss": 0.0,
+ "step": 129
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.953716950748227e-05,
+ "loss": 0.0,
+ "step": 130
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.9527751227228964e-05,
+ "loss": 0.0,
+ "step": 131
+ },
+ {
+ "epoch": 0.25,
+ "learning_rate": 1.9518240398613226e-05,
+ "loss": 0.0,
+ "step": 132
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.9508637114019037e-05,
+ "loss": 0.0,
+ "step": 133
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.9498941466728462e-05,
+ "loss": 0.0,
+ "step": 134
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.9489153550920726e-05,
+ "loss": 0.0,
+ "step": 135
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.947927346167132e-05,
+ "loss": 0.0,
+ "step": 136
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 1.946930129495106e-05,
+ "loss": 0.0,
+ "step": 137
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.945923714762516e-05,
+ "loss": 0.0,
+ "step": 138
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.9449081117452304e-05,
+ "loss": 0.0,
+ "step": 139
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.9438833303083677e-05,
+ "loss": 0.0,
+ "step": 140
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.9428493804062013e-05,
+ "loss": 0.0,
+ "step": 141
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 1.9418062720820636e-05,
+ "loss": 0.0,
+ "step": 142
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9407540154682473e-05,
+ "loss": 0.0,
+ "step": 143
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9396926207859085e-05,
+ "loss": 0.0,
+ "step": 144
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9386220983449652e-05,
+ "loss": 0.0,
+ "step": 145
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9375424585439994e-05,
+ "loss": 0.0,
+ "step": 146
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.9364537118701542e-05,
+ "loss": 0.0,
+ "step": 147
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 1.935355868899034e-05,
+ "loss": 0.0,
+ "step": 148
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.9342489402945997e-05,
+ "loss": 0.0,
+ "step": 149
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.9331329368090664e-05,
+ "loss": 0.0,
+ "step": 150
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.932007869282799e-05,
+ "loss": 0.0,
+ "step": 151
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.9308737486442045e-05,
+ "loss": 0.0,
+ "step": 152
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 1.9297305859096305e-05,
+ "loss": 0.0,
+ "step": 153
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.9285783921832537e-05,
+ "loss": 0.0,
+ "step": 154
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.927417178656975e-05,
+ "loss": 0.0,
+ "step": 155
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.926246956610309e-05,
+ "loss": 0.0,
+ "step": 156
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.9250677374102752e-05,
+ "loss": 0.0,
+ "step": 157
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 1.9238795325112867e-05,
+ "loss": 0.0,
+ "step": 158
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.9226823534550418e-05,
+ "loss": 0.0,
+ "step": 159
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.921476211870408e-05,
+ "loss": 0.0,
+ "step": 160
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.9202611194733107e-05,
+ "loss": 0.0,
+ "step": 161
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.9190370880666206e-05,
+ "loss": 0.0,
+ "step": 162
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 1.9178041295400383e-05,
+ "loss": 0.0,
+ "step": 163
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.9165622558699763e-05,
+ "loss": 0.0,
+ "step": 164
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.9153114791194475e-05,
+ "loss": 0.0,
+ "step": 165
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.9140518114379433e-05,
+ "loss": 0.0,
+ "step": 166
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.912783265061319e-05,
+ "loss": 0.0,
+ "step": 167
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 1.9115058523116734e-05,
+ "loss": 0.0,
+ "step": 168
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.9102195855972287e-05,
+ "loss": 0.0,
+ "step": 169
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.908924477412211e-05,
+ "loss": 0.0,
+ "step": 170
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.9076205403367287e-05,
+ "loss": 0.0,
+ "step": 171
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.9063077870366504e-05,
+ "loss": 0.0,
+ "step": 172
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.90498623026348e-05,
+ "loss": 0.0,
+ "step": 173
+ },
+ {
+ "epoch": 0.33,
+ "learning_rate": 1.903655882854237e-05,
+ "loss": 0.0,
+ "step": 174
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.9023167577313267e-05,
+ "loss": 0.0,
+ "step": 175
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.900968867902419e-05,
+ "loss": 0.0,
+ "step": 176
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.8996122264603202e-05,
+ "loss": 0.0,
+ "step": 177
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.898246846582844e-05,
+ "loss": 0.0,
+ "step": 178
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 1.8968727415326885e-05,
+ "loss": 0.0,
+ "step": 179
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.895489924657301e-05,
+ "loss": 0.0,
+ "step": 180
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.894098409388754e-05,
+ "loss": 0.0,
+ "step": 181
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.8926982092436117e-05,
+ "loss": 0.0,
+ "step": 182
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.8912893378227984e-05,
+ "loss": 0.0,
+ "step": 183
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.8898718088114688e-05,
+ "loss": 0.0,
+ "step": 184
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.8884456359788725e-05,
+ "loss": 0.0,
+ "step": 185
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.887010833178222e-05,
+ "loss": 0.0,
+ "step": 186
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.8855674143465567e-05,
+ "loss": 0.0,
+ "step": 187
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.8841153935046098e-05,
+ "loss": 0.0,
+ "step": 188
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 1.8826547847566692e-05,
+ "loss": 0.0,
+ "step": 189
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.8811856022904423e-05,
+ "loss": 0.0,
+ "step": 190
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.8797078603769184e-05,
+ "loss": 0.0,
+ "step": 191
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.8782215733702286e-05,
+ "loss": 0.0,
+ "step": 192
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.876726755707508e-05,
+ "loss": 0.0,
+ "step": 193
+ },
+ {
+ "epoch": 0.37,
+ "learning_rate": 1.8752234219087538e-05,
+ "loss": 0.0,
+ "step": 194
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8737115865766865e-05,
+ "loss": 0.0,
+ "step": 195
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8721912643966055e-05,
+ "loss": 0.0,
+ "step": 196
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8706624701362485e-05,
+ "loss": 0.0,
+ "step": 197
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8691252186456465e-05,
+ "loss": 0.0,
+ "step": 198
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.8675795248569816e-05,
+ "loss": 0.0,
+ "step": 199
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 1.866025403784439e-05,
+ "loss": 0.0,
+ "step": 200
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 1.8644628705240636e-05,
+ "loss": 0.0,
+ "step": 201
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 1.862891940253613e-05,
+ "loss": 0.0,
+ "step": 202
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 1.8613126282324092e-05,
+ "loss": 0.0,
+ "step": 203
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 1.8597249498011906e-05,
+ "loss": 0.0,
+ "step": 204
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 1.858128920381963e-05,
+ "loss": 0.0,
+ "step": 205
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 1.8565245554778516e-05,
+ "loss": 0.0,
+ "step": 206
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 1.854911870672947e-05,
+ "loss": 0.0,
+ "step": 207
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 1.8532908816321557e-05,
+ "loss": 0.0,
+ "step": 208
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 1.8516616041010495e-05,
+ "loss": 0.0,
+ "step": 209
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 1.8500240539057093e-05,
+ "loss": 0.0,
+ "step": 210
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 1.848378246952574e-05,
+ "loss": 0.0,
+ "step": 211
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 1.8467241992282842e-05,
+ "loss": 0.0,
+ "step": 212
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 1.8450619267995283e-05,
+ "loss": 0.0,
+ "step": 213
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 1.843391445812886e-05,
+ "loss": 0.0,
+ "step": 214
+ },
+ {
+ "epoch": 0.41,
+ "learning_rate": 1.84171277249467e-05,
+ "loss": 0.0,
+ "step": 215
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.8400259231507716e-05,
+ "loss": 0.0,
+ "step": 216
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.8383309141664992e-05,
+ "loss": 0.0,
+ "step": 217
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.83662776200642e-05,
+ "loss": 0.0,
+ "step": 218
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.8349164832142015e-05,
+ "loss": 0.0,
+ "step": 219
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.833197094412449e-05,
+ "loss": 0.0,
+ "step": 220
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 1.8314696123025456e-05,
+ "loss": 0.0,
+ "step": 221
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 1.8297340536644877e-05,
+ "loss": 0.0,
+ "step": 222
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 1.827990435356725e-05,
+ "loss": 0.0,
+ "step": 223
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 1.826238774315995e-05,
+ "loss": 0.0,
+ "step": 224
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 1.8244790875571582e-05,
+ "loss": 0.0,
+ "step": 225
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 1.8227113921730336e-05,
+ "loss": 0.0,
+ "step": 226
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 1.8209357053342325e-05,
+ "loss": 0.0,
+ "step": 227
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 1.819152044288992e-05,
+ "loss": 0.0,
+ "step": 228
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 1.8173604263630066e-05,
+ "loss": 0.0,
+ "step": 229
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 1.8155608689592604e-05,
+ "loss": 0.0,
+ "step": 230
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 1.8137533895578585e-05,
+ "loss": 0.0,
+ "step": 231
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 1.811938005715857e-05,
+ "loss": 0.0,
+ "step": 232
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 1.8101147350670905e-05,
+ "loss": 0.0,
+ "step": 233
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 1.8082835953220055e-05,
+ "loss": 0.0,
+ "step": 234
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 1.806444604267483e-05,
+ "loss": 0.0,
+ "step": 235
+ },
+ {
+ "epoch": 0.45,
+ "learning_rate": 1.8045977797666685e-05,
+ "loss": 0.0,
+ "step": 236
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 1.8027431397587993e-05,
+ "loss": 0.0,
+ "step": 237
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 1.8008807022590283e-05,
+ "loss": 0.0,
+ "step": 238
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 1.7990104853582494e-05,
+ "loss": 0.0,
+ "step": 239
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 1.7971325072229227e-05,
+ "loss": 0.0,
+ "step": 240
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 1.7952467860948975e-05,
+ "loss": 0.0,
+ "step": 241
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.7933533402912354e-05,
+ "loss": 0.0,
+ "step": 242
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.791452188204031e-05,
+ "loss": 0.0,
+ "step": 243
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.7895433483002356e-05,
+ "loss": 0.0,
+ "step": 244
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.7876268391214756e-05,
+ "loss": 0.0,
+ "step": 245
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.785702679283874e-05,
+ "loss": 0.0,
+ "step": 246
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 1.7837708874778683e-05,
+ "loss": 0.0,
+ "step": 247
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 1.78183148246803e-05,
+ "loss": 0.0,
+ "step": 248
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 1.7798844830928818e-05,
+ "loss": 0.0,
+ "step": 249
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 1.777929908264715e-05,
+ "loss": 0.0,
+ "step": 250
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 1.775967776969405e-05,
+ "loss": 0.0,
+ "step": 251
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 1.7739981082662275e-05,
+ "loss": 0.0,
+ "step": 252
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 1.772020921287674e-05,
+ "loss": 0.0,
+ "step": 253
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 1.7700362352392632e-05,
+ "loss": 0.0,
+ "step": 254
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 1.7680440693993586e-05,
+ "loss": 0.0,
+ "step": 255
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 1.766044443118978e-05,
+ "loss": 0.0,
+ "step": 256
+ },
+ {
+ "epoch": 0.49,
+ "learning_rate": 1.7640373758216075e-05,
+ "loss": 0.0,
+ "step": 257
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 1.762022887003011e-05,
+ "loss": 0.0,
+ "step": 258
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 1.7600009962310417e-05,
+ "loss": 0.0,
+ "step": 259
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 1.757971723145453e-05,
+ "loss": 0.0,
+ "step": 260
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 1.7559350874577066e-05,
+ "loss": 0.0,
+ "step": 261
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 1.75389110895078e-05,
+ "loss": 0.0,
+ "step": 262
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 1.7518398074789776e-05,
+ "loss": 0.0,
+ "step": 263
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 1.7497812029677344e-05,
+ "loss": 0.0,
+ "step": 264
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 1.7477153154134244e-05,
+ "loss": 0.0,
+ "step": 265
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 1.7456421648831658e-05,
+ "loss": 0.0,
+ "step": 266
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 1.743561771514626e-05,
+ "loss": 0.0,
+ "step": 267
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 1.741474155515827e-05,
+ "loss": 0.0,
+ "step": 268
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 1.739379337164946e-05,
+ "loss": 0.0,
+ "step": 269
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 1.737277336810124e-05,
+ "loss": 0.0,
+ "step": 270
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 1.7351681748692622e-05,
+ "loss": 0.0,
+ "step": 271
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 1.7330518718298263e-05,
+ "loss": 0.0,
+ "step": 272
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7309284482486494e-05,
+ "loss": 0.0,
+ "step": 273
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7287979247517285e-05,
+ "loss": 0.0,
+ "step": 274
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7266603220340273e-05,
+ "loss": 0.0,
+ "step": 275
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7245156608592727e-05,
+ "loss": 0.0,
+ "step": 276
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7223639620597556e-05,
+ "loss": 0.0,
+ "step": 277
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 1.7202052465361268e-05,
+ "loss": 0.0,
+ "step": 278
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 1.718039535257194e-05,
+ "loss": 0.0,
+ "step": 279
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 1.7158668492597186e-05,
+ "loss": 0.0,
+ "step": 280
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 1.7136872096482123e-05,
+ "loss": 0.0,
+ "step": 281
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 1.7115006375947304e-05,
+ "loss": 0.0,
+ "step": 282
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 1.7093071543386667e-05,
+ "loss": 0.0,
+ "step": 283
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 1.7071067811865477e-05,
+ "loss": 0.0,
+ "step": 284
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 1.7048995395118253e-05,
+ "loss": 0.0,
+ "step": 285
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 1.7026854507546694e-05,
+ "loss": 0.0,
+ "step": 286
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 1.7004645364217584e-05,
+ "loss": 0.0,
+ "step": 287
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 1.698236818086073e-05,
+ "loss": 0.0,
+ "step": 288
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 1.6960023173866834e-05,
+ "loss": 0.0,
+ "step": 289
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 1.693761056028542e-05,
+ "loss": 0.0,
+ "step": 290
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 1.6915130557822698e-05,
+ "loss": 0.0,
+ "step": 291
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 1.689258338483947e-05,
+ "loss": 0.0,
+ "step": 292
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 1.686996926034902e-05,
+ "loss": 0.0,
+ "step": 293
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.6847288404014937e-05,
+ "loss": 0.0,
+ "step": 294
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.682454103614904e-05,
+ "loss": 0.0,
+ "step": 295
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.6801727377709195e-05,
+ "loss": 0.0,
+ "step": 296
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.67788476502972e-05,
+ "loss": 0.0,
+ "step": 297
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.6755902076156606e-05,
+ "loss": 0.0,
+ "step": 298
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 1.6732890878170573e-05,
+ "loss": 0.0,
+ "step": 299
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 1.67098142798597e-05,
+ "loss": 0.0,
+ "step": 300
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 1.668667250537987e-05,
+ "loss": 0.0,
+ "step": 301
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 1.6663465779520042e-05,
+ "loss": 0.0,
+ "step": 302
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 1.6640194327700087e-05,
+ "loss": 0.0,
+ "step": 303
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 1.6616858375968596e-05,
+ "loss": 0.0,
+ "step": 304
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 1.659345815100069e-05,
+ "loss": 0.0,
+ "step": 305
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 1.6569993880095807e-05,
+ "loss": 0.0,
+ "step": 306
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 1.6546465791175498e-05,
+ "loss": 0.0,
+ "step": 307
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 1.6522874112781213e-05,
+ "loss": 0.0,
+ "step": 308
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 1.6499219074072087e-05,
+ "loss": 0.0,
+ "step": 309
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 1.6475500904822707e-05,
+ "loss": 0.0,
+ "step": 310
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 1.645171983542088e-05,
+ "loss": 0.0,
+ "step": 311
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 1.6427876096865394e-05,
+ "loss": 0.0,
+ "step": 312
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 1.640396992076379e-05,
+ "loss": 0.0,
+ "step": 313
+ },
+ {
+ "epoch": 0.6,
+ "learning_rate": 1.6380001539330088e-05,
+ "loss": 0.0,
+ "step": 314
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 1.6355971185382547e-05,
+ "loss": 0.0,
+ "step": 315
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 1.6331879092341402e-05,
+ "loss": 0.0,
+ "step": 316
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 1.6307725494226586e-05,
+ "loss": 0.0,
+ "step": 317
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 1.6283510625655474e-05,
+ "loss": 0.0,
+ "step": 318
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 1.6259234721840595e-05,
+ "loss": 0.0,
+ "step": 319
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 1.6234898018587336e-05,
+ "loss": 0.0,
+ "step": 320
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 1.6210500752291682e-05,
+ "loss": 0.0,
+ "step": 321
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 1.6186043159937884e-05,
+ "loss": 0.0,
+ "step": 322
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 1.616152547909618e-05,
+ "loss": 0.0,
+ "step": 323
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 1.6136947947920477e-05,
+ "loss": 0.0,
+ "step": 324
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 1.611231080514605e-05,
+ "loss": 0.0,
+ "step": 325
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 1.608761429008721e-05,
+ "loss": 0.0,
+ "step": 326
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 1.606285864263498e-05,
+ "loss": 0.0,
+ "step": 327
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 1.6038044103254775e-05,
+ "loss": 0.0,
+ "step": 328
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 1.601317091298406e-05,
+ "loss": 0.0,
+ "step": 329
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 1.5988239313430004e-05,
+ "loss": 0.0,
+ "step": 330
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 1.5963249546767144e-05,
+ "loss": 0.0,
+ "step": 331
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 1.5938201855735017e-05,
+ "loss": 0.0,
+ "step": 332
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 1.5913096483635827e-05,
+ "loss": 0.0,
+ "step": 333
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 1.5887933674332048e-05,
+ "loss": 0.0,
+ "step": 334
+ },
+ {
+ "epoch": 0.64,
+ "learning_rate": 1.5862713672244092e-05,
+ "loss": 0.0,
+ "step": 335
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 1.5837436722347902e-05,
+ "loss": 0.0,
+ "step": 336
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 1.5812103070172592e-05,
+ "loss": 0.0,
+ "step": 337
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 1.578671296179806e-05,
+ "loss": 0.0,
+ "step": 338
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 1.5761266643852587e-05,
+ "loss": 0.0,
+ "step": 339
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 1.573576436351046e-05,
+ "loss": 0.0,
+ "step": 340
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 1.5710206368489555e-05,
+ "loss": 0.0,
+ "step": 341
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 1.5684592907048925e-05,
+ "loss": 0.0,
+ "step": 342
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 1.5658924227986415e-05,
+ "loss": 0.0,
+ "step": 343
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 1.563320058063622e-05,
+ "loss": 0.0,
+ "step": 344
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 1.560742221486648e-05,
+ "loss": 0.0,
+ "step": 345
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 1.5581589381076843e-05,
+ "loss": 0.0,
+ "step": 346
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 1.5555702330196024e-05,
+ "loss": 0.0,
+ "step": 347
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 1.5529761313679396e-05,
+ "loss": 0.0,
+ "step": 348
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 1.5503766583506522e-05,
+ "loss": 0.0,
+ "step": 349
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 1.5477718392178716e-05,
+ "loss": 0.0,
+ "step": 350
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 1.545161699271659e-05,
+ "loss": 0.0,
+ "step": 351
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 1.5425462638657597e-05,
+ "loss": 0.0,
+ "step": 352
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 1.5399255584053568e-05,
+ "loss": 0.0,
+ "step": 353
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 1.5372996083468242e-05,
+ "loss": 0.0,
+ "step": 354
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 1.5346684391974792e-05,
+ "loss": 0.0,
+ "step": 355
+ },
+ {
+ "epoch": 0.68,
+ "learning_rate": 1.5320320765153367e-05,
+ "loss": 0.0,
+ "step": 356
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 1.529390545908857e-05,
+ "loss": 0.0,
+ "step": 357
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 1.526743873036701e-05,
+ "loss": 0.0,
+ "step": 358
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 1.5240920836074777e-05,
+ "loss": 0.0,
+ "step": 359
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 1.5214352033794981e-05,
+ "loss": 0.0,
+ "step": 360
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 1.5187732581605217e-05,
+ "loss": 0.0,
+ "step": 361
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 1.5161062738075068e-05,
+ "loss": 0.0,
+ "step": 362
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 1.5134342762263606e-05,
+ "loss": 0.0,
+ "step": 363
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 1.5107572913716859e-05,
+ "loss": 0.0,
+ "step": 364
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 1.5080753452465296e-05,
+ "loss": 0.0,
+ "step": 365
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 1.505388463902131e-05,
+ "loss": 0.0,
+ "step": 366
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 1.502696673437667e-05,
+ "loss": 0.0,
+ "step": 367
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 1.5000000000000002e-05,
+ "loss": 0.0,
+ "step": 368
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 1.4972984697834238e-05,
+ "loss": 0.0,
+ "step": 369
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 1.4945921090294076e-05,
+ "loss": 0.0,
+ "step": 370
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 1.4918809440263435e-05,
+ "loss": 0.0,
+ "step": 371
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 1.4891650011092896e-05,
+ "loss": 0.0,
+ "step": 372
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 1.486444306659714e-05,
+ "loss": 0.0,
+ "step": 373
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 1.4837188871052399e-05,
+ "loss": 0.0,
+ "step": 374
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 1.4809887689193878e-05,
+ "loss": 0.0,
+ "step": 375
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 1.4782539786213184e-05,
+ "loss": 0.0,
+ "step": 376
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 1.4755145427755755e-05,
+ "loss": 0.0,
+ "step": 377
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 1.4727704879918272e-05,
+ "loss": 0.0,
+ "step": 378
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 1.4700218409246087e-05,
+ "loss": 0.0,
+ "step": 379
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 1.4672686282730622e-05,
+ "loss": 0.0,
+ "step": 380
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 1.4645108767806778e-05,
+ "loss": 0.0,
+ "step": 381
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 1.4617486132350343e-05,
+ "loss": 0.0,
+ "step": 382
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 1.4589818644675378e-05,
+ "loss": 0.0,
+ "step": 383
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 1.4562106573531632e-05,
+ "loss": 0.0,
+ "step": 384
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 1.4534350188101905e-05,
+ "loss": 0.0,
+ "step": 385
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 1.4506549757999456e-05,
+ "loss": 0.0,
+ "step": 386
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 1.4478705553265363e-05,
+ "loss": 0.0,
+ "step": 387
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 1.4450817844365924e-05,
+ "loss": 0.0,
+ "step": 388
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 1.4422886902190014e-05,
+ "loss": 0.0,
+ "step": 389
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 1.4394912998046451e-05,
+ "loss": 0.0,
+ "step": 390
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 1.436689640366137e-05,
+ "loss": 0.0,
+ "step": 391
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 1.4338837391175582e-05,
+ "loss": 0.0,
+ "step": 392
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 1.4310736233141926e-05,
+ "loss": 0.0,
+ "step": 393
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 1.4282593202522627e-05,
+ "loss": 0.0,
+ "step": 394
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 1.4254408572686642e-05,
+ "loss": 0.0,
+ "step": 395
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 1.4226182617406996e-05,
+ "loss": 0.0,
+ "step": 396
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 1.4197915610858143e-05,
+ "loss": 0.0,
+ "step": 397
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 1.4169607827613284e-05,
+ "loss": 0.0,
+ "step": 398
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 1.4141259542641706e-05,
+ "loss": 0.0,
+ "step": 399
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 1.4112871031306118e-05,
+ "loss": 0.0,
+ "step": 400
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 1.4084442569359964e-05,
+ "loss": 0.0,
+ "step": 401
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 1.4055974432944753e-05,
+ "loss": 0.0,
+ "step": 402
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 1.4027466898587375e-05,
+ "loss": 0.0,
+ "step": 403
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 1.3998920243197408e-05,
+ "loss": 0.0,
+ "step": 404
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 1.3970334744064451e-05,
+ "loss": 0.0,
+ "step": 405
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 1.3941710678855396e-05,
+ "loss": 0.0,
+ "step": 406
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 1.391304832561175e-05,
+ "loss": 0.0,
+ "step": 407
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 1.3884347962746949e-05,
+ "loss": 0.0,
+ "step": 408
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 1.3855609869043618e-05,
+ "loss": 0.0,
+ "step": 409
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 1.3826834323650899e-05,
+ "loss": 0.0,
+ "step": 410
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 1.3798021606081713e-05,
+ "loss": 0.0,
+ "step": 411
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 1.3769171996210053e-05,
+ "loss": 0.0,
+ "step": 412
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 1.3740285774268282e-05,
+ "loss": 0.0,
+ "step": 413
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 1.371136322084438e-05,
+ "loss": 0.0,
+ "step": 414
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 1.3682404616879246e-05,
+ "loss": 0.0,
+ "step": 415
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 1.3653410243663953e-05,
+ "loss": 0.0,
+ "step": 416
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 1.3624380382837017e-05,
+ "loss": 0.0,
+ "step": 417
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 1.3595315316381676e-05,
+ "loss": 0.0,
+ "step": 418
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 1.3566215326623131e-05,
+ "loss": 0.0,
+ "step": 419
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 1.3537080696225815e-05,
+ "loss": 0.0,
+ "step": 420
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 1.3507911708190646e-05,
+ "loss": 0.0,
+ "step": 421
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 1.3478708645852272e-05,
+ "loss": 0.0,
+ "step": 422
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 1.3449471792876333e-05,
+ "loss": 0.0,
+ "step": 423
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 1.342020143325669e-05,
+ "loss": 0.0,
+ "step": 424
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 1.3390897851312667e-05,
+ "loss": 0.0,
+ "step": 425
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 1.336156133168631e-05,
+ "loss": 0.0,
+ "step": 426
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 1.3332192159339595e-05,
+ "loss": 0.0,
+ "step": 427
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 1.3302790619551673e-05,
+ "loss": 0.0,
+ "step": 428
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 1.3273356997916106e-05,
+ "loss": 0.0,
+ "step": 429
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 1.3243891580338074e-05,
+ "loss": 0.0,
+ "step": 430
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 1.3214394653031616e-05,
+ "loss": 0.0,
+ "step": 431
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 1.3184866502516846e-05,
+ "loss": 0.0,
+ "step": 432
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 1.3155307415617156e-05,
+ "loss": 0.0,
+ "step": 433
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 1.3125717679456447e-05,
+ "loss": 0.0,
+ "step": 434
+ },
+ {
+ "epoch": 0.84,
+ "learning_rate": 1.309609758145633e-05,
+ "loss": 0.0,
+ "step": 435
+ },
+ {
+ "epoch": 0.84,
+ "learning_rate": 1.3066447409333345e-05,
+ "loss": 0.0,
+ "step": 436
+ },
+ {
+ "epoch": 0.84,
+ "learning_rate": 1.3036767451096148e-05,
+ "loss": 0.0,
+ "step": 437
+ },
+ {
+ "epoch": 0.84,
+ "learning_rate": 1.300705799504273e-05,
+ "loss": 0.0,
+ "step": 438
+ },
+ {
+ "epoch": 0.84,
+ "learning_rate": 1.2977319329757616e-05,
+ "loss": 0.0,
+ "step": 439
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 1.2947551744109044e-05,
+ "loss": 0.0,
+ "step": 440
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 1.2917755527246179e-05,
+ "loss": 0.0,
+ "step": 441
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 1.28879309685963e-05,
+ "loss": 0.0,
+ "step": 442
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 1.2858078357861979e-05,
+ "loss": 0.0,
+ "step": 443
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 1.2828197985018276e-05,
+ "loss": 0.0,
+ "step": 444
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 1.2798290140309924e-05,
+ "loss": 0.0,
+ "step": 445
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 1.2768355114248493e-05,
+ "loss": 0.0,
+ "step": 446
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 1.2738393197609602e-05,
+ "loss": 0.0,
+ "step": 447
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 1.2708404681430054e-05,
+ "loss": 0.0,
+ "step": 448
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 1.2678389857005033e-05,
+ "loss": 0.0,
+ "step": 449
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 1.2648349015885272e-05,
+ "loss": 0.0,
+ "step": 450
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 1.2618282449874221e-05,
+ "loss": 0.0,
+ "step": 451
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 1.2588190451025209e-05,
+ "loss": 0.0,
+ "step": 452
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 1.2558073311638604e-05,
+ "loss": 0.0,
+ "step": 453
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 1.2527931324258975e-05,
+ "loss": 0.0,
+ "step": 454
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 1.249776478167227e-05,
+ "loss": 0.0,
+ "step": 455
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 1.2467573976902936e-05,
+ "loss": 0.0,
+ "step": 456
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 1.2437359203211109e-05,
+ "loss": 0.0,
+ "step": 457
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 1.2407120754089733e-05,
+ "loss": 0.0,
+ "step": 458
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 1.2376858923261732e-05,
+ "loss": 0.0,
+ "step": 459
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 1.2346574004677154e-05,
+ "loss": 0.0,
+ "step": 460
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 1.2316266292510305e-05,
+ "loss": 0.0,
+ "step": 461
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 1.2285936081156897e-05,
+ "loss": 0.0,
+ "step": 462
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 1.2255583665231196e-05,
+ "loss": 0.0,
+ "step": 463
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 1.2225209339563144e-05,
+ "loss": 0.0,
+ "step": 464
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 1.2194813399195518e-05,
+ "loss": 0.0,
+ "step": 465
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 1.2164396139381029e-05,
+ "loss": 0.0,
+ "step": 466
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 1.2133957855579501e-05,
+ "loss": 0.0,
+ "step": 467
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 1.210349884345496e-05,
+ "loss": 0.0,
+ "step": 468
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 1.2073019398872778e-05,
+ "loss": 0.0,
+ "step": 469
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 1.2042519817896805e-05,
+ "loss": 0.0,
+ "step": 470
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 1.2012000396786485e-05,
+ "loss": 0.0,
+ "step": 471
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 1.1981461431993978e-05,
+ "loss": 0.0,
+ "step": 472
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 1.1950903220161286e-05,
+ "loss": 0.0,
+ "step": 473
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 1.1920326058117364e-05,
+ "loss": 0.0,
+ "step": 474
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 1.1889730242875243e-05,
+ "loss": 0.0,
+ "step": 475
+ },
+ {
+ "epoch": 0.92,
+ "learning_rate": 1.1859116071629148e-05,
+ "loss": 0.0,
+ "step": 476
+ },
+ {
+ "epoch": 0.92,
+ "learning_rate": 1.1828483841751597e-05,
+ "loss": 0.0,
+ "step": 477
+ },
+ {
+ "epoch": 0.92,
+ "learning_rate": 1.1797833850790527e-05,
+ "loss": 0.0,
+ "step": 478
+ },
+ {
+ "epoch": 0.92,
+ "learning_rate": 1.1767166396466404e-05,
+ "loss": 0.0,
+ "step": 479
+ },
+ {
+ "epoch": 0.92,
+ "learning_rate": 1.1736481776669307e-05,
+ "loss": 0.0,
+ "step": 480
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 1.1705780289456069e-05,
+ "loss": 0.0,
+ "step": 481
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 1.1675062233047365e-05,
+ "loss": 0.0,
+ "step": 482
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 1.1644327905824808e-05,
+ "loss": 0.0,
+ "step": 483
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 1.1613577606328068e-05,
+ "loss": 0.0,
+ "step": 484
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 1.1582811633251949e-05,
+ "loss": 0.0,
+ "step": 485
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 1.1552030285443516e-05,
+ "loss": 0.0,
+ "step": 486
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 1.1521233861899168e-05,
+ "loss": 0.0,
+ "step": 487
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 1.1490422661761744e-05,
+ "loss": 0.0,
+ "step": 488
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 1.1459596984317622e-05,
+ "loss": 0.0,
+ "step": 489
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 1.1428757128993801e-05,
+ "loss": 0.0,
+ "step": 490
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 1.1397903395354996e-05,
+ "loss": 0.0,
+ "step": 491
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 1.1367036083100735e-05,
+ "loss": 0.0,
+ "step": 492
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 1.1336155492062439e-05,
+ "loss": 0.0,
+ "step": 493
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 1.130526192220052e-05,
+ "loss": 0.0,
+ "step": 494
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 1.1274355673601446e-05,
+ "loss": 0.0,
+ "step": 495
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 1.1243437046474854e-05,
+ "loss": 0.0,
+ "step": 496
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 1.1212506341150615e-05,
+ "loss": 0.0,
+ "step": 497
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 1.118156385807593e-05,
+ "loss": 0.0,
+ "step": 498
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 1.1150609897812387e-05,
+ "loss": 0.0,
+ "step": 499
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 1.1119644761033079e-05,
+ "loss": 0.0,
+ "step": 500
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 1.1088668748519646e-05,
+ "loss": 0.0,
+ "step": 501
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 1.105768216115938e-05,
+ "loss": 0.0,
+ "step": 502
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 1.1026685299942286e-05,
+ "loss": 0.0,
+ "step": 503
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 1.0995678465958168e-05,
+ "loss": 0.0,
+ "step": 504
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 1.0964661960393703e-05,
+ "loss": 0.0,
+ "step": 505
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 1.0933636084529507e-05,
+ "loss": 0.0,
+ "step": 506
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 1.0902601139737225e-05,
+ "loss": 0.0,
+ "step": 507
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 1.0871557427476585e-05,
+ "loss": 0.0,
+ "step": 508
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 1.0840505249292477e-05,
+ "loss": 0.0,
+ "step": 509
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 1.0809444906812034e-05,
+ "loss": 0.0,
+ "step": 510
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 1.0778376701741688e-05,
+ "loss": 0.0,
+ "step": 511
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 1.0747300935864245e-05,
+ "loss": 0.0,
+ "step": 512
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 1.0716217911035952e-05,
+ "loss": 0.0,
+ "step": 513
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 1.0685127929183567e-05,
+ "loss": 0.0,
+ "step": 514
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 1.0654031292301432e-05,
+ "loss": 0.0,
+ "step": 515
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 1.0622928302448523e-05,
+ "loss": 0.0,
+ "step": 516
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 1.0591819261745528e-05,
+ "loss": 0.0,
+ "step": 517
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 1.0560704472371919e-05,
+ "loss": 0.0,
+ "step": 518
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 1.0529584236562995e-05,
+ "loss": 0.0,
+ "step": 519
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 1.0498458856606972e-05,
+ "loss": 0.0,
+ "step": 520
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 1.0467328634842024e-05,
+ "loss": 0.0,
+ "step": 521
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 1.0436193873653362e-05,
+ "loss": 0.0,
+ "step": 522
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 1.0405054875470287e-05,
+ "loss": 0.0,
+ "step": 523
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 1.037391194276326e-05,
+ "loss": 0.0,
+ "step": 524
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 1.0342765378040953e-05,
+ "loss": 0.0,
+ "step": 525
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 1.0311615483847333e-05,
+ "loss": 0.0,
+ "step": 526
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 1.028046256275869e-05,
+ "loss": 0.0,
+ "step": 527
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 1.0249306917380731e-05,
+ "loss": 0.0,
+ "step": 528
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 1.0218148850345613e-05,
+ "loss": 0.0,
+ "step": 529
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 1.0186988664309023e-05,
+ "loss": 0.0,
+ "step": 530
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 1.0155826661947232e-05,
+ "loss": 0.0,
+ "step": 531
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 1.0124663145954152e-05,
+ "loss": 0.0,
+ "step": 532
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 1.0093498419038394e-05,
+ "loss": 0.0,
+ "step": 533
+ },
+ {
+ "epoch": 1.03,
+ "learning_rate": 1.0062332783920337e-05,
+ "loss": 0.0,
+ "step": 534
+ },
+ {
+ "epoch": 1.03,
+ "learning_rate": 1.0031166543329179e-05,
+ "loss": 0.0,
+ "step": 535
+ },
+ {
+ "epoch": 1.03,
+ "learning_rate": 1e-05,
+ "loss": 0.0,
+ "step": 536
+ },
+ {
+ "epoch": 1.03,
+ "learning_rate": 9.968833456670824e-06,
+ "loss": 0.0,
+ "step": 537
+ },
+ {
+ "epoch": 1.03,
+ "learning_rate": 9.937667216079665e-06,
+ "loss": 0.0,
+ "step": 538
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 9.90650158096161e-06,
+ "loss": 0.0,
+ "step": 539
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 9.87533685404585e-06,
+ "loss": 0.0,
+ "step": 540
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 9.844173338052771e-06,
+ "loss": 0.0,
+ "step": 541
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 9.81301133569098e-06,
+ "loss": 0.0,
+ "step": 542
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 9.78185114965439e-06,
+ "loss": 0.0,
+ "step": 543
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 9.750693082619274e-06,
+ "loss": 0.0,
+ "step": 544
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 9.719537437241311e-06,
+ "loss": 0.0,
+ "step": 545
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 9.68838451615267e-06,
+ "loss": 0.0,
+ "step": 546
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 9.65723462195905e-06,
+ "loss": 0.0,
+ "step": 547
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 9.626088057236745e-06,
+ "loss": 0.0,
+ "step": 548
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 9.594945124529718e-06,
+ "loss": 0.0,
+ "step": 549
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 9.563806126346643e-06,
+ "loss": 0.0,
+ "step": 550
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 9.532671365157979e-06,
+ "loss": 0.0,
+ "step": 551
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 9.501541143393028e-06,
+ "loss": 0.0,
+ "step": 552
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 9.470415763437003e-06,
+ "loss": 0.0,
+ "step": 553
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 9.439295527628083e-06,
+ "loss": 0.0,
+ "step": 554
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 9.408180738254472e-06,
+ "loss": 0.0,
+ "step": 555
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 9.377071697551479e-06,
+ "loss": 0.0,
+ "step": 556
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 9.34596870769857e-06,
+ "loss": 0.0,
+ "step": 557
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 9.314872070816435e-06,
+ "loss": 0.0,
+ "step": 558
+ },
+ {
+ "epoch": 1.07,
+ "learning_rate": 9.28378208896405e-06,
+ "loss": 0.0,
+ "step": 559
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 9.252699064135759e-06,
+ "loss": 0.0,
+ "step": 560
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 9.221623298258315e-06,
+ "loss": 0.0,
+ "step": 561
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 9.190555093187968e-06,
+ "loss": 0.0,
+ "step": 562
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 9.159494750707527e-06,
+ "loss": 0.0,
+ "step": 563
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 9.128442572523418e-06,
+ "loss": 0.0,
+ "step": 564
+ },
+ {
+ "epoch": 1.09,
+ "learning_rate": 9.097398860262777e-06,
+ "loss": 0.0,
+ "step": 565
+ },
+ {
+ "epoch": 1.09,
+ "learning_rate": 9.066363915470494e-06,
+ "loss": 0.0,
+ "step": 566
+ },
+ {
+ "epoch": 1.09,
+ "learning_rate": 9.0353380396063e-06,
+ "loss": 0.0,
+ "step": 567
+ },
+ {
+ "epoch": 1.09,
+ "learning_rate": 9.004321534041836e-06,
+ "loss": 0.0,
+ "step": 568
+ },
+ {
+ "epoch": 1.09,
+ "learning_rate": 8.973314700057717e-06,
+ "loss": 0.0,
+ "step": 569
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 8.942317838840625e-06,
+ "loss": 0.0,
+ "step": 570
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 8.911331251480357e-06,
+ "loss": 0.0,
+ "step": 571
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 8.880355238966923e-06,
+ "loss": 0.0,
+ "step": 572
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 8.849390102187615e-06,
+ "loss": 0.0,
+ "step": 573
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 8.818436141924072e-06,
+ "loss": 0.0,
+ "step": 574
+ },
+ {
+ "epoch": 1.11,
+ "learning_rate": 8.787493658849387e-06,
+ "loss": 0.0,
+ "step": 575
+ },
+ {
+ "epoch": 1.11,
+ "learning_rate": 8.756562953525151e-06,
+ "loss": 0.0,
+ "step": 576
+ },
+ {
+ "epoch": 1.11,
+ "learning_rate": 8.72564432639856e-06,
+ "loss": 0.0,
+ "step": 577
+ },
+ {
+ "epoch": 1.11,
+ "learning_rate": 8.694738077799487e-06,
+ "loss": 0.0,
+ "step": 578
+ },
+ {
+ "epoch": 1.11,
+ "learning_rate": 8.663844507937563e-06,
+ "loss": 0.0,
+ "step": 579
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 8.632963916899268e-06,
+ "loss": 0.0,
+ "step": 580
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 8.602096604645009e-06,
+ "loss": 0.0,
+ "step": 581
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 8.571242871006202e-06,
+ "loss": 0.0,
+ "step": 582
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 8.540403015682382e-06,
+ "loss": 0.0,
+ "step": 583
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 8.509577338238255e-06,
+ "loss": 0.0,
+ "step": 584
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 8.478766138100834e-06,
+ "loss": 0.0,
+ "step": 585
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 8.447969714556484e-06,
+ "loss": 0.0,
+ "step": 586
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 8.417188366748051e-06,
+ "loss": 0.0,
+ "step": 587
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 8.386422393671934e-06,
+ "loss": 0.0,
+ "step": 588
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 8.355672094175192e-06,
+ "loss": 0.0,
+ "step": 589
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 8.324937766952638e-06,
+ "loss": 0.0,
+ "step": 590
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 8.294219710543931e-06,
+ "loss": 0.0,
+ "step": 591
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 8.263518223330698e-06,
+ "loss": 0.0,
+ "step": 592
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 8.232833603533601e-06,
+ "loss": 0.0,
+ "step": 593
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 8.202166149209475e-06,
+ "loss": 0.0,
+ "step": 594
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 8.171516158248406e-06,
+ "loss": 0.0,
+ "step": 595
+ },
+ {
+ "epoch": 1.15,
+ "learning_rate": 8.140883928370855e-06,
+ "loss": 0.0,
+ "step": 596
+ },
+ {
+ "epoch": 1.15,
+ "learning_rate": 8.11026975712476e-06,
+ "loss": 0.0,
+ "step": 597
+ },
+ {
+ "epoch": 1.15,
+ "learning_rate": 8.079673941882639e-06,
+ "loss": 0.0,
+ "step": 598
+ },
+ {
+ "epoch": 1.15,
+ "learning_rate": 8.04909677983872e-06,
+ "loss": 0.0,
+ "step": 599
+ },
+ {
+ "epoch": 1.15,
+ "learning_rate": 8.018538568006027e-06,
+ "loss": 0.0,
+ "step": 600
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 7.987999603213518e-06,
+ "loss": 0.0,
+ "step": 601
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 7.957480182103198e-06,
+ "loss": 0.0,
+ "step": 602
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 7.926980601127225e-06,
+ "loss": 0.0,
+ "step": 603
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 7.896501156545044e-06,
+ "loss": 0.0,
+ "step": 604
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 7.866042144420502e-06,
+ "loss": 0.0,
+ "step": 605
+ },
+ {
+ "epoch": 1.17,
+ "learning_rate": 7.835603860618973e-06,
+ "loss": 0.0,
+ "step": 606
+ },
+ {
+ "epoch": 1.17,
+ "learning_rate": 7.805186600804489e-06,
+ "loss": 0.0,
+ "step": 607
+ },
+ {
+ "epoch": 1.17,
+ "learning_rate": 7.774790660436857e-06,
+ "loss": 0.0,
+ "step": 608
+ },
+ {
+ "epoch": 1.17,
+ "learning_rate": 7.744416334768809e-06,
+ "loss": 0.0,
+ "step": 609
+ },
+ {
+ "epoch": 1.17,
+ "learning_rate": 7.714063918843106e-06,
+ "loss": 0.0,
+ "step": 610
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 7.6837337074897e-06,
+ "loss": 0.0,
+ "step": 611
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 7.653425995322852e-06,
+ "loss": 0.0,
+ "step": 612
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 7.623141076738271e-06,
+ "loss": 0.0,
+ "step": 613
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 7.592879245910273e-06,
+ "loss": 0.0,
+ "step": 614
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 7.562640796788893e-06,
+ "loss": 0.0,
+ "step": 615
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 7.532426023097063e-06,
+ "loss": 0.0,
+ "step": 616
+ },
+ {
+ "epoch": 1.19,
+ "learning_rate": 7.50223521832773e-06,
+ "loss": 0.0,
+ "step": 617
+ },
+ {
+ "epoch": 1.19,
+ "learning_rate": 7.472068675741024e-06,
+ "loss": 0.0,
+ "step": 618
+ },
+ {
+ "epoch": 1.19,
+ "learning_rate": 7.4419266883614e-06,
+ "loss": 0.0,
+ "step": 619
+ },
+ {
+ "epoch": 1.19,
+ "learning_rate": 7.411809548974792e-06,
+ "loss": 0.0,
+ "step": 620
+ },
+ {
+ "epoch": 1.19,
+ "learning_rate": 7.38171755012578e-06,
+ "loss": 0.0,
+ "step": 621
+ },
+ {
+ "epoch": 1.2,
+ "learning_rate": 7.3516509841147276e-06,
+ "loss": 0.0,
+ "step": 622
+ },
+ {
+ "epoch": 1.2,
+ "learning_rate": 7.321610142994971e-06,
+ "loss": 0.0,
+ "step": 623
+ },
+ {
+ "epoch": 1.2,
+ "learning_rate": 7.291595318569951e-06,
+ "loss": 0.0,
+ "step": 624
+ },
+ {
+ "epoch": 1.2,
+ "learning_rate": 7.2616068023904e-06,
+ "loss": 0.0,
+ "step": 625
+ },
+ {
+ "epoch": 1.2,
+ "learning_rate": 7.2316448857515076e-06,
+ "loss": 0.0,
+ "step": 626
+ },
+ {
+ "epoch": 1.21,
+ "learning_rate": 7.201709859690081e-06,
+ "loss": 0.0,
+ "step": 627
+ },
+ {
+ "epoch": 1.21,
+ "learning_rate": 7.171802014981726e-06,
+ "loss": 0.0,
+ "step": 628
+ },
+ {
+ "epoch": 1.21,
+ "learning_rate": 7.141921642138025e-06,
+ "loss": 0.0,
+ "step": 629
+ },
+ {
+ "epoch": 1.21,
+ "learning_rate": 7.112069031403704e-06,
+ "loss": 0.0,
+ "step": 630
+ },
+ {
+ "epoch": 1.21,
+ "learning_rate": 7.082244472753823e-06,
+ "loss": 0.0,
+ "step": 631
+ },
+ {
+ "epoch": 1.22,
+ "learning_rate": 7.052448255890958e-06,
+ "loss": 0.0,
+ "step": 632
+ },
+ {
+ "epoch": 1.22,
+ "learning_rate": 7.022680670242387e-06,
+ "loss": 0.0,
+ "step": 633
+ },
+ {
+ "epoch": 1.22,
+ "learning_rate": 6.992942004957271e-06,
+ "loss": 0.0,
+ "step": 634
+ },
+ {
+ "epoch": 1.22,
+ "learning_rate": 6.963232548903853e-06,
+ "loss": 0.0,
+ "step": 635
+ },
+ {
+ "epoch": 1.22,
+ "learning_rate": 6.933552590666659e-06,
+ "loss": 0.0,
+ "step": 636
+ },
+ {
+ "epoch": 1.23,
+ "learning_rate": 6.903902418543671e-06,
+ "loss": 0.0,
+ "step": 637
+ },
+ {
+ "epoch": 1.23,
+ "learning_rate": 6.874282320543557e-06,
+ "loss": 0.0,
+ "step": 638
+ },
+ {
+ "epoch": 1.23,
+ "learning_rate": 6.844692584382848e-06,
+ "loss": 0.0,
+ "step": 639
+ },
+ {
+ "epoch": 1.23,
+ "learning_rate": 6.815133497483157e-06,
+ "loss": 0.0,
+ "step": 640
+ },
+ {
+ "epoch": 1.23,
+ "learning_rate": 6.785605346968387e-06,
+ "loss": 0.0,
+ "step": 641
+ },
+ {
+ "epoch": 1.23,
+ "learning_rate": 6.7561084196619306e-06,
+ "loss": 0.0,
+ "step": 642
+ },
+ {
+ "epoch": 1.24,
+ "learning_rate": 6.7266430020839e-06,
+ "loss": 0.0,
+ "step": 643
+ },
+ {
+ "epoch": 1.24,
+ "learning_rate": 6.697209380448333e-06,
+ "loss": 0.0,
+ "step": 644
+ },
+ {
+ "epoch": 1.24,
+ "learning_rate": 6.66780784066041e-06,
+ "loss": 0.0,
+ "step": 645
+ },
+ {
+ "epoch": 1.24,
+ "learning_rate": 6.638438668313695e-06,
+ "loss": 0.0,
+ "step": 646
+ },
+ {
+ "epoch": 1.24,
+ "learning_rate": 6.609102148687333e-06,
+ "loss": 0.0,
+ "step": 647
+ },
+ {
+ "epoch": 1.25,
+ "learning_rate": 6.579798566743314e-06,
+ "loss": 0.0,
+ "step": 648
+ },
+ {
+ "epoch": 1.25,
+ "learning_rate": 6.550528207123667e-06,
+ "loss": 0.0,
+ "step": 649
+ },
+ {
+ "epoch": 1.25,
+ "learning_rate": 6.521291354147727e-06,
+ "loss": 0.0,
+ "step": 650
+ },
+ {
+ "epoch": 1.25,
+ "learning_rate": 6.492088291809355e-06,
+ "loss": 0.0,
+ "step": 651
+ },
+ {
+ "epoch": 1.25,
+ "learning_rate": 6.462919303774186e-06,
+ "loss": 0.0,
+ "step": 652
+ },
+ {
+ "epoch": 1.26,
+ "learning_rate": 6.43378467337687e-06,
+ "loss": 0.0,
+ "step": 653
+ },
+ {
+ "epoch": 1.26,
+ "learning_rate": 6.404684683618325e-06,
+ "loss": 0.0,
+ "step": 654
+ },
+ {
+ "epoch": 1.26,
+ "learning_rate": 6.375619617162985e-06,
+ "loss": 0.0,
+ "step": 655
+ },
+ {
+ "epoch": 1.26,
+ "learning_rate": 6.34658975633605e-06,
+ "loss": 0.0,
+ "step": 656
+ },
+ {
+ "epoch": 1.26,
+ "learning_rate": 6.317595383120756e-06,
+ "loss": 0.0,
+ "step": 657
+ },
+ {
+ "epoch": 1.27,
+ "learning_rate": 6.288636779155621e-06,
+ "loss": 0.0,
+ "step": 658
+ },
+ {
+ "epoch": 1.27,
+ "learning_rate": 6.2597142257317185e-06,
+ "loss": 0.0,
+ "step": 659
+ },
+ {
+ "epoch": 1.27,
+ "learning_rate": 6.230828003789949e-06,
+ "loss": 0.0,
+ "step": 660
+ },
+ {
+ "epoch": 1.27,
+ "learning_rate": 6.201978393918291e-06,
+ "loss": 0.0,
+ "step": 661
+ },
+ {
+ "epoch": 1.27,
+ "learning_rate": 6.173165676349103e-06,
+ "loss": 0.0,
+ "step": 662
+ },
+ {
+ "epoch": 1.27,
+ "learning_rate": 6.144390130956384e-06,
+ "loss": 0.0,
+ "step": 663
+ },
+ {
+ "epoch": 1.28,
+ "learning_rate": 6.115652037253054e-06,
+ "loss": 0.0,
+ "step": 664
+ },
+ {
+ "epoch": 1.28,
+ "learning_rate": 6.086951674388252e-06,
+ "loss": 0.0,
+ "step": 665
+ },
+ {
+ "epoch": 1.28,
+ "learning_rate": 6.058289321144608e-06,
+ "loss": 0.0,
+ "step": 666
+ },
+ {
+ "epoch": 1.28,
+ "learning_rate": 6.02966525593555e-06,
+ "loss": 0.0,
+ "step": 667
+ },
+ {
+ "epoch": 1.28,
+ "learning_rate": 6.001079756802592e-06,
+ "loss": 0.0,
+ "step": 668
+ },
+ {
+ "epoch": 1.29,
+ "learning_rate": 5.97253310141263e-06,
+ "loss": 0.0,
+ "step": 669
+ },
+ {
+ "epoch": 1.29,
+ "learning_rate": 5.944025567055251e-06,
+ "loss": 0.0,
+ "step": 670
+ },
+ {
+ "epoch": 1.29,
+ "learning_rate": 5.91555743064004e-06,
+ "loss": 0.0,
+ "step": 671
+ },
+ {
+ "epoch": 1.29,
+ "learning_rate": 5.887128968693887e-06,
+ "loss": 0.0,
+ "step": 672
+ },
+ {
+ "epoch": 1.29,
+ "learning_rate": 5.858740457358298e-06,
+ "loss": 0.0,
+ "step": 673
+ },
+ {
+ "epoch": 1.3,
+ "learning_rate": 5.830392172386723e-06,
+ "loss": 0.0,
+ "step": 674
+ },
+ {
+ "epoch": 1.3,
+ "learning_rate": 5.802084389141862e-06,
+ "loss": 0.0,
+ "step": 675
+ },
+ {
+ "epoch": 1.3,
+ "learning_rate": 5.773817382593008e-06,
+ "loss": 0.0,
+ "step": 676
+ },
+ {
+ "epoch": 1.3,
+ "learning_rate": 5.745591427313365e-06,
+ "loss": 0.0,
+ "step": 677
+ },
+ {
+ "epoch": 1.3,
+ "learning_rate": 5.717406797477371e-06,
+ "loss": 0.0,
+ "step": 678
+ },
+ {
+ "epoch": 1.31,
+ "learning_rate": 5.689263766858072e-06,
+ "loss": 0.0,
+ "step": 679
+ },
+ {
+ "epoch": 1.31,
+ "learning_rate": 5.66116260882442e-06,
+ "loss": 0.0,
+ "step": 680
+ },
+ {
+ "epoch": 1.31,
+ "learning_rate": 5.633103596338631e-06,
+ "loss": 0.0,
+ "step": 681
+ },
+ {
+ "epoch": 1.31,
+ "learning_rate": 5.6050870019535496e-06,
+ "loss": 0.0,
+ "step": 682
+ },
+ {
+ "epoch": 1.31,
+ "learning_rate": 5.5771130978099896e-06,
+ "loss": 0.0,
+ "step": 683
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 5.549182155634076e-06,
+ "loss": 0.0,
+ "step": 684
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 5.521294446734637e-06,
+ "loss": 0.0,
+ "step": 685
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 5.493450242000546e-06,
+ "loss": 0.0,
+ "step": 686
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 5.465649811898098e-06,
+ "loss": 0.0,
+ "step": 687
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 5.43789342646837e-06,
+ "loss": 0.0,
+ "step": 688
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 5.410181355324622e-06,
+ "loss": 0.0,
+ "step": 689
+ },
+ {
+ "epoch": 1.33,
+ "learning_rate": 5.382513867649663e-06,
+ "loss": 0.0,
+ "step": 690
+ },
+ {
+ "epoch": 1.33,
+ "learning_rate": 5.354891232193225e-06,
+ "loss": 0.0,
+ "step": 691
+ },
+ {
+ "epoch": 1.33,
+ "learning_rate": 5.32731371726938e-06,
+ "loss": 0.0,
+ "step": 692
+ },
+ {
+ "epoch": 1.33,
+ "learning_rate": 5.299781590753916e-06,
+ "loss": 0.0,
+ "step": 693
+ },
+ {
+ "epoch": 1.33,
+ "learning_rate": 5.2722951200817315e-06,
+ "loss": 0.0,
+ "step": 694
+ },
+ {
+ "epoch": 1.34,
+ "learning_rate": 5.244854572244249e-06,
+ "loss": 0.0,
+ "step": 695
+ },
+ {
+ "epoch": 1.34,
+ "learning_rate": 5.217460213786822e-06,
+ "loss": 0.0,
+ "step": 696
+ },
+ {
+ "epoch": 1.34,
+ "learning_rate": 5.190112310806126e-06,
+ "loss": 0.0,
+ "step": 697
+ },
+ {
+ "epoch": 1.34,
+ "learning_rate": 5.1628111289476025e-06,
+ "loss": 0.0,
+ "step": 698
+ },
+ {
+ "epoch": 1.34,
+ "learning_rate": 5.135556933402862e-06,
+ "loss": 0.0,
+ "step": 699
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 5.108349988907111e-06,
+ "loss": 0.0,
+ "step": 700
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 5.081190559736569e-06,
+ "loss": 0.0,
+ "step": 701
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 5.054078909705926e-06,
+ "loss": 0.0,
+ "step": 702
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 5.027015302165768e-06,
+ "loss": 0.0,
+ "step": 703
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 5.000000000000003e-06,
+ "loss": 0.0,
+ "step": 704
+ },
+ {
+ "epoch": 1.36,
+ "learning_rate": 4.973033265623333e-06,
+ "loss": 0.0,
+ "step": 705
+ },
+ {
+ "epoch": 1.36,
+ "learning_rate": 4.946115360978696e-06,
+ "loss": 0.0,
+ "step": 706
+ },
+ {
+ "epoch": 1.36,
+ "learning_rate": 4.919246547534709e-06,
+ "loss": 0.0,
+ "step": 707
+ },
+ {
+ "epoch": 1.36,
+ "learning_rate": 4.892427086283147e-06,
+ "loss": 0.0,
+ "step": 708
+ },
+ {
+ "epoch": 1.36,
+ "learning_rate": 4.865657237736397e-06,
+ "loss": 0.0,
+ "step": 709
+ },
+ {
+ "epoch": 1.37,
+ "learning_rate": 4.838937261924933e-06,
+ "loss": 0.0,
+ "step": 710
+ },
+ {
+ "epoch": 1.37,
+ "learning_rate": 4.812267418394784e-06,
+ "loss": 0.0,
+ "step": 711
+ },
+ {
+ "epoch": 1.37,
+ "learning_rate": 4.78564796620502e-06,
+ "loss": 0.0,
+ "step": 712
+ },
+ {
+ "epoch": 1.37,
+ "learning_rate": 4.759079163925223e-06,
+ "loss": 0.0,
+ "step": 713
+ },
+ {
+ "epoch": 1.37,
+ "learning_rate": 4.732561269632992e-06,
+ "loss": 0.0,
+ "step": 714
+ },
+ {
+ "epoch": 1.38,
+ "learning_rate": 4.706094540911429e-06,
+ "loss": 0.0,
+ "step": 715
+ },
+ {
+ "epoch": 1.38,
+ "learning_rate": 4.679679234846636e-06,
+ "loss": 0.0,
+ "step": 716
+ },
+ {
+ "epoch": 1.38,
+ "learning_rate": 4.6533156080252076e-06,
+ "loss": 0.0,
+ "step": 717
+ },
+ {
+ "epoch": 1.38,
+ "learning_rate": 4.627003916531761e-06,
+ "loss": 0.0,
+ "step": 718
+ },
+ {
+ "epoch": 1.38,
+ "learning_rate": 4.600744415946438e-06,
+ "loss": 0.0,
+ "step": 719
+ },
+ {
+ "epoch": 1.38,
+ "learning_rate": 4.5745373613424075e-06,
+ "loss": 0.0,
+ "step": 720
+ },
+ {
+ "epoch": 1.39,
+ "learning_rate": 4.548383007283412e-06,
+ "loss": 0.0,
+ "step": 721
+ },
+ {
+ "epoch": 1.39,
+ "learning_rate": 4.522281607821288e-06,
+ "loss": 0.0,
+ "step": 722
+ },
+ {
+ "epoch": 1.39,
+ "learning_rate": 4.496233416493481e-06,
+ "loss": 0.0,
+ "step": 723
+ },
+ {
+ "epoch": 1.39,
+ "learning_rate": 4.470238686320606e-06,
+ "loss": 0.0,
+ "step": 724
+ },
+ {
+ "epoch": 1.39,
+ "learning_rate": 4.444297669803981e-06,
+ "loss": 0.0,
+ "step": 725
+ },
+ {
+ "epoch": 1.4,
+ "learning_rate": 4.418410618923163e-06,
+ "loss": 0.0,
+ "step": 726
+ },
+ {
+ "epoch": 1.4,
+ "learning_rate": 4.392577785133521e-06,
+ "loss": 0.0,
+ "step": 727
+ },
+ {
+ "epoch": 1.4,
+ "learning_rate": 4.3667994193637794e-06,
+ "loss": 0.0,
+ "step": 728
+ },
+ {
+ "epoch": 1.4,
+ "learning_rate": 4.3410757720135886e-06,
+ "loss": 0.0,
+ "step": 729
+ },
+ {
+ "epoch": 1.4,
+ "learning_rate": 4.315407092951078e-06,
+ "loss": 0.0,
+ "step": 730
+ },
+ {
+ "epoch": 1.41,
+ "learning_rate": 4.289793631510449e-06,
+ "loss": 0.0,
+ "step": 731
+ },
+ {
+ "epoch": 1.41,
+ "learning_rate": 4.264235636489542e-06,
+ "loss": 0.0,
+ "step": 732
+ },
+ {
+ "epoch": 1.41,
+ "learning_rate": 4.238733356147414e-06,
+ "loss": 0.0,
+ "step": 733
+ },
+ {
+ "epoch": 1.41,
+ "learning_rate": 4.213287038201943e-06,
+ "loss": 0.0,
+ "step": 734
+ },
+ {
+ "epoch": 1.41,
+ "learning_rate": 4.187896929827414e-06,
+ "loss": 0.0,
+ "step": 735
+ },
+ {
+ "epoch": 1.42,
+ "learning_rate": 4.162563277652104e-06,
+ "loss": 0.0,
+ "step": 736
+ },
+ {
+ "epoch": 1.42,
+ "learning_rate": 4.137286327755913e-06,
+ "loss": 0.0,
+ "step": 737
+ },
+ {
+ "epoch": 1.42,
+ "learning_rate": 4.112066325667954e-06,
+ "loss": 0.0,
+ "step": 738
+ },
+ {
+ "epoch": 1.42,
+ "learning_rate": 4.086903516364179e-06,
+ "loss": 0.0,
+ "step": 739
+ },
+ {
+ "epoch": 1.42,
+ "learning_rate": 4.061798144264986e-06,
+ "loss": 0.0,
+ "step": 740
+ },
+ {
+ "epoch": 1.43,
+ "learning_rate": 4.03675045323286e-06,
+ "loss": 0.0,
+ "step": 741
+ },
+ {
+ "epoch": 1.43,
+ "learning_rate": 4.0117606865699975e-06,
+ "loss": 0.0,
+ "step": 742
+ },
+ {
+ "epoch": 1.43,
+ "learning_rate": 3.986829087015941e-06,
+ "loss": 0.0,
+ "step": 743
+ },
+ {
+ "epoch": 1.43,
+ "learning_rate": 3.961955896745224e-06,
+ "loss": 0.0,
+ "step": 744
+ },
+ {
+ "epoch": 1.43,
+ "learning_rate": 3.937141357365023e-06,
+ "loss": 0.0,
+ "step": 745
+ },
+ {
+ "epoch": 1.43,
+ "learning_rate": 3.912385709912794e-06,
+ "loss": 0.0,
+ "step": 746
+ },
+ {
+ "epoch": 1.44,
+ "learning_rate": 3.887689194853951e-06,
+ "loss": 0.0,
+ "step": 747
+ },
+ {
+ "epoch": 1.44,
+ "learning_rate": 3.8630520520795275e-06,
+ "loss": 0.0,
+ "step": 748
+ },
+ {
+ "epoch": 1.44,
+ "learning_rate": 3.838474520903825e-06,
+ "loss": 0.0,
+ "step": 749
+ },
+ {
+ "epoch": 1.44,
+ "learning_rate": 3.8139568400621184e-06,
+ "loss": 0.0,
+ "step": 750
+ },
+ {
+ "epoch": 1.44,
+ "learning_rate": 3.7894992477083226e-06,
+ "loss": 0.0,
+ "step": 751
+ },
+ {
+ "epoch": 1.45,
+ "learning_rate": 3.7651019814126656e-06,
+ "loss": 0.0,
+ "step": 752
+ },
+ {
+ "epoch": 1.45,
+ "learning_rate": 3.7407652781594094e-06,
+ "loss": 0.0,
+ "step": 753
+ },
+ {
+ "epoch": 1.45,
+ "learning_rate": 3.7164893743445274e-06,
+ "loss": 0.0,
+ "step": 754
+ },
+ {
+ "epoch": 1.45,
+ "learning_rate": 3.692274505773419e-06,
+ "loss": 0.0,
+ "step": 755
+ },
+ {
+ "epoch": 1.45,
+ "learning_rate": 3.6681209076586035e-06,
+ "loss": 0.0,
+ "step": 756
+ },
+ {
+ "epoch": 1.46,
+ "learning_rate": 3.644028814617454e-06,
+ "loss": 0.0,
+ "step": 757
+ },
+ {
+ "epoch": 1.46,
+ "learning_rate": 3.619998460669916e-06,
+ "loss": 0.0,
+ "step": 758
+ },
+ {
+ "epoch": 1.46,
+ "learning_rate": 3.5960300792362124e-06,
+ "loss": 0.0,
+ "step": 759
+ },
+ {
+ "epoch": 1.46,
+ "learning_rate": 3.5721239031346067e-06,
+ "loss": 0.0,
+ "step": 760
+ },
+ {
+ "epoch": 1.46,
+ "learning_rate": 3.5482801645791266e-06,
+ "loss": 0.0,
+ "step": 761
+ },
+ {
+ "epoch": 1.47,
+ "learning_rate": 3.5244990951772972e-06,
+ "loss": 0.0,
+ "step": 762
+ },
+ {
+ "epoch": 1.47,
+ "learning_rate": 3.5007809259279146e-06,
+ "loss": 0.0,
+ "step": 763
+ },
+ {
+ "epoch": 1.47,
+ "learning_rate": 3.4771258872187917e-06,
+ "loss": 0.0,
+ "step": 764
+ },
+ {
+ "epoch": 1.47,
+ "learning_rate": 3.453534208824507e-06,
+ "loss": 0.0,
+ "step": 765
+ },
+ {
+ "epoch": 1.47,
+ "learning_rate": 3.4300061199041967e-06,
+ "loss": 0.0,
+ "step": 766
+ },
+ {
+ "epoch": 1.48,
+ "learning_rate": 3.4065418489993118e-06,
+ "loss": 0.0,
+ "step": 767
+ },
+ {
+ "epoch": 1.48,
+ "learning_rate": 3.3831416240314085e-06,
+ "loss": 0.0,
+ "step": 768
+ },
+ {
+ "epoch": 1.48,
+ "learning_rate": 3.3598056722999185e-06,
+ "loss": 0.0,
+ "step": 769
+ },
+ {
+ "epoch": 1.48,
+ "learning_rate": 3.3365342204799613e-06,
+ "loss": 0.0,
+ "step": 770
+ },
+ {
+ "epoch": 1.48,
+ "learning_rate": 3.3133274946201333e-06,
+ "loss": 0.0,
+ "step": 771
+ },
+ {
+ "epoch": 1.48,
+ "learning_rate": 3.290185720140301e-06,
+ "loss": 0.0,
+ "step": 772
+ },
+ {
+ "epoch": 1.49,
+ "learning_rate": 3.267109121829428e-06,
+ "loss": 0.0,
+ "step": 773
+ },
+ {
+ "epoch": 1.49,
+ "learning_rate": 3.2440979238433977e-06,
+ "loss": 0.0,
+ "step": 774
+ },
+ {
+ "epoch": 1.49,
+ "learning_rate": 3.221152349702802e-06,
+ "loss": 0.0,
+ "step": 775
+ },
+ {
+ "epoch": 1.49,
+ "learning_rate": 3.1982726222908046e-06,
+ "loss": 0.0,
+ "step": 776
+ },
+ {
+ "epoch": 1.49,
+ "learning_rate": 3.1754589638509647e-06,
+ "loss": 0.0,
+ "step": 777
+ },
+ {
+ "epoch": 1.5,
+ "learning_rate": 3.152711595985065e-06,
+ "loss": 0.0,
+ "step": 778
+ },
+ {
+ "epoch": 1.5,
+ "learning_rate": 3.1300307396509833e-06,
+ "loss": 0.0,
+ "step": 779
+ },
+ {
+ "epoch": 1.5,
+ "learning_rate": 3.10741661516053e-06,
+ "loss": 0.0,
+ "step": 780
+ },
+ {
+ "epoch": 1.5,
+ "learning_rate": 3.0848694421773075e-06,
+ "loss": 0.0,
+ "step": 781
+ },
+ {
+ "epoch": 1.5,
+ "learning_rate": 3.0623894397145837e-06,
+ "loss": 0.0,
+ "step": 782
+ },
+ {
+ "epoch": 1.51,
+ "learning_rate": 3.0399768261331664e-06,
+ "loss": 0.0,
+ "step": 783
+ },
+ {
+ "epoch": 1.51,
+ "learning_rate": 3.017631819139273e-06,
+ "loss": 0.0,
+ "step": 784
+ },
+ {
+ "epoch": 1.51,
+ "learning_rate": 2.995354635782417e-06,
+ "loss": 0.0,
+ "step": 785
+ },
+ {
+ "epoch": 1.51,
+ "learning_rate": 2.9731454924533086e-06,
+ "loss": 0.0,
+ "step": 786
+ },
+ {
+ "epoch": 1.51,
+ "learning_rate": 2.95100460488175e-06,
+ "loss": 0.0,
+ "step": 787
+ },
+ {
+ "epoch": 1.52,
+ "learning_rate": 2.9289321881345257e-06,
+ "loss": 0.0,
+ "step": 788
+ },
+ {
+ "epoch": 1.52,
+ "learning_rate": 2.906928456613336e-06,
+ "loss": 0.0,
+ "step": 789
+ },
+ {
+ "epoch": 1.52,
+ "learning_rate": 2.884993624052701e-06,
+ "loss": 0.0,
+ "step": 790
+ },
+ {
+ "epoch": 1.52,
+ "learning_rate": 2.8631279035178796e-06,
+ "loss": 0.0,
+ "step": 791
+ },
+ {
+ "epoch": 1.52,
+ "learning_rate": 2.8413315074028157e-06,
+ "loss": 0.0,
+ "step": 792
+ },
+ {
+ "epoch": 1.52,
+ "learning_rate": 2.819604647428067e-06,
+ "loss": 0.0,
+ "step": 793
+ },
+ {
+ "epoch": 1.53,
+ "learning_rate": 2.7979475346387363e-06,
+ "loss": 0.0,
+ "step": 794
+ },
+ {
+ "epoch": 1.53,
+ "learning_rate": 2.776360379402445e-06,
+ "loss": 0.0,
+ "step": 795
+ },
+ {
+ "epoch": 1.53,
+ "learning_rate": 2.7548433914072736e-06,
+ "loss": 0.0,
+ "step": 796
+ },
+ {
+ "epoch": 1.53,
+ "learning_rate": 2.7333967796597317e-06,
+ "loss": 0.0,
+ "step": 797
+ },
+ {
+ "epoch": 1.53,
+ "learning_rate": 2.712020752482717e-06,
+ "loss": 0.0,
+ "step": 798
+ },
+ {
+ "epoch": 1.54,
+ "learning_rate": 2.690715517513508e-06,
+ "loss": 0.0,
+ "step": 799
+ },
+ {
+ "epoch": 1.54,
+ "learning_rate": 2.669481281701739e-06,
+ "loss": 0.0,
+ "step": 800
+ },
+ {
+ "epoch": 1.54,
+ "learning_rate": 2.6483182513073835e-06,
+ "loss": 0.0,
+ "step": 801
+ },
+ {
+ "epoch": 1.54,
+ "learning_rate": 2.6272266318987606e-06,
+ "loss": 0.0,
+ "step": 802
+ },
+ {
+ "epoch": 1.54,
+ "learning_rate": 2.6062066283505404e-06,
+ "loss": 0.0,
+ "step": 803
+ },
+ {
+ "epoch": 1.55,
+ "learning_rate": 2.5852584448417327e-06,
+ "loss": 0.0,
+ "step": 804
+ },
+ {
+ "epoch": 1.55,
+ "learning_rate": 2.564382284853738e-06,
+ "loss": 0.0,
+ "step": 805
+ },
+ {
+ "epoch": 1.55,
+ "learning_rate": 2.5435783511683444e-06,
+ "loss": 0.0,
+ "step": 806
+ },
+ {
+ "epoch": 1.55,
+ "learning_rate": 2.5228468458657585e-06,
+ "loss": 0.0,
+ "step": 807
+ },
+ {
+ "epoch": 1.55,
+ "learning_rate": 2.502187970322657e-06,
+ "loss": 0.0,
+ "step": 808
+ },
+ {
+ "epoch": 1.56,
+ "learning_rate": 2.4816019252102274e-06,
+ "loss": 0.0,
+ "step": 809
+ },
+ {
+ "epoch": 1.56,
+ "learning_rate": 2.461088910492202e-06,
+ "loss": 0.0,
+ "step": 810
+ },
+ {
+ "epoch": 1.56,
+ "learning_rate": 2.440649125422937e-06,
+ "loss": 0.0,
+ "step": 811
+ },
+ {
+ "epoch": 1.56,
+ "learning_rate": 2.420282768545469e-06,
+ "loss": 0.0,
+ "step": 812
+ },
+ {
+ "epoch": 1.56,
+ "learning_rate": 2.3999900376895844e-06,
+ "loss": 0.0,
+ "step": 813
+ },
+ {
+ "epoch": 1.57,
+ "learning_rate": 2.3797711299698924e-06,
+ "loss": 0.0,
+ "step": 814
+ },
+ {
+ "epoch": 1.57,
+ "learning_rate": 2.3596262417839256e-06,
+ "loss": 0.0,
+ "step": 815
+ },
+ {
+ "epoch": 1.57,
+ "learning_rate": 2.339555568810221e-06,
+ "loss": 0.0,
+ "step": 816
+ },
+ {
+ "epoch": 1.57,
+ "learning_rate": 2.319559306006417e-06,
+ "loss": 0.0,
+ "step": 817
+ },
+ {
+ "epoch": 1.57,
+ "learning_rate": 2.2996376476073724e-06,
+ "loss": 0.0,
+ "step": 818
+ },
+ {
+ "epoch": 1.57,
+ "learning_rate": 2.2797907871232673e-06,
+ "loss": 0.0,
+ "step": 819
+ },
+ {
+ "epoch": 1.58,
+ "learning_rate": 2.2600189173377263e-06,
+ "loss": 0.0,
+ "step": 820
+ },
+ {
+ "epoch": 1.58,
+ "learning_rate": 2.240322230305951e-06,
+ "loss": 0.0,
+ "step": 821
+ },
+ {
+ "epoch": 1.58,
+ "learning_rate": 2.2207009173528528e-06,
+ "loss": 0.0,
+ "step": 822
+ },
+ {
+ "epoch": 1.58,
+ "learning_rate": 2.201155169071184e-06,
+ "loss": 0.0,
+ "step": 823
+ },
+ {
+ "epoch": 1.58,
+ "learning_rate": 2.1816851753197023e-06,
+ "loss": 0.0,
+ "step": 824
+ },
+ {
+ "epoch": 1.59,
+ "learning_rate": 2.1622911252213195e-06,
+ "loss": 0.0,
+ "step": 825
+ },
+ {
+ "epoch": 1.59,
+ "learning_rate": 2.1429732071612653e-06,
+ "loss": 0.0,
+ "step": 826
+ },
+ {
+ "epoch": 1.59,
+ "learning_rate": 2.1237316087852465e-06,
+ "loss": 0.0,
+ "step": 827
+ },
+ {
+ "epoch": 1.59,
+ "learning_rate": 2.104566516997647e-06,
+ "loss": 0.0,
+ "step": 828
+ },
+ {
+ "epoch": 1.59,
+ "learning_rate": 2.0854781179596937e-06,
+ "loss": 0.0,
+ "step": 829
+ },
+ {
+ "epoch": 1.6,
+ "learning_rate": 2.0664665970876496e-06,
+ "loss": 0.0,
+ "step": 830
+ },
+ {
+ "epoch": 1.6,
+ "learning_rate": 2.0475321390510262e-06,
+ "loss": 0.0,
+ "step": 831
+ },
+ {
+ "epoch": 1.6,
+ "learning_rate": 2.0286749277707783e-06,
+ "loss": 0.0,
+ "step": 832
+ },
+ {
+ "epoch": 1.6,
+ "learning_rate": 2.009895146417512e-06,
+ "loss": 0.0,
+ "step": 833
+ },
+ {
+ "epoch": 1.6,
+ "learning_rate": 1.9911929774097216e-06,
+ "loss": 0.0,
+ "step": 834
+ },
+ {
+ "epoch": 1.61,
+ "learning_rate": 1.9725686024120093e-06,
+ "loss": 0.0,
+ "step": 835
+ },
+ {
+ "epoch": 1.61,
+ "learning_rate": 1.9540222023333165e-06,
+ "loss": 0.0,
+ "step": 836
+ },
+ {
+ "epoch": 1.61,
+ "learning_rate": 1.9355539573251737e-06,
+ "loss": 0.0,
+ "step": 837
+ },
+ {
+ "epoch": 1.61,
+ "learning_rate": 1.9171640467799478e-06,
+ "loss": 0.0,
+ "step": 838
+ },
+ {
+ "epoch": 1.61,
+ "learning_rate": 1.8988526493290948e-06,
+ "loss": 0.0,
+ "step": 839
+ },
+ {
+ "epoch": 1.62,
+ "learning_rate": 1.880619942841435e-06,
+ "loss": 0.0,
+ "step": 840
+ },
+ {
+ "epoch": 1.62,
+ "learning_rate": 1.8624661044214154e-06,
+ "loss": 0.0,
+ "step": 841
+ },
+ {
+ "epoch": 1.62,
+ "learning_rate": 1.8443913104073984e-06,
+ "loss": 0.0,
+ "step": 842
+ },
+ {
+ "epoch": 1.62,
+ "learning_rate": 1.826395736369937e-06,
+ "loss": 0.0,
+ "step": 843
+ },
+ {
+ "epoch": 1.62,
+ "learning_rate": 1.808479557110081e-06,
+ "loss": 0.0,
+ "step": 844
+ },
+ {
+ "epoch": 1.62,
+ "learning_rate": 1.7906429466576768e-06,
+ "loss": 0.0,
+ "step": 845
+ },
+ {
+ "epoch": 1.63,
+ "learning_rate": 1.7728860782696666e-06,
+ "loss": 0.0,
+ "step": 846
+ },
+ {
+ "epoch": 1.63,
+ "learning_rate": 1.7552091244284197e-06,
+ "loss": 0.0,
+ "step": 847
+ },
+ {
+ "epoch": 1.63,
+ "learning_rate": 1.7376122568400533e-06,
+ "loss": 0.0,
+ "step": 848
+ },
+ {
+ "epoch": 1.63,
+ "learning_rate": 1.7200956464327512e-06,
+ "loss": 0.0,
+ "step": 849
+ },
+ {
+ "epoch": 1.63,
+ "learning_rate": 1.7026594633551252e-06,
+ "loss": 0.0,
+ "step": 850
+ },
+ {
+ "epoch": 1.64,
+ "learning_rate": 1.6853038769745466e-06,
+ "loss": 0.0,
+ "step": 851
+ },
+ {
+ "epoch": 1.64,
+ "learning_rate": 1.6680290558755119e-06,
+ "loss": 0.0,
+ "step": 852
+ },
+ {
+ "epoch": 1.64,
+ "learning_rate": 1.6508351678579882e-06,
+ "loss": 0.0,
+ "step": 853
+ },
+ {
+ "epoch": 1.64,
+ "learning_rate": 1.6337223799358025e-06,
+ "loss": 0.0,
+ "step": 854
+ },
+ {
+ "epoch": 1.64,
+ "learning_rate": 1.6166908583350138e-06,
+ "loss": 0.0,
+ "step": 855
+ },
+ {
+ "epoch": 1.65,
+ "learning_rate": 1.599740768492286e-06,
+ "loss": 0.0,
+ "step": 856
+ },
+ {
+ "epoch": 1.65,
+ "learning_rate": 1.582872275053301e-06,
+ "loss": 0.0,
+ "step": 857
+ },
+ {
+ "epoch": 1.65,
+ "learning_rate": 1.566085541871145e-06,
+ "loss": 0.0,
+ "step": 858
+ },
+ {
+ "epoch": 1.65,
+ "learning_rate": 1.5493807320047183e-06,
+ "loss": 0.0,
+ "step": 859
+ },
+ {
+ "epoch": 1.65,
+ "learning_rate": 1.5327580077171589e-06,
+ "loss": 0.0,
+ "step": 860
+ },
+ {
+ "epoch": 1.66,
+ "learning_rate": 1.5162175304742633e-06,
+ "loss": 0.0,
+ "step": 861
+ },
+ {
+ "epoch": 1.66,
+ "learning_rate": 1.499759460942909e-06,
+ "loss": 0.0,
+ "step": 862
+ },
+ {
+ "epoch": 1.66,
+ "learning_rate": 1.4833839589895072e-06,
+ "loss": 0.0,
+ "step": 863
+ },
+ {
+ "epoch": 1.66,
+ "learning_rate": 1.467091183678444e-06,
+ "loss": 0.0,
+ "step": 864
+ },
+ {
+ "epoch": 1.66,
+ "learning_rate": 1.4508812932705364e-06,
+ "loss": 0.0,
+ "step": 865
+ },
+ {
+ "epoch": 1.67,
+ "learning_rate": 1.4347544452214869e-06,
+ "loss": 0.0,
+ "step": 866
+ },
+ {
+ "epoch": 1.67,
+ "learning_rate": 1.4187107961803704e-06,
+ "loss": 0.0,
+ "step": 867
+ },
+ {
+ "epoch": 1.67,
+ "learning_rate": 1.4027505019880972e-06,
+ "loss": 0.0,
+ "step": 868
+ },
+ {
+ "epoch": 1.67,
+ "learning_rate": 1.3868737176759105e-06,
+ "loss": 0.0,
+ "step": 869
+ },
+ {
+ "epoch": 1.67,
+ "learning_rate": 1.3710805974638697e-06,
+ "loss": 0.0,
+ "step": 870
+ },
+ {
+ "epoch": 1.68,
+ "learning_rate": 1.3553712947593655e-06,
+ "loss": 0.0,
+ "step": 871
+ },
+ {
+ "epoch": 1.68,
+ "learning_rate": 1.339745962155613e-06,
+ "loss": 0.0,
+ "step": 872
+ },
+ {
+ "epoch": 1.68,
+ "learning_rate": 1.324204751430186e-06,
+ "loss": 0.0,
+ "step": 873
+ },
+ {
+ "epoch": 1.68,
+ "learning_rate": 1.3087478135435361e-06,
+ "loss": 0.0,
+ "step": 874
+ },
+ {
+ "epoch": 1.68,
+ "learning_rate": 1.293375298637518e-06,
+ "loss": 0.0,
+ "step": 875
+ },
+ {
+ "epoch": 1.68,
+ "learning_rate": 1.278087356033947e-06,
+ "loss": 0.0,
+ "step": 876
+ },
+ {
+ "epoch": 1.69,
+ "learning_rate": 1.2628841342331389e-06,
+ "loss": 0.0,
+ "step": 877
+ },
+ {
+ "epoch": 1.69,
+ "learning_rate": 1.2477657809124632e-06,
+ "loss": 0.0,
+ "step": 878
+ },
+ {
+ "epoch": 1.69,
+ "learning_rate": 1.2327324429249232e-06,
+ "loss": 0.0,
+ "step": 879
+ },
+ {
+ "epoch": 1.69,
+ "learning_rate": 1.2177842662977136e-06,
+ "loss": 0.0,
+ "step": 880
+ },
+ {
+ "epoch": 1.69,
+ "learning_rate": 1.2029213962308172e-06,
+ "loss": 0.0,
+ "step": 881
+ },
+ {
+ "epoch": 1.7,
+ "learning_rate": 1.188143977095576e-06,
+ "loss": 0.0,
+ "step": 882
+ },
+ {
+ "epoch": 1.7,
+ "learning_rate": 1.1734521524333087e-06,
+ "loss": 0.0,
+ "step": 883
+ },
+ {
+ "epoch": 1.7,
+ "learning_rate": 1.1588460649539036e-06,
+ "loss": 0.0,
+ "step": 884
+ },
+ {
+ "epoch": 1.7,
+ "learning_rate": 1.1443258565344329e-06,
+ "loss": 0.0,
+ "step": 885
+ },
+ {
+ "epoch": 1.7,
+ "learning_rate": 1.129891668217783e-06,
+ "loss": 0.0,
+ "step": 886
+ },
+ {
+ "epoch": 1.71,
+ "learning_rate": 1.1155436402112785e-06,
+ "loss": 0.0,
+ "step": 887
+ },
+ {
+ "epoch": 1.71,
+ "learning_rate": 1.1012819118853147e-06,
+ "loss": 0.0,
+ "step": 888
+ },
+ {
+ "epoch": 1.71,
+ "learning_rate": 1.0871066217720173e-06,
+ "loss": 0.0,
+ "step": 889
+ },
+ {
+ "epoch": 1.71,
+ "learning_rate": 1.073017907563887e-06,
+ "loss": 0.0,
+ "step": 890
+ },
+ {
+ "epoch": 1.71,
+ "learning_rate": 1.0590159061124606e-06,
+ "loss": 0.0,
+ "step": 891
+ },
+ {
+ "epoch": 1.72,
+ "learning_rate": 1.0451007534269908e-06,
+ "loss": 0.0,
+ "step": 892
+ },
+ {
+ "epoch": 1.72,
+ "learning_rate": 1.0312725846731174e-06,
+ "loss": 0.0,
+ "step": 893
+ },
+ {
+ "epoch": 1.72,
+ "learning_rate": 1.0175315341715598e-06,
+ "loss": 0.0,
+ "step": 894
+ },
+ {
+ "epoch": 1.72,
+ "learning_rate": 1.003877735396801e-06,
+ "loss": 0.0,
+ "step": 895
+ },
+ {
+ "epoch": 1.72,
+ "learning_rate": 9.903113209758098e-07,
+ "loss": 0.0,
+ "step": 896
+ },
+ {
+ "epoch": 1.73,
+ "learning_rate": 9.768324226867353e-07,
+ "loss": 0.0,
+ "step": 897
+ },
+ {
+ "epoch": 1.73,
+ "learning_rate": 9.634411714576353e-07,
+ "loss": 0.0,
+ "step": 898
+ },
+ {
+ "epoch": 1.73,
+ "learning_rate": 9.501376973651999e-07,
+ "loss": 0.0,
+ "step": 899
+ },
+ {
+ "epoch": 1.73,
+ "learning_rate": 9.369221296335007e-07,
+ "loss": 0.0,
+ "step": 900
+ }
+ ],
+ "logging_steps": 1.0,
+ "max_steps": 1040,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 2,
+ "save_steps": 100,
+ "total_flos": 4.4903984085231206e+17,
+ "train_batch_size": 16,
+ "trial_name": null,
+ "trial_params": null
+}
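Note on the trainer state above: it ends at step 900 of max_steps 1040 over 2 epochs, so the logged epoch field is roughly step / 520 (e.g. 900 / 520 ≈ 1.73). The logged learning rates are consistent with a linear-warmup plus cosine-decay schedule; the sketch below reproduces them under the assumption of a 2e-5 peak learning rate and 32 warmup steps, both inferred from the logged values rather than read from any file in this diff.

import math

def logged_cosine_lr(step, base_lr=2e-5, warmup_steps=32, max_steps=1040):
    # Hypothetical reconstruction of the schedule behind the log above; base_lr and
    # warmup_steps are inferred from the logged values, not taken from any config here.
    if step < warmup_steps:
        return base_lr * step / warmup_steps
    progress = (step - warmup_steps) / (max_steps - warmup_steps)
    return base_lr * 0.5 * (1.0 + math.cos(math.pi * progress))

print(logged_cosine_lr(704))  # ~5.00e-06, the value logged at step 704
print(logged_cosine_lr(788))  # ~2.93e-06, the value logged at step 788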
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/training_args.bin b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..2ca4d892afdd453b26723a9aa94e432cb44cc953
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:63da3a2d0bf1dde543b68e123590fcd7c42f45ec7eb68e86c6eadd439321f902
+size 6264
diff --git a/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/zero_to_fp32.py b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/zero_to_fp32.py
new file mode 100644
index 0000000000000000000000000000000000000000..c98caae31534368be22b67fc4ae906836c992a8d
--- /dev/null
+++ b/llava-v1.6-mistral-7b-unk-vqa-v1.0/checkpoint-900/zero_to_fp32.py
@@ -0,0 +1,587 @@
+#!/usr/bin/env python
+
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+# This script extracts fp32 consolidated weights from ZeRO 1, 2 and 3 DeepSpeed checkpoints. It gets
+# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+# application.
+#
+# example: python zero_to_fp32.py . pytorch_model.bin
+
+import argparse
+import torch
+import glob
+import math
+import os
+import re
+from collections import OrderedDict
+from dataclasses import dataclass
+
+# While this script doesn't use deepspeed to recover data, the checkpoints are pickled with
+# DeepSpeed data structures, so the package has to be available in the current Python environment.
+from deepspeed.utils import logger
+from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
+@dataclass
+class zero_model_state:
+ buffers: dict
+ param_shapes: dict
+ shared_params: list
+ ds_version: int
+ frozen_param_shapes: dict
+ frozen_param_fragments: dict
+
+
+debug = 0
+
+# load to cpu
+device = torch.device('cpu')
+
+
+def atoi(text):
+ return int(text) if text.isdigit() else text
+
+
+def natural_keys(text):
+ '''
+ alist.sort(key=natural_keys) sorts in human order
+ http://nedbatchelder.com/blog/200712/human_sorting.html
+ (See Toothy's implementation in the comments)
+ '''
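+ # e.g. natural_keys("rank_10") -> ['rank_', 10, ''], so sorting with this key puts
+ # "rank_2" before "rank_10" (a plain string sort would put "rank_10" first).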
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
+
+
+def get_model_state_file(checkpoint_dir, zero_stage):
+ if not os.path.isdir(checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
+
+ # there should be only one file
+ if zero_stage <= 2:
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
+ elif zero_stage == 3:
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
+
+ if not os.path.exists(file):
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
+
+ return file
+
+
+def get_checkpoint_files(checkpoint_dir, glob_pattern):
+ # XXX: need to test that this simple glob rule works for multi-node setup too
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
+
+ if len(ckpt_files) == 0:
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
+
+ return ckpt_files
+
+
+def get_optim_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
+
+
+def get_model_state_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
+
+
+def parse_model_states(files):
+ zero_model_states = []
+ for file in files:
+ state_dict = torch.load(file, map_location=device)
+
+ if BUFFER_NAMES not in state_dict:
+ raise ValueError(f"{file} is not a model state checkpoint")
+ buffer_names = state_dict[BUFFER_NAMES]
+ if debug:
+ print("Found buffers:", buffer_names)
+
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
+ param_shapes = state_dict[PARAM_SHAPES]
+
+ # collect parameters that are included in param_shapes
+ param_names = []
+ for s in param_shapes:
+ for name in s.keys():
+ param_names.append(name)
+
+ # update with frozen parameters
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
+ if frozen_param_shapes is not None:
+ if debug:
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
+ param_names += list(frozen_param_shapes.keys())
+
+ # handle shared params
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
+
+ ds_version = state_dict.get(DS_VERSION, None)
+
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
+
+ z_model_state = zero_model_state(buffers=buffers,
+ param_shapes=param_shapes,
+ shared_params=shared_params,
+ ds_version=ds_version,
+ frozen_param_shapes=frozen_param_shapes,
+ frozen_param_fragments=frozen_param_fragments)
+ zero_model_states.append(z_model_state)
+
+ return zero_model_states
+
+
+def parse_optim_states(files, ds_checkpoint_dir):
+
+ total_files = len(files)
+ state_dicts = []
+ for f in files:
+ state_dict = torch.load(f, map_location=device)
+ # immediately discard the two potentially huge optimizer states, as we only care about the fp32 master weights,
+ # and also handle the case where it was already removed by another helper script
+ state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
+ state_dicts.append(state_dict)
+
+ if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
+
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
+ # use the max of the partition_count to get the dp world_size.
+
+ if isinstance(world_size, list):
+ world_size = max(world_size)
+
+ if world_size != total_files:
+ raise ValueError(
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
+ )
+
+ # the groups are named differently in each stage
+ if zero_stage <= 2:
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
+ elif zero_stage == 3:
+ fp32_groups_key = FP32_FLAT_GROUPS
+ else:
+ raise ValueError(f"unknown zero stage {zero_stage}")
+
+ if zero_stage <= 2:
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
+ elif zero_stage == 3:
+ # if there is more than one param group, there will be multiple flattened tensors - one
+ # flattened tensor per group - for simplicity merge them into a single tensor
+ #
+ # XXX: could make the script more memory efficient for when there are multiple groups - it
+ # will require matching the sub-lists of param_shapes for each param group flattened tensor
+
+ fp32_flat_groups = [
+ torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
+ ]
+
+ return zero_stage, world_size, fp32_flat_groups
+
+
+def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir):
+ """
+ Returns fp32 state_dict reconstructed from ds checkpoint
+
+ Args:
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
+
+ """
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
+
+ optim_files = get_optim_files(ds_checkpoint_dir)
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
+
+ model_files = get_model_state_files(ds_checkpoint_dir)
+
+ zero_model_states = parse_model_states(model_files)
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
+
+ if zero_stage <= 2:
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states)
+ elif zero_stage == 3:
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states)
+
+
+def _zero2_merge_frozen_params(state_dict, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
+
+ if debug:
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ state_dict[name] = frozen_param_fragments[name]
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+
+ # Reconstruction protocol:
+ #
+ # XXX: document this
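+ #
+ # What the code below does: for each optimizer param group, concatenate the per-rank
+ # fp32 partitions into one flat vector, then walk that group's param_shapes in order,
+ # slicing shape.numel() elements off the flat vector for each param and reshaping the
+ # slice back to the param's original shape.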
+
+ if debug:
+ for i in range(world_size):
+ for j in range(len(fp32_flat_groups[0])):
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
+
+ # XXX: memory usage doubles here (zero2)
+ num_param_groups = len(fp32_flat_groups[0])
+ merged_single_partition_of_fp32_groups = []
+ for i in range(num_param_groups):
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
+ avail_numel = sum(
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
+
+ if debug:
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
+ # not asserting if there is a mismatch due to possible padding
+ print(f"Have {avail_numel} numels to process.")
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # out-of-core computing solution
+ total_numel = 0
+ total_params = 0
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
+ offset = 0
+ avail_numel = full_single_fp32_vector.numel()
+ for name, shape in shapes.items():
+
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+ total_params += 1
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
+ offset += unpartitioned_numel
+
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
+ # live optimizer object, so we are checking that the numbers are within the right range
+ align_to = 2 * world_size
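+ # e.g. with world_size=4, align_to=8 and zero2_align(13) below returns 16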
+
+ def zero2_align(x):
+ return align_to * math.ceil(x / align_to)
+
+ if debug:
+ print(f"original offset={offset}, avail_numel={avail_numel}")
+
+ offset = zero2_align(offset)
+ avail_numel = zero2_align(avail_numel)
+
+ if debug:
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
+
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def zero3_partitioned_param_info(unpartitioned_numel, world_size):
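+ # Each rank stores math.ceil(unpartitioned_numel / world_size) elements; padding_numel
+ # elements of padding bring the total up to a multiple of world_size. Worked example:
+ # unpartitioned_numel=10, world_size=4 -> partitioned_numel=3, padding_numel=2
+ # (4 ranks * 3 elements = 12 = 10 real + 2 padding).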
+ remainder = unpartitioned_numel % world_size
+ padding_numel = (world_size - remainder) if remainder else 0
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
+ return partitioned_numel, padding_numel
+
+
+def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ if debug:
+ for i in range(world_size):
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
+
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+ avail_numel = fp32_flat_groups[0].numel() * world_size
+ # Reconstruction protocol: for ZeRO-3 we need to zip the partitions together at the boundary of
+ # each param, re-consolidating every param while dealing with any padding.
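+ #
+ # Concretely, in the loop below: every param occupies partitioned_numel contiguous elements
+ # at the same offset in each rank's flat group, so we narrow each rank's tensor at that
+ # offset, concatenate across ranks, trim off the padding with a second narrow, and reshape
+ # to the param's original shape.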
+
+ # merge list of dicts, preserving order
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
+
+ if debug:
+ for i in range(world_size):
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
+
+ wanted_params = len(param_shapes)
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
+ # not asserting if there is a mismatch due to possible padding
+ avail_numel = fp32_flat_groups[0].numel() * world_size
+ print(f"Trainable params: Have {avail_numel} numels to process.")
+ print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # out-of-core computing solution
+ offset = 0
+ total_numel = 0
+ total_params = 0
+ for name, shape in param_shapes.items():
+
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+ total_params += 1
+
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ # XXX: memory usage doubles here
+ state_dict[name] = torch.cat(
+ tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
+ 0).narrow(0, 0, unpartitioned_numel).view(shape)
+ offset += partitioned_numel
+
+ offset *= world_size
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
+
+ _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+ ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+ via a model hub.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder
+ - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will attempt to load the tag from the 'latest' file, e.g., ``global_step14``
+
+ Returns:
+ - pytorch ``state_dict``
+
+ Note: this approach may not work if your application doesn't have sufficient free CPU memory; in
+ that case you may need to use the offline approach via the ``zero_to_fp32.py`` script that is
+ saved with the checkpoint.
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+ # do the training and checkpoint saving
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
+ model = model.cpu() # move to cpu
+ model.load_state_dict(state_dict)
+ # submit to model hub or save the model to share with others
+
+ In this example the ``model`` will no longer be usable in the deepspeed context of the same
+ application, i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+ """
+ if tag is None:
+ latest_path = os.path.join(checkpoint_dir, 'latest')
+ if os.path.isfile(latest_path):
+ with open(latest_path, 'r') as fd:
+ tag = fd.read().strip()
+ else:
+ raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+ ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+ if not os.path.isdir(ds_checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+ return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir)
+
+
+def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+ loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
+ - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+ """
+
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+ print(f"Saving fp32 state dict to {output_file}")
+ torch.save(state_dict, output_file)
+
+
+def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+ """
+ 1. Put the provided model on the CPU
+ 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+ 3. Load it into the provided model
+
+ Args:
+ - ``model``: the model object to update
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+ Returns:
+ - ``model``: modified model
+
+ Make sure you have plenty of CPU memory available before you call this function. If you don't
+ have enough, use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
+ conveniently placed for you in the checkpoint folder.
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+ model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+ # submit to model hub or save the model to share with others
+
+ Note that once this has been run, the ``model`` will no longer be usable in the deepspeed context
+ of the same application, i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ """
+ logger.info(f"Extracting fp32 weights")
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+ logger.info(f"Overwriting model with fp32 weights")
+ model = model.cpu()
+ model.load_state_dict(state_dict, strict=False)
+
+ return model
+
+
+if __name__ == "__main__":
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("checkpoint_dir",
+ type=str,
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+ parser.add_argument(
+ "output_file",
+ type=str,
+ help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
+ parser.add_argument("-t",
+ "--tag",
+ type=str,
+ default=None,
+ help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+ args = parser.parse_args()
+
+ debug = args.debug
+
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file, tag=args.tag)
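+# Hypothetical invocation for the checkpoint-900 directory this copy ships in (the output
+# filename is illustrative, not taken from this diff):
+#   python zero_to_fp32.py ./checkpoint-900 ./checkpoint-900/pytorch_model_fp32.bin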