{ "architectures": [ "Blip2ForImageTextRetrieval" ], "image_text_hidden_size": 256, "initializer_factor": 1.0, "initializer_range": 0.02, "model_type": "blip-2", "num_query_tokens": 32, "qformer_config": { "model_type": "blip_2_qformer", "qformer_text_input": true, "vocab_size": 30523 }, "torch_dtype": "float32", "transformers_version": "4.35.0.dev0", "vision_config": { "model_type": "blip_2_vision_model" } }