{
  "_attn_implementation_autoset": true,
  "answer_space": [
    "circle",
    "green",
    "red",
    "gray",
    "yes",
    "teal",
    "black",
    "rectangle",
    "yellow",
    "triangle",
    "brown",
    "blue",
    "no"
  ],
  "base_model_name": "blip2",
  "classification_input_dim": 768,
  "dataset_name": "easy-vqa",
  "image_text_hidden_size": 256,
  "image_token_index": null,
  "initializer_factor": 1.0,
  "initializer_range": 0.02,
  "interm_dim": 1024,
  "model_type": "blip-2",
  "multi_class_classifier": true,
  "num_query_tokens": 32,
  "qformer_config": {
    "model_type": "blip_2_qformer"
  },
  "text_config": {
    "model_type": "opt"
  },
  "transformers_version": "4.46.1",
  "use_decoder_only_language_model": true,
  "vision_config": {
    "model_type": "blip_2_vision_model"
  }
}
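
A minimal sketch of loading this configuration with the transformers library, assuming the file above is saved as `config.json` in a local model directory (the directory path below is hypothetical). Since `"model_type"` is `"blip-2"`, the file maps to `Blip2Config`; custom keys such as `answer_space` and `classification_input_dim` are not part of the stock class, but `PretrainedConfig` keeps unrecognized keys as plain attributes on the loaded object:

```python
# Sketch only: "path/to/model_dir" is a placeholder for wherever
# config.json lives; it is not a real checkpoint name.
from transformers import Blip2Config

config = Blip2Config.from_pretrained("path/to/model_dir")

# Standard BLIP-2 fields are parsed into typed attributes and sub-configs.
print(config.num_query_tokens)          # 32
print(config.text_config.model_type)    # "opt"

# Extra keys from the JSON survive as untyped attributes, so a custom
# classification head can read them back at model-construction time.
print(config.answer_space)              # the 13 candidate answers
print(config.classification_input_dim)  # 768
```

The `answer_space` list suggests the model treats easy-vqa as closed-set classification over these 13 answers (consistent with `"multi_class_classifier": true`) rather than free-form generation, so a custom head of input width `classification_input_dim` would sit on top of the base BLIP-2 outputs.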