{
  "drop_rate": 0.1,
  "head_hidden_scale": 2,
  "hidden_act": "gelu",
  "hidden_size": 768,
  "image_size": 288,
  "input_text_embed_size": 768,
  "input_image_embed_size": 768,
  "is_encoder_decoder": false,
  "layer_norm_eps": 1e-5,
  "link_tower_shared": false,
  "link_tower_type": "add",
  "max_text_len": 50,
  "mlp_ratio": 4,
  "num_attention_heads": 12,
  "num_hidden_layers": 6,
  "stop_gradient": false,
  "tie_word_embeddings": false,
  "vocab_size": 50265,
  "text_config_dict": null,
  "text_config": {
    "architectures": ["BridgeTowerTextModel"],
    "classifier_dropout": null,
    "vocab_size": 50265,
    "hidden_size": 768,
    "num_hidden_layers": 12,
    "num_attention_heads": 12,
    "intermediate_size": 3072,
    "hidden_act": "gelu",
    "hidden_dropout_prob": 0.1,
    "attention_probs_dropout_prob": 0.1,
    "max_position_embeddings": 514,
    "type_vocab_size": 1,
    "initializer_range": 0.02,
    "layer_norm_eps": 1e-05,
    "pad_token_id": 1,
    "bos_token_id": 0,
    "eos_token_id": 2,
    "position_embedding_type": "absolute",
    "use_cache": true
  },
  "vision_config_dict": null,
  "vision_config": {
    "architectures": ["BridgeTowerVisionModel"],
    "embed_dim": 512,
    "input_resolution": 224,
    "width": 768,
    "layers": 12,
    "patch_size": 16,
    "transformer_width": 512,
    "output_resolution": 288,
    "stop_gradient": false,
    "vit_layernorm_shared": true,
    "vit_remove_last": false
  }
}
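
For context, a minimal sketch of how this file could be loaded with the transformers library, assuming it is saved locally as config.json and that the installed transformers version includes BridgeTower support. Note that this snapshot predates the released BridgeTowerConfig schema, so keys it does not define (e.g. stop_gradient, vit_remove_last) are simply attached as extra attributes, and nested fields it does not recognize fall back to defaults.

```python
# A minimal sketch, assuming this file is saved locally as "config.json".
from transformers import BridgeTowerConfig, BridgeTowerModel

# Parse the JSON above into a config object; fields not part of the
# released BridgeTowerConfig schema are kept as plain attributes.
config = BridgeTowerConfig.from_json_file("config.json")
print(config.hidden_size, config.num_hidden_layers)  # 768 6

# Instantiate a randomly initialized BridgeTower model with these sizes.
# Caveat: nested vision_config keys from this snapshot (embed_dim, width,
# layers, ...) are not in the released schema, so the vision tower is
# built from the library defaults rather than these exact values.
model = BridgeTowerModel(config)
```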