nllb-clip-large / config.json
{
  "_name_or_path": "./hf/nllb-clip-large",
  "architectures": [
    "NLLBCLIPModel"
  ],
  "initializer_factor": 1.0,
  "logit_scale_init_value": 2.6592,
  "model_type": "clip",
  "projection_dim": 512,
  "text_config": {
    "encoder_ffn_dim": 8192,
    "encoder_layerdrop": 0,
    "encoder_layers": 24,
    "model_type": "clip_text_model",
    "num_hidden_layers": 24,
    "vocab_size": 256206
  },
  "torch_dtype": "float32",
  "transformers_version": "4.33.1",
  "vision_config": {
    "dropout": 0.0,
    "hidden_act": "gelu",
    "hidden_size": 1280,
    "intermediate_size": 5120,
    "model_type": "clip_vision_model",
    "num_attention_heads": 16,
    "num_hidden_layers": 32,
    "patch_size": 14,
    "projection_dim": 1024
  }
}