{
  "_name_or_path": "Phi-4-multimodal-instruct",
  "architectures": [
    "Phi4MMForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "audio_processor": {
    "config": {
      "activation": "swish",
      "activation_checkpointing": {
        "interval": 1,
        "module": "transformer",
        "offload": false
      },
      "attention_dim": 1024,
      "attention_heads": 16,
      "batch_norm": false,
      "bias_in_glu": true,
      "causal": true,
      "chunk_size": -1,
      "cnn_layer_norm": true,
      "conv_activation": "swish",
      "conv_glu_type": "swish",
      "depthwise_multiplier": 1,
      "depthwise_seperable_out_channel": 1024,
      "dropout_rate": 0.0,
      "encoder_embedding_config": {
        "input_size": 80
      },
      "ext_pw_kernel_size": 1,
      "ext_pw_out_channel": 1024,
      "input_layer": "nemo_conv",
      "input_size": 80,
      "kernel_size": 3,
      "left_chunk": 18,
      "linear_units": 1536,
      "nemo_conv_settings": {
        "conv_channels": 1024
      },
      "num_blocks": 24,
      "relative_attention_bias_args": {
        "t5_bias_max_distance": 500,
        "type": "t5"
      },
      "time_reduction": 8
    },
    "name": "cascades"
  },
  "auto_map": {
    "AutoConfig": "configuration_phi4mm.Phi4MMConfig",
    "AutoModelForCausalLM": "modeling_phi4mm.Phi4MMForCausalLM",
    "AutoTokenizer": "Xenova/gpt-4o"
  },
  "bos_token_id": 199999,
  "embd_layer": {
    "audio_embd_layer": {
      "compression_rate": 8,
      "downsample_rate": 1,
      "embedding_cls": "audio",
      "enable_gradient_checkpointing": true,
      "projection_cls": "mlp",
      "use_conv_downsample": false,
      "use_qformer": false
    },
    "embedding_cls": "image_audio",
    "image_embd_layer": {
      "crop_size": 448,
      "embedding_cls": "tune_image",
      "enable_gradient_checkpointing": true,
      "hd_transform_order": "sub_glb",
      "image_token_compression_cls": "avg_pool_2d",
      "projection_cls": "mlp",
      "use_hd_transform": true,
      "with_learnable_separator": true
    }
  },
  "embd_pdrop": 0.0,
  "eos_token_id": 199999,
  "full_attn_mod": 1,
  "hidden_act": "silu",
  "hidden_size": 3072,
  "initializer_range": 0.02,
  "intermediate_size": 8192,
  "interpolate_factor": 1,
  "lm_head_bias": false,
  "vision_lora": {
    "dp": 0.0,
    "layer": "layers.*((self_attn\\.(qkv_proj|o_proj))|(mlp\\.(gate_up|down)_proj))",
    "lora_alpha": 512,
    "r": 256
  },
  "speech_lora": {
    "dp": 0.01,
    "layer": "((layers.*self_attn\\.(qkv|o)_proj)|(layers.*mlp\\.(gate_up|down)_proj))",
    "lora_alpha": 640,
    "r": 320
  },
  "max_position_embeddings": 131072,
  "mlp_bias": false,
  "model_type": "phi4mm",
  "num_attention_heads": 24,
  "num_hidden_layers": 32,
  "num_key_value_heads": 8,
  "original_max_position_embeddings": 4096,
  "pad_token_id": 199999,
  "partial_rotary_factor": 0.75,
  "resid_pdrop": 0.0,
  "rms_norm_eps": 1e-05,
  "rope_scaling": {
    "long_factor": [
      1,
      1.118320672,
      1.250641126,
      1.398617824,
      1.564103225,
      1.74916897,
      1.956131817,
      2.187582649,
      2.446418898,
      2.735880826,
      3.059592084,
      3.421605075,
      3.826451687,
      4.279200023,
      4.785517845,
      5.351743533,
      5.984965424,
      6.693110555,
      7.485043894,
      8.370679318,
      9.36110372,
      10.4687158,
      11.70738129,
      13.09260651,
      14.64173252,
      16.37415215,
      18.31155283,
      20.47818807,
      22.90118105,
      25.61086418,
      28.64115884,
      32.03,
      32.1,
      32.13,
      32.23,
      32.6,
      32.61,
      32.64,
      32.66,
      32.7,
      32.71,
      32.93,
      32.97,
      33.28,
      33.49,
      33.5,
      44.16,
      47.77
    ],
    "short_factor": [
      1.0,
      1.0,
      1.0,
      1.0,
      1.0,
      1.0,
      1.0,
      1.0,
      1.0,
      1.0,
      1.0,
      1.0,
      1.0,
      1.0,
      1.0,
      1.0,
      1.0,
      1.0,
      1.0,
      1.0,
      1.0,
      1.0,
      1.0,
      1.0,
      1.0,
      1.0,
      1.0,
      1.0,
      1.0,
      1.0,
      1.0,
      1.0,
      1.0,
      1.0,
      1.0,
      1.0,
      1.0,
      1.0,
      1.0,
      1.0,
      1.0,
      1.0,
      1.0,
      1.0,
      1.0,
      1.0,
      1.0,
      1.0
    ],
    "type": "longrope"
  },
  "rope_theta": 10000.0,
  "sliding_window": 262144,
  "tie_word_embeddings": true,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.46.1",
  "use_cache": true,
  "vocab_size": 200064,
  "_attn_implementation": "flash_attention_2"
}
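
For reference, a minimal loading sketch that follows from this config: the auto_map entries point at custom classes shipped with the repo (configuration_phi4mm.Phi4MMConfig, modeling_phi4mm.Phi4MMForCausalLM), so trust_remote_code=True is required, and torch_dtype / attn_implementation mirror the "torch_dtype" and "_attn_implementation" fields above. The repo id is an assumption, not stated in this file.

import torch
from transformers import AutoModelForCausalLM, AutoProcessor

# Assumed repo id; the config only records "_name_or_path": "Phi-4-multimodal-instruct".
model_id = "microsoft/Phi-4-multimodal-instruct"

model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,               # matches "torch_dtype": "bfloat16"
    attn_implementation="flash_attention_2",  # matches "_attn_implementation"
    trust_remote_code=True,                   # needed for the auto_map custom classes
)
processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)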